drivers/scsi/lpfc/lpfc_sli.c
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
25 #include <linux/delay.h>
26 #include <linux/slab.h>
27
28 #include <scsi/scsi.h>
29 #include <scsi/scsi_cmnd.h>
30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_transport_fc.h>
33 #include <scsi/fc/fc_fs.h>
34 #include <linux/aer.h>
35
36 #include "lpfc_hw4.h"
37 #include "lpfc_hw.h"
38 #include "lpfc_sli.h"
39 #include "lpfc_sli4.h"
40 #include "lpfc_nl.h"
41 #include "lpfc_disc.h"
42 #include "lpfc_scsi.h"
43 #include "lpfc.h"
44 #include "lpfc_crtn.h"
45 #include "lpfc_logmsg.h"
46 #include "lpfc_compat.h"
47 #include "lpfc_debugfs.h"
48 #include "lpfc_vport.h"
49
50 /* There are only four IOCB completion types. */
51 typedef enum _lpfc_iocb_type {
52         LPFC_UNKNOWN_IOCB,
53         LPFC_UNSOL_IOCB,
54         LPFC_SOL_IOCB,
55         LPFC_ABORT_IOCB
56 } lpfc_iocb_type;
57
58
59 /* Provide function prototypes local to this module. */
60 static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
61                                   uint32_t);
62 static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
63                               uint8_t *, uint32_t *);
64 static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
65                                                          struct lpfc_iocbq *);
66 static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
67                                       struct hbq_dmabuf *);
68 static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
69                                     struct lpfc_cqe *);
70
71 static IOCB_t *
72 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
73 {
74         return &iocbq->iocb;
75 }
76
77 /**
78  * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
79  * @q: The Work Queue to operate on.
80  * @wqe: The Work Queue Entry to put on the Work queue.
81  *
82  * This routine will copy the contents of @wqe to the next available entry on
83  * the @q. This function will then ring the Work Queue Doorbell to signal the
84  * HBA to start processing the Work Queue Entry. This function returns 0 if
85  * successful. If no entries are available on @q then this function will return
86  * -ENOMEM.
87  * The caller is expected to hold the hbalock when calling this routine.
88  **/
89 static int
90 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
91 {
92         union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
93         struct lpfc_register doorbell;
94         uint32_t host_index;
95
96         /* If the host has not yet processed the next entry then we are done */
97         if (((q->host_index + 1) % q->entry_count) == q->hba_index)
98                 return -ENOMEM;
99         /* set consumption flag every once in a while */
100         if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
101                 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
102         if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
103                 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
104         lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
105
106         /* Update the host index before invoking device */
107         host_index = q->host_index;
108         q->host_index = ((q->host_index + 1) % q->entry_count);
109
110         /* Ring Doorbell */
111         doorbell.word0 = 0;
112         bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
113         bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
114         bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
115         writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
116         readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */
117
118         return 0;
119 }
120
121 /**
122  * lpfc_sli4_wq_release - Updates internal hba index for WQ
123  * @q: The Work Queue to operate on.
124  * @index: The index to advance the hba index to.
125  *
126  * This routine will update the HBA index of a queue to reflect consumption of
127  * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
128  * an entry the host calls this function to update the queue's internal
129  * pointers. This routine returns the number of entries that were consumed by
130  * the HBA.
131  **/
132 static uint32_t
133 lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
134 {
135         uint32_t released = 0;
136
137         if (q->hba_index == index)
138                 return 0;
139         do {
140                 q->hba_index = ((q->hba_index + 1) % q->entry_count);
141                 released++;
142         } while (q->hba_index != index);
143         return released;
144 }
145
146 /**
147  * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
148  * @q: The Mailbox Queue to operate on.
149  * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
150  *
151  * This routine will copy the contents of @mqe to the next available entry on
152  * the @q. This function will then ring the Mailbox Queue Doorbell to signal the
153  * HBA to start processing the Mailbox Queue Entry. This function returns 0 if
154  * successful. If no entries are available on @q then this function will return
155  * -ENOMEM.
156  * The caller is expected to hold the hbalock when calling this routine.
157  **/
158 static int
159 lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
160 {
161         struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
162         struct lpfc_register doorbell;
163         uint32_t host_index;
164
165         /* If the host has not yet processed the next entry then we are done */
166         if (((q->host_index + 1) % q->entry_count) == q->hba_index)
167                 return -ENOMEM;
168         lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
169         /* Save off the mailbox pointer for completion */
170         q->phba->mbox = (MAILBOX_t *)temp_mqe;
171
172         /* Update the host index before invoking device */
173         host_index = q->host_index;
174         q->host_index = ((q->host_index + 1) % q->entry_count);
175
176         /* Ring Doorbell */
177         doorbell.word0 = 0;
178         bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
179         bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
180         writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
181         readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
182         return 0;
183 }
184
185 /**
186  * lpfc_sli4_mq_release - Updates internal hba index for MQ
187  * @q: The Mailbox Queue to operate on.
188  *
189  * This routine will update the HBA index of a queue to reflect consumption of
190  * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
191  * an entry the host calls this function to update the queue's internal
192  * pointers. This routine returns the number of entries that were consumed by
193  * the HBA.
194  **/
195 static uint32_t
196 lpfc_sli4_mq_release(struct lpfc_queue *q)
197 {
198         /* Clear the mailbox pointer for completion */
199         q->phba->mbox = NULL;
200         q->hba_index = ((q->hba_index + 1) % q->entry_count);
201         return 1;
202 }
203
204 /**
205  * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
206  * @q: The Event Queue to get the first valid EQE from
207  *
208  * This routine will get the first valid Event Queue Entry from @q, update
209  * the queue's internal hba index, and return the EQE. If no valid EQEs are in
210  * the Queue (no more work to do), or the Queue is full of EQEs that have been
211  * processed, but not popped back to the HBA then this routine will return NULL.
212  **/
213 static struct lpfc_eqe *
214 lpfc_sli4_eq_get(struct lpfc_queue *q)
215 {
216         struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
217
218         /* If the next EQE is not valid then we are done */
219         if (!bf_get_le32(lpfc_eqe_valid, eqe))
220                 return NULL;
221         /* If the host has not yet processed the next entry then we are done */
222         if (((q->hba_index + 1) % q->entry_count) == q->host_index)
223                 return NULL;
224
225         q->hba_index = ((q->hba_index + 1) % q->entry_count);
226         return eqe;
227 }
228
229 /**
230  * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
231  * @q: The Event Queue that the host has completed processing for.
232  * @arm: Indicates whether the host wants to arm this EQ.
233  *
234  * This routine will mark all Event Queue Entries on @q, from the last
235  * known completed entry to the last entry that was processed, as completed
236  * by clearing the valid bit for each completion queue entry. Then it will
237  * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
238  * The internal host index in the @q will be updated by this routine to indicate
239  * that the host has finished processing the entries. The @arm parameter
240  * indicates that the queue should be rearmed when ringing the doorbell.
241  *
242  * This function will return the number of EQEs that were popped.
243  **/
244 uint32_t
245 lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
246 {
247         uint32_t released = 0;
248         struct lpfc_eqe *temp_eqe;
249         struct lpfc_register doorbell;
250
251         /* while there are valid entries */
252         while (q->hba_index != q->host_index) {
253                 temp_eqe = q->qe[q->host_index].eqe;
254                 bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
255                 released++;
256                 q->host_index = ((q->host_index + 1) % q->entry_count);
257         }
258         if (unlikely(released == 0 && !arm))
259                 return 0;
260
261         /* ring doorbell for number popped */
262         doorbell.word0 = 0;
263         if (arm) {
264                 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
265                 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
266         }
267         bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
268         bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
269         bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
270         writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
271         /* PCI read to flush PCI pipeline on re-arming for INTx mode */
272         if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
273                 readl(q->phba->sli4_hba.EQCQDBregaddr);
274         return released;
275 }
276
277 /**
278  * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
279  * @q: The Completion Queue to get the first valid CQE from
280  *
281  * This routine will get the first valid Completion Queue Entry from @q, update
282  * the queue's internal hba index, and return the CQE. If no valid CQEs are in
283  * the Queue (no more work to do), or the Queue is full of CQEs that have been
284  * processed, but not popped back to the HBA then this routine will return NULL.
285  **/
286 static struct lpfc_cqe *
287 lpfc_sli4_cq_get(struct lpfc_queue *q)
288 {
289         struct lpfc_cqe *cqe;
290
291         /* If the next CQE is not valid then we are done */
292         if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
293                 return NULL;
294         /* If the host has not yet processed the next entry then we are done */
295         if (((q->hba_index + 1) % q->entry_count) == q->host_index)
296                 return NULL;
297
298         cqe = q->qe[q->hba_index].cqe;
299         q->hba_index = ((q->hba_index + 1) % q->entry_count);
300         return cqe;
301 }
302
303 /**
304  * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
305  * @q: The Completion Queue that the host has completed processing for.
306  * @arm: Indicates whether the host wants to arm this CQ.
307  *
308  * This routine will mark all Completion queue entries on @q, from the last
309  * known completed entry to the last entry that was processed, as completed
310  * by clearing the valid bit for each completion queue entry. Then it will
311  * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
312  * The internal host index in the @q will be updated by this routine to indicate
313  * that the host has finished processing the entries. The @arm parameter
314  * indicates that the queue should be rearmed when ringing the doorbell.
315  *
316  * This function will return the number of CQEs that were released.
317  **/
318 uint32_t
319 lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
320 {
321         uint32_t released = 0;
322         struct lpfc_cqe *temp_qe;
323         struct lpfc_register doorbell;
324
325         /* while there are valid entries */
326         while (q->hba_index != q->host_index) {
327                 temp_qe = q->qe[q->host_index].cqe;
328                 bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
329                 released++;
330                 q->host_index = ((q->host_index + 1) % q->entry_count);
331         }
332         if (unlikely(released == 0 && !arm))
333                 return 0;
334
335         /* ring doorbell for number popped */
336         doorbell.word0 = 0;
337         if (arm)
338                 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
339         bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
340         bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
341         bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
342         writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
343         return released;
344 }
345
346 /**
347  * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
348  * @hq: The Header Receive Queue to operate on.
349  * @dq: The Data Receive Queue to operate on.
350  * @hrqe: The Header RQE; @drqe: the Data RQE to put on the queues.
351  *
352  * This routine will copy the contents of @hrqe and @drqe to the next
353  * available entries on @hq and @dq, then ring the Receive Queue Doorbell to
354  * signal the HBA to start processing the Receive Queue Entries. It returns
355  * the index the rqe was copied to if successful, -EINVAL if the queues do
356  * not match, or -EBUSY if no entries are available.
357  * The caller is expected to hold the hbalock when calling this routine.
357  **/
358 static int
359 lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
360                  struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
361 {
362         struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
363         struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
364         struct lpfc_register doorbell;
365         int put_index = hq->host_index;
366
367         if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
368                 return -EINVAL;
369         if (hq->host_index != dq->host_index)
370                 return -EINVAL;
371         /* If the host has not yet processed the next entry then we are done */
372         if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
373                 return -EBUSY;
374         lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
375         lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
376
377         /* Update the host index to point to the next slot */
378         hq->host_index = ((hq->host_index + 1) % hq->entry_count);
379         dq->host_index = ((dq->host_index + 1) % dq->entry_count);
380
381         /* Ring The Header Receive Queue Doorbell */
382         if (!(hq->host_index % hq->entry_repost)) {
383                 doorbell.word0 = 0;
384                 bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
385                        hq->entry_repost);
386                 bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
387                 writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
388         }
389         return put_index;
390 }
391
392 /**
393  * lpfc_sli4_rq_release - Updates internal hba index for RQ
394  * @q: The Header Receive Queue to operate on.
395  *
396  * This routine will update the HBA index of a queue to reflect consumption of
397  * one Receive Queue Entry by the HBA. When the HBA indicates that it has
398  * consumed an entry the host calls this function to update the queue's
399  * internal pointers. This routine returns the number of entries that were
400  * consumed by the HBA.
401  **/
402 static uint32_t
403 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
404 {
405         if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
406                 return 0;
407         hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
408         dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
409         return 1;
410 }
411
412 /**
413  * lpfc_cmd_iocb - Get next command iocb entry in the ring
414  * @phba: Pointer to HBA context object.
415  * @pring: Pointer to driver SLI ring object.
416  *
417  * This function returns a pointer to the next command iocb entry
418  * in the command ring. The caller must hold hbalock to prevent
419  * other threads from consuming the next command iocb.
420  * SLI-2/SLI-3 provide different sized iocbs.
421  **/
422 static inline IOCB_t *
423 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
424 {
425         return (IOCB_t *) (((char *) pring->cmdringaddr) +
426                            pring->cmdidx * phba->iocb_cmd_size);
427 }
428
429 /**
430  * lpfc_resp_iocb - Get next response iocb entry in the ring
431  * @phba: Pointer to HBA context object.
432  * @pring: Pointer to driver SLI ring object.
433  *
434  * This function returns a pointer to the next response iocb entry
435  * in the response ring. The caller must hold hbalock to make sure
436  * that no other thread consumes the next response iocb.
437  * SLI-2/SLI-3 provide different sized iocbs.
438  **/
439 static inline IOCB_t *
440 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
441 {
442         return (IOCB_t *) (((char *) pring->rspringaddr) +
443                            pring->rspidx * phba->iocb_rsp_size);
444 }
445
446 /**
447  * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
448  * @phba: Pointer to HBA context object.
449  *
450  * This function is called with hbalock held. This function
451  * allocates a new driver iocb object from the iocb pool. If the
452  * allocation is successful, it returns pointer to the newly
453  * allocated iocb object else it returns NULL.
454  **/
455 static struct lpfc_iocbq *
456 __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
457 {
458         struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
459         struct lpfc_iocbq * iocbq = NULL;
460
461         list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
462         if (iocbq)
463                 phba->iocb_cnt++;
464         if (phba->iocb_cnt > phba->iocb_max)
465                 phba->iocb_max = phba->iocb_cnt;
466         return iocbq;
467 }
468
469 /**
470  * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
471  * @phba: Pointer to HBA context object.
472  * @xritag: XRI value.
473  *
474  * This function clears the sglq pointer from the array of active
475  * sglq's. The xritag that is passed in is used to index into the
476  * array. Before the xritag can be used it needs to be adjusted
477  * by subtracting the xribase.
478  *
479  * Returns sglq pointer = success, NULL = Failure.
480  **/
481 static struct lpfc_sglq *
482 __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
483 {
484         struct lpfc_sglq *sglq;
485
486         sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
487         phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
488         return sglq;
489 }
490
491 /**
492  * __lpfc_get_active_sglq - Get the active sglq for this XRI.
493  * @phba: Pointer to HBA context object.
494  * @xritag: XRI value.
495  *
496  * This function returns the sglq pointer from the array of active
497  * sglq's. The xritag that is passed in is used to index into the
498  * array. Before the xritag can be used it needs to be adjusted
499  * by subtracting the xribase.
500  *
501  * Returns sglq pointer = success, NULL = Failure.
502  **/
503 struct lpfc_sglq *
504 __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
505 {
506         struct lpfc_sglq *sglq;
507
508         sglq =  phba->sli4_hba.lpfc_sglq_active_list[xritag];
509         return sglq;
510 }
511
512 /**
513  * __lpfc_set_rrq_active - set RRQ active bit in the ndlp's xri_bitmap.
514  * @phba: Pointer to HBA context object.
515  * @ndlp: nodelist pointer for this target.
516  * @xritag: xri used in this exchange.
517  * @rxid: Remote Exchange ID.
518  * @send_rrq: Flag used to determine if we should send rrq els cmd.
519  *
520  * This function is called with hbalock held.
521  * The active bit is set in the ndlp's active rrq xri_bitmap. Allocates an
522  * rrq struct and adds it to the active_rrq_list.
523  *
524  * returns  0 if an rrq slot was reserved for this xri
525  *         < 0 if rrq memory could not be allocated or a parameter was invalid.
526  **/
527 static int
528 __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
529                 uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
530 {
531         struct lpfc_node_rrq *rrq;
532         int empty;
533         uint32_t did = 0;
534
535
537                 return -EINVAL;
538
539         if (!phba->cfg_enable_rrq)
540                 return -EINVAL;
541
542         if (phba->pport->load_flag & FC_UNLOADING) {
543                 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
544                 goto out;
545         }
546         did = ndlp->nlp_DID;
547
548         /*
549          * set the active bit even if there is no mem available.
550          */
551         if (NLP_CHK_FREE_REQ(ndlp))
552                 goto out;
553
554         if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
555                 goto out;
556
557         if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
558                 goto out;
559
560         rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
561         if (rrq) {
562                 rrq->send_rrq = send_rrq;
563                 rrq->xritag = xritag;
564                 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
565                 rrq->ndlp = ndlp;
566                 rrq->nlp_DID = ndlp->nlp_DID;
567                 rrq->vport = ndlp->vport;
568                 rrq->rxid = rxid;
569                 empty = list_empty(&phba->active_rrq_list);
570                 rrq->send_rrq = send_rrq;
571                 list_add_tail(&rrq->list, &phba->active_rrq_list);
572                 if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
573                         phba->hba_flag |= HBA_RRQ_ACTIVE;
574                         if (empty)
575                                 lpfc_worker_wake_up(phba);
576                 }
577                 return 0;
578         }
579 out:
580         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
581                         "2921 Can't set rrq active xri:0x%x rxid:0x%x"
582                         " DID:0x%x Send:%d\n",
583                         xritag, rxid, did, send_rrq);
584         return -EINVAL;
585 }
586
587 /**
588  * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
589  * @phba: Pointer to HBA context object.
590  * @xritag: xri used in this exchange.
591  * @rrq: The RRQ to be cleared.
592  *
593  **/
594 void
595 lpfc_clr_rrq_active(struct lpfc_hba *phba,
596                     uint16_t xritag,
597                     struct lpfc_node_rrq *rrq)
598 {
599         struct lpfc_nodelist *ndlp = NULL;
600
601         if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
602                 ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
603
604         /* The target DID could have been swapped (cable swap);
605          * we should use the ndlp from the findnode if it is
606          * available.
607          */
608         if ((!ndlp) && rrq->ndlp)
609                 ndlp = rrq->ndlp;
610
611         if (!ndlp)
612                 goto out;
613
614         if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
615                 rrq->send_rrq = 0;
616                 rrq->xritag = 0;
617                 rrq->rrq_stop_time = 0;
618         }
619 out:
620         mempool_free(rrq, phba->rrq_pool);
621 }
622
623 /**
624  * lpfc_handle_rrq_active - Checks if an RRQ has waited RATOV.
625  * @phba: Pointer to HBA context object.
626  *
627  * This function takes the hbalock. It checks whether stop_time
628  * (ratov from setting the rrq active) has been reached; if it has
629  * and the send_rrq flag is set then it will call lpfc_send_rrq.
630  * If the send_rrq flag is not set
631  * then it will just call the routine to clear the rrq and
632  * free the rrq resource.
633  * The timer is set to the next rrq that is going to expire before
634  * leaving the routine.
635  *
636  **/
637 void
638 lpfc_handle_rrq_active(struct lpfc_hba *phba)
639 {
640         struct lpfc_node_rrq *rrq;
641         struct lpfc_node_rrq *nextrrq;
642         unsigned long next_time;
643         unsigned long iflags;
644         LIST_HEAD(send_rrq);
645
646         spin_lock_irqsave(&phba->hbalock, iflags);
647         phba->hba_flag &= ~HBA_RRQ_ACTIVE;
648         next_time = jiffies + HZ * (phba->fc_ratov + 1);
649         list_for_each_entry_safe(rrq, nextrrq,
650                                  &phba->active_rrq_list, list) {
651                 if (time_after(jiffies, rrq->rrq_stop_time))
652                         list_move(&rrq->list, &send_rrq);
653                 else if (time_before(rrq->rrq_stop_time, next_time))
654                         next_time = rrq->rrq_stop_time;
655         }
656         spin_unlock_irqrestore(&phba->hbalock, iflags);
657         if (!list_empty(&phba->active_rrq_list))
658                 mod_timer(&phba->rrq_tmr, next_time);
659         list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
660                 list_del(&rrq->list);
661                 if (!rrq->send_rrq)
662                         /* this call will free the rrq */
663                         lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
664                 else if (lpfc_send_rrq(phba, rrq)) {
665                         /* if we send the rrq then the completion handler
666                         *  will clear the bit in the xribitmap.
667                         */
668                         lpfc_clr_rrq_active(phba, rrq->xritag,
669                                             rrq);
670                 }
671         }
672 }
673
674 /**
675  * lpfc_get_active_rrq - Get the active RRQ for this exchange.
676  * @vport: Pointer to vport context object.
677  * @xri: The xri used in the exchange.
678  * @did: The target's DID for this exchange.
679  *
680  * returns NULL = rrq not found in the phba->active_rrq_list.
681  *         rrq = rrq for this xri and target.
682  **/
683 struct lpfc_node_rrq *
684 lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
685 {
686         struct lpfc_hba *phba = vport->phba;
687         struct lpfc_node_rrq *rrq;
688         struct lpfc_node_rrq *nextrrq;
689         unsigned long iflags;
690
691         if (phba->sli_rev != LPFC_SLI_REV4)
692                 return NULL;
693         spin_lock_irqsave(&phba->hbalock, iflags);
694         list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
695                 if (rrq->vport == vport && rrq->xritag == xri &&
696                                 rrq->nlp_DID == did){
697                         list_del(&rrq->list);
698                         spin_unlock_irqrestore(&phba->hbalock, iflags);
699                         return rrq;
700                 }
701         }
702         spin_unlock_irqrestore(&phba->hbalock, iflags);
703         return NULL;
704 }
705
706 /**
707  * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
708  * @vport: Pointer to vport context object.
709  * @ndlp: Pointer to the lpfc_nodelist structure.
710  * If ndlp is NULL, remove all active RRQs for this vport from the
711  * phba->active_rrq_list and clear each rrq.
712  * If ndlp is not NULL, only remove rrqs for this vport and this ndlp.
713  **/
714 void
715 lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
716 {
718         struct lpfc_hba *phba = vport->phba;
719         struct lpfc_node_rrq *rrq;
720         struct lpfc_node_rrq *nextrrq;
721         unsigned long iflags;
722         LIST_HEAD(rrq_list);
723
724         if (phba->sli_rev != LPFC_SLI_REV4)
725                 return;
726         if (!ndlp) {
727                 lpfc_sli4_vport_delete_els_xri_aborted(vport);
728                 lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
729         }
730         spin_lock_irqsave(&phba->hbalock, iflags);
731         list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
732                 if ((rrq->vport == vport) && (!ndlp  || rrq->ndlp == ndlp))
733                         list_move(&rrq->list, &rrq_list);
734         spin_unlock_irqrestore(&phba->hbalock, iflags);
735
736         list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
737                 list_del(&rrq->list);
738                 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
739         }
740 }
741
742 /**
743  * lpfc_cleanup_wt_rrqs - Remove all rrqs from the active list.
744  * @phba: Pointer to HBA context object.
745  *
746  * Remove all rrqs from the phba->active_rrq_list and free them by
747  * calling lpfc_clr_rrq_active.
748  *
749  **/
750 void
751 lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
752 {
753         struct lpfc_node_rrq *rrq;
754         struct lpfc_node_rrq *nextrrq;
755         unsigned long next_time;
756         unsigned long iflags;
757         LIST_HEAD(rrq_list);
758
759         if (phba->sli_rev != LPFC_SLI_REV4)
760                 return;
761         spin_lock_irqsave(&phba->hbalock, iflags);
762         phba->hba_flag &= ~HBA_RRQ_ACTIVE;
763         next_time = jiffies + HZ * (phba->fc_ratov * 2);
764         list_splice_init(&phba->active_rrq_list, &rrq_list);
765         spin_unlock_irqrestore(&phba->hbalock, iflags);
766
767         list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
768                 list_del(&rrq->list);
769                 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
770         }
771         if (!list_empty(&phba->active_rrq_list))
772                 mod_timer(&phba->rrq_tmr, next_time);
773 }
774
775
776 /**
777  * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
778  * @phba: Pointer to HBA context object.
779  * @ndlp: The target's nodelist pointer for this exchange.
780  * @xritag: the xri in the bitmap to test.
781  *
782  * This function is called with hbalock held. This function
783  * returns 0 = rrq not active for this xri
784  *         1 = rrq is valid for this xri.
785  **/
786 int
787 lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
788                         uint16_t  xritag)
789 {
790         if (!ndlp)
791                 return 0;
792         if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
793                 return 1;
794         else
795                 return 0;
796 }
797
798 /**
799  * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
800  * @phba: Pointer to HBA context object.
801  * @ndlp: nodelist pointer for this target.
802  * @xritag: xri used in this exchange.
803  * @rxid: Remote Exchange ID.
804  * @send_rrq: Flag used to determine if we should send rrq els cmd.
805  *
806  * This function takes the hbalock.
807  * The active bit is always set in the active rrq xri_bitmap even
808  * if there is no slot available for the other rrq information.
809  *
810  * returns 0 if the rrq was activated for this xri
811  *         < 0 if there was no memory or the ndlp was invalid.
812  **/
813 int
814 lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
815                         uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
816 {
817         int ret;
818         unsigned long iflags;
819
820         spin_lock_irqsave(&phba->hbalock, iflags);
821         ret = __lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq);
822         spin_unlock_irqrestore(&phba->hbalock, iflags);
823         return ret;
824 }
825
826 /**
827  * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
828  * @phba: Pointer to HBA context object.
829  * @piocb: Pointer to the iocbq.
830  *
831  * This function is called with hbalock held. This function
832  * gets a new driver sglq object from the sglq list. If the
833  * list is not empty then it is successful, it returns pointer to the newly
834  * allocated sglq object else it returns NULL.
835  **/
836 static struct lpfc_sglq *
837 __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
838 {
839         struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
840         struct lpfc_sglq *sglq = NULL;
841         struct lpfc_sglq *start_sglq = NULL;
842         struct lpfc_scsi_buf *lpfc_cmd;
843         struct lpfc_nodelist *ndlp;
844         int found = 0;
845
846         if (piocbq->iocb_flag &  LPFC_IO_FCP) {
847                 lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
848                 ndlp = lpfc_cmd->rdata->pnode;
849         } else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
850                         !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
851                 ndlp = piocbq->context_un.ndlp;
852         else
853                 ndlp = piocbq->context1;
854
855         list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
856         start_sglq = sglq;
857         while (!found) {
858                 if (!sglq)
859                         return NULL;
860                 if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
861                         /* This xri has an rrq outstanding for this DID.
862                          * put it back in the list and get another xri.
863                          */
864                         list_add_tail(&sglq->list, lpfc_sgl_list);
865                         sglq = NULL;
866                         list_remove_head(lpfc_sgl_list, sglq,
867                                                 struct lpfc_sglq, list);
868                         if (sglq == start_sglq) {
869                                 sglq = NULL;
870                                 break;
871                         } else
872                                 continue;
873                 }
874                 sglq->ndlp = ndlp;
875                 found = 1;
876                 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
877                 sglq->state = SGL_ALLOCATED;
878         }
879         return sglq;
880 }
881
882 /**
883  * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
884  * @phba: Pointer to HBA context object.
885  *
886  * This function is called with no lock held. This function
887  * allocates a new driver iocb object from the iocb pool. If the
888  * allocation is successful, it returns pointer to the newly
889  * allocated iocb object else it returns NULL.
890  **/
891 struct lpfc_iocbq *
892 lpfc_sli_get_iocbq(struct lpfc_hba *phba)
893 {
894         struct lpfc_iocbq * iocbq = NULL;
895         unsigned long iflags;
896
897         spin_lock_irqsave(&phba->hbalock, iflags);
898         iocbq = __lpfc_sli_get_iocbq(phba);
899         spin_unlock_irqrestore(&phba->hbalock, iflags);
900         return iocbq;
901 }
902
903 /**
904  * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
905  * @phba: Pointer to HBA context object.
906  * @iocbq: Pointer to driver iocb object.
907  *
908  * This function is called with hbalock held to release driver
909  * iocb object to the iocb pool. The iotag in the iocb object
910  * does not change for each use of the iocb object. This function
911  * clears all other fields of the iocb object when it is freed.
912  * The sglq structure that holds the xritag and phys and virtual
913  * mappings for the scatter gather list is retrieved from the
914  * active array of sglq. The get of the sglq pointer also clears
915  * the entry in the array. If the status of the IO indicates that
916  * this IO was aborted then the sglq entry is put on the
917  * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
918  * IO has good status or fails for any other reason then the sglq
919  * entry is added to the free list (lpfc_sgl_list).
920  **/
921 static void
922 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
923 {
924         struct lpfc_sglq *sglq;
925         size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
926         unsigned long iflag = 0;
927         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
928
929         if (iocbq->sli4_xritag == NO_XRI)
930                 sglq = NULL;
931         else
932                 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
933
934         if (sglq)  {
935                 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
936                         (sglq->state != SGL_XRI_ABORTED)) {
937                         spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
938                                         iflag);
939                         list_add(&sglq->list,
940                                 &phba->sli4_hba.lpfc_abts_els_sgl_list);
941                         spin_unlock_irqrestore(
942                                 &phba->sli4_hba.abts_sgl_list_lock, iflag);
943                 } else {
944                         sglq->state = SGL_FREED;
945                         sglq->ndlp = NULL;
946                         list_add_tail(&sglq->list,
947                                 &phba->sli4_hba.lpfc_sgl_list);
948
949                         /* Check if TXQ queue needs to be serviced */
950                         if (pring->txq_cnt)
951                                 lpfc_worker_wake_up(phba);
952                 }
953         }
954
956         /*
957          * Clean all volatile data fields, preserve iotag and node struct.
958          */
959         memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
960         iocbq->sli4_lxritag = NO_XRI;
961         iocbq->sli4_xritag = NO_XRI;
962         list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
963 }
964
965
966 /**
967  * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
968  * @phba: Pointer to HBA context object.
969  * @iocbq: Pointer to driver iocb object.
970  *
971  * This function is called with hbalock held to release driver
972  * iocb object to the iocb pool. The iotag in the iocb object
973  * does not change for each use of the iocb object. This function
974  * clears all other fields of the iocb object when it is freed.
975  **/
976 static void
977 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
978 {
979         size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
980
981         /*
982          * Clean all volatile data fields, preserve iotag and node struct.
983          */
984         memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
985         iocbq->sli4_xritag = NO_XRI;
986         list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
987 }
988
989 /**
990  * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
991  * @phba: Pointer to HBA context object.
992  * @iocbq: Pointer to driver iocb object.
993  *
994  * This function is called with hbalock held to release driver
995  * iocb object to the iocb pool. The iotag in the iocb object
996  * does not change for each use of the iocb object. This function
997  * clears all other fields of the iocb object when it is freed.
998  **/
999 static void
1000 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1001 {
1002         phba->__lpfc_sli_release_iocbq(phba, iocbq);
1003         phba->iocb_cnt--;
1004 }
1005
1006 /**
1007  * lpfc_sli_release_iocbq - Release iocb to the iocb pool
1008  * @phba: Pointer to HBA context object.
1009  * @iocbq: Pointer to driver iocb object.
1010  *
1011  * This function is called with no lock held to release the iocb to
1012  * iocb pool.
1013  **/
1014 void
1015 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1016 {
1017         unsigned long iflags;
1018
1019         /*
1020          * Clean all volatile data fields, preserve iotag and node struct.
1021          */
1022         spin_lock_irqsave(&phba->hbalock, iflags);
1023         __lpfc_sli_release_iocbq(phba, iocbq);
1024         spin_unlock_irqrestore(&phba->hbalock, iflags);
1025 }
1026
1027 /**
1028  * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1029  * @phba: Pointer to HBA context object.
1030  * @iocblist: List of IOCBs.
1031  * @ulpstatus: ULP status in IOCB command field.
1032  * @ulpWord4: ULP word-4 in IOCB command field.
1033  *
1034  * This function is called with a list of IOCBs to cancel. It cancels each IOCB
1035  * on the list by invoking the complete callback function associated with the
1036  * IOCB, with the provided @ulpstatus and @ulpWord4 set in the IOCB command
1037  * fields.
1038  **/
1039 void
1040 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1041                       uint32_t ulpstatus, uint32_t ulpWord4)
1042 {
1043         struct lpfc_iocbq *piocb;
1044
1045         while (!list_empty(iocblist)) {
1046                 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1047
1048                 if (!piocb->iocb_cmpl)
1049                         lpfc_sli_release_iocbq(phba, piocb);
1050                 else {
1051                         piocb->iocb.ulpStatus = ulpstatus;
1052                         piocb->iocb.un.ulpWord[4] = ulpWord4;
1053                         (piocb->iocb_cmpl) (phba, piocb, piocb);
1054                 }
1055         }
1056         return;
1057 }
1058
1059 /**
1060  * lpfc_sli_iocb_cmd_type - Get the iocb type
1061  * @iocb_cmnd: iocb command code.
1062  *
1063  * This function is called by ring event handler function to get the iocb type.
1064  * This function translates the iocb command to an iocb command type used to
1065  * decide the final disposition of each completed IOCB.
1066  * The function returns
1067  * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1068  * LPFC_SOL_IOCB     if it is a solicited iocb completion
1069  * LPFC_ABORT_IOCB   if it is an abort iocb
1070  * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
1071  *
1072  * The caller is not required to hold any lock.
1073  **/
1074 static lpfc_iocb_type
1075 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1076 {
1077         lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1078
1079         if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1080                 return LPFC_UNKNOWN_IOCB;
1081
1082         switch (iocb_cmnd) {
1083         case CMD_XMIT_SEQUENCE_CR:
1084         case CMD_XMIT_SEQUENCE_CX:
1085         case CMD_XMIT_BCAST_CN:
1086         case CMD_XMIT_BCAST_CX:
1087         case CMD_ELS_REQUEST_CR:
1088         case CMD_ELS_REQUEST_CX:
1089         case CMD_CREATE_XRI_CR:
1090         case CMD_CREATE_XRI_CX:
1091         case CMD_GET_RPI_CN:
1092         case CMD_XMIT_ELS_RSP_CX:
1093         case CMD_GET_RPI_CR:
1094         case CMD_FCP_IWRITE_CR:
1095         case CMD_FCP_IWRITE_CX:
1096         case CMD_FCP_IREAD_CR:
1097         case CMD_FCP_IREAD_CX:
1098         case CMD_FCP_ICMND_CR:
1099         case CMD_FCP_ICMND_CX:
1100         case CMD_FCP_TSEND_CX:
1101         case CMD_FCP_TRSP_CX:
1102         case CMD_FCP_TRECEIVE_CX:
1103         case CMD_FCP_AUTO_TRSP_CX:
1104         case CMD_ADAPTER_MSG:
1105         case CMD_ADAPTER_DUMP:
1106         case CMD_XMIT_SEQUENCE64_CR:
1107         case CMD_XMIT_SEQUENCE64_CX:
1108         case CMD_XMIT_BCAST64_CN:
1109         case CMD_XMIT_BCAST64_CX:
1110         case CMD_ELS_REQUEST64_CR:
1111         case CMD_ELS_REQUEST64_CX:
1112         case CMD_FCP_IWRITE64_CR:
1113         case CMD_FCP_IWRITE64_CX:
1114         case CMD_FCP_IREAD64_CR:
1115         case CMD_FCP_IREAD64_CX:
1116         case CMD_FCP_ICMND64_CR:
1117         case CMD_FCP_ICMND64_CX:
1118         case CMD_FCP_TSEND64_CX:
1119         case CMD_FCP_TRSP64_CX:
1120         case CMD_FCP_TRECEIVE64_CX:
1121         case CMD_GEN_REQUEST64_CR:
1122         case CMD_GEN_REQUEST64_CX:
1123         case CMD_XMIT_ELS_RSP64_CX:
1124         case DSSCMD_IWRITE64_CR:
1125         case DSSCMD_IWRITE64_CX:
1126         case DSSCMD_IREAD64_CR:
1127         case DSSCMD_IREAD64_CX:
1128                 type = LPFC_SOL_IOCB;
1129                 break;
1130         case CMD_ABORT_XRI_CN:
1131         case CMD_ABORT_XRI_CX:
1132         case CMD_CLOSE_XRI_CN:
1133         case CMD_CLOSE_XRI_CX:
1134         case CMD_XRI_ABORTED_CX:
1135         case CMD_ABORT_MXRI64_CN:
1136         case CMD_XMIT_BLS_RSP64_CX:
1137                 type = LPFC_ABORT_IOCB;
1138                 break;
1139         case CMD_RCV_SEQUENCE_CX:
1140         case CMD_RCV_ELS_REQ_CX:
1141         case CMD_RCV_SEQUENCE64_CX:
1142         case CMD_RCV_ELS_REQ64_CX:
1143         case CMD_ASYNC_STATUS:
1144         case CMD_IOCB_RCV_SEQ64_CX:
1145         case CMD_IOCB_RCV_ELS64_CX:
1146         case CMD_IOCB_RCV_CONT64_CX:
1147         case CMD_IOCB_RET_XRI64_CX:
1148                 type = LPFC_UNSOL_IOCB;
1149                 break;
1150         case CMD_IOCB_XMIT_MSEQ64_CR:
1151         case CMD_IOCB_XMIT_MSEQ64_CX:
1152         case CMD_IOCB_RCV_SEQ_LIST64_CX:
1153         case CMD_IOCB_RCV_ELS_LIST64_CX:
1154         case CMD_IOCB_CLOSE_EXTENDED_CN:
1155         case CMD_IOCB_ABORT_EXTENDED_CN:
1156         case CMD_IOCB_RET_HBQE64_CN:
1157         case CMD_IOCB_FCP_IBIDIR64_CR:
1158         case CMD_IOCB_FCP_IBIDIR64_CX:
1159         case CMD_IOCB_FCP_ITASKMGT64_CX:
1160         case CMD_IOCB_LOGENTRY_CN:
1161         case CMD_IOCB_LOGENTRY_ASYNC_CN:
1162                 printk(KERN_ERR "%s - Unhandled SLI-3 Command x%x\n",
1163                                 __func__, iocb_cmnd);
1164                 type = LPFC_UNKNOWN_IOCB;
1165                 break;
1166         default:
1167                 type = LPFC_UNKNOWN_IOCB;
1168                 break;
1169         }
1170
1171         return type;
1172 }
1173
1174 /**
1175  * lpfc_sli_ring_map - Issue config_ring mbox for all rings
1176  * @phba: Pointer to HBA context object.
1177  *
1178  * This function is called from SLI initialization code
1179  * to configure every ring of the HBA's SLI interface. The
1180  * caller is not required to hold any lock. This function issues
1181  * a config_ring mailbox command for each ring.
1182  * This function returns zero if successful else returns a negative
1183  * error code.
1184  **/
1185 static int
1186 lpfc_sli_ring_map(struct lpfc_hba *phba)
1187 {
1188         struct lpfc_sli *psli = &phba->sli;
1189         LPFC_MBOXQ_t *pmb;
1190         MAILBOX_t *pmbox;
1191         int i, rc, ret = 0;
1192
1193         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1194         if (!pmb)
1195                 return -ENOMEM;
1196         pmbox = &pmb->u.mb;
1197         phba->link_state = LPFC_INIT_MBX_CMDS;
1198         for (i = 0; i < psli->num_rings; i++) {
1199                 lpfc_config_ring(phba, i, pmb);
1200                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1201                 if (rc != MBX_SUCCESS) {
1202                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1203                                         "0446 Adapter failed to init (%d), "
1204                                         "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1205                                         "ring %d\n",
1206                                         rc, pmbox->mbxCommand,
1207                                         pmbox->mbxStatus, i);
1208                         phba->link_state = LPFC_HBA_ERROR;
1209                         ret = -ENXIO;
1210                         break;
1211                 }
1212         }
1213         mempool_free(pmb, phba->mbox_mem_pool);
1214         return ret;
1215 }
1216
1217 /**
1218  * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1219  * @phba: Pointer to HBA context object.
1220  * @pring: Pointer to driver SLI ring object.
1221  * @piocb: Pointer to the driver iocb object.
1222  *
1223  * This function is called with hbalock held. The function adds the
1224  * new iocb to txcmplq of the given ring. This function always returns
1225  * 0. If this function is called for ELS ring, this function checks if
1226  * there is a vport associated with the ELS command. This function also
1227  * starts els_tmofunc timer if this is an ELS command.
1228  **/
1229 static int
1230 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1231                         struct lpfc_iocbq *piocb)
1232 {
1233         list_add_tail(&piocb->list, &pring->txcmplq);
1234         piocb->iocb_flag |= LPFC_IO_ON_Q;
1235         pring->txcmplq_cnt++;
1236         if (pring->txcmplq_cnt > pring->txcmplq_max)
1237                 pring->txcmplq_max = pring->txcmplq_cnt;
1238
1239         if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1240            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
1241            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1242                 if (!piocb->vport)
1243                         BUG();
1244                 else
1245                         mod_timer(&piocb->vport->els_tmofunc,
1246                                   jiffies + HZ * (phba->fc_ratov << 1));
1247         }
1248
1250         return 0;
1251 }
1252
1253 /**
1254  * lpfc_sli_ringtx_get - Get first element of the txq
1255  * @phba: Pointer to HBA context object.
1256  * @pring: Pointer to driver SLI ring object.
1257  *
1258  * This function is called with hbalock held to get next
1259  * iocb in txq of the given ring. If there is any iocb in
1260  * the txq, the function returns first iocb in the list after
1261  * removing the iocb from the list, else it returns NULL.
1262  **/
1263 struct lpfc_iocbq *
1264 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1265 {
1266         struct lpfc_iocbq *cmd_iocb;
1267
1268         list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1269         if (cmd_iocb != NULL)
1270                 pring->txq_cnt--;
1271         return cmd_iocb;
1272 }
1273
1274 /**
1275  * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
1276  * @phba: Pointer to HBA context object.
1277  * @pring: Pointer to driver SLI ring object.
1278  *
1279  * This function is called with hbalock held and the caller must post the
1280  * iocb without releasing the lock. If the caller releases the lock,
1281  * iocb slot returned by the function is not guaranteed to be available.
1282  * The function returns pointer to the next available iocb slot if there
1283  * is available slot in the ring, else it returns NULL.
1284  * If the get index of the ring is ahead of the put index, the function
1285  * will post an error attention event to the worker thread to take the
1286  * HBA to offline state.
1287  **/
1288 static IOCB_t *
1289 lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1290 {
1291         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
1292         uint32_t  max_cmd_idx = pring->numCiocb;
1293         if ((pring->next_cmdidx == pring->cmdidx) &&
1294            (++pring->next_cmdidx >= max_cmd_idx))
1295                 pring->next_cmdidx = 0;
1296
1297         if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
1298
1299                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1300
1301                 if (unlikely(pring->local_getidx >= max_cmd_idx)) {
1302                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1303                                         "0315 Ring %d issue: portCmdGet %d "
1304                                         "is bigger than cmd ring %d\n",
1305                                         pring->ringno,
1306                                         pring->local_getidx, max_cmd_idx);
1307
1308                         phba->link_state = LPFC_HBA_ERROR;
1309                         /*
1310                          * All error attention handlers are posted to
1311                          * worker thread
1312                          */
1313                         phba->work_ha |= HA_ERATT;
1314                         phba->work_hs = HS_FFER3;
1315
1316                         lpfc_worker_wake_up(phba);
1317
1318                         return NULL;
1319                 }
1320
1321                 if (pring->local_getidx == pring->next_cmdidx)
1322                         return NULL;
1323         }
1324
1325         return lpfc_cmd_iocb(phba, pring);
1326 }
1327
1328 /**
1329  * lpfc_sli_next_iotag - Get an iotag for the iocb
1330  * @phba: Pointer to HBA context object.
1331  * @iocbq: Pointer to driver iocb object.
1332  *
1333  * This function gets an iotag for the iocb. If there is no unused iotag and
1334  * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
1335  * array and assigns a new iotag.
1336  * The function returns the allocated iotag if successful, else returns zero.
1337  * Zero is not a valid iotag.
1338  * The caller is not required to hold any lock.
1339  **/
1340 uint16_t
1341 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1342 {
1343         struct lpfc_iocbq **new_arr;
1344         struct lpfc_iocbq **old_arr;
1345         size_t new_len;
1346         struct lpfc_sli *psli = &phba->sli;
1347         uint16_t iotag;
1348
1349         spin_lock_irq(&phba->hbalock);
1350         iotag = psli->last_iotag;
1351         if (++iotag < psli->iocbq_lookup_len) {
1352                 psli->last_iotag = iotag;
1353                 psli->iocbq_lookup[iotag] = iocbq;
1354                 spin_unlock_irq(&phba->hbalock);
1355                 iocbq->iotag = iotag;
1356                 return iotag;
1357         } else if (psli->iocbq_lookup_len < (0xffff
1358                                            - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1359                 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
1360                 spin_unlock_irq(&phba->hbalock);
1361                 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
1362                                   GFP_KERNEL);
1363                 if (new_arr) {
1364                         spin_lock_irq(&phba->hbalock);
1365                         old_arr = psli->iocbq_lookup;
1366                         if (new_len <= psli->iocbq_lookup_len) {
1367                                 /* highly improbable case */
1368                                 kfree(new_arr);
1369                                 iotag = psli->last_iotag;
1370                                 if (++iotag < psli->iocbq_lookup_len) {
1371                                         psli->last_iotag = iotag;
1372                                         psli->iocbq_lookup[iotag] = iocbq;
1373                                         spin_unlock_irq(&phba->hbalock);
1374                                         iocbq->iotag = iotag;
1375                                         return iotag;
1376                                 }
1377                                 spin_unlock_irq(&phba->hbalock);
1378                                 return 0;
1379                         }
1380                         if (psli->iocbq_lookup)
1381                                 memcpy(new_arr, old_arr,
1382                                        ((psli->last_iotag  + 1) *
1383                                         sizeof (struct lpfc_iocbq *)));
1384                         psli->iocbq_lookup = new_arr;
1385                         psli->iocbq_lookup_len = new_len;
1386                         psli->last_iotag = iotag;
1387                         psli->iocbq_lookup[iotag] = iocbq;
1388                         spin_unlock_irq(&phba->hbalock);
1389                         iocbq->iotag = iotag;
1390                         kfree(old_arr);
1391                         return iotag;
1392                 }
1393         } else
1394                 spin_unlock_irq(&phba->hbalock);
1395
1396         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1397                         "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1398                         psli->last_iotag);
1399
1400         return 0;
1401 }
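
/*
 * Illustrative sketch (not part of the driver): the grow-on-demand lookup
 * table behind lpfc_sli_next_iotag, reduced to its core. All demo_* names
 * are hypothetical. The real routine also drops and retakes hbalock around
 * the allocation and re-checks for a concurrent grower; this sketch is
 * single-threaded for clarity.
 */
struct demo_iotag_table {
        void **slots;           /* slot 0 unused; 0 is not a valid iotag */
        unsigned int len;       /* current array length */
        unsigned int last;      /* last iotag handed out */
};

static unsigned int demo_iotag_grow(struct demo_iotag_table *t, void *obj,
                                    unsigned int increment)
{
        void **bigger;
        unsigned int new_len = t->len + increment;

        if (t->len >= 0xffff - increment)
                return 0;       /* 16-bit iotag space exhausted */
        bigger = kzalloc(new_len * sizeof(*bigger), GFP_KERNEL);
        if (!bigger)
                return 0;
        if (t->slots)
                memcpy(bigger, t->slots, (t->last + 1) * sizeof(*bigger));
        kfree(t->slots);
        t->slots = bigger;
        t->len = new_len;
        t->slots[++t->last] = obj;      /* first tag in the new space */
        return t->last;
}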
1402
1403 /**
1404  * lpfc_sli_submit_iocb - Submit an iocb to the firmware
1405  * @phba: Pointer to HBA context object.
1406  * @pring: Pointer to driver SLI ring object.
1407  * @iocb: Pointer to iocb slot in the ring.
1408  * @nextiocb: Pointer to driver iocb object which needs to be
1409  *            posted to firmware.
1410  *
1411  * This function is called with hbalock held to post a new iocb to
1412  * the firmware. This function copies the new iocb to the ring iocb slot
1413  * and updates the ring pointers. It adds the new iocb to the txcmplq if
1414  * there is a completion callback for this iocb; otherwise the function
1415  * will free the iocb object.
1416  **/
1417 static void
1418 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1419                 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1420 {
1421         /*
1422          * Set up an iotag
1423          */
1424         nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
1425
1426
1427         if (pring->ringno == LPFC_ELS_RING) {
1428                 lpfc_debugfs_slow_ring_trc(phba,
1429                         "IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
1430                         *(((uint32_t *) &nextiocb->iocb) + 4),
1431                         *(((uint32_t *) &nextiocb->iocb) + 6),
1432                         *(((uint32_t *) &nextiocb->iocb) + 7));
1433         }
1434
1435         /*
1436          * Issue iocb command to adapter
1437          */
1438         lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
1439         wmb();
1440         pring->stats.iocb_cmd++;
1441
1442         /*
1443          * If there is no completion routine to call, we can release the
1444          * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1445          * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1446          */
1447         if (nextiocb->iocb_cmpl)
1448                 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
1449         else
1450                 __lpfc_sli_release_iocbq(phba, nextiocb);
1451
1452         /*
1453          * Let the HBA know what IOCB slot will be the next one the
1454          * driver will put a command into.
1455          */
1456         pring->cmdidx = pring->next_cmdidx;
1457         writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
1458 }
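
/*
 * Illustrative sketch (not part of the driver): the ordering contract that
 * lpfc_sli_submit_iocb relies on when handing a command entry to the HBA.
 * The slot contents must be globally visible before the put index moves,
 * hence the wmb() between the copy and the doorbell. demo_* names are
 * hypothetical.
 */
struct demo_cmd_ring {
        u32 *slots;             /* command entries shared with the device */
        u32 entries;
        u32 put;                /* host's next free slot */
        u32 __iomem *put_reg;   /* device's copy of the put index */
};

static void demo_ring_post(struct demo_cmd_ring *r, u32 entry)
{
        r->slots[r->put] = entry;               /* 1. fill the slot */
        wmb();                                  /* 2. slot before index */
        r->put = (r->put + 1) % r->entries;
        writel(r->put, r->put_reg);             /* 3. device may consume */
}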
1459
1460 /**
1461  * lpfc_sli_update_full_ring - Update the chip attention register
1462  * @phba: Pointer to HBA context object.
1463  * @pring: Pointer to driver SLI ring object.
1464  *
1465  * The caller is not required to hold any lock for calling this function.
1466  * This function updates the chip attention bits for the ring to inform firmware
1467  * that there is pending work to be done for this ring and requests an
1468  * interrupt when there is space available in the ring. This function is
1469  * called when the driver is unable to post more iocbs to the ring due
1470  * to unavailability of space in the ring.
1471  **/
1472 static void
1473 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1474 {
1475         int ringno = pring->ringno;
1476
1477         pring->flag |= LPFC_CALL_RING_AVAILABLE;
1478
1479         wmb();
1480
1481         /*
1482          * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1483          * The HBA will tell us when an IOCB entry is available.
1484          */
1485         writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1486         readl(phba->CAregaddr); /* flush */
1487
1488         pring->stats.iocb_cmd_full++;
1489 }
1490
1491 /**
1492  * lpfc_sli_update_ring - Update chip attention register
1493  * @phba: Pointer to HBA context object.
1494  * @pring: Pointer to driver SLI ring object.
1495  *
1496  * This function updates the chip attention register bit for the
1497  * given ring to inform HBA that there is more work to be done
1498  * in this ring. The caller is not required to hold any lock.
1499  **/
1500 static void
1501 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1502 {
1503         int ringno = pring->ringno;
1504
1505         /*
1506          * Tell the HBA that there is work to do in this ring.
1507          */
1508         if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1509                 wmb();
1510                 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1511                 readl(phba->CAregaddr); /* flush */
1512         }
1513 }
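
/*
 * Illustrative sketch (not part of the driver): the Chip Attention register
 * packs four bits per ring, which is why the two functions above shift a
 * ring-0 mask by (ringno * 4). demo_ca_bits is a hypothetical helper that
 * makes the math explicit.
 */
static inline u32 demo_ca_bits(u32 ring0_mask, int ringno)
{
        return ring0_mask << (ringno * 4);      /* e.g. CA_R0ATT for ring 2 */
}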
1514
1515 /**
1516  * lpfc_sli_resume_iocb - Process iocbs in the txq
1517  * @phba: Pointer to HBA context object.
1518  * @pring: Pointer to driver SLI ring object.
1519  *
1520  * This function is called with hbalock held to post pending iocbs
1521  * in the txq to the firmware. This function is called when the driver
1522  * detects space available in the ring.
1523  **/
1524 static void
1525 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1526 {
1527         IOCB_t *iocb;
1528         struct lpfc_iocbq *nextiocb;
1529
1530         /*
1531          * Check to see if:
1532          *  (a) there is anything on the txq to send
1533          *  (b) link is up
1534          *  (c) link attention events can be processed (fcp ring only)
1535          *  (d) IOCB processing is not blocked by the outstanding mbox command.
1536          */
1537         if (pring->txq_cnt &&
1538             lpfc_is_link_up(phba) &&
1539             (pring->ringno != phba->sli.fcp_ring ||
1540              phba->sli.sli_flag & LPFC_PROCESS_LA)) {
1541
1542                 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1543                        (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1544                         lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1545
1546                 if (iocb)
1547                         lpfc_sli_update_ring(phba, pring);
1548                 else
1549                         lpfc_sli_update_full_ring(phba, pring);
1550         }
1551
1552         return;
1553 }
1554
1555 /**
1556  * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1557  * @phba: Pointer to HBA context object.
1558  * @hbqno: HBQ number.
1559  *
1560  * This function is called with hbalock held to get the next
1561  * available slot for the given HBQ. If there is a free slot
1562  * available for the HBQ, it will return a pointer to the next
1563  * available HBQ entry; otherwise it will return NULL.
1564  **/
1565 static struct lpfc_hbq_entry *
1566 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1567 {
1568         struct hbq_s *hbqp = &phba->hbqs[hbqno];
1569
1570         if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1571             ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1572                 hbqp->next_hbqPutIdx = 0;
1573
1574         if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1575                 uint32_t raw_index = phba->hbq_get[hbqno];
1576                 uint32_t getidx = le32_to_cpu(raw_index);
1577
1578                 hbqp->local_hbqGetIdx = getidx;
1579
1580                 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1581                         lpfc_printf_log(phba, KERN_ERR,
1582                                         LOG_SLI | LOG_VPORT,
1583                                         "1802 HBQ %d: local_hbqGetIdx "
1584                                         "%u is > than hbqp->entry_count %u\n",
1585                                         hbqno, hbqp->local_hbqGetIdx,
1586                                         hbqp->entry_count);
1587
1588                         phba->link_state = LPFC_HBA_ERROR;
1589                         return NULL;
1590                 }
1591
1592                 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1593                         return NULL;
1594         }
1595
1596         return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1597                         hbqp->hbqPutIdx;
1598 }
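
/*
 * Illustrative sketch (not part of the driver): the full/empty test used by
 * lpfc_sli_next_hbq_slot. The host caches the device's get index and only
 * re-reads it when the ring looks full; the ring really is full when the
 * next put index would collide with the get index. demo_* names are
 * hypothetical.
 */
struct demo_hbq_idx {
        u32 entries;
        u32 next_put;           /* where put will move after posting */
        u32 cached_get;         /* last get index read from the device */
};

static bool demo_hbq_has_room(struct demo_hbq_idx *q, u32 device_get)
{
        if (q->cached_get == q->next_put)
                q->cached_get = device_get;     /* refresh only when needed */
        return q->cached_get != q->next_put;
}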
1599
1600 /**
1601  * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
1602  * @phba: Pointer to HBA context object.
1603  *
1604  * This function is called with no lock held to free all the
1605  * hbq buffers while uninitializing the SLI interface. It also
1606  * frees the HBQ buffers returned by the firmware but not yet
1607  * processed by the upper layers.
1608  **/
1609 void
1610 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1611 {
1612         struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1613         struct hbq_dmabuf *hbq_buf;
1614         unsigned long flags;
1615         int i, hbq_count;
1616         uint32_t hbqno;
1617
1618         hbq_count = lpfc_sli_hbq_count();
1619         /* Return all memory used by all HBQs */
1620         spin_lock_irqsave(&phba->hbalock, flags);
1621         for (i = 0; i < hbq_count; ++i) {
1622                 list_for_each_entry_safe(dmabuf, next_dmabuf,
1623                                 &phba->hbqs[i].hbq_buffer_list, list) {
1624                         hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1625                         list_del(&hbq_buf->dbuf.list);
1626                         (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1627                 }
1628                 phba->hbqs[i].buffer_count = 0;
1629         }
1630         /* Return all HBQ buffers that are in-flight */
1631         list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
1632                                  list) {
1633                 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1634                 list_del(&hbq_buf->dbuf.list);
1635                 if (hbq_buf->tag == -1) {
1636                         (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1637                                 (phba, hbq_buf);
1638                 } else {
1639                         hbqno = hbq_buf->tag >> 16;
1640                         if (hbqno >= LPFC_MAX_HBQS)
1641                                 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1642                                         (phba, hbq_buf);
1643                         else
1644                                 (phba->hbqs[hbqno].hbq_free_buffer)(phba,
1645                                         hbq_buf);
1646                 }
1647         }
1648
1649         /* Mark the HBQs not in use */
1650         phba->hbq_in_use = 0;
1651         spin_unlock_irqrestore(&phba->hbalock, flags);
1652 }
1653
1654 /**
1655  * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
1656  * @phba: Pointer to HBA context object.
1657  * @hbqno: HBQ number.
1658  * @hbq_buf: Pointer to HBQ buffer.
1659  *
1660  * This function is called with the hbalock held to post an
1661  * hbq buffer to the firmware. If the function finds an empty
1662  * slot in the HBQ, it will post the buffer. The function returns
1663  * zero if it successfully posts the buffer; otherwise it returns
1664  * an error.
1665  **/
1666 static int
1667 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
1668                          struct hbq_dmabuf *hbq_buf)
1669 {
1670         return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1671 }
1672
1673 /**
1674  * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1675  * @phba: Pointer to HBA context object.
1676  * @hbqno: HBQ number.
1677  * @hbq_buf: Pointer to HBQ buffer.
1678  *
1679  * This function is called with the hbalock held to post an hbq buffer to the
1680  * firmware. If the function finds an empty slot in the HBQ, it will post the
1681  * buffer and place it on the hbq_buffer_list. The function returns zero if
1682  * it successfully posts the buffer; otherwise it returns an error.
1683  **/
1684 static int
1685 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1686                             struct hbq_dmabuf *hbq_buf)
1687 {
1688         struct lpfc_hbq_entry *hbqe;
1689         dma_addr_t physaddr = hbq_buf->dbuf.phys;
1690
1691         /* Get next HBQ entry slot to use */
1692         hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
1693         if (hbqe) {
1694                 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1695
1696                 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1697                 hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
1698                 hbqe->bde.tus.f.bdeSize = hbq_buf->size;
1699                 hbqe->bde.tus.f.bdeFlags = 0;
1700                 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
1701                 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
1702                                 /* Sync SLIM */
1703                 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
1704                 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
1705                                 /* flush */
1706                 readl(phba->hbq_put + hbqno);
1707                 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
1708                 return 0;
1709         } else
1710                 return -ENOMEM;
1711 }
1712
1713 /**
1714  * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1715  * @phba: Pointer to HBA context object.
1716  * @hbqno: HBQ number.
1717  * @hbq_buf: Pointer to HBQ buffer.
1718  *
1719  * This function is called with the hbalock held to post an RQE to the SLI4
1720  * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1721  * the hbq_buffer_list and return zero, otherwise it will return an error.
1722  **/
1723 static int
1724 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1725                             struct hbq_dmabuf *hbq_buf)
1726 {
1727         int rc;
1728         struct lpfc_rqe hrqe;
1729         struct lpfc_rqe drqe;
1730
1731         hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1732         hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1733         drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1734         drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
1735         rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
1736                               &hrqe, &drqe);
1737         if (rc < 0)
1738                 return rc;
1739         hbq_buf->tag = rc;
1740         list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1741         return 0;
1742 }
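
/*
 * Illustrative sketch (not part of the driver): the per-revision dispatch
 * seen in lpfc_sli_hbq_to_firmware above. Setup code installs the SLI3 or
 * SLI4 worker in a function pointer once, so the hot path never branches on
 * the revision. All demo_* names are hypothetical.
 */
struct demo_hbq_ops {
        int (*post_buffer)(int hbqno);
};

static int demo_post_s3(int hbqno) { return 0; }        /* stand-in */
static int demo_post_s4(int hbqno) { return 0; }        /* stand-in */

static void demo_bind_ops(struct demo_hbq_ops *ops, int sli_rev)
{
        ops->post_buffer = (sli_rev == 4) ? demo_post_s4 : demo_post_s3;
}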
1743
1744 /* HBQ for ELS and CT traffic. */
1745 static struct lpfc_hbq_init lpfc_els_hbq = {
1746         .rn = 1,
1747         .entry_count = 256,
1748         .mask_count = 0,
1749         .profile = 0,
1750         .ring_mask = (1 << LPFC_ELS_RING),
1751         .buffer_count = 0,
1752         .init_count = 40,
1753         .add_count = 40,
1754 };
1755
1756 /* HBQ for the extra ring if needed */
1757 static struct lpfc_hbq_init lpfc_extra_hbq = {
1758         .rn = 1,
1759         .entry_count = 200,
1760         .mask_count = 0,
1761         .profile = 0,
1762         .ring_mask = (1 << LPFC_EXTRA_RING),
1763         .buffer_count = 0,
1764         .init_count = 0,
1765         .add_count = 5,
1766 };
1767
1768 /* Array of HBQs */
1769 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
1770         &lpfc_els_hbq,
1771         &lpfc_extra_hbq,
1772 };
1773
1774 /**
1775  * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
1776  * @phba: Pointer to HBA context object.
1777  * @hbqno: HBQ number.
1778  * @count: Number of HBQ buffers to be posted.
1779  *
1780  * This function is called with no lock held to post more hbq buffers to the
1781  * given HBQ. The function returns the number of HBQ buffers successfully
1782  * posted.
1783  **/
1784 static int
1785 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
1786 {
1787         uint32_t i, posted = 0;
1788         unsigned long flags;
1789         struct hbq_dmabuf *hbq_buffer;
1790         LIST_HEAD(hbq_buf_list);
1791         if (!phba->hbqs[hbqno].hbq_alloc_buffer)
1792                 return 0;
1793
1794         if ((phba->hbqs[hbqno].buffer_count + count) >
1795             lpfc_hbq_defs[hbqno]->entry_count)
1796                 count = lpfc_hbq_defs[hbqno]->entry_count -
1797                                         phba->hbqs[hbqno].buffer_count;
1798         if (!count)
1799                 return 0;
1800         /* Allocate HBQ entries */
1801         for (i = 0; i < count; i++) {
1802                 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
1803                 if (!hbq_buffer)
1804                         break;
1805                 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
1806         }
1807         /* Check whether HBQ is still in use */
1808         spin_lock_irqsave(&phba->hbalock, flags);
1809         if (!phba->hbq_in_use)
1810                 goto err;
1811         while (!list_empty(&hbq_buf_list)) {
1812                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1813                                  dbuf.list);
1814                 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
1815                                       (hbqno << 16));
1816                 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
1817                         phba->hbqs[hbqno].buffer_count++;
1818                         posted++;
1819                 } else
1820                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1821         }
1822         spin_unlock_irqrestore(&phba->hbalock, flags);
1823         return posted;
1824 err:
1825         spin_unlock_irqrestore(&phba->hbalock, flags);
1826         while (!list_empty(&hbq_buf_list)) {
1827                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1828                                  dbuf.list);
1829                 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1830         }
1831         return 0;
1832 }
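
/*
 * Illustrative sketch (not part of the driver): HBQ buffer tags carry the
 * HBQ number in the upper 16 bits and a per-HBQ sequence in the lower 16,
 * which is how the tag consumers below recover the queue from a bare tag.
 * The helper names are hypothetical.
 */
static inline u32 demo_hbq_tag_make(u32 hbqno, u32 seq)
{
        return (hbqno << 16) | (seq & 0xffff);
}

static inline u32 demo_hbq_tag_to_hbqno(u32 tag)
{
        return tag >> 16;       /* matches hbq_buf->tag >> 16 usage */
}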
1833
1834 /**
1835  * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
1836  * @phba: Pointer to HBA context object.
1837  * @qno: HBQ number.
1838  *
1839  * This function posts more buffers to the HBQ. This function
1840  * is called with no lock held. The function returns the number of HBQ
1841  * buffers successfully posted.
1842  **/
1843 int
1844 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1845 {
1846         if (phba->sli_rev == LPFC_SLI_REV4)
1847                 return 0;
1848         else
1849                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1850                                          lpfc_hbq_defs[qno]->add_count);
1851 }
1852
1853 /**
1854  * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
1855  * @phba: Pointer to HBA context object.
1856  * @qno:  HBQ queue number.
1857  *
1858  * This function is called from SLI initialization code path with
1859  * no lock held to post initial HBQ buffers to firmware. The
1860  * function returns the number of HBQ buffers successfully posted.
1861  **/
1862 static int
1863 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
1864 {
1865         if (phba->sli_rev == LPFC_SLI_REV4)
1866                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1867                                         lpfc_hbq_defs[qno]->entry_count);
1868         else
1869                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1870                                          lpfc_hbq_defs[qno]->init_count);
1871 }
1872
1873 /**
1874  * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
1875  * @rb_list: Pointer to the hbq buffer list to take the buffer from.
1876  *
1877  *
1878  * This function removes the first hbq buffer on an hbq list and returns a
1879  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
1880  **/
1881 static struct hbq_dmabuf *
1882 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1883 {
1884         struct lpfc_dmabuf *d_buf;
1885
1886         list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1887         if (!d_buf)
1888                 return NULL;
1889         return container_of(d_buf, struct hbq_dmabuf, dbuf);
1890 }
1891
1892 /**
1893  * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
1894  * @phba: Pointer to HBA context object.
1895  * @tag: Tag of the hbq buffer.
1896  *
1897  * This function takes the hbalock internally and searches
1898  * for the hbq buffer associated with the given tag in the hbq buffer
1899  * list. If it finds the hbq buffer, it returns the hbq_buffer; otherwise
1900  * it returns NULL.
1901  **/
1902 static struct hbq_dmabuf *
1903 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
1904 {
1905         struct lpfc_dmabuf *d_buf;
1906         struct hbq_dmabuf *hbq_buf;
1907         uint32_t hbqno;
1908
1909         hbqno = tag >> 16;
1910         if (hbqno >= LPFC_MAX_HBQS)
1911                 return NULL;
1912
1913         spin_lock_irq(&phba->hbalock);
1914         list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
1915                 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
1916                 if (hbq_buf->tag == tag) {
1917                         spin_unlock_irq(&phba->hbalock);
1918                         return hbq_buf;
1919                 }
1920         }
1921         spin_unlock_irq(&phba->hbalock);
1922         lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
1923                         "1803 Bad hbq tag. Data: x%x x%x\n",
1924                         tag, phba->hbqs[tag >> 16].buffer_count);
1925         return NULL;
1926 }
1927
1928 /**
1929  * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
1930  * @phba: Pointer to HBA context object.
1931  * @hbq_buffer: Pointer to HBQ buffer.
1932  *
1933  * This function is called with the hbalock held. It gives the
1934  * hbq buffer back to the firmware. If the HBQ does not have space to
1935  * post the buffer, it will free the buffer.
1936  **/
1937 void
1938 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
1939 {
1940         uint32_t hbqno;
1941
1942         if (hbq_buffer) {
1943                 hbqno = hbq_buffer->tag >> 16;
1944                 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
1945                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1946         }
1947 }
1948
1949 /**
1950  * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate command
1951  * @mbxCommand: mailbox command code.
1952  *
1953  * This function is called by the mailbox event handler function to verify
1954  * that the completed mailbox command is a legitimate mailbox command. If the
1955  * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
1956  * and the mailbox event handler will take the HBA offline.
1957  **/
1958 static int
1959 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1960 {
1961         uint8_t ret;
1962
1963         switch (mbxCommand) {
1964         case MBX_LOAD_SM:
1965         case MBX_READ_NV:
1966         case MBX_WRITE_NV:
1967         case MBX_WRITE_VPARMS:
1968         case MBX_RUN_BIU_DIAG:
1969         case MBX_INIT_LINK:
1970         case MBX_DOWN_LINK:
1971         case MBX_CONFIG_LINK:
1972         case MBX_CONFIG_RING:
1973         case MBX_RESET_RING:
1974         case MBX_READ_CONFIG:
1975         case MBX_READ_RCONFIG:
1976         case MBX_READ_SPARM:
1977         case MBX_READ_STATUS:
1978         case MBX_READ_RPI:
1979         case MBX_READ_XRI:
1980         case MBX_READ_REV:
1981         case MBX_READ_LNK_STAT:
1982         case MBX_REG_LOGIN:
1983         case MBX_UNREG_LOGIN:
1984         case MBX_CLEAR_LA:
1985         case MBX_DUMP_MEMORY:
1986         case MBX_DUMP_CONTEXT:
1987         case MBX_RUN_DIAGS:
1988         case MBX_RESTART:
1989         case MBX_UPDATE_CFG:
1990         case MBX_DOWN_LOAD:
1991         case MBX_DEL_LD_ENTRY:
1992         case MBX_RUN_PROGRAM:
1993         case MBX_SET_MASK:
1994         case MBX_SET_VARIABLE:
1995         case MBX_UNREG_D_ID:
1996         case MBX_KILL_BOARD:
1997         case MBX_CONFIG_FARP:
1998         case MBX_BEACON:
1999         case MBX_LOAD_AREA:
2000         case MBX_RUN_BIU_DIAG64:
2001         case MBX_CONFIG_PORT:
2002         case MBX_READ_SPARM64:
2003         case MBX_READ_RPI64:
2004         case MBX_REG_LOGIN64:
2005         case MBX_READ_TOPOLOGY:
2006         case MBX_WRITE_WWN:
2007         case MBX_SET_DEBUG:
2008         case MBX_LOAD_EXP_ROM:
2009         case MBX_ASYNCEVT_ENABLE:
2010         case MBX_REG_VPI:
2011         case MBX_UNREG_VPI:
2012         case MBX_HEARTBEAT:
2013         case MBX_PORT_CAPABILITIES:
2014         case MBX_PORT_IOV_CONTROL:
2015         case MBX_SLI4_CONFIG:
2016         case MBX_SLI4_REQ_FTRS:
2017         case MBX_REG_FCFI:
2018         case MBX_UNREG_FCFI:
2019         case MBX_REG_VFI:
2020         case MBX_UNREG_VFI:
2021         case MBX_INIT_VPI:
2022         case MBX_INIT_VFI:
2023         case MBX_RESUME_RPI:
2024         case MBX_READ_EVENT_LOG_STATUS:
2025         case MBX_READ_EVENT_LOG:
2026         case MBX_SECURITY_MGMT:
2027         case MBX_AUTH_PORT:
2028                 ret = mbxCommand;
2029                 break;
2030         default:
2031                 ret = MBX_SHUTDOWN;
2032                 break;
2033         }
2034         return ret;
2035 }
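
/*
 * Illustrative sketch (not part of the driver): how a caller consumes the
 * whitelist above. Any command the switch does not recognize comes back as
 * MBX_SHUTDOWN, which the event handler treats as fatal. demo_mbx_known is
 * hypothetical.
 */
static bool demo_mbx_known(uint8_t cmd)
{
        return lpfc_sli_chk_mbx_command(cmd) != MBX_SHUTDOWN;
}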
2036
2037 /**
2038  * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2039  * @phba: Pointer to HBA context object.
2040  * @pmboxq: Pointer to mailbox command.
2041  *
2042  * This is the completion handler for mailbox commands issued from the
2043  * lpfc_sli_issue_mbox_wait function. This function is called by the
2044  * mailbox event handler function with no lock held. This function
2045  * will wake up the thread waiting on the wait queue pointed to by
2046  * context1 of the mailbox.
2047  **/
2048 void
2049 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2050 {
2051         wait_queue_head_t *pdone_q;
2052         unsigned long drvr_flag;
2053
2054         /*
2055          * If pdone_q is empty, the driver thread gave up waiting and
2056          * continued running.
2057          */
2058         pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2059         spin_lock_irqsave(&phba->hbalock, drvr_flag);
2060         pdone_q = (wait_queue_head_t *) pmboxq->context1;
2061         if (pdone_q)
2062                 wake_up_interruptible(pdone_q);
2063         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2064         return;
2065 }
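
/*
 * Illustrative sketch (not part of the driver): the flag-then-wake handshake
 * used by lpfc_sli_wake_mbox_wait. The waiter clears its wait-queue pointer
 * under the same lock before giving up, so the waker can never dereference
 * a stale pointer. All demo_* names are hypothetical.
 */
static DEFINE_SPINLOCK(demo_wait_lock);
static wait_queue_head_t *demo_waiter;  /* NULL once the waiter gives up */
static int demo_done;

static void demo_wake(void)
{
        unsigned long flags;

        demo_done = 1;                  /* publish the result first */
        spin_lock_irqsave(&demo_wait_lock, flags);
        if (demo_waiter)
                wake_up_interruptible(demo_waiter);
        spin_unlock_irqrestore(&demo_wait_lock, flags);
}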
2066
2067
2068 /**
2069  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2070  * @phba: Pointer to HBA context object.
2071  * @pmb: Pointer to mailbox object.
2072  *
2073  * This function is the default mailbox completion handler. It
2074  * frees the memory resources associated with the completed mailbox
2075  * command. If the completed command is a REG_LOGIN mailbox command,
2076  * this function will issue a UREG_LOGIN to re-claim the RPI.
2077  **/
2078 void
2079 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2080 {
2081         struct lpfc_vport  *vport = pmb->vport;
2082         struct lpfc_dmabuf *mp;
2083         struct lpfc_nodelist *ndlp;
2084         struct Scsi_Host *shost;
2085         uint16_t rpi, vpi;
2086         int rc;
2087
2088         mp = (struct lpfc_dmabuf *) (pmb->context1);
2089
2090         if (mp) {
2091                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2092                 kfree(mp);
2093         }
2094
2095         /*
2096          * If a REG_LOGIN succeeded after the node was destroyed or the
2097          * node is in re-discovery, the driver needs to clean up the RPI.
2098          */
2099         if (!(phba->pport->load_flag & FC_UNLOADING) &&
2100             pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2101             !pmb->u.mb.mbxStatus) {
2102                 rpi = pmb->u.mb.un.varWords[0];
2103                 vpi = pmb->u.mb.un.varRegLogin.vpi;
2104                 lpfc_unreg_login(phba, vpi, rpi, pmb);
2105                 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2106                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2107                 if (rc != MBX_NOT_FINISHED)
2108                         return;
2109         }
2110
2111         if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2112                 !(phba->pport->load_flag & FC_UNLOADING) &&
2113                 !pmb->u.mb.mbxStatus) {
2114                 shost = lpfc_shost_from_vport(vport);
2115                 spin_lock_irq(shost->host_lock);
2116                 vport->vpi_state |= LPFC_VPI_REGISTERED;
2117                 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2118                 spin_unlock_irq(shost->host_lock);
2119         }
2120
2121         if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2122                 ndlp = (struct lpfc_nodelist *)pmb->context2;
2123                 lpfc_nlp_put(ndlp);
2124                 pmb->context2 = NULL;
2125         }
2126
2127         /* Check security permission status on INIT_LINK mailbox command */
2128         if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2129             (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2130                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2131                                 "2860 SLI authentication is required "
2132                                 "for INIT_LINK but has not done yet\n");
2133
2134         if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2135                 lpfc_sli4_mbox_cmd_free(phba, pmb);
2136         else
2137                 mempool_free(pmb, phba->mbox_mem_pool);
2138 }
2139
2140 /**
2141  * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2142  * @phba: Pointer to HBA context object.
2143  *
2144  * This function is called with no lock held. This function processes all
2145  * the completed mailbox commands and gives them to the upper layers. The
2146  * interrupt service routine processes the mailbox completion interrupt,
2147  * adds completed mailbox commands to the mboxq_cmpl queue and signals the
2148  * worker thread. The worker thread calls lpfc_sli_handle_mb_event, which
2149  * returns the completed mailbox commands in the mboxq_cmpl queue to the
2150  * upper layers. It does so by calling the
2151  * completion handler function of each mailbox.
2152  **/
2153 int
2154 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2155 {
2156         MAILBOX_t *pmbox;
2157         LPFC_MBOXQ_t *pmb;
2158         int rc;
2159         LIST_HEAD(cmplq);
2160
2161         phba->sli.slistat.mbox_event++;
2162
2163         /* Get all completed mailbox buffers into the cmplq */
2164         spin_lock_irq(&phba->hbalock);
2165         list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2166         spin_unlock_irq(&phba->hbalock);
2167
2168         /* Get a Mailbox buffer to setup mailbox commands for callback */
2169         do {
2170                 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2171                 if (pmb == NULL)
2172                         break;
2173
2174                 pmbox = &pmb->u.mb;
2175
2176                 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2177                         if (pmb->vport) {
2178                                 lpfc_debugfs_disc_trc(pmb->vport,
2179                                         LPFC_DISC_TRC_MBOX_VPORT,
2180                                         "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2181                                         (uint32_t)pmbox->mbxCommand,
2182                                         pmbox->un.varWords[0],
2183                                         pmbox->un.varWords[1]);
2184                         }
2185                         else {
2186                                 lpfc_debugfs_disc_trc(phba->pport,
2187                                         LPFC_DISC_TRC_MBOX,
2188                                         "MBOX cmpl:       cmd:x%x mb:x%x x%x",
2189                                         (uint32_t)pmbox->mbxCommand,
2190                                         pmbox->un.varWords[0],
2191                                         pmbox->un.varWords[1]);
2192                         }
2193                 }
2194
2195                 /*
2196                  * It is a fatal error if an unknown mbox command completes.
2197                  */
2198                 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2199                     MBX_SHUTDOWN) {
2200                         /* Unknown mailbox command compl */
2201                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2202                                         "(%d):0323 Unknown Mailbox command "
2203                                         "x%x (x%x/x%x) Cmpl\n",
2204                                         pmb->vport ? pmb->vport->vpi : 0,
2205                                         pmbox->mbxCommand,
2206                                         lpfc_sli_config_mbox_subsys_get(phba,
2207                                                                         pmb),
2208                                         lpfc_sli_config_mbox_opcode_get(phba,
2209                                                                         pmb));
2210                         phba->link_state = LPFC_HBA_ERROR;
2211                         phba->work_hs = HS_FFER3;
2212                         lpfc_handle_eratt(phba);
2213                         continue;
2214                 }
2215
2216                 if (pmbox->mbxStatus) {
2217                         phba->sli.slistat.mbox_stat_err++;
2218                         if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2219                                 /* Mbox cmd cmpl error - RETRYing */
2220                                 lpfc_printf_log(phba, KERN_INFO,
2221                                         LOG_MBOX | LOG_SLI,
2222                                         "(%d):0305 Mbox cmd cmpl "
2223                                         "error - RETRYing Data: x%x "
2224                                         "(x%x/x%x) x%x x%x x%x\n",
2225                                         pmb->vport ? pmb->vport->vpi : 0,
2226                                         pmbox->mbxCommand,
2227                                         lpfc_sli_config_mbox_subsys_get(phba,
2228                                                                         pmb),
2229                                         lpfc_sli_config_mbox_opcode_get(phba,
2230                                                                         pmb),
2231                                         pmbox->mbxStatus,
2232                                         pmbox->un.varWords[0],
2233                                         pmb->vport->port_state);
2234                                 pmbox->mbxStatus = 0;
2235                                 pmbox->mbxOwner = OWN_HOST;
2236                                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2237                                 if (rc != MBX_NOT_FINISHED)
2238                                         continue;
2239                         }
2240                 }
2241
2242                 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2243                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2244                                 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2245                                 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
2246                                 pmb->vport ? pmb->vport->vpi : 0,
2247                                 pmbox->mbxCommand,
2248                                 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2249                                 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2250                                 pmb->mbox_cmpl,
2251                                 *((uint32_t *) pmbox),
2252                                 pmbox->un.varWords[0],
2253                                 pmbox->un.varWords[1],
2254                                 pmbox->un.varWords[2],
2255                                 pmbox->un.varWords[3],
2256                                 pmbox->un.varWords[4],
2257                                 pmbox->un.varWords[5],
2258                                 pmbox->un.varWords[6],
2259                                 pmbox->un.varWords[7]);
2260
2261                 if (pmb->mbox_cmpl)
2262                         pmb->mbox_cmpl(phba,pmb);
2263         } while (1);
2264         return 0;
2265 }
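
/*
 * Illustrative sketch (not part of the driver): the splice-then-process
 * pattern used by lpfc_sli_handle_mb_event. The shared completion list is
 * emptied onto a private list in one short critical section, after which
 * each entry can be handled (and may even re-issue commands) without
 * holding the lock. demo_drain_completions is hypothetical.
 */
static void demo_drain_completions(spinlock_t *lock, struct list_head *shared)
{
        LIST_HEAD(local);
        struct list_head *pos, *tmp;

        spin_lock_irq(lock);
        list_splice_init(shared, &local);       /* O(1); shared now empty */
        spin_unlock_irq(lock);

        list_for_each_safe(pos, tmp, &local) {
                list_del_init(pos);
                /* process the entry without the lock held */
        }
}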
2266
2267 /**
2268  * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2269  * @phba: Pointer to HBA context object.
2270  * @pring: Pointer to driver SLI ring object.
2271  * @tag: buffer tag.
2272  *
2273  * This function is called with no lock held. When the QUE_BUFTAG_BIT
2274  * is set in the tag, the buffer was posted for a particular exchange
2275  * and the function will return the buffer without replacing it.
2276  * If the buffer is for unsolicited ELS or CT traffic, this function
2277  * returns the buffer and also posts another buffer to the firmware.
2278  **/
2279 static struct lpfc_dmabuf *
2280 lpfc_sli_get_buff(struct lpfc_hba *phba,
2281                   struct lpfc_sli_ring *pring,
2282                   uint32_t tag)
2283 {
2284         struct hbq_dmabuf *hbq_entry;
2285
2286         if (tag & QUE_BUFTAG_BIT)
2287                 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2288         hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2289         if (!hbq_entry)
2290                 return NULL;
2291         return &hbq_entry->dbuf;
2292 }
2293
2294 /**
2295  * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2296  * @phba: Pointer to HBA context object.
2297  * @pring: Pointer to driver SLI ring object.
2298  * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2299  * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2300  * @fch_type: the type for the first frame of the sequence.
2301  *
2302  * This function is called with no lock held. This function uses the r_ctl and
2303  * type of the received sequence to find the correct callback function for
2304  * processing the sequence.
2305  **/
2306 static int
2307 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2308                          struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2309                          uint32_t fch_type)
2310 {
2311         int i;
2312
2313         /* Unsolicited responses */
2314         if (pring->prt[0].profile) {
2315                 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2316                         (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2317                                                                         saveq);
2318                 return 1;
2319         }
2320         /* We must search, based on rctl / type
2321            for the right routine */
2322         for (i = 0; i < pring->num_mask; i++) {
2323                 if ((pring->prt[i].rctl == fch_r_ctl) &&
2324                     (pring->prt[i].type == fch_type)) {
2325                         if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2326                                 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2327                                                 (phba, pring, saveq);
2328                         return 1;
2329                 }
2330         }
2331         return 0;
2332 }
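
/*
 * Illustrative sketch (not part of the driver): the mask-table dispatch that
 * lpfc_complete_unsol_iocb performs. Each ring carries an array of
 * (rctl, type, handler) triples; the first matching entry wins, and the
 * caller logs an "unexpected Rctl/Type" warning when nothing matches.
 * All demo_* names are hypothetical.
 */
struct demo_mask {
        u32 rctl;
        u32 type;
        void (*handler)(void);
};

static int demo_dispatch(const struct demo_mask *tbl, int n, u32 rctl,
                         u32 type)
{
        int i;

        for (i = 0; i < n; i++) {
                if (tbl[i].rctl == rctl && tbl[i].type == type) {
                        if (tbl[i].handler)
                                tbl[i].handler();
                        return 1;       /* consumed */
                }
        }
        return 0;                       /* caller logs the mismatch */
}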
2333
2334 /**
2335  * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
2336  * @phba: Pointer to HBA context object.
2337  * @pring: Pointer to driver SLI ring object.
2338  * @saveq: Pointer to the unsolicited iocb.
2339  *
2340  * This function is called with no lock held by the ring event handler
2341  * when there is an unsolicited iocb posted to the response ring by the
2342  * firmware. This function gets the buffer associated with the iocbs
2343  * and calls the event handler for the ring. This function handles both
2344  * qring buffers and hbq buffers.
2345  * When the function returns 1 the caller can free the iocb object otherwise
2346  * upper layer functions will free the iocb objects.
2347  **/
2348 static int
2349 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2350                             struct lpfc_iocbq *saveq)
2351 {
2352         IOCB_t           * irsp;
2353         WORD5            * w5p;
2354         uint32_t           Rctl, Type;
2355         uint32_t           match;
2356         struct lpfc_iocbq *iocbq;
2357         struct lpfc_dmabuf *dmzbuf;
2358
2359         match = 0;
2360         irsp = &(saveq->iocb);
2361
2362         if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2363                 if (pring->lpfc_sli_rcv_async_status)
2364                         pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2365                 else
2366                         lpfc_printf_log(phba,
2367                                         KERN_WARNING,
2368                                         LOG_SLI,
2369                                         "0316 Ring %d handler: unexpected "
2370                                         "ASYNC_STATUS iocb received evt_code "
2371                                         "0x%x\n",
2372                                         pring->ringno,
2373                                         irsp->un.asyncstat.evt_code);
2374                 return 1;
2375         }
2376
2377         if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2378                 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2379                 if (irsp->ulpBdeCount > 0) {
2380                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2381                                         irsp->un.ulpWord[3]);
2382                         lpfc_in_buf_free(phba, dmzbuf);
2383                 }
2384
2385                 if (irsp->ulpBdeCount > 1) {
2386                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2387                                         irsp->unsli3.sli3Words[3]);
2388                         lpfc_in_buf_free(phba, dmzbuf);
2389                 }
2390
2391                 if (irsp->ulpBdeCount > 2) {
2392                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2393                                 irsp->unsli3.sli3Words[7]);
2394                         lpfc_in_buf_free(phba, dmzbuf);
2395                 }
2396
2397                 return 1;
2398         }
2399
2400         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2401                 if (irsp->ulpBdeCount != 0) {
2402                         saveq->context2 = lpfc_sli_get_buff(phba, pring,
2403                                                 irsp->un.ulpWord[3]);
2404                         if (!saveq->context2)
2405                                 lpfc_printf_log(phba,
2406                                         KERN_ERR,
2407                                         LOG_SLI,
2408                                         "0341 Ring %d Cannot find buffer for "
2409                                         "an unsolicited iocb. tag 0x%x\n",
2410                                         pring->ringno,
2411                                         irsp->un.ulpWord[3]);
2412                 }
2413                 if (irsp->ulpBdeCount == 2) {
2414                         saveq->context3 = lpfc_sli_get_buff(phba, pring,
2415                                                 irsp->unsli3.sli3Words[7]);
2416                         if (!saveq->context3)
2417                                 lpfc_printf_log(phba,
2418                                         KERN_ERR,
2419                                         LOG_SLI,
2420                                         "0342 Ring %d Cannot find buffer for an"
2421                                         " unsolicited iocb. tag 0x%x\n",
2422                                         pring->ringno,
2423                                         irsp->unsli3.sli3Words[7]);
2424                 }
2425                 list_for_each_entry(iocbq, &saveq->list, list) {
2426                         irsp = &(iocbq->iocb);
2427                         if (irsp->ulpBdeCount != 0) {
2428                                 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2429                                                         irsp->un.ulpWord[3]);
2430                                 if (!iocbq->context2)
2431                                         lpfc_printf_log(phba,
2432                                                 KERN_ERR,
2433                                                 LOG_SLI,
2434                                                 "0343 Ring %d Cannot find "
2435                                                 "buffer for an unsolicited iocb"
2436                                                 ". tag 0x%x\n", pring->ringno,
2437                                                 irsp->un.ulpWord[3]);
2438                         }
2439                         if (irsp->ulpBdeCount == 2) {
2440                                 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2441                                                 irsp->unsli3.sli3Words[7]);
2442                                 if (!iocbq->context3)
2443                                         lpfc_printf_log(phba,
2444                                                 KERN_ERR,
2445                                                 LOG_SLI,
2446                                                 "0344 Ring %d Cannot find "
2447                                                 "buffer for an unsolicited "
2448                                                 "iocb. tag 0x%x\n",
2449                                                 pring->ringno,
2450                                                 irsp->unsli3.sli3Words[7]);
2451                         }
2452                 }
2453         }
2454         if (irsp->ulpBdeCount != 0 &&
2455             (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2456              irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2457                 int found = 0;
2458
2459                 /* search continue save q for same XRI */
2460                 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2461                         if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2462                                 saveq->iocb.unsli3.rcvsli3.ox_id) {
2463                                 list_add_tail(&saveq->list, &iocbq->list);
2464                                 found = 1;
2465                                 break;
2466                         }
2467                 }
2468                 if (!found)
2469                         list_add_tail(&saveq->clist,
2470                                       &pring->iocb_continue_saveq);
2471                 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2472                         list_del_init(&iocbq->clist);
2473                         saveq = iocbq;
2474                         irsp = &(saveq->iocb);
2475                 } else
2476                         return 0;
2477         }
2478         if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2479             (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2480             (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2481                 Rctl = FC_RCTL_ELS_REQ;
2482                 Type = FC_TYPE_ELS;
2483         } else {
2484                 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2485                 Rctl = w5p->hcsw.Rctl;
2486                 Type = w5p->hcsw.Type;
2487
2488                 /* Firmware Workaround */
2489                 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2490                         (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2491                          irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2492                         Rctl = FC_RCTL_ELS_REQ;
2493                         Type = FC_TYPE_ELS;
2494                         w5p->hcsw.Rctl = Rctl;
2495                         w5p->hcsw.Type = Type;
2496                 }
2497         }
2498
2499         if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
2500                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2501                                 "0313 Ring %d handler: unexpected Rctl x%x "
2502                                 "Type x%x received\n",
2503                                 pring->ringno, Rctl, Type);
2504
2505         return 1;
2506 }
2507
2508 /**
2509  * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
2510  * @phba: Pointer to HBA context object.
2511  * @pring: Pointer to driver SLI ring object.
2512  * @prspiocb: Pointer to response iocb object.
2513  *
2514  * This function looks up the iocb_lookup table to get the command iocb
2515  * corresponding to the given response iocb using the iotag of the
2516  * response iocb. This function is called with the hbalock held.
2517  * This function returns the command iocb object if it finds the command
2518  * iocb else returns NULL.
2519  **/
2520 static struct lpfc_iocbq *
2521 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2522                       struct lpfc_sli_ring *pring,
2523                       struct lpfc_iocbq *prspiocb)
2524 {
2525         struct lpfc_iocbq *cmd_iocb = NULL;
2526         uint16_t iotag;
2527
2528         iotag = prspiocb->iocb.ulpIoTag;
2529
2530         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2531                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2532                 list_del_init(&cmd_iocb->list);
2533                 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
2534                         pring->txcmplq_cnt--;
2535                         cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
2536                 }
2537                 return cmd_iocb;
2538         }
2539
2540         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2541                         "0317 iotag x%x is out off "
2542                         "range: max iotag x%x wd0 x%x\n",
2543                         iotag, phba->sli.last_iotag,
2544                         *(((uint32_t *) &prspiocb->iocb) + 7));
2545         return NULL;
2546 }
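
/*
 * Illustrative sketch (not part of the driver): the O(1) completion lookup
 * above. The response carries the iotag the driver stamped into the command,
 * so finding the original request is a bounds check plus an array index.
 * demo_iotag_lookup is hypothetical.
 */
static void *demo_iotag_lookup(void **table, u16 last_iotag, u16 iotag)
{
        if (iotag == 0 || iotag > last_iotag)
                return NULL;    /* corrupt or stale tag: log and drop */
        return table[iotag];
}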
2547
2548 /**
2549  * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2550  * @phba: Pointer to HBA context object.
2551  * @pring: Pointer to driver SLI ring object.
2552  * @iotag: IOCB tag.
2553  *
2554  * This function looks up the iocb_lookup table to get the command iocb
2555  * corresponding to the given iotag. This function is called with the
2556  * hbalock held.
2557  * This function returns the command iocb object if it finds the command
2558  * iocb else returns NULL.
2559  **/
2560 static struct lpfc_iocbq *
2561 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2562                              struct lpfc_sli_ring *pring, uint16_t iotag)
2563 {
2564         struct lpfc_iocbq *cmd_iocb;
2565
2566         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2567                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2568                 list_del_init(&cmd_iocb->list);
2569                 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
2570                         cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
2571                         pring->txcmplq_cnt--;
2572                 }
2573                 return cmd_iocb;
2574         }
2575
2576         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2577                         "0372 iotag x%x is out off range: max iotag (x%x)\n",
2578                         iotag, phba->sli.last_iotag);
2579         return NULL;
2580 }
2581
2582 /**
2583  * lpfc_sli_process_sol_iocb - process solicited iocb completion
2584  * @phba: Pointer to HBA context object.
2585  * @pring: Pointer to driver SLI ring object.
2586  * @saveq: Pointer to the response iocb to be processed.
2587  *
2588  * This function is called by the ring event handler for non-fcp
2589  * rings when there is a new response iocb in the response ring.
2590  * The caller is not required to hold any locks. This function
2591  * gets the command iocb associated with the response iocb and
2592  * calls the completion handler for the command iocb. If there
2593  * is no completion handler, the function will free the resources
2594  * associated with command iocb. If the response iocb is for
2595  * an already aborted command iocb, the status of the completion
2596  * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
2597  * This function always returns 1.
2598  **/
2599 static int
2600 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2601                           struct lpfc_iocbq *saveq)
2602 {
2603         struct lpfc_iocbq *cmdiocbp;
2604         int rc = 1;
2605         unsigned long iflag;
2606
2607         /* Based on the iotag field, get the cmd IOCB from the txcmplq */
2608         spin_lock_irqsave(&phba->hbalock, iflag);
2609         cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
2610         spin_unlock_irqrestore(&phba->hbalock, iflag);
2611
2612         if (cmdiocbp) {
2613                 if (cmdiocbp->iocb_cmpl) {
2614                         /*
2615                          * If an ELS command failed send an event to mgmt
2616                          * application.
2617                          */
2618                         if (saveq->iocb.ulpStatus &&
2619                              (pring->ringno == LPFC_ELS_RING) &&
2620                              (cmdiocbp->iocb.ulpCommand ==
2621                                 CMD_ELS_REQUEST64_CR))
2622                                 lpfc_send_els_failure_event(phba,
2623                                         cmdiocbp, saveq);
2624
2625                         /*
2626                          * Post all ELS completions to the worker thread.
2627                          * All other are passed to the completion callback.
2628                          */
2629                         if (pring->ringno == LPFC_ELS_RING) {
2630                                 if ((phba->sli_rev < LPFC_SLI_REV4) &&
2631                                     (cmdiocbp->iocb_flag &
2632                                                         LPFC_DRIVER_ABORTED)) {
2633                                         spin_lock_irqsave(&phba->hbalock,
2634                                                           iflag);
2635                                         cmdiocbp->iocb_flag &=
2636                                                 ~LPFC_DRIVER_ABORTED;
2637                                         spin_unlock_irqrestore(&phba->hbalock,
2638                                                                iflag);
2639                                         saveq->iocb.ulpStatus =
2640                                                 IOSTAT_LOCAL_REJECT;
2641                                         saveq->iocb.un.ulpWord[4] =
2642                                                 IOERR_SLI_ABORTED;
2643
2644                                         /* Firmware could still be in progress
2645                                          * of DMAing payload, so don't free data
2646                                          * buffer till after a hbeat.
2647                                          */
2648                                         spin_lock_irqsave(&phba->hbalock,
2649                                                           iflag);
2650                                         saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
2651                                         spin_unlock_irqrestore(&phba->hbalock,
2652                                                                iflag);
2653                                 }
2654                                 if (phba->sli_rev == LPFC_SLI_REV4) {
2655                                         if (saveq->iocb_flag &
2656                                             LPFC_EXCHANGE_BUSY) {
2657                                                 /* Set cmdiocb flag for the
2658                                                  * exchange busy so sgl (xri)
2659                                                  * will not be released until
2660                                                  * the abort xri is received
2661                                                  * from hba.
2662                                                  */
2663                                                 spin_lock_irqsave(
2664                                                         &phba->hbalock, iflag);
2665                                                 cmdiocbp->iocb_flag |=
2666                                                         LPFC_EXCHANGE_BUSY;
2667                                                 spin_unlock_irqrestore(
2668                                                         &phba->hbalock, iflag);
2669                                         }
2670                                         if (cmdiocbp->iocb_flag &
2671                                             LPFC_DRIVER_ABORTED) {
2672                                                 /*
2673                                                  * Clear LPFC_DRIVER_ABORTED
2674                                                  * bit in case it was driver
2675                                                  * initiated abort.
2676                                                  */
2677                                                 spin_lock_irqsave(
2678                                                         &phba->hbalock, iflag);
2679                                                 cmdiocbp->iocb_flag &=
2680                                                         ~LPFC_DRIVER_ABORTED;
2681                                                 spin_unlock_irqrestore(
2682                                                         &phba->hbalock, iflag);
2683                                                 cmdiocbp->iocb.ulpStatus =
2684                                                         IOSTAT_LOCAL_REJECT;
2685                                                 cmdiocbp->iocb.un.ulpWord[4] =
2686                                                         IOERR_ABORT_REQUESTED;
2687                                                 /*
2688                                                  * For SLI4, irsiocb contains
2689                                                  * NO_XRI in sli_xritag, it
2690                                                  * shall not affect releasing
2691                                                  * sgl (xri) process.
2692                                                  */
2693                                                 saveq->iocb.ulpStatus =
2694                                                         IOSTAT_LOCAL_REJECT;
2695                                                 saveq->iocb.un.ulpWord[4] =
2696                                                         IOERR_SLI_ABORTED;
2697                                                 spin_lock_irqsave(
2698                                                         &phba->hbalock, iflag);
2699                                                 saveq->iocb_flag |=
2700                                                         LPFC_DELAY_MEM_FREE;
2701                                                 spin_unlock_irqrestore(
2702                                                         &phba->hbalock, iflag);
2703                                         }
2704                                 }
2705                         }
2706                         (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
2707                 } else
2708                         lpfc_sli_release_iocbq(phba, cmdiocbp);
2709         } else {
2710                 /*
2711                  * Unknown initiating command based on the response iotag.
2712                  * This could be the case on the ELS ring because of
2713                  * lpfc_els_abort().
2714                  */
2715                 if (pring->ringno != LPFC_ELS_RING) {
2716                         /*
2717                          * Ring <ringno> handler: unexpected completion IoTag
2718                          * <IoTag>
2719                          */
2720                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2721                                          "0322 Ring %d handler: "
2722                                          "unexpected completion IoTag x%x "
2723                                          "Data: x%x x%x x%x x%x\n",
2724                                          pring->ringno,
2725                                          saveq->iocb.ulpIoTag,
2726                                          saveq->iocb.ulpStatus,
2727                                          saveq->iocb.un.ulpWord[4],
2728                                          saveq->iocb.ulpCommand,
2729                                          saveq->iocb.ulpContext);
2730                 }
2731         }
2732
2733         return rc;
2734 }
2735
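/*
 * A minimal sketch (not driver code) of how a completion handler
 * installed in cmdiocbp->iocb_cmpl can recognize the abort remapping
 * performed above. The handler name is hypothetical; the status values
 * are the ones this function sets.
 *
 *     static void example_els_cmpl(struct lpfc_hba *phba,
 *                                  struct lpfc_iocbq *cmdiocb,
 *                                  struct lpfc_iocbq *rspiocb)
 *     {
 *             IOCB_t *irsp = &rspiocb->iocb;
 *
 *             if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
 *                 irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)
 *                     return;  [driver-initiated abort completed]
 *     }
 */
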
2736 /**
2737  * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
2738  * @phba: Pointer to HBA context object.
2739  * @pring: Pointer to driver SLI ring object.
2740  *
2741  * This function is called from the iocb ring event handlers when
2742  * put pointer is ahead of the get pointer for a ring. This function signal
2743  * an error attention condition to the worker thread and the worker
2744  * thread will transition the HBA to offline state.
2745  **/
2746 static void
2747 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2748 {
2749         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2750         /*
2751          * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
2752          * rsp ring <portRspMax>
2753          */
2754         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2755                         "0312 Ring %d handler: portRspPut %d "
2756                         "is bigger than rsp ring %d\n",
2757                         pring->ringno, le32_to_cpu(pgp->rspPutInx),
2758                         pring->numRiocb);
2759
2760         phba->link_state = LPFC_HBA_ERROR;
2761
2762         /*
2763          * All error attention handlers are posted to
2764          * worker thread
2765          */
2766         phba->work_ha |= HA_ERATT;
2767         phba->work_hs = HS_FFER3;
2768
2769         lpfc_worker_wake_up(phba);
2770
2771         return;
2772 }
2773
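/*
 * For reference, the ring event handlers guard against this condition
 * before consuming entries; a distilled sketch of that check (mirroring
 * lpfc_sli_handle_fast_ring_event below):
 *
 *     portRspPut = le32_to_cpu(pgp->rspPutInx);
 *     if (unlikely(portRspPut >= pring->numRiocb)) {
 *             lpfc_sli_rsp_pointers_error(phba, pring);
 *             return 1;
 *     }
 */
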
2774 /**
2775  * lpfc_poll_eratt - Error attention polling timer timeout handler
2776  * @ptr: Pointer to address of HBA context object.
2777  *
2778  * This function is invoked by the Error Attention polling timer when the
2779  * timer times out. It will check the SLI Error Attention register for
2780  * possible attention events. If so, it will post an Error Attention event
2781  * and wake up worker thread to process it. Otherwise, it will set up the
2782  * Error Attention polling timer for the next poll.
2783  **/
2784 void lpfc_poll_eratt(unsigned long ptr)
2785 {
2786         struct lpfc_hba *phba;
2787         uint32_t eratt = 0;
2788
2789         phba = (struct lpfc_hba *)ptr;
2790
2791         /* Check chip HA register for error event */
2792         eratt = lpfc_sli_check_eratt(phba);
2793
2794         if (eratt)
2795                 /* Tell the worker thread there is work to do */
2796                 lpfc_worker_wake_up(phba);
2797         else
2798                 /* Restart the timer for next eratt poll */
2799                 mod_timer(&phba->eratt_poll, jiffies +
2800                                         HZ * LPFC_ERATT_POLL_INTERVAL);
2801         return;
2802 }
2803
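/*
 * A minimal sketch of how this handler is armed, assuming the timer API
 * of this kernel generation (the driver's init path does roughly this;
 * not a verbatim call site):
 *
 *     init_timer(&phba->eratt_poll);
 *     phba->eratt_poll.function = lpfc_poll_eratt;
 *     phba->eratt_poll.data = (unsigned long)phba;
 *     mod_timer(&phba->eratt_poll,
 *               jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
 */
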
2804
2805 /**
2806  * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
2807  * @phba: Pointer to HBA context object.
2808  * @pring: Pointer to driver SLI ring object.
2809  * @mask: Host attention register mask for this ring.
2810  *
2811  * This function is called from the interrupt context when there is a ring
2812  * event for the fcp ring. The caller does not hold any lock.
2813  * The function processes each response iocb in the response ring until it
2814  * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
2815  * LE bit set. The function will call the completion handler of the command iocb
2816  * if the response iocb indicates a completion for a command iocb or it is
2817  * an abort completion. The function will call lpfc_sli_process_unsol_iocb
2818  * function if this is an unsolicited iocb.
2819  * This routine presumes LPFC_FCP_RING handling and doesn't bother
2820  * to check it explicitly.
2821  */
2822 int
2823 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2824                                 struct lpfc_sli_ring *pring, uint32_t mask)
2825 {
2826         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2827         IOCB_t *irsp = NULL;
2828         IOCB_t *entry = NULL;
2829         struct lpfc_iocbq *cmdiocbq = NULL;
2830         struct lpfc_iocbq rspiocbq;
2831         uint32_t status;
2832         uint32_t portRspPut, portRspMax;
2833         int rc = 1;
2834         lpfc_iocb_type type;
2835         unsigned long iflag;
2836         uint32_t rsp_cmpl = 0;
2837
2838         spin_lock_irqsave(&phba->hbalock, iflag);
2839         pring->stats.iocb_event++;
2840
2841         /*
2842          * The next available response entry should never exceed the maximum
2843          * entries.  If it does, treat it as an adapter hardware error.
2844          */
2845         portRspMax = pring->numRiocb;
2846         portRspPut = le32_to_cpu(pgp->rspPutInx);
2847         if (unlikely(portRspPut >= portRspMax)) {
2848                 lpfc_sli_rsp_pointers_error(phba, pring);
2849                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2850                 return 1;
2851         }
2852         if (phba->fcp_ring_in_use) {
2853                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2854                 return 1;
2855         } else
2856                 phba->fcp_ring_in_use = 1;
2857
2858         rmb();
2859         while (pring->rspidx != portRspPut) {
2860                 /*
2861                  * Fetch an entry off the ring and copy it into a local data
2862                  * structure.  The copy involves a byte-swap since the
2863                  * network byte order and pci byte orders are different.
2864                  * network byte order and PCI byte order are different.
2865                 entry = lpfc_resp_iocb(phba, pring);
2866                 phba->last_completion_time = jiffies;
2867
2868                 if (++pring->rspidx >= portRspMax)
2869                         pring->rspidx = 0;
2870
2871                 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
2872                                       (uint32_t *) &rspiocbq.iocb,
2873                                       phba->iocb_rsp_size);
2874                 INIT_LIST_HEAD(&(rspiocbq.list));
2875                 irsp = &rspiocbq.iocb;
2876
2877                 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
2878                 pring->stats.iocb_rsp++;
2879                 rsp_cmpl++;
2880
2881                 if (unlikely(irsp->ulpStatus)) {
2882                         /*
2883                          * If resource errors reported from HBA, reduce
2884                          * queuedepths of the SCSI device.
2885                          */
2886                         if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2887                                 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2888                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2889                                 phba->lpfc_rampdown_queue_depth(phba);
2890                                 spin_lock_irqsave(&phba->hbalock, iflag);
2891                         }
2892
2893                         /* Rsp ring <ringno> error: IOCB */
2894                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2895                                         "0336 Rsp Ring %d error: IOCB Data: "
2896                                         "x%x x%x x%x x%x x%x x%x x%x x%x\n",
2897                                         pring->ringno,
2898                                         irsp->un.ulpWord[0],
2899                                         irsp->un.ulpWord[1],
2900                                         irsp->un.ulpWord[2],
2901                                         irsp->un.ulpWord[3],
2902                                         irsp->un.ulpWord[4],
2903                                         irsp->un.ulpWord[5],
2904                                         *(uint32_t *)&irsp->un1,
2905                                         *((uint32_t *)&irsp->un1 + 1));
2906                 }
2907
2908                 switch (type) {
2909                 case LPFC_ABORT_IOCB:
2910                 case LPFC_SOL_IOCB:
2911                         /*
2912                          * Idle exchange closed via ABTS from port.  No iocb
2913                          * resources need to be recovered.
2914                          */
2915                         if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
2916                                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2917                                                 "0333 IOCB cmd 0x%x"
2918                                                 " processed. Skipping"
2919                                                 " completion\n",
2920                                                 irsp->ulpCommand);
2921                                 break;
2922                         }
2923
2924                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
2925                                                          &rspiocbq);
2926                         if (unlikely(!cmdiocbq))
2927                                 break;
2928                         if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
2929                                 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
2930                         if (cmdiocbq->iocb_cmpl) {
2931                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2932                                 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
2933                                                       &rspiocbq);
2934                                 spin_lock_irqsave(&phba->hbalock, iflag);
2935                         }
2936                         break;
2937                 case LPFC_UNSOL_IOCB:
2938                         spin_unlock_irqrestore(&phba->hbalock, iflag);
2939                         lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
2940                         spin_lock_irqsave(&phba->hbalock, iflag);
2941                         break;
2942                 default:
2943                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2944                                 char adaptermsg[LPFC_MAX_ADPTMSG];
2945                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
2946                                 memcpy(&adaptermsg[0], (uint8_t *) irsp,
2947                                        MAX_MSG_DATA);
2948                                 dev_warn(&((phba->pcidev)->dev),
2949                                          "lpfc%d: %s\n",
2950                                          phba->brd_no, adaptermsg);
2951                         } else {
2952                                 /* Unknown IOCB command */
2953                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2954                                                 "0334 Unknown IOCB command "
2955                                                 "Data: x%x, x%x x%x x%x x%x\n",
2956                                                 type, irsp->ulpCommand,
2957                                                 irsp->ulpStatus,
2958                                                 irsp->ulpIoTag,
2959                                                 irsp->ulpContext);
2960                         }
2961                         break;
2962                 }
2963
2964                 /*
2965                  * The response IOCB has been processed.  Update the ring
2966                  * pointer in SLIM.  If the port response put pointer has not
2967                  * been updated, sync the pgp->rspPutInx and fetch the new port
2968                  * response put pointer.
2969                  */
2970                 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
2971
2972                 if (pring->rspidx == portRspPut)
2973                         portRspPut = le32_to_cpu(pgp->rspPutInx);
2974         }
2975
2976         if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
2977                 pring->stats.iocb_rsp_full++;
2978                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
2979                 writel(status, phba->CAregaddr);
2980                 readl(phba->CAregaddr);
2981         }
2982         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
2983                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
2984                 pring->stats.iocb_cmd_empty++;
2985
2986                 /* Force update of the local copy of cmdGetInx */
2987                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
2988                 lpfc_sli_resume_iocb(phba, pring);
2989
2990                 if ((pring->lpfc_sli_cmd_available))
2991                         (pring->lpfc_sli_cmd_available) (phba, pring);
2992
2993         }
2994
2995         phba->fcp_ring_in_use = 0;
2996         spin_unlock_irqrestore(&phba->hbalock, iflag);
2997         return rc;
2998 }
2999
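/*
 * The response ring above is consumed with a classic get/put index
 * protocol; one iteration distilled from the loop body:
 *
 *     entry = lpfc_resp_iocb(phba, pring);     [slot at pring->rspidx]
 *     if (++pring->rspidx >= portRspMax)       [wrap the circular ring]
 *             pring->rspidx = 0;
 *     lpfc_sli_pcimem_bcopy((uint32_t *)entry,
 *                           (uint32_t *)&rspiocbq.iocb,
 *                           phba->iocb_rsp_size);   [byte-swapping copy]
 *     writel(pring->rspidx,                    [publish new get index]
 *            &phba->host_gp[pring->ringno].rspGetInx);
 */
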
3000 /**
3001  * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3002  * @phba: Pointer to HBA context object.
3003  * @pring: Pointer to driver SLI ring object.
3004  * @rspiocbp: Pointer to driver response IOCB object.
3005  *
3006  * This function is called from the worker thread when there is a slow-path
3007  * response IOCB to process. This function chains all the response iocbs until
3008  * seeing the iocb with the LE bit set. The function will call
3009  * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3010  * completion of a command iocb. The function will call the
3011  * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3012  * The function frees the resources or calls the completion handler if this
3013  * iocb is an abort completion. The function returns NULL when the response
3014  * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3015  * this function shall chain the iocb on to the iocb_continueq and return the
3016  * response iocb passed in.
3017  **/
3018 static struct lpfc_iocbq *
3019 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3020                         struct lpfc_iocbq *rspiocbp)
3021 {
3022         struct lpfc_iocbq *saveq;
3023         struct lpfc_iocbq *cmdiocbp;
3024         struct lpfc_iocbq *next_iocb;
3025         IOCB_t *irsp = NULL;
3026         uint32_t free_saveq;
3027         uint8_t iocb_cmd_type;
3028         lpfc_iocb_type type;
3029         unsigned long iflag;
3030         int rc;
3031
3032         spin_lock_irqsave(&phba->hbalock, iflag);
3033         /* First add the response iocb to the continueq list */
3034         list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3035         pring->iocb_continueq_cnt++;
3036
3037         /* Now, determine whether the list is completed for processing */
3038         irsp = &rspiocbp->iocb;
3039         if (irsp->ulpLe) {
3040                 /*
3041                  * By default, the driver expects to free all resources
3042                  * associated with this iocb completion.
3043                  */
3044                 free_saveq = 1;
3045                 saveq = list_get_first(&pring->iocb_continueq,
3046                                        struct lpfc_iocbq, list);
3047                 irsp = &(saveq->iocb);
3048                 list_del_init(&pring->iocb_continueq);
3049                 pring->iocb_continueq_cnt = 0;
3050
3051                 pring->stats.iocb_rsp++;
3052
3053                 /*
3054                  * If resource errors reported from HBA, reduce
3055                  * queuedepths of the SCSI device.
3056                  */
3057                 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3058                     (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
3059                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3060                         phba->lpfc_rampdown_queue_depth(phba);
3061                         spin_lock_irqsave(&phba->hbalock, iflag);
3062                 }
3063
3064                 if (irsp->ulpStatus) {
3065                         /* Rsp ring <ringno> error: IOCB */
3066                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3067                                         "0328 Rsp Ring %d error: "
3068                                         "IOCB Data: "
3069                                         "x%x x%x x%x x%x "
3070                                         "x%x x%x x%x x%x "
3071                                         "x%x x%x x%x x%x "
3072                                         "x%x x%x x%x x%x\n",
3073                                         pring->ringno,
3074                                         irsp->un.ulpWord[0],
3075                                         irsp->un.ulpWord[1],
3076                                         irsp->un.ulpWord[2],
3077                                         irsp->un.ulpWord[3],
3078                                         irsp->un.ulpWord[4],
3079                                         irsp->un.ulpWord[5],
3080                                         *(((uint32_t *) irsp) + 6),
3081                                         *(((uint32_t *) irsp) + 7),
3082                                         *(((uint32_t *) irsp) + 8),
3083                                         *(((uint32_t *) irsp) + 9),
3084                                         *(((uint32_t *) irsp) + 10),
3085                                         *(((uint32_t *) irsp) + 11),
3086                                         *(((uint32_t *) irsp) + 12),
3087                                         *(((uint32_t *) irsp) + 13),
3088                                         *(((uint32_t *) irsp) + 14),
3089                                         *(((uint32_t *) irsp) + 15));
3090                 }
3091
3092                 /*
3093                  * Fetch the IOCB command type and call the correct completion
3094                  * routine. Solicited and Unsolicited IOCBs on the ELS ring
3095                  * get freed back to the lpfc_iocb_list by the discovery
3096                  * kernel thread.
3097                  */
3098                 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3099                 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3100                 switch (type) {
3101                 case LPFC_SOL_IOCB:
3102                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3103                         rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3104                         spin_lock_irqsave(&phba->hbalock, iflag);
3105                         break;
3106
3107                 case LPFC_UNSOL_IOCB:
3108                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3109                         rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3110                         spin_lock_irqsave(&phba->hbalock, iflag);
3111                         if (!rc)
3112                                 free_saveq = 0;
3113                         break;
3114
3115                 case LPFC_ABORT_IOCB:
3116                         cmdiocbp = NULL;
3117                         if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3118                                 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3119                                                                  saveq);
3120                         if (cmdiocbp) {
3121                                 /* Call the specified completion routine */
3122                                 if (cmdiocbp->iocb_cmpl) {
3123                                         spin_unlock_irqrestore(&phba->hbalock,
3124                                                                iflag);
3125                                         (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3126                                                               saveq);
3127                                         spin_lock_irqsave(&phba->hbalock,
3128                                                           iflag);
3129                                 } else
3130                                         __lpfc_sli_release_iocbq(phba,
3131                                                                  cmdiocbp);
3132                         }
3133                         break;
3134
3135                 case LPFC_UNKNOWN_IOCB:
3136                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3137                                 char adaptermsg[LPFC_MAX_ADPTMSG];
3138                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3139                                 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3140                                        MAX_MSG_DATA);
3141                                 dev_warn(&((phba->pcidev)->dev),
3142                                          "lpfc%d: %s\n",
3143                                          phba->brd_no, adaptermsg);
3144                         } else {
3145                                 /* Unknown IOCB command */
3146                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3147                                                 "0335 Unknown IOCB "
3148                                                 "command Data: x%x "
3149                                                 "x%x x%x x%x\n",
3150                                                 irsp->ulpCommand,
3151                                                 irsp->ulpStatus,
3152                                                 irsp->ulpIoTag,
3153                                                 irsp->ulpContext);
3154                         }
3155                         break;
3156                 }
3157
3158                 if (free_saveq) {
3159                         list_for_each_entry_safe(rspiocbp, next_iocb,
3160                                                  &saveq->list, list) {
3161                                 list_del(&rspiocbp->list);
3162                                 __lpfc_sli_release_iocbq(phba, rspiocbp);
3163                         }
3164                         __lpfc_sli_release_iocbq(phba, saveq);
3165                 }
3166                 rspiocbp = NULL;
3167         }
3168         spin_unlock_irqrestore(&phba->hbalock, iflag);
3169         return rspiocbp;
3170 }
3171
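/*
 * The continuation-queue logic above reduces to the following pseudo-C:
 * accumulate response iocbs until one arrives with ulpLe set, then
 * dispatch the whole chain by IOCB type.
 *
 *     list_add_tail(&rspiocbp->list, &pring->iocb_continueq);
 *     if (!rspiocbp->iocb.ulpLe)
 *             return rspiocbp;        [keep accumulating]
 *     saveq = first entry of pring->iocb_continueq;
 *     dispatch saveq by lpfc_sli_iocb_cmd_type();
 *     return NULL;                    [chain fully consumed]
 */
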
3172 /**
3173  * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3174  * @phba: Pointer to HBA context object.
3175  * @pring: Pointer to driver SLI ring object.
3176  * @mask: Host attention register mask for this ring.
3177  *
3178  * This routine wraps the actual slow_ring event process routine, calling
3179  * it through the API jump table function pointer in the lpfc_hba struct.
3180  **/
3181 void
3182 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3183                                 struct lpfc_sli_ring *pring, uint32_t mask)
3184 {
3185         phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3186 }
3187
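/*
 * An approximation of how this jump-table slot is populated during
 * driver setup (the exact assignment lives in the driver's API table
 * setup code; this switch is a sketch, not a verbatim excerpt):
 *
 *     switch (dev_grp) {
 *     case LPFC_PCI_DEV_LP:           [SLI3 device]
 *             phba->lpfc_sli_handle_slow_ring_event =
 *                             lpfc_sli_handle_slow_ring_event_s3;
 *             break;
 *     case LPFC_PCI_DEV_OC:           [SLI4 device]
 *             phba->lpfc_sli_handle_slow_ring_event =
 *                             lpfc_sli_handle_slow_ring_event_s4;
 *             break;
 *     }
 */
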
3188 /**
3189  * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3190  * @phba: Pointer to HBA context object.
3191  * @pring: Pointer to driver SLI ring object.
3192  * @mask: Host attention register mask for this ring.
3193  *
3194  * This function is called from the worker thread when there is a ring event
3195  * for non-fcp rings. The caller does not hold any lock. The function
3196  * removes each response iocb from the response ring and calls the handle
3197  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3198  **/
3199 static void
3200 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3201                                    struct lpfc_sli_ring *pring, uint32_t mask)
3202 {
3203         struct lpfc_pgp *pgp;
3204         IOCB_t *entry;
3205         IOCB_t *irsp = NULL;
3206         struct lpfc_iocbq *rspiocbp = NULL;
3207         uint32_t portRspPut, portRspMax;
3208         unsigned long iflag;
3209         uint32_t status;
3210
3211         pgp = &phba->port_gp[pring->ringno];
3212         spin_lock_irqsave(&phba->hbalock, iflag);
3213         pring->stats.iocb_event++;
3214
3215         /*
3216          * The next available response entry should never exceed the maximum
3217          * entries.  If it does, treat it as an adapter hardware error.
3218          */
3219         portRspMax = pring->numRiocb;
3220         portRspPut = le32_to_cpu(pgp->rspPutInx);
3221         if (portRspPut >= portRspMax) {
3222                 /*
3223                  * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3224                  * rsp ring <portRspMax>
3225                  */
3226                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3227                                 "0303 Ring %d handler: portRspPut %d "
3228                                 "is bigger than rsp ring %d\n",
3229                                 pring->ringno, portRspPut, portRspMax);
3230
3231                 phba->link_state = LPFC_HBA_ERROR;
3232                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3233
3234                 phba->work_hs = HS_FFER3;
3235                 lpfc_handle_eratt(phba);
3236
3237                 return;
3238         }
3239
3240         rmb();
3241         while (pring->rspidx != portRspPut) {
3242                 /*
3243                  * Build a completion list and call the appropriate handler.
3244                  * The process is to get the next available response iocb, get
3245                  * a free iocb from the list, copy the response data into the
3246                  * free iocb, insert to the continuation list, and update the
3247                  * next response index to slim.  This process makes response
3248                  * iocb's in the ring available to DMA as fast as possible but
3249                  * iocbs in the ring available to DMA as fast as possible but
3250                  * only 32 bytes, this penalty is considered small relative to
3251                  * the PCI reads for register values and a slim write.  When
3252                  * the ulpLe field is set, the entire Command has been
3253                  * received.
3254                  */
3255                 entry = lpfc_resp_iocb(phba, pring);
3256
3257                 phba->last_completion_time = jiffies;
3258                 rspiocbp = __lpfc_sli_get_iocbq(phba);
3259                 if (rspiocbp == NULL) {
3260                         printk(KERN_ERR "%s: out of buffers! Failing "
3261                                "completion.\n", __func__);
3262                         break;
3263                 }
3264
3265                 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3266                                       phba->iocb_rsp_size);
3267                 irsp = &rspiocbp->iocb;
3268
3269                 if (++pring->rspidx >= portRspMax)
3270                         pring->rspidx = 0;
3271
3272                 if (pring->ringno == LPFC_ELS_RING) {
3273                         lpfc_debugfs_slow_ring_trc(phba,
3274                         "IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
3275                                 *(((uint32_t *) irsp) + 4),
3276                                 *(((uint32_t *) irsp) + 6),
3277                                 *(((uint32_t *) irsp) + 7));
3278                 }
3279
3280                 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
3281
3282                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3283                 /* Handle the response IOCB */
3284                 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3285                 spin_lock_irqsave(&phba->hbalock, iflag);
3286
3287                 /*
3288                  * If the port response put pointer has not been updated, sync
3289                  * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3290                  * response put pointer.
3291                  */
3292                 if (pring->rspidx == portRspPut) {
3293                         portRspPut = le32_to_cpu(pgp->rspPutInx);
3294                 }
3295         } /* while (pring->rspidx != portRspPut) */
3296
3297         if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3298                 /* At least one response entry has been freed */
3299                 pring->stats.iocb_rsp_full++;
3300                 /* SET RxRE_RSP in Chip Att register */
3301                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3302                 writel(status, phba->CAregaddr);
3303                 readl(phba->CAregaddr); /* flush */
3304         }
3305         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3306                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3307                 pring->stats.iocb_cmd_empty++;
3308
3309                 /* Force update of the local copy of cmdGetInx */
3310                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
3311                 lpfc_sli_resume_iocb(phba, pring);
3312
3313                 if ((pring->lpfc_sli_cmd_available))
3314                         (pring->lpfc_sli_cmd_available) (phba, pring);
3315
3316         }
3317
3318         spin_unlock_irqrestore(&phba->hbalock, iflag);
3319         return;
3320 }
3321
3322 /**
3323  * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3324  * @phba: Pointer to HBA context object.
3325  * @pring: Pointer to driver SLI ring object.
3326  * @mask: Host attention register mask for this ring.
3327  *
3328  * This function is called from the worker thread when there is a pending
3329  * ELS response iocb on the driver internal slow-path response iocb worker
3330  * queue. The caller does not hold any lock. The function removes each
3331  * response iocb from the response worker queue and calls the handle
3332  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3333  **/
3334 static void
3335 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3336                                    struct lpfc_sli_ring *pring, uint32_t mask)
3337 {
3338         struct lpfc_iocbq *irspiocbq;
3339         struct hbq_dmabuf *dmabuf;
3340         struct lpfc_cq_event *cq_event;
3341         unsigned long iflag;
3342
3343         spin_lock_irqsave(&phba->hbalock, iflag);
3344         phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3345         spin_unlock_irqrestore(&phba->hbalock, iflag);
3346         while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3347                 /* Get the response iocb from the head of work queue */
3348                 spin_lock_irqsave(&phba->hbalock, iflag);
3349                 list_remove_head(&phba->sli4_hba.sp_queue_event,
3350                                  cq_event, struct lpfc_cq_event, list);
3351                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3352
3353                 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3354                 case CQE_CODE_COMPL_WQE:
3355                         irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3356                                                  cq_event);
3357                         /* Translate ELS WCQE to response IOCBQ */
3358                         irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3359                                                                    irspiocbq);
3360                         if (irspiocbq)
3361                                 lpfc_sli_sp_handle_rspiocb(phba, pring,
3362                                                            irspiocbq);
3363                         break;
3364                 case CQE_CODE_RECEIVE:
3365                 case CQE_CODE_RECEIVE_V1:
3366                         dmabuf = container_of(cq_event, struct hbq_dmabuf,
3367                                               cq_event);
3368                         lpfc_sli4_handle_received_buffer(phba, dmabuf);
3369                         break;
3370                 default:
3371                         break;
3372                 }
3373         }
3374 }
3375
3376 /**
3377  * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
3378  * @phba: Pointer to HBA context object.
3379  * @pring: Pointer to driver SLI ring object.
3380  *
3381  * This function aborts all iocbs in the given ring and frees all the iocb
3382  * objects in txq. This function issues an abort iocb for all the iocb commands
3383  * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3384  * the return of this function. The caller is not required to hold any locks.
3385  **/
3386 void
3387 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3388 {
3389         LIST_HEAD(completions);
3390         struct lpfc_iocbq *iocb, *next_iocb;
3391
3392         if (pring->ringno == LPFC_ELS_RING) {
3393                 lpfc_fabric_abort_hba(phba);
3394         }
3395
3396         /* Error everything on txq and txcmplq
3397          * First do the txq.
3398          */
3399         spin_lock_irq(&phba->hbalock);
3400         list_splice_init(&pring->txq, &completions);
3401         pring->txq_cnt = 0;
3402
3403         /* Next issue ABTS for everything on the txcmplq */
3404         list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3405                 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3406
3407         spin_unlock_irq(&phba->hbalock);
3408
3409         /* Cancel all the IOCBs from the completions list */
3410         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3411                               IOERR_SLI_ABORTED);
3412 }
3413
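/*
 * A hypothetical caller bringing the port down might walk every ring
 * (a sketch, not a verbatim call site):
 *
 *     struct lpfc_sli *psli = &phba->sli;
 *     int i;
 *
 *     for (i = 0; i < psli->num_rings; i++)
 *             lpfc_sli_abort_iocb_ring(phba, &psli->ring[i]);
 */
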
3414 /**
3415  * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
3416  * @phba: Pointer to HBA context object.
3417  *
3418  * This function flushes all iocbs in the fcp ring and frees all the iocb
3419  * objects in txq and txcmplq. This function will not issue abort iocbs
3420  * for the iocb commands in txcmplq; they will just be returned with
3421  * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
3422  * slot has been permanently disabled.
3423  **/
3424 void
3425 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3426 {
3427         LIST_HEAD(txq);
3428         LIST_HEAD(txcmplq);
3429         struct lpfc_sli *psli = &phba->sli;
3430         struct lpfc_sli_ring  *pring;
3431
3432         /* Currently, only one fcp ring */
3433         pring = &psli->ring[psli->fcp_ring];
3434
3435         spin_lock_irq(&phba->hbalock);
3436         /* Retrieve everything on txq */
3437         list_splice_init(&pring->txq, &txq);
3438         pring->txq_cnt = 0;
3439
3440         /* Retrieve everything on the txcmplq */
3441         list_splice_init(&pring->txcmplq, &txcmplq);
3442         pring->txcmplq_cnt = 0;
3443         spin_unlock_irq(&phba->hbalock);
3444
3445         /* Flush the txq */
3446         lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3447                               IOERR_SLI_DOWN);
3448
3449         /* Flush the txcmpq */
3450         lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3451                               IOERR_SLI_DOWN);
3452 }
3453
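/*
 * A sketch of the intended call site in the PCI error-recovery
 * (EEH/AER) error_detected callback, using the standard
 * pci_error_handlers constants (approximate, not verbatim):
 *
 *     if (state == pci_channel_io_perm_failure) {
 *             lpfc_sli_flush_fcp_rings(phba);
 *             return PCI_ERS_RESULT_DISCONNECT;
 *     }
 */
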
3454 /**
3455  * lpfc_sli_brdready_s3 - Check for sli3 host ready status
3456  * @phba: Pointer to HBA context object.
3457  * @mask: Bit mask to be checked.
3458  *
3459  * This function reads the host status register and compares it
3460  * with the provided bit mask to check if the HBA completed
3461  * the restart. This function will wait in a loop for the
3462  * HBA to complete the restart. If the HBA does not restart within
3463  * 15 iterations, the function will reset the HBA again. The
3464  * function returns 1 when the HBA fails to restart, otherwise returns
3465  * zero.
3466  **/
3467 static int
3468 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
3469 {
3470         uint32_t status;
3471         int i = 0;
3472         int retval = 0;
3473
3474         /* Read the HBA Host Status Register */
3475         if (lpfc_readl(phba->HSregaddr, &status))
3476                 return 1;
3477
3478         /*
3479          * Check status register every 100ms for 5 retries, then every
3480          * 500ms for 5, then every 2.5 sec for 5, then reset board and
3481          * every 2.5 sec for 4.
3482          * Break out of the loop if errors occurred during init.
3483          */
3484         while (((status & mask) != mask) &&
3485                !(status & HS_FFERM) &&
3486                i++ < 20) {
3487
3488                 if (i <= 5)
3489                         msleep(10);
3490                 else if (i <= 10)
3491                         msleep(500);
3492                 else
3493                         msleep(2500);
3494
3495                 if (i == 15) {
3496                         /* Do post */
3497                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3498                         lpfc_sli_brdrestart(phba);
3499                 }
3500                 /* Read the HBA Host Status Register */
3501                 if (lpfc_readl(phba->HSregaddr, &status)) {
3502                         retval = 1;
3503                         break;
3504                 }
3505         }
3506
3507         /* Check to see if any errors occurred during init */
3508         if ((status & HS_FFERM) || (i >= 20)) {
3509                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3510                                 "2751 Adapter failed to restart, "
3511                                 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3512                                 status,
3513                                 readl(phba->MBslimaddr + 0xa8),
3514                                 readl(phba->MBslimaddr + 0xac));
3515                 phba->link_state = LPFC_HBA_ERROR;
3516                 retval = 1;
3517         }
3518
3519         return retval;
3520 }
3521
3522 /**
3523  * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3524  * @phba: Pointer to HBA context object.
3525  * @mask: Bit mask to be checked.
3526  *
3527  * This function checks the host status register to determine if the HBA
3528  * is ready. This function will wait in a loop for the HBA to be ready.
3529  * If the HBA is not ready, the function will reset the HBA PCI
3530  * function again. The function returns 1 when the HBA fails to be ready,
3531  * otherwise returns zero.
3532  **/
3533 static int
3534 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3535 {
3536         uint32_t status;
3537         int retval = 0;
3538
3539         /* Read the HBA Host Status Register */
3540         status = lpfc_sli4_post_status_check(phba);
3541
3542         if (status) {
3543                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3544                 lpfc_sli_brdrestart(phba);
3545                 status = lpfc_sli4_post_status_check(phba);
3546         }
3547
3548         /* Check to see if any errors occurred during init */
3549         if (status) {
3550                 phba->link_state = LPFC_HBA_ERROR;
3551                 retval = 1;
3552         } else
3553                 phba->sli4_hba.intr_enable = 0;
3554
3555         return retval;
3556 }
3557
3558 /**
3559  * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3560  * @phba: Pointer to HBA context object.
3561  * @mask: Bit mask to be checked.
3562  *
3563  * This routine wraps the actual SLI3 or SLI4 hba readiness check routine,
3564  * calling it through the API jump table function pointer in the lpfc_hba struct.
3565  **/
3566 int
3567 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3568 {
3569         return phba->lpfc_sli_brdready(phba, mask);
3570 }
3571
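/*
 * A typical readiness check after a restart, assuming the SLI3
 * host-status ready bits from lpfc_hw.h (a sketch, not a verbatim call
 * site; the function returns 1 on failure):
 *
 *     if (lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY))
 *             return -ENODEV;         [HBA never became ready]
 */
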
3572 #define BARRIER_TEST_PATTERN (0xdeadbeef)
3573
3574 /**
3575  * lpfc_reset_barrier - Make HBA ready for HBA reset
3576  * @phba: Pointer to HBA context object.
3577  *
3578  * This function is called before resetting an HBA. This
3579  * function requests the HBA to quiesce DMAs before a reset.
3580  **/
3581 void lpfc_reset_barrier(struct lpfc_hba *phba)
3582 {
3583         uint32_t __iomem *resp_buf;
3584         uint32_t __iomem *mbox_buf;
3585         volatile uint32_t mbox;
3586         uint32_t hc_copy, ha_copy, resp_data;
3587         int  i;
3588         uint8_t hdrtype;
3589
3590         pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
3591         if (hdrtype != 0x80 ||
3592             (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
3593              FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
3594                 return;
3595
3596         /*
3597          * Tell the other part of the chip to suspend temporarily all
3598          * its DMA activity.
3599          */
3600         resp_buf = phba->MBslimaddr;
3601
3602         /* Disable the error attention */
3603         if (lpfc_readl(phba->HCregaddr, &hc_copy))
3604                 return;
3605         writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
3606         readl(phba->HCregaddr); /* flush */
3607         phba->link_flag |= LS_IGNORE_ERATT;
3608
3609         if (lpfc_readl(phba->HAregaddr, &ha_copy))
3610                 return;
3611         if (ha_copy & HA_ERATT) {
3612                 /* Clear Chip error bit */
3613                 writel(HA_ERATT, phba->HAregaddr);
3614                 phba->pport->stopped = 1;
3615         }
3616
3617         mbox = 0;
3618         ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
3619         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
3620
3621         writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
3622         mbox_buf = phba->MBslimaddr;
3623         writel(mbox, mbox_buf);
3624
3625         for (i = 0; i < 50; i++) {
3626                 if (lpfc_readl((resp_buf + 1), &resp_data))
3627                         return;
3628                 if (resp_data != ~(BARRIER_TEST_PATTERN))
3629                         mdelay(1);
3630                 else
3631                         break;
3632         }
3633         resp_data = 0;
3634         if (lpfc_readl((resp_buf + 1), &resp_data))
3635                 return;
3636         if (resp_data  != ~(BARRIER_TEST_PATTERN)) {
3637                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
3638                     phba->pport->stopped)
3639                         goto restore_hc;
3640                 else
3641                         goto clear_errat;
3642         }
3643
3644         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
3645         resp_data = 0;
3646         for (i = 0; i < 500; i++) {
3647                 if (lpfc_readl(resp_buf, &resp_data))
3648                         return;
3649                 if (resp_data != mbox)
3650                         mdelay(1);
3651                 else
3652                         break;
3653         }
3654
3655 clear_errat:
3656
3657         while (++i < 500) {
3658                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3659                         return;
3660                 if (!(ha_copy & HA_ERATT))
3661                         mdelay(1);
3662                 else
3663                         break;
3664         }
3665
3666         if (readl(phba->HAregaddr) & HA_ERATT) {
3667                 writel(HA_ERATT, phba->HAregaddr);
3668                 phba->pport->stopped = 1;
3669         }
3670
3671 restore_hc:
3672         phba->link_flag &= ~LS_IGNORE_ERATT;
3673         writel(hc_copy, phba->HCregaddr);
3674         readl(phba->HCregaddr); /* flush */
3675 }
3676
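/*
 * The barrier handshake above is a write-then-poll pattern: the host
 * writes BARRIER_TEST_PATTERN into SLIM and the chip acknowledges that
 * DMA is quiesced by storing the bitwise complement. Distilled from the
 * function body:
 *
 *     writel(BARRIER_TEST_PATTERN, resp_buf + 1);
 *     for (i = 0; i < 50; i++) {
 *             if (lpfc_readl(resp_buf + 1, &resp_data))
 *                     return;
 *             if (resp_data == ~(BARRIER_TEST_PATTERN))
 *                     break;          [chip acknowledged]
 *             mdelay(1);
 *     }
 */
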
3677 /**
3678  * lpfc_sli_brdkill - Issue a kill_board mailbox command
3679  * @phba: Pointer to HBA context object.
3680  *
3681  * This function issues a kill_board mailbox command and waits for
3682  * the error attention interrupt. This function is called for stopping
3683  * the firmware processing. The caller is not required to hold any
3684  * locks. This function calls lpfc_hba_down_post function to free
3685  * any pending commands after the kill. The function will return 1 when it
3686  * fails to kill the board, otherwise it will return 0.
3687  **/
3688 int
3689 lpfc_sli_brdkill(struct lpfc_hba *phba)
3690 {
3691         struct lpfc_sli *psli;
3692         LPFC_MBOXQ_t *pmb;
3693         uint32_t status;
3694         uint32_t ha_copy;
3695         int retval;
3696         int i = 0;
3697
3698         psli = &phba->sli;
3699
3700         /* Kill HBA */
3701         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3702                         "0329 Kill HBA Data: x%x x%x\n",
3703                         phba->pport->port_state, psli->sli_flag);
3704
3705         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3706         if (!pmb)
3707                 return 1;
3708
3709         /* Disable the error attention */
3710         spin_lock_irq(&phba->hbalock);
3711         if (lpfc_readl(phba->HCregaddr, &status)) {
3712                 spin_unlock_irq(&phba->hbalock);
3713                 mempool_free(pmb, phba->mbox_mem_pool);
3714                 return 1;
3715         }
3716         status &= ~HC_ERINT_ENA;
3717         writel(status, phba->HCregaddr);
3718         readl(phba->HCregaddr); /* flush */
3719         phba->link_flag |= LS_IGNORE_ERATT;
3720         spin_unlock_irq(&phba->hbalock);
3721
3722         lpfc_kill_board(phba, pmb);
3723         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3724         retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3725
3726         if (retval != MBX_SUCCESS) {
3727                 if (retval != MBX_BUSY)
3728                         mempool_free(pmb, phba->mbox_mem_pool);
3729                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3730                                 "2752 KILL_BOARD command failed retval %d\n",
3731                                 retval);
3732                 spin_lock_irq(&phba->hbalock);
3733                 phba->link_flag &= ~LS_IGNORE_ERATT;
3734                 spin_unlock_irq(&phba->hbalock);
3735                 return 1;
3736         }
3737
3738         spin_lock_irq(&phba->hbalock);
3739         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3740         spin_unlock_irq(&phba->hbalock);
3741
3742         mempool_free(pmb, phba->mbox_mem_pool);
3743
3744         /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
3745          * attention every 100ms for 3 seconds. If we don't get ERATT after
3746          * 3 seconds we still set HBA_ERROR state because the status of the
3747          * board is now undefined.
3748          */
3749         if (lpfc_readl(phba->HAregaddr, &ha_copy))
3750                 return 1;
3751         while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
3752                 mdelay(100);
3753                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3754                         return 1;
3755         }
3756
3757         del_timer_sync(&psli->mbox_tmo);
3758         if (ha_copy & HA_ERATT) {
3759                 writel(HA_ERATT, phba->HAregaddr);
3760                 phba->pport->stopped = 1;
3761         }
3762         spin_lock_irq(&phba->hbalock);
3763         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3764         psli->mbox_active = NULL;
3765         phba->link_flag &= ~LS_IGNORE_ERATT;
3766         spin_unlock_irq(&phba->hbalock);
3767
3768         lpfc_hba_down_post(phba);
3769         phba->link_state = LPFC_HBA_ERROR;
3770
3771         return ha_copy & HA_ERATT ? 0 : 1;
3772 }
3773
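/*
 * A hypothetical management/diagnostic path could use this to stop
 * firmware processing before taking the adapter down (helper name
 * invented purely for illustration):
 *
 *     static int example_force_board_error(struct lpfc_hba *phba)
 *     {
 *             if (lpfc_sli_brdkill(phba))
 *                     return -EIO;    [kill_board failed]
 *             return 0;
 *     }
 */
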
3774 /**
3775  * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
3776  * @phba: Pointer to HBA context object.
3777  *
3778  * This function resets the HBA by writing HC_INITFF to the control
3779  * register. After the HBA resets, this function resets all the iocb ring
3780  * indices. This function disables PCI layer parity checking during
3781  * the reset.
3782  * This function returns 0 always.
3783  * The caller is not required to hold any locks.
3784  **/
3785 int
3786 lpfc_sli_brdreset(struct lpfc_hba *phba)
3787 {
3788         struct lpfc_sli *psli;
3789         struct lpfc_sli_ring *pring;
3790         uint16_t cfg_value;
3791         int i;
3792
3793         psli = &phba->sli;
3794
3795         /* Reset HBA */
3796         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3797                         "0325 Reset HBA Data: x%x x%x\n",
3798                         phba->pport->port_state, psli->sli_flag);
3799
3800         /* perform board reset */
3801         phba->fc_eventTag = 0;
3802         phba->link_events = 0;
3803         phba->pport->fc_myDID = 0;
3804         phba->pport->fc_prevDID = 0;
3805
3806         /* Turn off parity checking and serr during the physical reset */
3807         pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3808         pci_write_config_word(phba->pcidev, PCI_COMMAND,
3809                               (cfg_value &
3810                                ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3811
3812         psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
3813
3814         /* Now toggle INITFF bit in the Host Control Register */
3815         writel(HC_INITFF, phba->HCregaddr);
3816         mdelay(1);
3817         readl(phba->HCregaddr); /* flush */
3818         writel(0, phba->HCregaddr);
3819         readl(phba->HCregaddr); /* flush */
3820
3821         /* Restore PCI cmd register */
3822         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
3823
3824         /* Initialize relevant SLI info */
3825         for (i = 0; i < psli->num_rings; i++) {
3826                 pring = &psli->ring[i];
3827                 pring->flag = 0;
3828                 pring->rspidx = 0;
3829                 pring->next_cmdidx  = 0;
3830                 pring->local_getidx = 0;
3831                 pring->cmdidx = 0;
3832                 pring->missbufcnt = 0;
3833         }
3834
3835         phba->link_state = LPFC_WARM_START;
3836         return 0;
3837 }
3838
3839 /**
3840  * lpfc_sli4_brdreset - Reset a sli-4 HBA
3841  * @phba: Pointer to HBA context object.
3842  *
3843  * This function resets a SLI4 HBA. This function disables PCI layer parity
3844  * checking while it resets the device. The caller is not required to hold
3845  * any locks.
3846  *
3847  * This function returns 0 always.
3848  **/
3849 int
3850 lpfc_sli4_brdreset(struct lpfc_hba *phba)
3851 {
3852         struct lpfc_sli *psli = &phba->sli;
3853         uint16_t cfg_value;
3854         uint8_t qindx;
3855
3856         /* Reset HBA */
3857         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3858                         "0295 Reset HBA Data: x%x x%x\n",
3859                         phba->pport->port_state, psli->sli_flag);
3860
3861         /* perform board reset */
3862         phba->fc_eventTag = 0;
3863         phba->link_events = 0;
3864         phba->pport->fc_myDID = 0;
3865         phba->pport->fc_prevDID = 0;
3866
3867         spin_lock_irq(&phba->hbalock);
3868         psli->sli_flag &= ~(LPFC_PROCESS_LA);
3869         phba->fcf.fcf_flag = 0;
3870         /* Clean up the child queue list for the CQs */
3871         list_del_init(&phba->sli4_hba.mbx_wq->list);
3872         list_del_init(&phba->sli4_hba.els_wq->list);
3873         list_del_init(&phba->sli4_hba.hdr_rq->list);
3874         list_del_init(&phba->sli4_hba.dat_rq->list);
3875         list_del_init(&phba->sli4_hba.mbx_cq->list);
3876         list_del_init(&phba->sli4_hba.els_cq->list);
3877         for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
3878                 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
3879         qindx = 0;
3880         do {
3881                 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
3882         } while (++qindx < phba->cfg_fcp_eq_count);
3883         spin_unlock_irq(&phba->hbalock);
3884
3885         /* Now physically reset the device */
3886         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3887                         "0389 Performing PCI function reset!\n");
3888
3889         /* Turn off parity checking and serr during the physical reset */
3890         pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3891         pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
3892                               ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3893
3894         /* Perform FCoE PCI function reset */
3895         lpfc_pci_function_reset(phba);
3896
3897         /* Restore PCI cmd register */
3898         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
3899
3900         return 0;
3901 }
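
/*
 * The reset above follows the common mask/reset/restore PCI pattern.
 * A minimal generic sketch of that pattern (illustrative only; pdev
 * stands for the function's pci_dev):
 *
 *	u16 cmd;
 *
 *	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
 *	pci_write_config_word(pdev, PCI_COMMAND,
 *			      cmd & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR));
 *	... perform the device-specific reset ...
 *	pci_write_config_word(pdev, PCI_COMMAND, cmd);
 */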
3902
3903 /**
3904  * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
3905  * @phba: Pointer to HBA context object.
3906  *
3907  * This function is called in the SLI initialization code path to
3908  * restart the HBA. The caller is not required to hold any lock.
3909  * This function writes the MBX_RESTART mailbox command to the SLIM and
3910  * resets the HBA. At the end, it calls lpfc_hba_down_post to free any
3911  * pending commands. POST is enabled only during the first
3912  * initialization. The function returns zero, but it does not guarantee
3913  * that the MBX_RESTART mailbox command has completed before the
3914  * function returns.
3915  **/
3916 static int
3917 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
3918 {
3919         MAILBOX_t *mb;
3920         struct lpfc_sli *psli;
3921         volatile uint32_t word0;
3922         void __iomem *to_slim;
3923         uint32_t hba_aer_enabled;
3924
3925         spin_lock_irq(&phba->hbalock);
3926
3927         /* Take PCIe device Advanced Error Reporting (AER) state */
3928         hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
3929
3930         psli = &phba->sli;
3931
3932         /* Restart HBA */
3933         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3934                         "0337 Restart HBA Data: x%x x%x\n",
3935                         phba->pport->port_state, psli->sli_flag);
3936
3937         word0 = 0;
3938         mb = (MAILBOX_t *) &word0;
3939         mb->mbxCommand = MBX_RESTART;
3940         mb->mbxHc = 1;
3941
3942         lpfc_reset_barrier(phba);
3943
3944         to_slim = phba->MBslimaddr;
3945         writel(*(uint32_t *) mb, to_slim);
3946         readl(to_slim); /* flush */
3947
3948         /* Only skip post after fc_ffinit is completed */
3949         if (phba->pport->port_state)
3950                 word0 = 1;      /* This is really setting up word1 */
3951         else
3952                 word0 = 0;      /* This is really setting up word1 */
3953         to_slim = phba->MBslimaddr + sizeof (uint32_t);
3954         writel(*(uint32_t *) mb, to_slim);
3955         readl(to_slim); /* flush */
3956
3957         lpfc_sli_brdreset(phba);
3958         phba->pport->stopped = 0;
3959         phba->link_state = LPFC_INIT_START;
3960         phba->hba_flag = 0;
3961         spin_unlock_irq(&phba->hbalock);
3962
3963         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
3964         psli->stats_start = get_seconds();
3965
3966         /* Give the INITFF and Post time to settle. */
3967         mdelay(100);
3968
3969         /* Reset HBA AER if it was enabled, note hba_flag was reset above */
3970         if (hba_aer_enabled)
3971                 pci_disable_pcie_error_reporting(phba->pcidev);
3972
3973         lpfc_hba_down_post(phba);
3974
3975         return 0;
3976 }
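
/*
 * For reference, a sketch of the two SLIM words written by the routine
 * above (layout per MAILBOX_t, values as set in this routine):
 *
 *	word0: mbxCommand = MBX_RESTART, mbxHc = 1
 *	word1: 1 = skip POST (port_state already set), 0 = run POST
 */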
3977
3978 /**
3979  * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
3980  * @phba: Pointer to HBA context object.
3981  *
3982  * This function is called in the SLI initialization code path to restart
3983  * a SLI4 HBA. The caller is not required to hold any lock.
3984  * At the end of the function, it calls lpfc_hba_down_post function to
3985  * free any pending commands.
3986  **/
3987 static int
3988 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
3989 {
3990         struct lpfc_sli *psli = &phba->sli;
3991         uint32_t hba_aer_enabled;
3992
3993         /* Restart HBA */
3994         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3995                         "0296 Restart HBA Data: x%x x%x\n",
3996                         phba->pport->port_state, psli->sli_flag);
3997
3998         /* Take PCIe device Advanced Error Reporting (AER) state */
3999         hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4000
4001         lpfc_sli4_brdreset(phba);
4002
4003         spin_lock_irq(&phba->hbalock);
4004         phba->pport->stopped = 0;
4005         phba->link_state = LPFC_INIT_START;
4006         phba->hba_flag = 0;
4007         spin_unlock_irq(&phba->hbalock);
4008
4009         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4010         psli->stats_start = get_seconds();
4011
4012         /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4013         if (hba_aer_enabled)
4014                 pci_disable_pcie_error_reporting(phba->pcidev);
4015
4016         lpfc_hba_down_post(phba);
4017
4018         return 0;
4019 }
4020
4021 /**
4022  * lpfc_sli_brdrestart - Wrapper func for restarting hba
4023  * @phba: Pointer to HBA context object.
4024  *
4025  * This routine wraps the actual SLI3 or SLI4 HBA restart routine, invoking
4026  * it through the API jump table function pointer in the lpfc_hba struct.
4027 **/
4028 int
4029 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4030 {
4031         return phba->lpfc_sli_brdrestart(phba);
4032 }
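
/*
 * For context, a minimal sketch of how the jump-table entry invoked
 * above is presumed to be installed during per-SLI-revision API setup
 * (a sketch under that assumption, not a quote of the setup routine):
 *
 *	switch (dev_grp) {
 *	case LPFC_PCI_DEV_LP:		// SLI-3 device group
 *		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
 *		break;
 *	case LPFC_PCI_DEV_OC:		// SLI-4 device group
 *		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
 *		break;
 *	}
 */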
4033
4034 /**
4035  * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4036  * @phba: Pointer to HBA context object.
4037  *
4038  * This function is called after an HBA restart to wait for the restart
4039  * to complete. A successful restart is indicated by the HS_FFRDY and
4040  * HS_MBRDY status bits. If the HBA is still not ready after 150 polling
4041  * iterations, the function restarts the HBA once more and keeps polling.
4042  * The function returns zero on success, else a negative error code.
4043  **/
4044 static int
4045 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4046 {
4047         uint32_t status, i = 0;
4048
4049         /* Read the HBA Host Status Register */
4050         if (lpfc_readl(phba->HSregaddr, &status))
4051                 return -EIO;
4052
4053         /* Check status register to see what current state is */
4054         i = 0;
4055         while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4056
4057                 /* Check every 10ms for 10 retries, then every 100ms for 90
4058                  * retries, then every 1 sec for 50 retries, for a total of
4059                  * ~60 seconds before resetting the board again and checking
4060                  * every 1 sec for another 50 retries. The up-to-60-second
4061                  * wait before the board is ready is required for Falcon FIPS
4062                  * zeroization to complete; any board reset in between would
4063                  * restart zeroization and further delay board readiness.
4064                  */
4065                 if (i++ >= 200) {
4066                         /* Adapter failed to init, timeout, status reg
4067                            <status> */
4068                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4069                                         "0436 Adapter failed to init, "
4070                                         "timeout, status reg x%x, "
4071                                         "FW Data: A8 x%x AC x%x\n", status,
4072                                         readl(phba->MBslimaddr + 0xa8),
4073                                         readl(phba->MBslimaddr + 0xac));
4074                         phba->link_state = LPFC_HBA_ERROR;
4075                         return -ETIMEDOUT;
4076                 }
4077
4078                 /* Check to see if any errors occurred during init */
4079                 if (status & HS_FFERM) {
4080                         /* ERROR: During chipset initialization */
4081                         /* Adapter failed to init, chipset, status reg
4082                            <status> */
4083                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4084                                         "0437 Adapter failed to init, "
4085                                         "chipset, status reg x%x, "
4086                                         "FW Data: A8 x%x AC x%x\n", status,
4087                                         readl(phba->MBslimaddr + 0xa8),
4088                                         readl(phba->MBslimaddr + 0xac));
4089                         phba->link_state = LPFC_HBA_ERROR;
4090                         return -EIO;
4091                 }
4092
4093                 if (i <= 10)
4094                         msleep(10);
4095                 else if (i <= 100)
4096                         msleep(100);
4097                 else
4098                         msleep(1000);
4099
4100                 if (i == 150) {
4101                         /* Do post */
4102                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4103                         lpfc_sli_brdrestart(phba);
4104                 }
4105                 /* Read the HBA Host Status Register */
4106                 if (lpfc_readl(phba->HSregaddr, &status))
4107                         return -EIO;
4108         }
4109
4110         /* Check to see if any errors occurred during init */
4111         if (status & HS_FFERM) {
4112                 /* ERROR: During chipset initialization */
4113                 /* Adapter failed to init, chipset, status reg <status> */
4114                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4115                                 "0438 Adapter failed to init, chipset, "
4116                                 "status reg x%x, "
4117                                 "FW Data: A8 x%x AC x%x\n", status,
4118                                 readl(phba->MBslimaddr + 0xa8),
4119                                 readl(phba->MBslimaddr + 0xac));
4120                 phba->link_state = LPFC_HBA_ERROR;
4121                 return -EIO;
4122         }
4123
4124         /* Clear all interrupt enable conditions */
4125         writel(0, phba->HCregaddr);
4126         readl(phba->HCregaddr); /* flush */
4127
4128         /* setup host attn register */
4129         writel(0xffffffff, phba->HAregaddr);
4130         readl(phba->HAregaddr); /* flush */
4131         return 0;
4132 }
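
/*
 * Summary of the polling schedule implemented above (derived from the
 * loop bounds in lpfc_sli_chipset_init):
 *
 *	iteration i    delay per poll    cumulative wait
 *	1..10          10 ms             ~0.1 s
 *	11..100        100 ms            ~9 s
 *	101..          1 s               ~60 s at i == 150
 *
 * At i == 150 the board is restarted once; at i >= 200 the function
 * gives up and returns -ETIMEDOUT.
 */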
4133
4134 /**
4135  * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4136  *
4137  * This function calculates and returns the number of HBQs required to be
4138  * configured.
4139  **/
4140 int
4141 lpfc_sli_hbq_count(void)
4142 {
4143         return ARRAY_SIZE(lpfc_hbq_defs);
4144 }
4145
4146 /**
4147  * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4148  *
4149  * This function adds the number of hbq entries in every HBQ to get
4150  * the total number of hbq entries required for the HBA and returns
4151  * the total count.
4152  **/
4153 static int
4154 lpfc_sli_hbq_entry_count(void)
4155 {
4156         int  hbq_count = lpfc_sli_hbq_count();
4157         int  count = 0;
4158         int  i;
4159
4160         for (i = 0; i < hbq_count; ++i)
4161                 count += lpfc_hbq_defs[i]->entry_count;
4162         return count;
4163 }
4164
4165 /**
4166  * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4167  *
4168  * This function calculates amount of memory required for all hbq entries
4169  * to be configured and returns the total memory required.
4170  **/
4171 int
4172 lpfc_sli_hbq_size(void)
4173 {
4174         return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4175 }
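
/*
 * Worked example of the sizing above (entry counts are illustrative):
 * with two HBQs configured for 256 and 128 entries respectively, the
 * driver reserves (256 + 128) * sizeof(struct lpfc_hbq_entry) bytes,
 * i.e. the per-HBQ entry counts are summed before multiplying by the
 * fixed entry size.
 */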
4176
4177 /**
4178  * lpfc_sli_hbq_setup - configure and initialize HBQs
4179  * @phba: Pointer to HBA context object.
4180  *
4181  * This function is called during the SLI initialization to configure
4182  * all the HBQs and post buffers to the HBQ. The caller is not
4183  * required to hold any locks. This function will return zero if successful
4184  * else it will return negative error code.
4185  **/
4186 static int
4187 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4188 {
4189         int  hbq_count = lpfc_sli_hbq_count();
4190         LPFC_MBOXQ_t *pmb;
4191         MAILBOX_t *pmbox;
4192         uint32_t hbqno;
4193         uint32_t hbq_entry_index;
4194
4195         /* Get a Mailbox buffer to setup mailbox
4196          * commands for HBA initialization
4197          */
4198         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4199
4200         if (!pmb)
4201                 return -ENOMEM;
4202
4203         pmbox = &pmb->u.mb;
4204
4205         /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4206         phba->link_state = LPFC_INIT_MBX_CMDS;
4207         phba->hbq_in_use = 1;
4208
4209         hbq_entry_index = 0;
4210         for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4211                 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4212                 phba->hbqs[hbqno].hbqPutIdx      = 0;
4213                 phba->hbqs[hbqno].local_hbqGetIdx   = 0;
4214                 phba->hbqs[hbqno].entry_count =
4215                         lpfc_hbq_defs[hbqno]->entry_count;
4216                 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4217                         hbq_entry_index, pmb);
4218                 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4219
4220                 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4221                         /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4222                            mbxStatus <status>, ring <num> */
4223
4224                         lpfc_printf_log(phba, KERN_ERR,
4225                                         LOG_SLI | LOG_VPORT,
4226                                         "1805 Adapter failed to init. "
4227                                         "Data: x%x x%x x%x\n",
4228                                         pmbox->mbxCommand,
4229                                         pmbox->mbxStatus, hbqno);
4230
4231                         phba->link_state = LPFC_HBA_ERROR;
4232                         mempool_free(pmb, phba->mbox_mem_pool);
4233                         return -ENXIO;
4234                 }
4235         }
4236         phba->hbq_count = hbq_count;
4237
4238         mempool_free(pmb, phba->mbox_mem_pool);
4239
4240         /* Initially populate or replenish the HBQs */
4241         for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4242                 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4243         return 0;
4244 }
4245
4246 /**
4247  * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4248  * @phba: Pointer to HBA context object.
4249  *
4250  * This function is called during the SLI initialization to configure
4251  * all the HBQs and post buffers to the HBQ. The caller is not
4252  * required to hold any locks. This function will return zero if successful
4253  * else it will return negative error code.
4254  **/
4255 static int
4256 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4257 {
4258         phba->hbq_in_use = 1;
4259         phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
4260         phba->hbq_count = 1;
4261         /* Initially populate or replenish the HBQs */
4262         lpfc_sli_hbqbuf_init_hbqs(phba, 0);
4263         return 0;
4264 }
4265
4266 /**
4267  * lpfc_sli_config_port - Issue config port mailbox command
4268  * @phba: Pointer to HBA context object.
4269  * @sli_mode: sli mode - 2/3
4270  *
4271  * This function is called by the SLI initialization code path
4272  * to issue the config_port mailbox command. This function restarts the
4273  * HBA firmware and issues a config_port mailbox command to configure
4274  * the SLI interface in the sli mode specified by sli_mode
4275  * variable. The caller is not required to hold any locks.
4276  * The function returns 0 if successful, else returns negative error
4277  * code.
4278  **/
4279 int
4280 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4281 {
4282         LPFC_MBOXQ_t *pmb;
4283         uint32_t resetcount = 0, rc = 0, done = 0;
4284
4285         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4286         if (!pmb) {
4287                 phba->link_state = LPFC_HBA_ERROR;
4288                 return -ENOMEM;
4289         }
4290
4291         phba->sli_rev = sli_mode;
4292         while (resetcount < 2 && !done) {
4293                 spin_lock_irq(&phba->hbalock);
4294                 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4295                 spin_unlock_irq(&phba->hbalock);
4296                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4297                 lpfc_sli_brdrestart(phba);
4298                 rc = lpfc_sli_chipset_init(phba);
4299                 if (rc)
4300                         break;
4301
4302                 spin_lock_irq(&phba->hbalock);
4303                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4304                 spin_unlock_irq(&phba->hbalock);
4305                 resetcount++;
4306
4307                 /* Call pre CONFIG_PORT mailbox command initialization.  A
4308                  * value of 0 means the call was successful.  Any other
4309                  * nonzero value is a failure, but if ERESTART is returned,
4310                  * the driver may reset the HBA and try again.
4311                  */
4312                 rc = lpfc_config_port_prep(phba);
4313                 if (rc == -ERESTART) {
4314                         phba->link_state = LPFC_LINK_UNKNOWN;
4315                         continue;
4316                 } else if (rc)
4317                         break;
4318
4319                 phba->link_state = LPFC_INIT_MBX_CMDS;
4320                 lpfc_config_port(phba, pmb);
4321                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4322                 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4323                                         LPFC_SLI3_HBQ_ENABLED |
4324                                         LPFC_SLI3_CRP_ENABLED |
4325                                         LPFC_SLI3_BG_ENABLED |
4326                                         LPFC_SLI3_DSS_ENABLED);
4327                 if (rc != MBX_SUCCESS) {
4328                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4329                                 "0442 Adapter failed to init, mbxCmd x%x "
4330                                 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
4331                                 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
4332                         spin_lock_irq(&phba->hbalock);
4333                         phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
4334                         spin_unlock_irq(&phba->hbalock);
4335                         rc = -ENXIO;
4336                 } else {
4337                         /* Allow asynchronous mailbox command to go through */
4338                         spin_lock_irq(&phba->hbalock);
4339                         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4340                         spin_unlock_irq(&phba->hbalock);
4341                         done = 1;
4342                 }
4343         }
4344         if (!done) {
4345                 rc = -EINVAL;
4346                 goto do_prep_failed;
4347         }
4348         if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
4349                 if (!pmb->u.mb.un.varCfgPort.cMA) {
4350                         rc = -ENXIO;
4351                         goto do_prep_failed;
4352                 }
4353                 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
4354                         phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
4355                         phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
4356                         phba->max_vports = (phba->max_vpi > phba->max_vports) ?
4357                                 phba->max_vpi : phba->max_vports;
4358
4359                 } else
4360                         phba->max_vpi = 0;
4361                 phba->fips_level = 0;
4362                 phba->fips_spec_rev = 0;
4363                 if (pmb->u.mb.un.varCfgPort.gdss) {
4364                         phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
4365                         phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
4366                         phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
4367                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4368                                         "2850 Security Crypto Active. FIPS x%d "
4369                                         "(Spec Rev: x%d)",
4370                                         phba->fips_level, phba->fips_spec_rev);
4371                 }
4372                 if (pmb->u.mb.un.varCfgPort.sec_err) {
4373                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4374                                         "2856 Config Port Security Crypto "
4375                                         "Error: x%x ",
4376                                         pmb->u.mb.un.varCfgPort.sec_err);
4377                 }
4378                 if (pmb->u.mb.un.varCfgPort.gerbm)
4379                         phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
4380                 if (pmb->u.mb.un.varCfgPort.gcrp)
4381                         phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
4382
4383                 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
4384                 phba->port_gp = phba->mbox->us.s3_pgp.port;
4385
4386                 if (phba->cfg_enable_bg) {
4387                         if (pmb->u.mb.un.varCfgPort.gbg)
4388                                 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
4389                         else
4390                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4391                                                 "0443 Adapter did not grant "
4392                                                 "BlockGuard\n");
4393                 }
4394         } else {
4395                 phba->hbq_get = NULL;
4396                 phba->port_gp = phba->mbox->us.s2.port;
4397                 phba->max_vpi = 0;
4398         }
4399 do_prep_failed:
4400         mempool_free(pmb, phba->mbox_mem_pool);
4401         return rc;
4402 }
4403
4404
4405 /**
4406  * lpfc_sli_hba_setup - SLI initialization function
4407  * @phba: Pointer to HBA context object.
4408  *
4409  * This function is the main SLI initialization function. This function
4410  * is called by the HBA initialization code, HBA reset code and HBA
4411  * error attention handler code. Caller is not required to hold any
4412  * locks. This function issues config_port mailbox command to configure
4413  * the SLI, setup iocb rings and HBQ rings. In the end the function
4414  * calls the config_port_post function to issue init_link mailbox
4415  * command and to start the discovery. The function will return zero
4416  * if successful, else it will return negative error code.
4417  **/
4418 int
4419 lpfc_sli_hba_setup(struct lpfc_hba *phba)
4420 {
4421         int rc;
4422         int  mode = 3, i;
4423         int longs;
4424
4425         switch (lpfc_sli_mode) {
4426         case 2:
4427                 if (phba->cfg_enable_npiv) {
4428                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4429                                 "1824 NPIV enabled: Override lpfc_sli_mode "
4430                                 "parameter (%d) to auto (0).\n",
4431                                 lpfc_sli_mode);
4432                         break;
4433                 }
4434                 mode = 2;
4435                 break;
4436         case 0:
4437         case 3:
4438                 break;
4439         default:
4440                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4441                                 "1819 Unrecognized lpfc_sli_mode "
4442                                 "parameter: %d.\n", lpfc_sli_mode);
4443
4444                 break;
4445         }
4446
4447         rc = lpfc_sli_config_port(phba, mode);
4448
4449         if (rc && lpfc_sli_mode == 3)
4450                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4451                                 "1820 Unable to select SLI-3.  "
4452                                 "Not supported by adapter.\n");
4453         if (rc && mode != 2)
4454                 rc = lpfc_sli_config_port(phba, 2);
4455         if (rc)
4456                 goto lpfc_sli_hba_setup_error;
4457
4458         /* Enable PCIe device Advanced Error Reporting (AER) if configured */
4459         if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
4460                 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4461                 if (!rc) {
4462                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4463                                         "2709 This device supports "
4464                                         "Advanced Error Reporting (AER)\n");
4465                         spin_lock_irq(&phba->hbalock);
4466                         phba->hba_flag |= HBA_AER_ENABLED;
4467                         spin_unlock_irq(&phba->hbalock);
4468                 } else {
4469                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4470                                         "2708 This device does not support "
4471                                         "Advanced Error Reporting (AER)\n");
4472                         phba->cfg_aer_support = 0;
4473                 }
4474         }
4475
4476         if (phba->sli_rev == 3) {
4477                 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
4478                 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
4479         } else {
4480                 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
4481                 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
4482                 phba->sli3_options = 0;
4483         }
4484
4485         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4486                         "0444 Firmware in SLI %x mode. Max_vpi %d\n",
4487                         phba->sli_rev, phba->max_vpi);
4488         rc = lpfc_sli_ring_map(phba);
4489
4490         if (rc)
4491                 goto lpfc_sli_hba_setup_error;
4492
4493         /* Initialize VPIs. */
4494         if (phba->sli_rev == LPFC_SLI_REV3) {
4495                 /*
4496                  * The VPI bitmask and physical ID array are allocated
4497                  * and initialized once only - at driver load.  A port
4498                  * reset doesn't need to reinitialize this memory.
4499                  */
4500                 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
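                        /*
                         * Bits needed = max_vpi + 1 (VPI 0 is valid), so
                         * (max_vpi + BITS_PER_LONG) / BITS_PER_LONG is
                         * ceil((max_vpi + 1) / BITS_PER_LONG); e.g. with
                         * max_vpi = 255 on a 64-bit host this yields 4
                         * longs (256 bits).
                         */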
4501                         longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
4502                         phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
4503                                                   GFP_KERNEL);
4504                         if (!phba->vpi_bmask) {
4505                                 rc = -ENOMEM;
4506                                 goto lpfc_sli_hba_setup_error;
4507                         }
4508
4509                         phba->vpi_ids = kzalloc(
4510                                         (phba->max_vpi+1) * sizeof(uint16_t),
4511                                         GFP_KERNEL);
4512                         if (!phba->vpi_ids) {
4513                                 kfree(phba->vpi_bmask);
4514                                 rc = -ENOMEM;
4515                                 goto lpfc_sli_hba_setup_error;
4516                         }
4517                         for (i = 0; i < phba->max_vpi; i++)
4518                                 phba->vpi_ids[i] = i;
4519                 }
4520         }
4521
4522         /* Init HBQs */
4523         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4524                 rc = lpfc_sli_hbq_setup(phba);
4525                 if (rc)
4526                         goto lpfc_sli_hba_setup_error;
4527         }
4528         spin_lock_irq(&phba->hbalock);
4529         phba->sli.sli_flag |= LPFC_PROCESS_LA;
4530         spin_unlock_irq(&phba->hbalock);
4531
4532         rc = lpfc_config_port_post(phba);
4533         if (rc)
4534                 goto lpfc_sli_hba_setup_error;
4535
4536         return rc;
4537
4538 lpfc_sli_hba_setup_error:
4539         phba->link_state = LPFC_HBA_ERROR;
4540         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4541                         "0445 Firmware initialization failed\n");
4542         return rc;
4543 }
4544
4545 /**
4546  * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4547  * @phba: Pointer to HBA context object.
4548  * @mboxq: mailbox pointer.
4549  * This function issues a dump mailbox command to read config region
4550  * 23, parses the records in the region, and populates the driver
4551  * data structures.
4552  **/
4553 static int
4554 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
4555                 LPFC_MBOXQ_t *mboxq)
4556 {
4557         struct lpfc_dmabuf *mp;
4558         struct lpfc_mqe *mqe;
4559         uint32_t data_length;
4560         int rc;
4561
4562         /* Program the default value of vlan_id and fc_map */
4563         phba->valid_vlan = 0;
4564         phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4565         phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4566         phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4567
4568         mqe = &mboxq->u.mqe;
4569         if (lpfc_dump_fcoe_param(phba, mboxq))
4570                 return -ENOMEM;
4571
4572         mp = (struct lpfc_dmabuf *) mboxq->context1;
4573         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4574
4575         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4576                         "(%d):2571 Mailbox cmd x%x Status x%x "
4577                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4578                         "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4579                         "CQ: x%x x%x x%x x%x\n",
4580                         mboxq->vport ? mboxq->vport->vpi : 0,
4581                         bf_get(lpfc_mqe_command, mqe),
4582                         bf_get(lpfc_mqe_status, mqe),
4583                         mqe->un.mb_words[0], mqe->un.mb_words[1],
4584                         mqe->un.mb_words[2], mqe->un.mb_words[3],
4585                         mqe->un.mb_words[4], mqe->un.mb_words[5],
4586                         mqe->un.mb_words[6], mqe->un.mb_words[7],
4587                         mqe->un.mb_words[8], mqe->un.mb_words[9],
4588                         mqe->un.mb_words[10], mqe->un.mb_words[11],
4589                         mqe->un.mb_words[12], mqe->un.mb_words[13],
4590                         mqe->un.mb_words[14], mqe->un.mb_words[15],
4591                         mqe->un.mb_words[16], mqe->un.mb_words[50],
4592                         mboxq->mcqe.word0,
4593                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
4594                         mboxq->mcqe.trailer);
4595
4596         if (rc) {
4597                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4598                 kfree(mp);
4599                 return -EIO;
4600         }
4601         data_length = mqe->un.mb_words[5];
4602         if (data_length > DMP_RGN23_SIZE) {
4603                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4604                 kfree(mp);
4605                 return -EIO;
4606         }
4607
4608         lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4609         lpfc_mbuf_free(phba, mp->virt, mp->phys);
4610         kfree(mp);
4611         return 0;
4612 }
4613
4614 /**
4615  * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
4616  * @phba: pointer to lpfc hba data structure.
4617  * @mboxq: pointer to the LPFC_MBOXQ_t structure.
4618  * @vpd: pointer to the memory to hold resulting port vpd data.
4619  * @vpd_size: On input, the number of bytes allocated to @vpd.
4620  *            On output, the number of data bytes in @vpd.
4621  *
4622  * This routine executes a READ_REV SLI4 mailbox command.  In
4623  * addition, this routine gets the port vpd data.
4624  *
4625  * Return codes
4626  *      0 - successful
4627  *      -ENOMEM - could not allocate memory.
4628  **/
4629 static int
4630 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4631                     uint8_t *vpd, uint32_t *vpd_size)
4632 {
4633         int rc = 0;
4634         uint32_t dma_size;
4635         struct lpfc_dmabuf *dmabuf;
4636         struct lpfc_mqe *mqe;
4637
4638         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4639         if (!dmabuf)
4640                 return -ENOMEM;
4641
4642         /*
4643          * Get a DMA buffer for the vpd data resulting from the READ_REV
4644          * mailbox command.
4645          */
4646         dma_size = *vpd_size;
4647         dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4648                                           dma_size,
4649                                           &dmabuf->phys,
4650                                           GFP_KERNEL);
4651         if (!dmabuf->virt) {
4652                 kfree(dmabuf);
4653                 return -ENOMEM;
4654         }
4655         memset(dmabuf->virt, 0, dma_size);
4656
4657         /*
4658          * The SLI4 implementation of READ_REV conflicts at word1,
4659          * bits 31:16 and SLI4 adds vpd functionality not present
4660          * in SLI3.  This code corrects the conflicts.
4661          */
4662         lpfc_read_rev(phba, mboxq);
4663         mqe = &mboxq->u.mqe;
4664         mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4665         mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4666         mqe->un.read_rev.word1 &= 0x0000FFFF;
4667         bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4668         bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4669
4670         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4671         if (rc) {
4672                 dma_free_coherent(&phba->pcidev->dev, dma_size,
4673                                   dmabuf->virt, dmabuf->phys);
4674                 kfree(dmabuf);
4675                 return -EIO;
4676         }
4677
4678         /*
4679          * The available vpd length cannot be bigger than the
4680          * DMA buffer passed to the port.  Catch the less than
4681          * case and update the caller's size.
4682          */
4683         if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4684                 *vpd_size = mqe->un.read_rev.avail_vpd_len;
4685
4686         memcpy(vpd, dmabuf->virt, *vpd_size);
4687
4688         dma_free_coherent(&phba->pcidev->dev, dma_size,
4689                           dmabuf->virt, dmabuf->phys);
4690         kfree(dmabuf);
4691         return 0;
4692 }
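
/*
 * Usage sketch for the routine above (buffer size and caller context
 * are illustrative, not taken from this file):
 *
 *	uint8_t vpd[1024];
 *	uint32_t vpd_size = sizeof(vpd);
 *
 *	if (!lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size)) {
 *		// vpd_size now holds the number of valid bytes in vpd
 *	}
 */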
4693
4694 /**
4695  * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
4696  * @phba: pointer to lpfc hba data structure.
4697  *
4698  * This routine retrieves the SLI4 device physical port name that this
4699  * PCI function is attached to.
4700  *
4701  * Return codes
4702  *      0 - successful
4703  *      otherwise - failed to retrieve physical port name
4704  **/
4705 static int
4706 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
4707 {
4708         LPFC_MBOXQ_t *mboxq;
4709         struct lpfc_mbx_read_config *rd_config;
4710         struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
4711         struct lpfc_controller_attribute *cntl_attr;
4712         struct lpfc_mbx_get_port_name *get_port_name;
4713         void *virtaddr = NULL;
4714         uint32_t alloclen, reqlen;
4715         uint32_t shdr_status, shdr_add_status;
4716         union lpfc_sli4_cfg_shdr *shdr;
4717         char cport_name = 0;
4718         int rc;
4719
4720         /* We assume nothing at this point */
4721         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4722         phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
4723
4724         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4725         if (!mboxq)
4726                 return -ENOMEM;
4727
4728         /* obtain link type and link number via READ_CONFIG */
4729         lpfc_read_config(phba, mboxq);
4730         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4731         if (rc == MBX_SUCCESS) {
4732                 rd_config = &mboxq->u.mqe.un.rd_config;
4733                 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
4734                         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
4735                         phba->sli4_hba.lnk_info.lnk_tp =
4736                                 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
4737                         phba->sli4_hba.lnk_info.lnk_no =
4738                                 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
4739                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4740                                         "3081 lnk_type:%d, lnk_numb:%d\n",
4741                                         phba->sli4_hba.lnk_info.lnk_tp,
4742                                         phba->sli4_hba.lnk_info.lnk_no);
4743                         goto retrieve_ppname;
4744                 } else
4745                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4746                                         "3082 Mailbox (x%x) returned ldv:x0\n",
4747                                         bf_get(lpfc_mqe_command,
4748                                                &mboxq->u.mqe));
4749         } else
4750                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4751                                 "3083 Mailbox (x%x) failed, status:x%x\n",
4752                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4753                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4754
4755         /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
4756         reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
4757         alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
4758                         LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
4759                         LPFC_SLI4_MBX_NEMBED);
4760         if (alloclen < reqlen) {
4761                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4762                                 "3084 Allocated DMA memory size (%d) is "
4763                                 "less than the requested DMA memory size "
4764                                 "(%d)\n", alloclen, reqlen);
4765                 rc = -ENOMEM;
4766                 goto out_free_mboxq;
4767         }
4768         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4769         virtaddr = mboxq->sge_array->addr[0];
4770         mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
4771         shdr = &mbx_cntl_attr->cfg_shdr;
4772         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
4773         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
4774         if (shdr_status || shdr_add_status || rc) {
4775                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4776                                 "3085 Mailbox x%x (x%x/x%x) failed, "
4777                                 "rc:x%x, status:x%x, add_status:x%x\n",
4778                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4779                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
4780                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
4781                                 rc, shdr_status, shdr_add_status);
4782                 rc = -ENXIO;
4783                 goto out_free_mboxq;
4784         }
4785         cntl_attr = &mbx_cntl_attr->cntl_attr;
4786         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
4787         phba->sli4_hba.lnk_info.lnk_tp =
4788                 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
4789         phba->sli4_hba.lnk_info.lnk_no =
4790                 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
4791         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4792                         "3086 lnk_type:%d, lnk_numb:%d\n",
4793                         phba->sli4_hba.lnk_info.lnk_tp,
4794                         phba->sli4_hba.lnk_info.lnk_no);
4795
4796 retrieve_ppname:
4797         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
4798                 LPFC_MBOX_OPCODE_GET_PORT_NAME,
4799                 sizeof(struct lpfc_mbx_get_port_name) -
4800                 sizeof(struct lpfc_sli4_cfg_mhdr),
4801                 LPFC_SLI4_MBX_EMBED);
4802         get_port_name = &mboxq->u.mqe.un.get_port_name;
4803         shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
4804         bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
4805         bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
4806                 phba->sli4_hba.lnk_info.lnk_tp);
4807         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4808         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
4809         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
4810         if (shdr_status || shdr_add_status || rc) {
4811                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4812                                 "3087 Mailbox x%x (x%x/x%x) failed: "
4813                                 "rc:x%x, status:x%x, add_status:x%x\n",
4814                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4815                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
4816                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
4817                                 rc, shdr_status, shdr_add_status);
4818                 rc = -ENXIO;
4819                 goto out_free_mboxq;
4820         }
4821         switch (phba->sli4_hba.lnk_info.lnk_no) {
4822         case LPFC_LINK_NUMBER_0:
4823                 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
4824                                 &get_port_name->u.response);
4825                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4826                 break;
4827         case LPFC_LINK_NUMBER_1:
4828                 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
4829                                 &get_port_name->u.response);
4830                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4831                 break;
4832         case LPFC_LINK_NUMBER_2:
4833                 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
4834                                 &get_port_name->u.response);
4835                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4836                 break;
4837         case LPFC_LINK_NUMBER_3:
4838                 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
4839                                 &get_port_name->u.response);
4840                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4841                 break;
4842         default:
4843                 break;
4844         }
4845
4846         if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
4847                 phba->Port[0] = cport_name;
4848                 phba->Port[1] = '\0';
4849                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4850                                 "3091 SLI get port name: %s\n", phba->Port);
4851         }
4852
4853 out_free_mboxq:
4854         if (rc != MBX_TIMEOUT) {
4855                 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
4856                         lpfc_sli4_mbox_cmd_free(phba, mboxq);
4857                 else
4858                         mempool_free(mboxq, phba->mbox_mem_pool);
4859         }
4860         return rc;
4861 }
4862
4863 /**
4864  * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
4865  * @phba: pointer to lpfc hba data structure.
4866  *
4867  * This routine is called to explicitly arm the SLI4 device's completion and
4868  * event queues.
4869  **/
4870 static void
4871 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4872 {
4873         uint8_t fcp_eqidx;
4874
4875         lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4876         lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4877         fcp_eqidx = 0;
4878         do {
4879                 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4880                                      LPFC_QUEUE_REARM);
4881         } while (++fcp_eqidx < phba->cfg_fcp_eq_count);
4882         lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
4883         for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4884                 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
4885                                      LPFC_QUEUE_REARM);
4886 }
4887
4888 /**
4889  * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
4890  * @phba: Pointer to HBA context object.
4891  * @type: The resource extent type.
4892  * @extnt_count: buffer to hold port available extent count.
4893  * @extnt_size: buffer to hold element count per extent.
4894  *
4895  * This function queries the port and retrieves the number of available
4896  * extents and their size for a particular extent type.
4897  *
4898  * Returns: 0 if successful.  Nonzero otherwise.
4899  **/
4900 int
4901 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
4902                                uint16_t *extnt_count, uint16_t *extnt_size)
4903 {
4904         int rc = 0;
4905         uint32_t length;
4906         uint32_t mbox_tmo;
4907         struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
4908         LPFC_MBOXQ_t *mbox;
4909
4910         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4911         if (!mbox)
4912                 return -ENOMEM;
4913
4914         /* Find out how many extents are available for this resource type */
4915         length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
4916                   sizeof(struct lpfc_sli4_cfg_mhdr));
4917         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
4918                          LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
4919                          length, LPFC_SLI4_MBX_EMBED);
4920
4921         /* Send an extents count of 0 - the GET doesn't use it. */
4922         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
4923                                         LPFC_SLI4_MBX_EMBED);
4924         if (unlikely(rc)) {
4925                 rc = -EIO;
4926                 goto err_exit;
4927         }
4928
4929         if (!phba->sli4_hba.intr_enable)
4930                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
4931         else {
4932                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
4933                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
4934         }
4935         if (unlikely(rc)) {
4936                 rc = -EIO;
4937                 goto err_exit;
4938         }
4939
4940         rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
4941         if (bf_get(lpfc_mbox_hdr_status,
4942                    &rsrc_info->header.cfg_shdr.response)) {
4943                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
4944                                 "2930 Failed to get resource extents "
4945                                 "Status 0x%x Add'l Status 0x%x\n",
4946                                 bf_get(lpfc_mbox_hdr_status,
4947                                        &rsrc_info->header.cfg_shdr.response),
4948                                 bf_get(lpfc_mbox_hdr_add_status,
4949                                        &rsrc_info->header.cfg_shdr.response));
4950                 rc = -EIO;
4951                 goto err_exit;
4952         }
4953
4954         *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
4955                               &rsrc_info->u.rsp);
4956         *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
4957                              &rsrc_info->u.rsp);
4958  err_exit:
4959         mempool_free(mbox, phba->mbox_mem_pool);
4960         return rc;
4961 }
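
/*
 * Usage sketch for the routine above (the extent type chosen is
 * illustrative):
 *
 *	uint16_t ext_cnt, ext_size;
 *	int rc;
 *
 *	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &ext_cnt, &ext_size);
 *
 * On success, ext_cnt * ext_size is the total number of XRI ids the
 * port could provide if every extent were allocated.
 */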
4962
4963 /**
4964  * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
4965  * @phba: Pointer to HBA context object.
4966  * @type: The extent type to check.
4967  *
4968  * This function reads the current available extents from the port and checks
4969  * if the extent count or extent size has changed since the last access.
4970  * Callers use this routine after a port reset to determine if there is
4971  * an extent reprovisioning requirement.
4972  *
4973  * Returns:
4974  *   -Error: a negative error code indicates a problem.
4975  *   1: Extent count or size has changed.
4976  *   0: No changes.
4977  **/
4978 static int
4979 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
4980 {
4981         uint16_t curr_ext_cnt, rsrc_ext_cnt;
4982         uint16_t size_diff, rsrc_ext_size;
4983         int rc = 0;
4984         struct lpfc_rsrc_blks *rsrc_entry;
4985         struct list_head *rsrc_blk_list = NULL;
4986
4987         size_diff = 0;
4988         curr_ext_cnt = 0;
4989         rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
4990                                             &rsrc_ext_cnt,
4991                                             &rsrc_ext_size);
4992         if (unlikely(rc))
4993                 return -EIO;
4994
4995         switch (type) {
4996         case LPFC_RSC_TYPE_FCOE_RPI:
4997                 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
4998                 break;
4999         case LPFC_RSC_TYPE_FCOE_VPI:
5000                 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5001                 break;
5002         case LPFC_RSC_TYPE_FCOE_XRI:
5003                 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5004                 break;
5005         case LPFC_RSC_TYPE_FCOE_VFI:
5006                 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5007                 break;
5008         default:
5009                 break;
5010         }
5011
5012         list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5013                 curr_ext_cnt++;
5014                 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5015                         size_diff++;
5016         }
5017
5018         if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5019                 rc = 1;
5020
5021         return rc;
5022 }
5023
5024 /**
5025  * lpfc_sli4_cfg_post_extnts - Issue an extent allocation request
5026  * @phba: Pointer to HBA context object.
5027  * @extnt_cnt: number of available extents.
5028  * @type: the extent type (rpi, xri, vfi, vpi).
5029  * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5030  * @mbox: pointer to the caller's allocated mailbox structure.
5031  *
5032  * This function executes the extents allocation request.  It also
5033  * takes care of the amount of memory needed to allocate or get the
5034  * allocated extents. It is the caller's responsibility to evaluate
5035  * the response.
5036  *
5037  * Returns:
5038  *   -Error:  Error value describes the condition found.
5039  *   0: if successful
5040  **/
5041 static int
5042 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
5043                           uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5044 {
5045         int rc = 0;
5046         uint32_t req_len;
5047         uint32_t emb_len;
5048         uint32_t alloc_len, mbox_tmo;
5049
5050         /* Calculate the total requested length of the dma memory */
5051         req_len = *extnt_cnt * sizeof(uint16_t);
5052
5053         /*
5054          * Calculate the size of an embedded mailbox.  The uint32_t
5055          * accounts for the extent-specific word.
5056          */
5057         emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5058                 sizeof(uint32_t);
5059
5060         /*
5061          * Presume the allocation and response will fit into an embedded
5062          * mailbox.  If not true, reconfigure to a non-embedded mailbox.
5063          */
5064         *emb = LPFC_SLI4_MBX_EMBED;
5065         if (req_len > emb_len) {
5066                 req_len = *extnt_cnt * sizeof(uint16_t) +
5067                         sizeof(union lpfc_sli4_cfg_shdr) +
5068                         sizeof(uint32_t);
5069                 *emb = LPFC_SLI4_MBX_NEMBED;
5070         }
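
        /*
         * Worked example (sizes illustrative): requesting 100 extent ids
         * needs req_len = 100 * sizeof(uint16_t) = 200 bytes.  If 200 <=
         * emb_len, the request stays embedded (LPFC_SLI4_MBX_EMBED);
         * otherwise req_len is regrown to include the config shdr and the
         * extent word, and the command is issued non-embedded
         * (LPFC_SLI4_MBX_NEMBED) via external SGEs.
         */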
5071
5072         alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5073                                      LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5074                                      req_len, *emb);
5075         if (alloc_len < req_len) {
5076                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5077                         "2982 Allocated DMA memory size (x%x) is "
5078                         "less than the requested DMA memory "
5079                         "size (x%x)\n", alloc_len, req_len);
5080                 return -ENOMEM;
5081         }
5082         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, *extnt_cnt, type, *emb);
5083         if (unlikely(rc))
5084                 return -EIO;
5085
5086         if (!phba->sli4_hba.intr_enable)
5087                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5088         else {
5089                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5090                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5091         }
5092
5093         if (unlikely(rc))
5094                 rc = -EIO;
5095         return rc;
5096 }
5097
5098 /**
5099  * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5100  * @phba: Pointer to HBA context object.
5101  * @type:  The resource extent type to allocate.
5102  *
5103  * This function allocates the number of elements for the specified
5104  * resource type.
5105  **/
5106 static int
5107 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5108 {
5109         bool emb = false;
5110         uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5111         uint16_t rsrc_id, rsrc_start, j, k;
5112         uint16_t *ids;
5113         int i, rc;
5114         unsigned long longs;
5115         unsigned long *bmask;
5116         struct lpfc_rsrc_blks *rsrc_blks;
5117         LPFC_MBOXQ_t *mbox;
5118         uint32_t length;
5119         struct lpfc_id_range *id_array = NULL;
5120         void *virtaddr = NULL;
5121         struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5122         struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5123         struct list_head *ext_blk_list;
5124
5125         rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5126                                             &rsrc_cnt,
5127                                             &rsrc_size);
5128         if (unlikely(rc))
5129                 return -EIO;
5130
5131         if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5132                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5133                         "3009 No available Resource Extents "
5134                         "for resource type 0x%x: Count: 0x%x, "
5135                         "Size 0x%x\n", type, rsrc_cnt,
5136                         rsrc_size);
5137                 return -ENOMEM;
5138         }
5139
5140         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT,
5141                         "2903 Available Resource Extents "
5142                         "for resource type 0x%x: Count: 0x%x, "
5143                         "Size 0x%x\n", type, rsrc_cnt,
5144                         rsrc_size);
5145
5146         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5147         if (!mbox)
5148                 return -ENOMEM;
5149
5150         rc = lpfc_sli4_cfg_post_extnts(phba, &rsrc_cnt, type, &emb, mbox);
5151         if (unlikely(rc)) {
5152                 rc = -EIO;
5153                 goto err_exit;
5154         }
5155
5156         /*
5157          * Figure out where the response is located.  Then get local pointers
5158          * to the response data.  The port does not guarantee to honor the
5159          * full requested extent count, so update the local variable with
5160          * the allocated count returned by the port.
5161          */
5162         if (emb == LPFC_SLI4_MBX_EMBED) {
5163                 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5164                 id_array = &rsrc_ext->u.rsp.id[0];
5165                 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5166         } else {
5167                 virtaddr = mbox->sge_array->addr[0];
5168                 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5169                 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5170                 id_array = &n_rsrc->id;
5171         }
5172
5173         longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5174         rsrc_id_cnt = rsrc_cnt * rsrc_size;
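
        /*
         * Sizing note: rsrc_cnt * rsrc_size is the total number of resource
         * ids, and longs is ceil(ids / BITS_PER_LONG) words of bitmap; e.g.
         * 2 extents of 64 ids each is 128 ids, i.e. 2 longs on a 64-bit
         * host.
         */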
5175
5176         /*
5177          * Based on the resource size and count, correct the base and max
5178          * resource values.
5179          */
5180         length = sizeof(struct lpfc_rsrc_blks);
5181         switch (type) {
5182         case LPFC_RSC_TYPE_FCOE_RPI:
5183                 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5184                                                    sizeof(unsigned long),
5185                                                    GFP_KERNEL);
5186                 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5187                         rc = -ENOMEM;
5188                         goto err_exit;
5189                 }
5190                 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5191                                                  sizeof(uint16_t),
5192                                                  GFP_KERNEL);
5193                 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5194                         kfree(phba->sli4_hba.rpi_bmask);
5195                         rc = -ENOMEM;
5196                         goto err_exit;
5197                 }
5198
5199                 /*
5200                  * The next_rpi was initialized with the maximum available
5201                  * count but the port may allocate a smaller number.  Catch
5202                  * that case and update the next_rpi.
5203                  */
5204                 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5205
5206                 /* Initialize local ptrs for common extent processing later. */
5207                 bmask = phba->sli4_hba.rpi_bmask;
5208                 ids = phba->sli4_hba.rpi_ids;
5209                 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5210                 break;
5211         case LPFC_RSC_TYPE_FCOE_VPI:
5212                 phba->vpi_bmask = kzalloc(longs *
5213                                           sizeof(unsigned long),
5214                                           GFP_KERNEL);
5215                 if (unlikely(!phba->vpi_bmask)) {
5216                         rc = -ENOMEM;
5217                         goto err_exit;
5218                 }
5219                 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5220                                          sizeof(uint16_t),
5221                                          GFP_KERNEL);
5222                 if (unlikely(!phba->vpi_ids)) {
5223                         kfree(phba->vpi_bmask);
5224                         rc = -ENOMEM;
5225                         goto err_exit;
5226                 }
5227
5228                 /* Initialize local ptrs for common extent processing later. */
5229                 bmask = phba->vpi_bmask;
5230                 ids = phba->vpi_ids;
5231                 ext_blk_list = &phba->lpfc_vpi_blk_list;
5232                 break;
5233         case LPFC_RSC_TYPE_FCOE_XRI:
5234                 phba->sli4_hba.xri_bmask = kzalloc(longs *
5235                                                    sizeof(unsigned long),
5236                                                    GFP_KERNEL);
5237                 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5238                         rc = -ENOMEM;
5239                         goto err_exit;
5240                 }
5241                 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5242                                                  sizeof(uint16_t),
5243                                                  GFP_KERNEL);
5244                 if (unlikely(!phba->sli4_hba.xri_ids)) {
5245                         kfree(phba->sli4_hba.xri_bmask);
5246                         rc = -ENOMEM;
5247                         goto err_exit;
5248                 }
5249
5250                 /* Initialize local ptrs for common extent processing later. */
5251                 bmask = phba->sli4_hba.xri_bmask;
5252                 ids = phba->sli4_hba.xri_ids;
5253                 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5254                 break;
5255         case LPFC_RSC_TYPE_FCOE_VFI:
5256                 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5257                                                    sizeof(unsigned long),
5258                                                    GFP_KERNEL);
5259                 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5260                         rc = -ENOMEM;
5261                         goto err_exit;
5262                 }
5263                 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5264                                                  sizeof(uint16_t),
5265                                                  GFP_KERNEL);
5266                 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5267                         kfree(phba->sli4_hba.vfi_bmask);
5268                         rc = -ENOMEM;
5269                         goto err_exit;
5270                 }
5271
5272                 /* Initialize local ptrs for common extent processing later. */
5273                 bmask = phba->sli4_hba.vfi_bmask;
5274                 ids = phba->sli4_hba.vfi_ids;
5275                 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5276                 break;
5277         default:
5278                 /* Unsupported resource type.  Fail the call. */
5279                 id_array = NULL;
5280                 bmask = NULL;
5281                 ids = NULL;
5282                 ext_blk_list = NULL;
5283                 rc = -EIO;
5284                 goto err_exit;
5284         }
5285
5286         /*
5287          * Complete initializing the extent configuration with the
5288          * allocated ids assigned to this function.  The bitmask serves
5289          * as an index into the array and manages the available ids.  The
5290          * array just stores the ids communicated to the port via the wqes.
5291          */
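        /*
         * Editor's note: each 32-bit response word packs two 16-bit extent
         * base ids, low half first, so for word k:
         *
         *   bits 15:0  -> lpfc_mbx_rsrc_id_word4_0 (extent i = 2k)
         *   bits 31:16 -> lpfc_mbx_rsrc_id_word4_1 (extent i = 2k + 1)
         *
         * which is why the loop below advances k only after odd values of i.
         */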
5292         for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5293                 if ((i % 2) == 0)
5294                         rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5295                                          &id_array[k]);
5296                 else
5297                         rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5298                                          &id_array[k]);
5299
5300                 rsrc_blks = kzalloc(length, GFP_KERNEL);
5301                 if (unlikely(!rsrc_blks)) {
5302                         rc = -ENOMEM;
5303                         kfree(bmask);
5304                         kfree(ids);
5305                         goto err_exit;
5306                 }
5307                 rsrc_blks->rsrc_start = rsrc_id;
5308                 rsrc_blks->rsrc_size = rsrc_size;
5309                 list_add_tail(&rsrc_blks->list, ext_blk_list);
5310                 rsrc_start = rsrc_id;
5311                 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
5312                         phba->sli4_hba.scsi_xri_start = rsrc_start +
5313                                 lpfc_sli4_get_els_iocb_cnt(phba);
5314
5315                 while (rsrc_id < (rsrc_start + rsrc_size)) {
5316                         ids[j] = rsrc_id;
5317                         rsrc_id++;
5318                         j++;
5319                 }
5320                 /* Entire word processed.  Get next word. */
5321                 if ((i % 2) == 1)
5322                         k++;
5323         }
5324  err_exit:
5325         lpfc_sli4_mbox_cmd_free(phba, mbox);
5326         return rc;
5327 }
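
/*
 * Editor's note: illustrative sketch, not part of the driver.  It shows how
 * a bitmask/id-array pair provisioned by lpfc_sli4_alloc_extent is typically
 * consumed: the bitmask hands out logical indices and the id array maps them
 * to the physical ids the port assigned.  The function name is hypothetical
 * and locking is omitted (the real allocators run under phba->hbalock).
 */
static uint16_t example_alloc_id(unsigned long *bmask, uint16_t *ids,
                                 uint16_t max_ids)
{
        unsigned long idx;

        /* Find a free logical index and mark it in use. */
        idx = find_next_zero_bit(bmask, max_ids, 0);
        if (idx >= max_ids)
                return 0xFFFF;  /* pool exhausted; cf. LPFC_RPI_ALLOC_ERROR */
        set_bit(idx, bmask);

        /* Map the logical index to the port-assigned physical id. */
        return ids[idx];
}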
5328
5329 /**
5330  * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
5331  * @phba: Pointer to HBA context object.
5332  * @type: The resource extent type.
5333  *
5334  * This function deallocates all extents of a particular resource type at
5335  * the port.  SLI4 does not allow deallocating an individual extent range;
5336  * all extents of the type are released at once.  The kernel memory that
5337  * tracks them is freed here as well, except for the RPI bitmask and id
5338  * array, which are cleaned up earlier in the teardown path.
5337  **/
5338 static int
5339 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5340 {
5341         int rc;
5342         uint32_t length, mbox_tmo = 0;
5343         LPFC_MBOXQ_t *mbox;
5344         struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5345         struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5346
5347         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5348         if (!mbox)
5349                 return -ENOMEM;
5350
5351         /*
5352          * This function sends an embedded mailbox because it only sends
5353          * the resource type.  All extents of this type are released by the
5354          * port.
5355          */
5356         length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5357                   sizeof(struct lpfc_sli4_cfg_mhdr));
5358         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5359                          LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5360                          length, LPFC_SLI4_MBX_EMBED);
5361
5362         /* Send an extents count of 0 - the dealloc doesn't use it. */
5363         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5364                                         LPFC_SLI4_MBX_EMBED);
5365         if (unlikely(rc)) {
5366                 rc = -EIO;
5367                 goto out_free_mbox;
5368         }
5369         if (!phba->sli4_hba.intr_enable)
5370                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5371         else {
5372                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5373                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5374         }
5375         if (unlikely(rc)) {
5376                 rc = -EIO;
5377                 goto out_free_mbox;
5378         }
5379
5380         dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
5381         if (bf_get(lpfc_mbox_hdr_status,
5382                    &dealloc_rsrc->header.cfg_shdr.response)) {
5383                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5384                                 "2919 Failed to release resource extents "
5385                                 "for type %d - Status 0x%x Add'l Status 0x%x. "
5386                                 "Resource memory not released.\n",
5387                                 type,
5388                                 bf_get(lpfc_mbox_hdr_status,
5389                                     &dealloc_rsrc->header.cfg_shdr.response),
5390                                 bf_get(lpfc_mbox_hdr_add_status,
5391                                     &dealloc_rsrc->header.cfg_shdr.response));
5392                 rc = -EIO;
5393                 goto out_free_mbox;
5394         }
5395
5396         /* Release kernel memory resources for the specific type. */
5397         switch (type) {
5398         case LPFC_RSC_TYPE_FCOE_VPI:
5399                 kfree(phba->vpi_bmask);
5400                 kfree(phba->vpi_ids);
5401                 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5402                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5403                                     &phba->lpfc_vpi_blk_list, list) {
5404                         list_del_init(&rsrc_blk->list);
5405                         kfree(rsrc_blk);
5406                 }
5407                 break;
5408         case LPFC_RSC_TYPE_FCOE_XRI:
5409                 kfree(phba->sli4_hba.xri_bmask);
5410                 kfree(phba->sli4_hba.xri_ids);
5411                 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5412                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5413                                     &phba->sli4_hba.lpfc_xri_blk_list, list) {
5414                         list_del_init(&rsrc_blk->list);
5415                         kfree(rsrc_blk);
5416                 }
5417                 break;
5418         case LPFC_RSC_TYPE_FCOE_VFI:
5419                 kfree(phba->sli4_hba.vfi_bmask);
5420                 kfree(phba->sli4_hba.vfi_ids);
5421                 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5422                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5423                                     &phba->sli4_hba.lpfc_vfi_blk_list, list) {
5424                         list_del_init(&rsrc_blk->list);
5425                         kfree(rsrc_blk);
5426                 }
5427                 break;
5428         case LPFC_RSC_TYPE_FCOE_RPI:
5429                 /* RPI bitmask and physical id array are cleaned up earlier. */
5430                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5431                                     &phba->sli4_hba.lpfc_rpi_blk_list, list) {
5432                         list_del_init(&rsrc_blk->list);
5433                         kfree(rsrc_blk);
5434                 }
5435                 break;
5436         default:
5437                 break;
5438         }
5439
5440         bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5441
5442  out_free_mbox:
5443         mempool_free(mbox, phba->mbox_mem_pool);
5444         return rc;
5445 }
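
/*
 * Editor's note: the mailbox submission pattern used above recurs throughout
 * this file and is worth spelling out once:
 *
 *      if (!phba->sli4_hba.intr_enable)
 *              rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *      else {
 *              mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
 *              rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
 *      }
 *
 * That is, poll the mailbox synchronously while interrupts are not yet
 * enabled (early initialization and reset paths), otherwise sleep on the
 * completion with a command-specific timeout.
 */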
5446
5447 /**
5448  * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
5449  * @phba: Pointer to HBA context object.
5450  *
5451  * This function allocates all SLI4 resource identifiers.
5452  **/
5453 int
5454 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5455 {
5456         int i, rc, error = 0;
5457         uint16_t count, base;
5458         unsigned long longs;
5459
5460         if (phba->sli4_hba.extents_in_use) {
5461                 /*
5462                  * The port supports resource extents. The XRI, VPI, VFI, RPI
5463                  * resource extent count must be read and allocated before
5464                  * provisioning the resource id arrays.
5465                  */
5466                 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5467                     LPFC_IDX_RSRC_RDY) {
5468                         /*
5469                          * Extent-based resources are set - the driver could
5470                          * be in a port reset. Figure out if any corrective
5471                          * actions need to be taken.
5472                          */
5473                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5474                                                  LPFC_RSC_TYPE_FCOE_VFI);
5475                         if (rc != 0)
5476                                 error++;
5477                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5478                                                  LPFC_RSC_TYPE_FCOE_VPI);
5479                         if (rc != 0)
5480                                 error++;
5481                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5482                                                  LPFC_RSC_TYPE_FCOE_XRI);
5483                         if (rc != 0)
5484                                 error++;
5485                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5486                                                  LPFC_RSC_TYPE_FCOE_RPI);
5487                         if (rc != 0)
5488                                 error++;
5489
5490                         /*
5491                          * It's possible that the number of resources
5492                          * provided to this port instance changed between
5493                          * resets.  Detect this condition and reallocate
5494                          * resources.  Otherwise, there is no action.
5495                          */
5496                         if (error) {
5497                                 lpfc_printf_log(phba, KERN_INFO,
5498                                                 LOG_MBOX | LOG_INIT,
5499                                                 "2931 Detected extent resource "
5500                                                 "change.  Reallocating all "
5501                                                 "extents.\n");
5502                                 rc = lpfc_sli4_dealloc_extent(phba,
5503                                                  LPFC_RSC_TYPE_FCOE_VFI);
5504                                 rc = lpfc_sli4_dealloc_extent(phba,
5505                                                  LPFC_RSC_TYPE_FCOE_VPI);
5506                                 rc = lpfc_sli4_dealloc_extent(phba,
5507                                                  LPFC_RSC_TYPE_FCOE_XRI);
5508                                 rc = lpfc_sli4_dealloc_extent(phba,
5509                                                  LPFC_RSC_TYPE_FCOE_RPI);
5510                         } else
5511                                 return 0;
5512                 }
5513
5514                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5515                 if (unlikely(rc))
5516                         goto err_exit;
5517
5518                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5519                 if (unlikely(rc))
5520                         goto err_exit;
5521
5522                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5523                 if (unlikely(rc))
5524                         goto err_exit;
5525
5526                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5527                 if (unlikely(rc))
5528                         goto err_exit;
5529                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5530                        LPFC_IDX_RSRC_RDY);
5531                 return rc;
5532         } else {
5533                 /*
5534                  * The port does not support resource extents.  The XRI, VPI,
5535                  * VFI, RPI resource ids were determined from READ_CONFIG.
5536                  * Just allocate the bitmasks and provision the resource id
5537                  * arrays.  If a port reset is active, the resources don't
5538                  * need any action - just exit.
5539                  */
5540                 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5541                     LPFC_IDX_RSRC_RDY)
5542                         return 0;
5543
5544                 /* RPIs. */
5545                 count = phba->sli4_hba.max_cfg_param.max_rpi;
5546                 base = phba->sli4_hba.max_cfg_param.rpi_base;
5547                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5548                 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5549                                                    sizeof(unsigned long),
5550                                                    GFP_KERNEL);
5551                 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5552                         rc = -ENOMEM;
5553                         goto err_exit;
5554                 }
5555                 phba->sli4_hba.rpi_ids = kzalloc(count *
5556                                                  sizeof(uint16_t),
5557                                                  GFP_KERNEL);
5558                 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5559                         rc = -ENOMEM;
5560                         goto free_rpi_bmask;
5561                 }
5562
5563                 for (i = 0; i < count; i++)
5564                         phba->sli4_hba.rpi_ids[i] = base + i;
5565
5566                 /* VPIs. */
5567                 count = phba->sli4_hba.max_cfg_param.max_vpi;
5568                 base = phba->sli4_hba.max_cfg_param.vpi_base;
5569                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5570                 phba->vpi_bmask = kzalloc(longs *
5571                                           sizeof(unsigned long),
5572                                           GFP_KERNEL);
5573                 if (unlikely(!phba->vpi_bmask)) {
5574                         rc = -ENOMEM;
5575                         goto free_rpi_ids;
5576                 }
5577                 phba->vpi_ids = kzalloc(count *
5578                                         sizeof(uint16_t),
5579                                         GFP_KERNEL);
5580                 if (unlikely(!phba->vpi_ids)) {
5581                         rc = -ENOMEM;
5582                         goto free_vpi_bmask;
5583                 }
5584
5585                 for (i = 0; i < count; i++)
5586                         phba->vpi_ids[i] = base + i;
5587
5588                 /* XRIs. */
5589                 count = phba->sli4_hba.max_cfg_param.max_xri;
5590                 base = phba->sli4_hba.max_cfg_param.xri_base;
5591                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5592                 phba->sli4_hba.xri_bmask = kzalloc(longs *
5593                                                    sizeof(unsigned long),
5594                                                    GFP_KERNEL);
5595                 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5596                         rc = -ENOMEM;
5597                         goto free_vpi_ids;
5598                 }
5599                 phba->sli4_hba.xri_ids = kzalloc(count *
5600                                                  sizeof(uint16_t),
5601                                                  GFP_KERNEL);
5602                 if (unlikely(!phba->sli4_hba.xri_ids)) {
5603                         rc = -ENOMEM;
5604                         goto free_xri_bmask;
5605                 }
5606
5607                 for (i = 0; i < count; i++)
5608                         phba->sli4_hba.xri_ids[i] = base + i;
5609
5610                 /* VFIs. */
5611                 count = phba->sli4_hba.max_cfg_param.max_vfi;
5612                 base = phba->sli4_hba.max_cfg_param.vfi_base;
5613                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5614                 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5615                                                    sizeof(unsigned long),
5616                                                    GFP_KERNEL);
5617                 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5618                         rc = -ENOMEM;
5619                         goto free_xri_ids;
5620                 }
5621                 phba->sli4_hba.vfi_ids = kzalloc(count *
5622                                                  sizeof(uint16_t),
5623                                                  GFP_KERNEL);
5624                 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5625                         rc = -ENOMEM;
5626                         goto free_vfi_bmask;
5627                 }
5628
5629                 for (i = 0; i < count; i++)
5630                         phba->sli4_hba.vfi_ids[i] = base + i;
5631
5632                 /*
5633                  * Mark all resources ready.  An HBA reset doesn't need
5634                  * to reset the initialization.
5635                  */
5636                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5637                        LPFC_IDX_RSRC_RDY);
5638                 return 0;
5639         }
5640
5641  free_vfi_bmask:
5642         kfree(phba->sli4_hba.vfi_bmask);
5643  free_xri_ids:
5644         kfree(phba->sli4_hba.xri_ids);
5645  free_xri_bmask:
5646         kfree(phba->sli4_hba.xri_bmask);
5647  free_vpi_ids:
5648         kfree(phba->vpi_ids);
5649  free_vpi_bmask:
5650         kfree(phba->vpi_bmask);
5651  free_rpi_ids:
5652         kfree(phba->sli4_hba.rpi_ids);
5653  free_rpi_bmask:
5654         kfree(phba->sli4_hba.rpi_bmask);
5655  err_exit:
5656         return rc;
5657 }
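
/*
 * Editor's note: illustrative refactoring sketch, not part of the driver.
 * The four bitmask/id-array provisioning blocks in the non-extent path above
 * all follow the same shape; factored out, each would reduce to one call to
 * a helper roughly like this (the name is hypothetical):
 */
static int example_prep_id_pool(unsigned long **bmask, uint16_t **ids,
                                uint16_t count, uint16_t base)
{
        int i;

        *bmask = kzalloc(BITS_TO_LONGS(count) * sizeof(unsigned long),
                         GFP_KERNEL);
        if (unlikely(!*bmask))
                return -ENOMEM;

        *ids = kzalloc(count * sizeof(uint16_t), GFP_KERNEL);
        if (unlikely(!*ids)) {
                kfree(*bmask);
                *bmask = NULL;
                return -ENOMEM;
        }

        /* Identity-map logical index i to physical id base + i. */
        for (i = 0; i < count; i++)
                (*ids)[i] = base + i;
        return 0;
}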
5658
5659 /**
5660  * lpfc_sli4_dealloc_resource_identifiers - Release all SLI4 resource identifiers.
5661  * @phba: Pointer to HBA context object.
5662  *
5663  * This function releases all SLI4 resource identifiers, either by
5664  * deallocating the port's resource extents or by freeing the driver's
5665  * bitmasks and id arrays directly.
5665  **/
5666 int
5667 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5668 {
5669         if (phba->sli4_hba.extents_in_use) {
5670                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5671                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5672                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5673                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5674         } else {
5675                 kfree(phba->vpi_bmask);
5676                 kfree(phba->vpi_ids);
5677                 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5678                 kfree(phba->sli4_hba.xri_bmask);
5679                 kfree(phba->sli4_hba.xri_ids);
5680                 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5681                 kfree(phba->sli4_hba.vfi_bmask);
5682                 kfree(phba->sli4_hba.vfi_ids);
5683                 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5684                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5685         }
5686
5687         return 0;
5688 }
5689
5690 /**
5691  * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
5692  * @phba: Pointer to HBA context object.
5693  * @type: The resource extent type.
5694  * @extnt_cnt: buffer to hold the port's extent count response.
5695  * @extnt_size: buffer to hold the port's extent size response.
5696  *
5697  * This function calls the port to read the host allocated extents
5698  * for a particular type.
5699  **/
5700 int
5701 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
5702                                uint16_t *extnt_cnt, uint16_t *extnt_size)
5703 {
5704         bool emb;
5705         int rc = 0;
5706         uint16_t curr_blks = 0;
5707         uint32_t req_len, emb_len;
5708         uint32_t alloc_len, mbox_tmo;
5709         struct list_head *blk_list_head;
5710         struct lpfc_rsrc_blks *rsrc_blk;
5711         LPFC_MBOXQ_t *mbox;
5712         void *virtaddr = NULL;
5713         struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5714         struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5715         union  lpfc_sli4_cfg_shdr *shdr;
5716
5717         switch (type) {
5718         case LPFC_RSC_TYPE_FCOE_VPI:
5719                 blk_list_head = &phba->lpfc_vpi_blk_list;
5720                 break;
5721         case LPFC_RSC_TYPE_FCOE_XRI:
5722                 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
5723                 break;
5724         case LPFC_RSC_TYPE_FCOE_VFI:
5725                 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
5726                 break;
5727         case LPFC_RSC_TYPE_FCOE_RPI:
5728                 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
5729                 break;
5730         default:
5731                 return -EIO;
5732         }
5733
5734         /* Count the number of extents currently allocated for this type. */
5735         list_for_each_entry(rsrc_blk, blk_list_head, list) {
5736                 if (curr_blks == 0) {
5737                         /*
5738                          * The GET_ALLOCATED mailbox does not return the size,
5739                          * just the count.  All extents of a given type have
5740                          * the same size, so take the size from the current
5741                          * block and set the return value now.
5743                          */
5744                         *extnt_size = rsrc_blk->rsrc_size;
5745                 }
5746                 curr_blks++;
5747         }
5748
5749         /* Calculate the total requested length of the dma memory. */
5750         req_len = curr_blks * sizeof(uint16_t);
5751
5752         /*
5753          * Calculate the size of an embedded mailbox.  The uint32_t
5754          * accounts for the extents-specific word.
5755          */
5756         emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5757                 sizeof(uint32_t);
5758
5759         /*
5760          * Presume the allocation and response will fit into an embedded
5761          * mailbox.  If not true, reconfigure to a non-embedded mailbox.
5762          */
5763         emb = LPFC_SLI4_MBX_EMBED;
5764         if (req_len > emb_len) {
5766                 req_len = curr_blks * sizeof(uint16_t) +
5767                         sizeof(union lpfc_sli4_cfg_shdr) +
5768                         sizeof(uint32_t);
5769                 emb = LPFC_SLI4_MBX_NEMBED;
5770         }
5771
5772         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5773         if (!mbox)
5774                 return -ENOMEM;
5775         memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
5776
5777         alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5778                                      LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
5779                                      req_len, emb);
5780         if (alloc_len < req_len) {
5781                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5782                         "2983 Allocated DMA memory size (x%x) is "
5783                         "less than the requested DMA memory "
5784                         "size (x%x)\n", alloc_len, req_len);
5785                 rc = -ENOMEM;
5786                 goto err_exit;
5787         }
5788         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
5789         if (unlikely(rc)) {
5790                 rc = -EIO;
5791                 goto err_exit;
5792         }
5793
5794         if (!phba->sli4_hba.intr_enable)
5795                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5796         else {
5797                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5798                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5799         }
5800
5801         if (unlikely(rc)) {
5802                 rc = -EIO;
5803                 goto err_exit;
5804         }
5805
5806         /*
5807          * Figure out where the response is located.  Then get local pointers
5808          * to the response data.  The port does not guarantee that it will
5809          * satisfy the full extent count requested, so update the local
5810          * variable with the count actually allocated by the port.
5811          */
5812         if (emb == LPFC_SLI4_MBX_EMBED) {
5813                 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5814                 shdr = &rsrc_ext->header.cfg_shdr;
5815                 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5816         } else {
5817                 virtaddr = mbox->sge_array->addr[0];
5818                 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5819                 shdr = &n_rsrc->cfg_shdr;
5820                 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5821         }
5822
5823         if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
5824                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5825                         "2984 Failed to read allocated resources "
5826                         "for type %d - Status 0x%x Add'l Status 0x%x.\n",
5827                         type,
5828                         bf_get(lpfc_mbox_hdr_status, &shdr->response),
5829                         bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
5830                 rc = -EIO;
5831                 goto err_exit;
5832         }
5833  err_exit:
5834         lpfc_sli4_mbox_cmd_free(phba, mbox);
5835         return rc;
5836 }
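
/*
 * Editor's note: illustrative caller sketch, not part of the driver.  The
 * routine above reports how many extents of @type the port has allocated to
 * this function and how many ids each extent carries, so the total id count
 * is the product of the two out-parameters (the function name below is
 * hypothetical):
 */
static int example_count_xri_ids(struct lpfc_hba *phba, uint32_t *total)
{
        uint16_t cnt = 0, size = 0;
        int rc;

        rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
                                            &cnt, &size);
        if (rc)
                return rc;

        *total = (uint32_t)cnt * size;  /* total XRI ids for this function */
        return 0;
}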
5837
5838 /**
5839  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
5840  * @phba: Pointer to HBA context object.
5841  *
5842  * This function is the main SLI4 device initialization PCI function. This
5843  * function is called by the HBA initialization code, HBA reset code and
5844  * HBA error attention handler code. Caller is not required to hold any
5845  * locks.
5846  **/
5847 int
5848 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
5849 {
5850         int rc;
5851         LPFC_MBOXQ_t *mboxq;
5852         struct lpfc_mqe *mqe;
5853         uint8_t *vpd;
5854         uint32_t vpd_size;
5855         uint32_t ftr_rsp = 0;
5856         struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
5857         struct lpfc_vport *vport = phba->pport;
5858         struct lpfc_dmabuf *mp;
5859
5860         /* Perform a PCI function reset to start from a clean state */
5861         rc = lpfc_pci_function_reset(phba);
5862         if (unlikely(rc))
5863                 return -ENODEV;
5864
5865         /* Check the HBA Host Status Register for readiness */
5866         rc = lpfc_sli4_post_status_check(phba);
5867         if (unlikely(rc))
5868                 return -ENODEV;
5869
5870         spin_lock_irq(&phba->hbalock);
5871         phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
5872         spin_unlock_irq(&phba->hbalock);
5874
5875         /*
5876          * Allocate a single mailbox container for initializing the
5877          * port.
5878          */
5879         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5880         if (!mboxq)
5881                 return -ENOMEM;
5882
5883         /*
5884          * Continue initialization with default values even if driver failed
5885          * to read FCoE param config regions
5886          */
5887         if (lpfc_sli4_read_fcoe_params(phba, mboxq))
5888                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
5889                         "2570 Failed to read FCoE parameters\n");
5890
5891         /* Issue READ_REV to collect vpd and FW information. */
5892         vpd_size = SLI4_PAGE_SIZE;
5893         vpd = kzalloc(vpd_size, GFP_KERNEL);
5894         if (!vpd) {
5895                 rc = -ENOMEM;
5896                 goto out_free_mbox;
5897         }
5898
5899         rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
5900         if (unlikely(rc)) {
5901                 kfree(vpd);
5902                 goto out_free_mbox;
5903         }
5904         mqe = &mboxq->u.mqe;
5905         phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
5906         if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
5907                 phba->hba_flag |= HBA_FCOE_MODE;
5908         else
5909                 phba->hba_flag &= ~HBA_FCOE_MODE;
5910
5911         if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
5912                 LPFC_DCBX_CEE_MODE)
5913                 phba->hba_flag |= HBA_FIP_SUPPORT;
5914         else
5915                 phba->hba_flag &= ~HBA_FIP_SUPPORT;
5916
5917         if (phba->sli_rev != LPFC_SLI_REV4) {
5918                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5919                         "0376 READ_REV Error. SLI Level %d "
5920                         "FCoE enabled %d\n",
5921                         phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
5922                 rc = -EIO;
5923                 kfree(vpd);
5924                 goto out_free_mbox;
5925         }
5926
5927         /*
5928          * Retrieve the SLI4 device physical port name; failure to do so
5929          * is considered non-fatal.
5930          */
5931         rc = lpfc_sli4_retrieve_pport_name(phba);
5932         if (!rc)
5933                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5934                                 "3080 Successfully retrieved SLI4 device "
5935                                 "physical port name: %s.\n", phba->Port);
5936
5937         /*
5938          * Evaluate the read rev and vpd data. Populate the driver
5939          * state with the results. If this routine fails, the failure
5940          * is not fatal as the driver will use generic values.
5941          */
5942         rc = lpfc_parse_vpd(phba, vpd, vpd_size);
5943         if (unlikely(!rc)) {
5944                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5945                                 "0377 Error %d parsing vpd. "
5946                                 "Using defaults.\n", rc);
5947                 rc = 0;
5948         }
5949         kfree(vpd);
5950
5951         /* Save information as VPD data */
5952         phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
5953         phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
5954         phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
5955         phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
5956                                          &mqe->un.read_rev);
5957         phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
5958                                        &mqe->un.read_rev);
5959         phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
5960                                             &mqe->un.read_rev);
5961         phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
5962                                            &mqe->un.read_rev);
5963         phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
5964         memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
5965         phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
5966         memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
5967         phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
5968         memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
5969         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5970                         "(%d):0380 READ_REV Status x%x "
5971                         "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
5972                         mboxq->vport ? mboxq->vport->vpi : 0,
5973                         bf_get(lpfc_mqe_status, mqe),
5974                         phba->vpd.rev.opFwName,
5975                         phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
5976                         phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
5977
5978         /*
5979          * Discover the port's supported feature set and match it against the
5980          * hosts requests.
5981          */
5982         lpfc_request_features(phba, mboxq);
5983         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5984         if (unlikely(rc)) {
5985                 rc = -EIO;
5986                 goto out_free_mbox;
5987         }
5988
5989         /*
5990          * The port must support FCP initiator mode as this is the
5991          * only mode running in the host.
5992          */
5993         if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
5994                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
5995                                 "0378 No support for fcpi mode.\n");
5996                 ftr_rsp++;
5997         }
5998         if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
5999                 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
6000         else
6001                 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
6002         /*
6003          * If the port cannot support the host's requested features
6004          * then turn off the global config parameters to disable the
6005          * feature in the driver.  This is not a fatal error.
6006          */
6007         phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
6008         if (phba->cfg_enable_bg) {
6009                 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
6010                         phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
6011                 else
6012                         ftr_rsp++;
6013         }
6014
6015         if (phba->max_vpi && phba->cfg_enable_npiv &&
6016             !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6017                 ftr_rsp++;
6018
6019         if (ftr_rsp) {
6020                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6021                                 "0379 Feature Mismatch Data: x%08x %08x "
6022                                 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
6023                                 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
6024                                 phba->cfg_enable_npiv, phba->max_vpi);
6025                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
6026                         phba->cfg_enable_bg = 0;
6027                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6028                         phba->cfg_enable_npiv = 0;
6029         }
6030
6031         /* These SLI3 features are assumed in SLI4 */
6032         spin_lock_irq(&phba->hbalock);
6033         phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
6034         spin_unlock_irq(&phba->hbalock);
6035
6036         /*
6037          * Allocate all resources (xri, rpi, vpi, vfi) now.  Subsequent
6038          * calls depend on these resources to complete port setup.
6039          */
6040         rc = lpfc_sli4_alloc_resource_identifiers(phba);
6041         if (rc) {
6042                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6043                                 "2920 Failed to alloc Resource IDs "
6044                                 "rc = x%x\n", rc);
6045                 goto out_free_mbox;
6046         }
6047
6048         /* Read the port's service parameters. */
6049         rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
6050         if (rc) {
6051                 phba->link_state = LPFC_HBA_ERROR;
6052                 rc = -ENOMEM;
6053                 goto out_free_mbox;
6054         }
6055
6056         mboxq->vport = vport;
6057         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6058         mp = (struct lpfc_dmabuf *) mboxq->context1;
6059         if (rc == MBX_SUCCESS) {
6060                 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
6061                 rc = 0;
6062         }
6063
6064         /*
6065          * This memory was allocated by the lpfc_read_sparam routine. Release
6066          * it to the mbuf pool.
6067          */
6068         lpfc_mbuf_free(phba, mp->virt, mp->phys);
6069         kfree(mp);
6070         mboxq->context1 = NULL;
6071         if (unlikely(rc)) {
6072                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6073                                 "0382 READ_SPARAM command failed "
6074                                 "status %d, mbxStatus x%x\n",
6075                                 rc, bf_get(lpfc_mqe_status, mqe));
6076                 phba->link_state = LPFC_HBA_ERROR;
6077                 rc = -EIO;
6078                 goto out_free_mbox;
6079         }
6080
6081         lpfc_update_vport_wwn(vport);
6082
6083         /* Update the fc_host data structures with new wwn. */
6084         fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
6085         fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
6086
6087         /* Register SGL pool to the device using non-embedded mailbox command */
6088         if (!phba->sli4_hba.extents_in_use) {
6089                 rc = lpfc_sli4_post_els_sgl_list(phba);
6090                 if (unlikely(rc)) {
6091                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6092                                         "0582 Error %d during els sgl post "
6093                                         "operation\n", rc);
6094                         rc = -ENODEV;
6095                         goto out_free_mbox;
6096                 }
6097         } else {
6098                 rc = lpfc_sli4_post_els_sgl_list_ext(phba);
6099                 if (unlikely(rc)) {
6100                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6101                                         "2560 Error %d during els sgl post "
6102                                         "operation\n", rc);
6103                         rc = -ENODEV;
6104                         goto out_free_mbox;
6105                 }
6106         }
6107
6108         /* Register SCSI SGL pool to the device */
6109         rc = lpfc_sli4_repost_scsi_sgl_list(phba);
6110         if (unlikely(rc)) {
6111                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6112                                 "0383 Error %d during scsi sgl post "
6113                                 "operation\n", rc);
6114                 /* Some Scsi buffers were moved to the abort scsi list */
6115                 /* A pci function reset will repost them */
6116                 rc = -ENODEV;
6117                 goto out_free_mbox;
6118         }
6119
6120         /* Post the rpi header region to the device. */
6121         rc = lpfc_sli4_post_all_rpi_hdrs(phba);
6122         if (unlikely(rc)) {
6123                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6124                                 "0393 Error %d during rpi post operation\n",
6125                                 rc);
6126                 rc = -ENODEV;
6127                 goto out_free_mbox;
6128         }
6129
6130         /* Create all the SLI4 queues */
6131         rc = lpfc_sli4_queue_create(phba);
6132         if (rc) {
6133                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6134                                 "3089 Failed to allocate queues\n");
6135                 rc = -ENODEV;
6136                 goto out_stop_timers;
6137         }
6138         /* Set up all the queues to the device */
6139         rc = lpfc_sli4_queue_setup(phba);
6140         if (unlikely(rc)) {
6141                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6142                                 "0381 Error %d during queue setup.\n", rc);
6143                 goto out_destroy_queue;
6144         }
6145
6146         /* Arm the CQs and then EQs on device */
6147         lpfc_sli4_arm_cqeq_intr(phba);
6148
6149         /* Indicate device interrupt mode */
6150         phba->sli4_hba.intr_enable = 1;
6151
6152         /* Allow asynchronous mailbox command to go through */
6153         spin_lock_irq(&phba->hbalock);
6154         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
6155         spin_unlock_irq(&phba->hbalock);
6156
6157         /* Post receive buffers to the device */
6158         lpfc_sli4_rb_setup(phba);
6159
6160         /* Reset HBA FCF states after HBA reset */
6161         phba->fcf.fcf_flag = 0;
6162         phba->fcf.current_rec.flag = 0;
6163
6164         /* Start the ELS watchdog timer */
6165         mod_timer(&vport->els_tmofunc,
6166                   jiffies + HZ * (phba->fc_ratov * 2));
6167
6168         /* Start heart beat timer */
6169         mod_timer(&phba->hb_tmofunc,
6170                   jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
6171         phba->hb_outstanding = 0;
6172         phba->last_completion_time = jiffies;
6173
6174         /* Start error attention (ERATT) polling timer */
6175         mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
6176
6177         /* Enable PCIe device Advanced Error Reporting (AER) if configured */
6178         if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
6179                 rc = pci_enable_pcie_error_reporting(phba->pcidev);
6180                 if (!rc) {
6181                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6182                                         "2829 This device supports "
6183                                         "Advanced Error Reporting (AER)\n");
6184                         spin_lock_irq(&phba->hbalock);
6185                         phba->hba_flag |= HBA_AER_ENABLED;
6186                         spin_unlock_irq(&phba->hbalock);
6187                 } else {
6188                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6189                                         "2830 This device does not support "
6190                                         "Advanced Error Reporting (AER)\n");
6191                         phba->cfg_aer_support = 0;
6192                 }
6193                 rc = 0;
6194         }
6195
6196         if (!(phba->hba_flag & HBA_FCOE_MODE)) {
6197                 /*
6198                  * The FC Port needs to register FCFI (index 0)
6199                  */
6200                 lpfc_reg_fcfi(phba, mboxq);
6201                 mboxq->vport = phba->pport;
6202                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6203                 if (rc != MBX_SUCCESS)
6204                         goto out_unset_queue;
6205                 rc = 0;
6206                 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
6207                                         &mboxq->u.mqe.un.reg_fcfi);
6208         }
6209         /*
6210          * The port is ready, set the host's link state to LINK_DOWN
6211          * in preparation for link interrupts.
6212          */
6213         spin_lock_irq(&phba->hbalock);
6214         phba->link_state = LPFC_LINK_DOWN;
6215         spin_unlock_irq(&phba->hbalock);
6216         if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
6217                 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
6218                 if (rc)
6219                         goto out_unset_queue;
6220         }
6221         mempool_free(mboxq, phba->mbox_mem_pool);
6222         return rc;
6223 out_unset_queue:
6224         /* Unset all the queues set up in this routine when error out */
6225         lpfc_sli4_queue_unset(phba);
6226 out_destroy_queue:
6227         lpfc_sli4_queue_destroy(phba);
6228 out_stop_timers:
6229         lpfc_stop_hba_timers(phba);
6230 out_free_mbox:
6231         mempool_free(mboxq, phba->mbox_mem_pool);
6232         return rc;
6233 }
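
/*
 * Editor's note: the error labels above unwind in strict reverse order of
 * setup (queues unset, queues destroyed, timers stopped, mailbox freed).
 * Any new setup step added to this routine should jump to the label that
 * matches the last step known to have succeeded, keeping teardown complete.
 */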
6234
6235 /**
6236  * lpfc_mbox_timeout - Timeout call back function for mbox timer
6237  * @ptr: context object - pointer to hba structure.
6238  *
6239  * This is the callback function for mailbox timer. The mailbox
6240  * timer is armed when a new mailbox command is issued and the timer
6241  * is deleted when the mailbox completes. The function is called by
6242  * the kernel timer code when a mailbox does not complete within the
6243  * expected time. This function wakes up the worker thread to
6244  * process the mailbox timeout and returns. All the processing is
6245  * done by the worker thread function lpfc_mbox_timeout_handler.
6246  **/
6247 void
6248 lpfc_mbox_timeout(unsigned long ptr)
6249 {
6250         struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
6251         unsigned long iflag;
6252         uint32_t tmo_posted;
6253
6254         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
6255         tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
6256         if (!tmo_posted)
6257                 phba->pport->work_port_events |= WORKER_MBOX_TMO;
6258         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
6259
6260         if (!tmo_posted)
6261                 lpfc_worker_wake_up(phba);
6263 }
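
/*
 * Editor's note: a sketch of the timer lifecycle described above, with
 * hypothetical call sites.  The timer is armed when a mailbox command is
 * issued and disarmed when it completes, e.g.:
 *
 *      mod_timer(&psli->mbox_tmo, jiffies + timeout);  (at issue time)
 *      ...
 *      del_timer(&psli->mbox_tmo);                     (on completion)
 *
 * On expiry this callback only posts WORKER_MBOX_TMO and wakes the worker
 * thread; the heavy lifting happens in lpfc_mbox_timeout_handler() below.
 */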
6264
6265
6266 /**
6267  * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
6268  * @phba: Pointer to HBA context object.
6269  *
6270  * This function is called from worker thread when a mailbox command times out.
6271  * The caller is not required to hold any locks. This function will reset the
6272  * HBA and recover all the pending commands.
6273  **/
6274 void
6275 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
6276 {
6277         LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
6278         MAILBOX_t *mb = &pmbox->u.mb;
6279         struct lpfc_sli *psli = &phba->sli;
6280         struct lpfc_sli_ring *pring;
6281
6282         /* Check the pmbox pointer first.  There is a race condition
6283          * between the mbox timeout handler getting executed in the
6284          * worklist and the mailbox actually completing. When this
6285          * race condition occurs, the mbox_active will be NULL.
6286          */
6287         spin_lock_irq(&phba->hbalock);
6288         if (pmbox == NULL) {
6289                 lpfc_printf_log(phba, KERN_WARNING,
6290                                 LOG_MBOX | LOG_SLI,
6291                                 "0353 Active Mailbox cleared - mailbox timeout "
6292                                 "exiting\n");
6293                 spin_unlock_irq(&phba->hbalock);
6294                 return;
6295         }
6296
6297         /* Mbox cmd <mbxCommand> timeout */
6298         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6299                         "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
6300                         mb->mbxCommand,
6301                         phba->pport->port_state,
6302                         phba->sli.sli_flag,
6303                         phba->sli.mbox_active);
6304         spin_unlock_irq(&phba->hbalock);
6305
6306         /* Setting state unknown so lpfc_sli_abort_iocb_ring
6307          * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
6308          * it to fail all outstanding SCSI IO.
6309          */
6310         spin_lock_irq(&phba->pport->work_port_lock);
6311         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
6312         spin_unlock_irq(&phba->pport->work_port_lock);
6313         spin_lock_irq(&phba->hbalock);
6314         phba->link_state = LPFC_LINK_UNKNOWN;
6315         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
6316         spin_unlock_irq(&phba->hbalock);
6317
6318         pring = &psli->ring[psli->fcp_ring];
6319         lpfc_sli_abort_iocb_ring(phba, pring);
6320
6321         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6322                         "0345 Resetting board due to mailbox timeout\n");
6323
6324         /* Reset the HBA device */
6325         lpfc_reset_hba(phba);
6326 }
6327
6328 /**
6329  * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
6330  * @phba: Pointer to HBA context object.
6331  * @pmbox: Pointer to mailbox object.
6332  * @flag: Flag indicating how the mailbox needs to be processed.
6333  *
6334  * This function is called by discovery code and HBA management code
6335  * to submit a mailbox command to firmware with SLI-3 interface spec. This
6336  * function gets the hbalock to protect the data structures.
6337  * The mailbox command can be submitted in polling mode, in which case
6338  * this function will wait in a polling loop for the completion of the
6339  * mailbox.
6340  * If the mailbox is submitted in no_wait mode (not polling) the
6341  * function will submit the command and return immediately without waiting
6342  * for the mailbox completion. The no_wait mode is supported only when the
6343  * HBA is in SLI2/SLI3 mode with interrupts enabled.
6344  * The SLI interface allows only one mailbox pending at a time. If the
6345  * mailbox is issued in polling mode and there is already a mailbox
6346  * pending, then the function will return an error. If the mailbox is issued
6347  * in NO_WAIT mode and there is a mailbox pending already, the function
6348  * will return MBX_BUSY after queuing the mailbox into the mailbox queue.
6349  * The sli layer owns the mailbox object until the completion of the mailbox
6350  * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
6351  * return codes the caller owns the mailbox command after the return of
6352  * the function.
6353  **/
6354 static int
6355 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
6356                        uint32_t flag)
6357 {
6358         MAILBOX_t *mb;
6359         struct lpfc_sli *psli = &phba->sli;
6360         uint32_t status, evtctr;
6361         uint32_t ha_copy, hc_copy;
6362         int i;
6363         unsigned long timeout;
6364         unsigned long drvr_flag = 0;
6365         uint32_t word0, ldata;
6366         void __iomem *to_slim;
6367         int processing_queue = 0;
6368
6369         spin_lock_irqsave(&phba->hbalock, drvr_flag);
6370         if (!pmbox) {
6371                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6372                 /* processing mbox queue from intr_handler */
6373                 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
6374                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6375                         return MBX_SUCCESS;
6376                 }
6377                 processing_queue = 1;
6378                 pmbox = lpfc_mbox_get(phba);
6379                 if (!pmbox) {
6380                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6381                         return MBX_SUCCESS;
6382                 }
6383         }
6384
6385         if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
6386                 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
6387                 if (!pmbox->vport) {
6388                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6389                         lpfc_printf_log(phba, KERN_ERR,
6390                                         LOG_MBOX | LOG_VPORT,
6391                                         "1806 Mbox x%x failed. No vport\n",
6392                                         pmbox->u.mb.mbxCommand);
6393                         dump_stack();
6394                         goto out_not_finished;
6395                 }
6396         }
6397
6398         /* If the PCI channel is in offline state, do not post mbox. */
6399         if (unlikely(pci_channel_offline(phba->pcidev))) {
6400                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6401                 goto out_not_finished;
6402         }
6403
6404         /* If HBA has a deferred error attention, fail the iocb. */
6405         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
6406                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6407                 goto out_not_finished;
6408         }
6409
6410         psli = &phba->sli;
6411
6412         mb = &pmbox->u.mb;
6413         status = MBX_SUCCESS;
6414
6415         if (phba->link_state == LPFC_HBA_ERROR) {
6416                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6417
6418                 /* Mbox command <mbxCommand> cannot issue */
6419                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6420                                 "(%d):0311 Mailbox command x%x cannot "
6421                                 "issue Data: x%x x%x\n",
6422                                 pmbox->vport ? pmbox->vport->vpi : 0,
6423                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
6424                 goto out_not_finished;
6425         }
6426
6427         if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
6428                 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
6429                         !(hc_copy & HC_MBINT_ENA)) {
6430                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6431                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6432                                 "(%d):2528 Mailbox command x%x cannot "
6433                                 "issue Data: x%x x%x\n",
6434                                 pmbox->vport ? pmbox->vport->vpi : 0,
6435                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
6436                         goto out_not_finished;
6437                 }
6438         }
6439
6440         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6441                 /* Polling for a mbox command when another one is already active
6442                  * is not allowed in SLI. Also, the driver must have established
6443                  * SLI2 mode to queue and process multiple mbox commands.
6444                  */
6445
6446                 if (flag & MBX_POLL) {
6447                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6448
6449                         /* Mbox command <mbxCommand> cannot issue */
6450                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6451                                         "(%d):2529 Mailbox command x%x "
6452                                         "cannot issue Data: x%x x%x\n",
6453                                         pmbox->vport ? pmbox->vport->vpi : 0,
6454                                         pmbox->u.mb.mbxCommand,
6455                                         psli->sli_flag, flag);
6456                         goto out_not_finished;
6457                 }
6458
6459                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
6460                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6461                         /* Mbox command <mbxCommand> cannot issue */
6462                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6463                                         "(%d):2530 Mailbox command x%x "
6464                                         "cannot issue Data: x%x x%x\n",
6465                                         pmbox->vport ? pmbox->vport->vpi : 0,
6466                                         pmbox->u.mb.mbxCommand,
6467                                         psli->sli_flag, flag);
6468                         goto out_not_finished;
6469                 }
6470
6471                 /* Another mailbox command is still being processed, queue this
6472                  * command to be processed later.
6473                  */
6474                 lpfc_mbox_put(phba, pmbox);
6475
6476                 /* Mbox cmd issue - BUSY */
6477                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6478                                 "(%d):0308 Mbox cmd issue - BUSY Data: "
6479                                 "x%x x%x x%x x%x\n",
6480                                 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
6481                                 mb->mbxCommand, phba->pport->port_state,
6482                                 psli->sli_flag, flag);
6483
6484                 psli->slistat.mbox_busy++;
6485                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6486
6487                 if (pmbox->vport) {
6488                         lpfc_debugfs_disc_trc(pmbox->vport,
6489                                 LPFC_DISC_TRC_MBOX_VPORT,
6490                                 "MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
6491                                 (uint32_t)mb->mbxCommand,
6492                                 mb->un.varWords[0], mb->un.varWords[1]);
6493                 } else {
6495                         lpfc_debugfs_disc_trc(phba->pport,
6496                                 LPFC_DISC_TRC_MBOX,
6497                                 "MBOX Bsy:        cmd:x%x mb:x%x x%x",
6498                                 (uint32_t)mb->mbxCommand,
6499                                 mb->un.varWords[0], mb->un.varWords[1]);
6500                 }
6501
6502                 return MBX_BUSY;
6503         }
6504
6505         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
6506
6507         /* If we are not polling, we MUST be in SLI2 mode */
6508         if (flag != MBX_POLL) {
6509                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
6510                     (mb->mbxCommand != MBX_KILL_BOARD)) {
6511                         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6512                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6513                         /* Mbox command <mbxCommand> cannot issue */
6514                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6515                                         "(%d):2531 Mailbox command x%x "
6516                                         "cannot issue Data: x%x x%x\n",
6517                                         pmbox->vport ? pmbox->vport->vpi : 0,
6518                                         pmbox->u.mb.mbxCommand,
6519                                         psli->sli_flag, flag);
6520                         goto out_not_finished;
6521                 }
6522                 /* timeout active mbox command */
6523                 mod_timer(&psli->mbox_tmo, (jiffies +
6524                                (HZ * lpfc_mbox_tmo_val(phba, pmbox))));
6525         }
6526
6527         /* Mailbox cmd <cmd> issue */
6528         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6529                         "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
6530                         "x%x\n",
6531                         pmbox->vport ? pmbox->vport->vpi : 0,
6532                         mb->mbxCommand, phba->pport->port_state,
6533                         psli->sli_flag, flag);
6534
6535         if (mb->mbxCommand != MBX_HEARTBEAT) {
6536                 if (pmbox->vport) {
6537                         lpfc_debugfs_disc_trc(pmbox->vport,
6538                                 LPFC_DISC_TRC_MBOX_VPORT,
6539                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
6540                                 (uint32_t)mb->mbxCommand,
6541                                 mb->un.varWords[0], mb->un.varWords[1]);
6542                 } else {
6544                         lpfc_debugfs_disc_trc(phba->pport,
6545                                 LPFC_DISC_TRC_MBOX,
6546                                 "MBOX Send:       cmd:x%x mb:x%x x%x",
6547                                 (uint32_t)mb->mbxCommand,
6548                                 mb->un.varWords[0], mb->un.varWords[1]);
6549                 }
6550         }
6551
6552         psli->slistat.mbox_cmd++;
6553         evtctr = psli->slistat.mbox_event;
6554
6555         /* next set own bit for the adapter and copy over command word */
6556         mb->mbxOwner = OWN_CHIP;
6557
6558         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
6559                 /* Populate mbox extension offset word. */
6560                 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
6561                         *(((uint32_t *)mb) + pmbox->mbox_offset_word)
6562                                 = (uint8_t *)phba->mbox_ext
6563                                   - (uint8_t *)phba->mbox;
6564                 }
6565
6566                 /* Copy the mailbox extension data */
6567                 if (pmbox->in_ext_byte_len && pmbox->context2) {
6568                         lpfc_sli_pcimem_bcopy(pmbox->context2,
6569                                 (uint8_t *)phba->mbox_ext,
6570                                 pmbox->in_ext_byte_len);
6571                 }
6572                 /* Copy command data to host SLIM area */
6573                 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
6574         } else {
6575                 /* Populate mbox extension offset word. */
6576                 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
6577                         *(((uint32_t *)mb) + pmbox->mbox_offset_word)
6578                                 = MAILBOX_HBA_EXT_OFFSET;
6579
6580                 /* Copy the mailbox extension data */
6581                 if (pmbox->in_ext_byte_len && pmbox->context2) {
6582                         lpfc_memcpy_to_slim(phba->MBslimaddr +
6583                                 MAILBOX_HBA_EXT_OFFSET,
6584                                 pmbox->context2, pmbox->in_ext_byte_len);
6585
6586                 }
6587                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
6588                         /* copy command data into host mbox for cmpl */
6589                         lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
6590                 }
6591
6592                 /* First copy mbox command data to HBA SLIM, skip past first
6593                    word */
6594                 to_slim = phba->MBslimaddr + sizeof(uint32_t);
6595                 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
6596                             MAILBOX_CMD_SIZE - sizeof(uint32_t));
6597
6598                 /* Next copy over first word, with mbxOwner set */
6599                 ldata = *((uint32_t *)mb);
6600                 to_slim = phba->MBslimaddr;
6601                 writel(ldata, to_slim);
6602                 readl(to_slim); /* flush */
6603
6604                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
6605                         /* switch over to host mailbox */
6606                         psli->sli_flag |= LPFC_SLI_ACTIVE;
6607                 }
6608         }
6609
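        /* Order the mailbox/SLIM writes above before the doorbell writes below */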
6610         wmb();
6611
6612         switch (flag) {
6613         case MBX_NOWAIT:
6614                 /* Set up reference to mailbox command */
6615                 psli->mbox_active = pmbox;
6616                 /* Interrupt board to do it */
6617                 writel(CA_MBATT, phba->CAregaddr);
6618                 readl(phba->CAregaddr); /* flush */
6619                 /* Don't wait for it to finish, just return */
6620                 break;
6621
6622         case MBX_POLL:
6623                 /* Set up null reference to mailbox command */
6624                 psli->mbox_active = NULL;
6625                 /* Interrupt board to do it */
6626                 writel(CA_MBATT, phba->CAregaddr);
6627                 readl(phba->CAregaddr); /* flush */
6628
6629                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
6630                         /* First read mbox status word */
6631                         word0 = *((uint32_t *)phba->mbox);
6632                         word0 = le32_to_cpu(word0);
6633                 } else {
6634                         /* First read mbox status word */
6635                         if (lpfc_readl(phba->MBslimaddr, &word0)) {
6636                                 spin_unlock_irqrestore(&phba->hbalock,
6637                                                        drvr_flag);
6638                                 goto out_not_finished;
6639                         }
6640                 }
6641
6642                 /* Read the HBA Host Attention Register */
6643                 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
6644                         spin_unlock_irqrestore(&phba->hbalock,
6645                                                        drvr_flag);
6646                         goto out_not_finished;
6647                 }
6648                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
6649                                                         1000) + jiffies;
6650                 i = 0;
6651                 /* Wait for command to complete */
6652                 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
6653                        (!(ha_copy & HA_MBATT) &&
6654                         (phba->link_state > LPFC_WARM_START))) {
6655                         if (time_after(jiffies, timeout)) {
6656                                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6657                                 spin_unlock_irqrestore(&phba->hbalock,
6658                                                        drvr_flag);
6659                                 goto out_not_finished;
6660                         }
6661
6662                         /* Check if we took a mbox interrupt while we were
6663                            polling */
6664                         if (((word0 & OWN_CHIP) != OWN_CHIP)
6665                             && (evtctr != psli->slistat.mbox_event))
6666                                 break;
6667
6668                         if (i++ > 10) {
6669                                 spin_unlock_irqrestore(&phba->hbalock,
6670                                                        drvr_flag);
6671                                 msleep(1);
6672                                 spin_lock_irqsave(&phba->hbalock, drvr_flag);
6673                         }
6674
6675                         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
6676                                 /* First copy command data */
6677                                 word0 = *((uint32_t *)phba->mbox);
6678                                 word0 = le32_to_cpu(word0);
6679                                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
6680                                         MAILBOX_t *slimmb;
6681                                         uint32_t slimword0;
6682                                         /* Check real SLIM for any errors */
6683                                         slimword0 = readl(phba->MBslimaddr);
6684                                         slimmb = (MAILBOX_t *)&slimword0;
6685                                         if (((slimword0 & OWN_CHIP) != OWN_CHIP)
6686                                             && slimmb->mbxStatus) {
6687                                                 psli->sli_flag &=
6688                                                     ~LPFC_SLI_ACTIVE;
6689                                                 word0 = slimword0;
6690                                         }
6691                                 }
6692                         } else {
6693                                 /* First copy command data */
6694                                 word0 = readl(phba->MBslimaddr);
6695                         }
6696                         /* Read the HBA Host Attention Register */
6697                         if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
6698                                 spin_unlock_irqrestore(&phba->hbalock,
6699                                                        drvr_flag);
6700                                 goto out_not_finished;
6701                         }
6702                 }
6703
6704                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
6705                         /* copy results back to user */
6706                         lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
6707                         /* Copy the mailbox extension data */
6708                         if (pmbox->out_ext_byte_len && pmbox->context2) {
6709                                 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
6710                                                       pmbox->context2,
6711                                                       pmbox->out_ext_byte_len);
6712                         }
6713                 } else {
6714                         /* First copy command data */
6715                         lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
6716                                                         MAILBOX_CMD_SIZE);
6717                         /* Copy the mailbox extension data */
6718                         if (pmbox->out_ext_byte_len && pmbox->context2) {
6719                                 lpfc_memcpy_from_slim(pmbox->context2,
6720                                         phba->MBslimaddr +
6721                                         MAILBOX_HBA_EXT_OFFSET,
6722                                         pmbox->out_ext_byte_len);
6723                         }
6724                 }
6725
6726                 writel(HA_MBATT, phba->HAregaddr);
6727                 readl(phba->HAregaddr); /* flush */
6728
6729                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6730                 status = mb->mbxStatus;
6731         }
6732
6733         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6734         return status;
6735
6736 out_not_finished:
6737         if (processing_queue) {
6738                 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
6739                 lpfc_mbox_cmpl_put(phba, pmbox);
6740         }
6741         return MBX_NOT_FINISHED;
6742 }
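/*
 * Usage sketch (illustrative only, not driver code): a typical polling-mode
 * caller per the ownership rules documented above. In MBX_POLL mode the
 * command has completed (or failed) by the time the call returns, so the
 * caller consumes the results and frees the object; in MBX_NOWAIT mode the
 * mbox_cmpl handler does that instead. lpfc_read_rev() stands in here for
 * any mailbox command builder.
 *
 *      LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *      int rc;
 *
 *      if (!pmb)
 *              return -ENOMEM;
 *      lpfc_read_rev(phba, pmb);
 *      rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 *      if (rc == MBX_SUCCESS)
 *              ... consume pmb->u.mb results ...
 *      if (rc != MBX_BUSY)     (only MBX_BUSY leaves ownership with SLI)
 *              mempool_free(pmb, phba->mbox_mem_pool);
 */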
6743
6744 /**
6745  * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
6746  * @phba: Pointer to HBA context object.
6747  *
6748  * The function blocks the posting of SLI4 asynchronous mailbox commands from
6749  * the driver internal pending mailbox queue. It will then try to wait out
6750  * any outstanding mailbox command before returning.
6751  *
6752  * Returns:
6753  *      0 - any outstanding mailbox command completed (or none was pending);
6754  *      1 - the wait for the outstanding mailbox command timed out.
6755  **/
6756 static int
6757 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
6758 {
6759         struct lpfc_sli *psli = &phba->sli;
6760         int rc = 0;
6761         unsigned long timeout = 0;
6762
6763         /* Mark the asynchronous mailbox command posting as blocked */
6764         spin_lock_irq(&phba->hbalock);
6765         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
6766         /* Determine how long we might wait for the active mailbox
6767          * command to be gracefully completed by firmware.
6768          */
6769         if (phba->sli.mbox_active)
6770                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
6771                                                 phba->sli.mbox_active) *
6772                                                 1000) + jiffies;
6773         spin_unlock_irq(&phba->hbalock);
6774
6775         /* Wait for the outstanding mailbox command to complete */
6776         while (phba->sli.mbox_active) {
6777                 /* Check active mailbox complete status every 2ms */
6778                 msleep(2);
6779                 if (time_after(jiffies, timeout)) {
6780                         /* Timed out; mark the outstanding cmd as not complete */
6781                         rc = 1;
6782                         break;
6783                 }
6784         }
6785
6786         /* Cannot cleanly block async mailbox command, fail it */
6787         if (rc) {
6788                 spin_lock_irq(&phba->hbalock);
6789                 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
6790                 spin_unlock_irq(&phba->hbalock);
6791         }
6792         return rc;
6793 }
6794
6795 /**
6796  * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
6797  * @phba: Pointer to HBA context object.
6798  *
6799  * The function unblocks and resumes posting of SLI4 asynchronous mailbox
6800  * commands from the driver internal pending mailbox queue. It makes sure
6801  * that there is no outstanding mailbox command before resuming posting of
6802  * asynchronous mailbox commands. If, for any reason, there is an outstanding
6803  * mailbox command, it will try to wait it out before resuming asynchronous
6804  * mailbox command posting.
6805  **/
6806 static void
6807 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
6808 {
6809         struct lpfc_sli *psli = &phba->sli;
6810
6811         spin_lock_irq(&phba->hbalock);
6812         if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
6813                 /* Asynchronous mailbox posting is not blocked, do nothing */
6814                 spin_unlock_irq(&phba->hbalock);
6815                 return;
6816         }
6817
6818         /* The outstanding synchronous mailbox command is guaranteed to have
6819          * completed, either successfully or by timeout; after a timeout the
6820          * outstanding command is always removed. So just unblock the posting
6821          * of async mailbox commands and resume.
6822          */
6823         psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
6824         spin_unlock_irq(&phba->hbalock);
6825
6826         /* wake up worker thread to post asynchronous mailbox commands */
6827         lpfc_worker_wake_up(phba);
6828 }
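/*
 * Usage sketch (illustrative only): block/unblock are meant to bracket work
 * that must run with no asynchronous mailbox traffic in flight, exactly as
 * lpfc_sli_issue_mbox_s4() does for a synchronous command issued while
 * interrupts are enabled:
 *
 *      if (lpfc_sli4_async_mbox_block(phba))
 *              return MBX_NOT_FINISHED;   (active mailbox never drained)
 *      rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *      lpfc_sli4_async_mbox_unblock(phba);
 */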
6829
6830 /**
6831  * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
6832  * @phba: Pointer to HBA context object.
6833  * @mboxq: Pointer to mailbox object.
6834  *
6835  * The function posts a mailbox to the port.  The mailbox is expected
6836  * to be completely filled in and ready for the port to operate on it.
6837  * This routine executes a synchronous completion operation on the
6838  * mailbox by polling for its completion.
6839  *
6840  * The caller must not be holding any locks when calling this routine.
6841  *
6842  * Returns:
6843  *      MBX_SUCCESS - mailbox posted successfully
6844  *      Any of the MBX error values.
6845  **/
6846 static int
6847 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
6848 {
6849         int rc = MBX_SUCCESS;
6850         unsigned long iflag;
6851         uint32_t db_ready;
6852         uint32_t mcqe_status;
6853         uint32_t mbx_cmnd;
6854         unsigned long timeout;
6855         struct lpfc_sli *psli = &phba->sli;
6856         struct lpfc_mqe *mb = &mboxq->u.mqe;
6857         struct lpfc_bmbx_create *mbox_rgn;
6858         struct dma_address *dma_address;
6859         struct lpfc_register bmbx_reg;
6860
6861         /*
6862          * Only one mailbox can be active to the bootstrap mailbox region
6863          * at a time and there is no queueing provided.
6864          */
6865         spin_lock_irqsave(&phba->hbalock, iflag);
6866         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6867                 spin_unlock_irqrestore(&phba->hbalock, iflag);
6868                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6869                                 "(%d):2532 Mailbox command x%x (x%x/x%x) "
6870                                 "cannot issue Data: x%x x%x\n",
6871                                 mboxq->vport ? mboxq->vport->vpi : 0,
6872                                 mboxq->u.mb.mbxCommand,
6873                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
6874                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6875                                 psli->sli_flag, MBX_POLL);
6876                 return MBXERR_ERROR;
6877         }
6878         /* The driver grabs the token and owns it until release */
6879         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
6880         phba->sli.mbox_active = mboxq;
6881         spin_unlock_irqrestore(&phba->hbalock, iflag);
6882
6883         /*
6884          * Initialize the bootstrap memory region to avoid stale data areas
6885          * in the mailbox post.  Then copy the caller's mailbox contents to
6886          * the bmbx mailbox region.
6887          */
6888         mbx_cmnd = bf_get(lpfc_mqe_command, mb);
6889         memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
6890         lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
6891                               sizeof(struct lpfc_mqe));
6892
6893         /* Post the high mailbox dma address to the port and wait for ready. */
6894         dma_address = &phba->sli4_hba.bmbx.dma_address;
6895         writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
6896
6897         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
6898                                    * 1000) + jiffies;
6899         do {
6900                 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
6901                 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
6902                 if (!db_ready)
6903                         msleep(2);
6904
6905                 if (time_after(jiffies, timeout)) {
6906                         rc = MBXERR_ERROR;
6907                         goto exit;
6908                 }
6909         } while (!db_ready);
6910
6911         /* Post the low mailbox dma address to the port. */
6912         writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
6913         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
6914                                    * 1000) + jiffies;
6915         do {
6916                 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
6917                 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
6918                 if (!db_ready)
6919                         msleep(2);
6920
6921                 if (time_after(jiffies, timeout)) {
6922                         rc = MBXERR_ERROR;
6923                         goto exit;
6924                 }
6925         } while (!db_ready);
6926
6927         /*
6928          * Read the CQ to ensure the mailbox has completed.
6929          * If so, update the mailbox status so that the upper layers
6930          * can complete the request normally.
6931          */
6932         lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
6933                               sizeof(struct lpfc_mqe));
6934         mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
6935         lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
6936                               sizeof(struct lpfc_mcqe));
6937         mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
6938         /*
6939          * When the CQE status indicates a failure and the mailbox status
6940          * indicates success then copy the CQE status into the mailbox status
6941          * (and prefix it with x4000).
6942          */
6943         if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
6944                 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
6945                         bf_set(lpfc_mqe_status, mb,
6946                                (LPFC_MBX_ERROR_RANGE | mcqe_status));
6947                 rc = MBXERR_ERROR;
6948         } else
6949                 lpfc_sli4_swap_str(phba, mboxq);
6950
6951         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6952                         "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
6953                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
6954                         " x%x x%x CQ: x%x x%x x%x x%x\n",
6955                         mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
6956                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
6957                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6958                         bf_get(lpfc_mqe_status, mb),
6959                         mb->un.mb_words[0], mb->un.mb_words[1],
6960                         mb->un.mb_words[2], mb->un.mb_words[3],
6961                         mb->un.mb_words[4], mb->un.mb_words[5],
6962                         mb->un.mb_words[6], mb->un.mb_words[7],
6963                         mb->un.mb_words[8], mb->un.mb_words[9],
6964                         mb->un.mb_words[10], mb->un.mb_words[11],
6965                         mb->un.mb_words[12], mboxq->mcqe.word0,
6966                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
6967                         mboxq->mcqe.trailer);
6968 exit:
6969         /* We are holding the token; clear it under the lock on release */
6970         spin_lock_irqsave(&phba->hbalock, iflag);
6971         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6972         phba->sli.mbox_active = NULL;
6973         spin_unlock_irqrestore(&phba->hbalock, iflag);
6974         return rc;
6975 }
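/*
 * The two doorbell waits above share one poll-with-timeout idiom; a
 * hypothetical helper (sketch only, not present in this version of the
 * driver) could factor it out:
 *
 *      static int lpfc_bmbx_wait_ready(struct lpfc_hba *phba,
 *                                      unsigned long timeout)
 *      {
 *              struct lpfc_register bmbx_reg;
 *
 *              do {
 *                      bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
 *                      if (bf_get(lpfc_bmbx_rdy, &bmbx_reg))
 *                              return 0;
 *                      msleep(2);
 *              } while (!time_after(jiffies, timeout));
 *              return -ETIMEDOUT;
 *      }
 */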
6976
6977 /**
6978  * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
6979  * @phba: Pointer to HBA context object.
6980  * @mboxq: Pointer to mailbox object.
6981  * @flag: Flag indicating how the mailbox needs to be processed.
6982  *
6983  * This function is called by discovery code and HBA management code to submit
6984  * a mailbox command to firmware with SLI-4 interface spec.
6985  *
6986  * Mailbox ownership on return follows the same rules as described for
6987  * lpfc_sli_issue_mbox_s3().
6988  **/
6989 static int
6990 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
6991                        uint32_t flag)
6992 {
6993         struct lpfc_sli *psli = &phba->sli;
6994         unsigned long iflags;
6995         int rc;
6996
6997         /* dump the issued mailbox command, if idiag dumping is set up */
6998         lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
6999
7000         rc = lpfc_mbox_dev_check(phba);
7001         if (unlikely(rc)) {
7002                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7003                                 "(%d):2544 Mailbox command x%x (x%x/x%x) "
7004                                 "cannot issue Data: x%x x%x\n",
7005                                 mboxq->vport ? mboxq->vport->vpi : 0,
7006                                 mboxq->u.mb.mbxCommand,
7007                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7008                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7009                                 psli->sli_flag, flag);
7010                 goto out_not_finished;
7011         }
7012
7013         /* Detect polling mode and jump to a handler */
7014         if (!phba->sli4_hba.intr_enable) {
7015                 if (flag == MBX_POLL)
7016                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7017                 else
7018                         rc = -EIO;
7019                 if (rc != MBX_SUCCESS)
7020                         lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7021                                         "(%d):2541 Mailbox command x%x "
7022                                         "(x%x/x%x) cannot issue Data: "
7023                                         "x%x x%x\n",
7024                                         mboxq->vport ? mboxq->vport->vpi : 0,
7025                                         mboxq->u.mb.mbxCommand,
7026                                         lpfc_sli_config_mbox_subsys_get(phba,
7027                                                                         mboxq),
7028                                         lpfc_sli_config_mbox_opcode_get(phba,
7029                                                                         mboxq),
7030                                         psli->sli_flag, flag);
7031                 return rc;
7032         } else if (flag == MBX_POLL) {
7033                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7034                                 "(%d):2542 Try to issue mailbox command "
7035                                 "x%x (x%x/x%x) synchronously ahead of async "
7036                                 "mailbox command queue: x%x x%x\n",
7037                                 mboxq->vport ? mboxq->vport->vpi : 0,
7038                                 mboxq->u.mb.mbxCommand,
7039                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7040                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7041                                 psli->sli_flag, flag);
7042                 /* Try to block the asynchronous mailbox posting */
7043                 rc = lpfc_sli4_async_mbox_block(phba);
7044                 if (!rc) {
7045                         /* Successfully blocked, now issue sync mbox cmd */
7046                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7047                         if (rc != MBX_SUCCESS)
7048                                 lpfc_printf_log(phba, KERN_ERR,
7049                                         LOG_MBOX | LOG_SLI,
7050                                         "(%d):2597 Mailbox command "
7051                                         "x%x (x%x/x%x) cannot issue "
7052                                         "Data: x%x x%x\n",
7053                                         mboxq->vport ?
7054                                         mboxq->vport->vpi : 0,
7055                                         mboxq->u.mb.mbxCommand,
7056                                         lpfc_sli_config_mbox_subsys_get(phba,
7057                                                                         mboxq),
7058                                         lpfc_sli_config_mbox_opcode_get(phba,
7059                                                                         mboxq),
7060                                         psli->sli_flag, flag);
7061                         /* Unblock the async mailbox posting afterward */
7062                         lpfc_sli4_async_mbox_unblock(phba);
7063                 }
7064                 return rc;
7065         }
7066
7067         /* Now, interrupt mode asynchronous mailbox command */
7068         rc = lpfc_mbox_cmd_check(phba, mboxq);
7069         if (rc) {
7070                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7071                                 "(%d):2543 Mailbox command x%x (x%x/x%x) "
7072                                 "cannot issue Data: x%x x%x\n",
7073                                 mboxq->vport ? mboxq->vport->vpi : 0,
7074                                 mboxq->u.mb.mbxCommand,
7075                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7076                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7077                                 psli->sli_flag, flag);
7078                 goto out_not_finished;
7079         }
7080
7081         /* Put the mailbox command into the driver internal FIFO */
7082         psli->slistat.mbox_busy++;
7083         spin_lock_irqsave(&phba->hbalock, iflags);
7084         lpfc_mbox_put(phba, mboxq);
7085         spin_unlock_irqrestore(&phba->hbalock, iflags);
7086         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7087                         "(%d):0354 Mbox cmd issue - Enqueue Data: "
7088                         "x%x (x%x/x%x) x%x x%x x%x\n",
7089                         mboxq->vport ? mboxq->vport->vpi : 0xffffff,
7090                         bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7091                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7092                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7093                         phba->pport->port_state,
7094                         psli->sli_flag, MBX_NOWAIT);
7095         /* Wake up worker thread to post the mailbox command from the queue head */
7096         lpfc_worker_wake_up(phba);
7097
7098         return MBX_BUSY;
7099
7100 out_not_finished:
7101         return MBX_NOT_FINISHED;
7102 }
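/*
 * Summary of the dispatch above: with interrupts disabled only MBX_POLL is
 * legal and the command goes straight to the bootstrap mailbox; with
 * interrupts enabled, MBX_POLL first blocks the async FIFO and then posts
 * synchronously, while MBX_NOWAIT enqueues the command (returning MBX_BUSY)
 * for the worker thread to post. A typical asynchronous caller looks like
 * this sketch (my_mbox_done is a hypothetical completion handler):
 *
 *      mboxq->mbox_cmpl = my_mbox_done;
 *      rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 *      if (rc == MBX_NOT_FINISHED)
 *              mempool_free(mboxq, phba->mbox_mem_pool);
 */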
7103
7104 /**
7105  * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
7106  * @phba: Pointer to HBA context object.
7107  *
7108  * This function is called by the worker thread to send a mailbox command
7109  * to the SLI4 HBA firmware.
7110  *
7111  **/
7112 int
7113 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
7114 {
7115         struct lpfc_sli *psli = &phba->sli;
7116         LPFC_MBOXQ_t *mboxq;
7117         int rc = MBX_SUCCESS;
7118         unsigned long iflags;
7119         struct lpfc_mqe *mqe;
7120         uint32_t mbx_cmnd;
7121
7122         /* Check interrupt mode before posting an async mailbox command */
7123         if (unlikely(!phba->sli4_hba.intr_enable))
7124                 return MBX_NOT_FINISHED;
7125
7126         /* Check for mailbox command service token */
7127         spin_lock_irqsave(&phba->hbalock, iflags);
7128         if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7129                 spin_unlock_irqrestore(&phba->hbalock, iflags);
7130                 return MBX_NOT_FINISHED;
7131         }
7132         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7133                 spin_unlock_irqrestore(&phba->hbalock, iflags);
7134                 return MBX_NOT_FINISHED;
7135         }
7136         if (unlikely(phba->sli.mbox_active)) {
7137                 spin_unlock_irqrestore(&phba->hbalock, iflags);
7138                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7139                                 "0384 There is a pending active mailbox cmd\n");
7140                 return MBX_NOT_FINISHED;
7141         }
7142         /* Take the mailbox command service token */
7143         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7144
7145         /* Get the next mailbox command from head of queue */
7146         mboxq = lpfc_mbox_get(phba);
7147
7148         /* If no more mailbox commands are waiting to be posted, we're done */
7149         if (!mboxq) {
7150                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7151                 spin_unlock_irqrestore(&phba->hbalock, iflags);
7152                 return MBX_SUCCESS;
7153         }
7154         phba->sli.mbox_active = mboxq;
7155         spin_unlock_irqrestore(&phba->hbalock, iflags);
7156
7157         /* Check device readiness for posting mailbox command */
7158         rc = lpfc_mbox_dev_check(phba);
7159         if (unlikely(rc))
7160                 /* Driver clean routine will clean up pending mailbox */
7161                 goto out_not_finished;
7162
7163         /* Prepare the mbox command to be posted */
7164         mqe = &mboxq->u.mqe;
7165         mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
7166
7167         /* Start timer for the mbox_tmo and log some mailbox post messages */
7168         mod_timer(&psli->mbox_tmo, (jiffies +
7169                   (HZ * lpfc_mbox_tmo_val(phba, mboxq))));
7170
7171         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7172                         "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
7173                         "x%x x%x\n",
7174                         mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
7175                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7176                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7177                         phba->pport->port_state, psli->sli_flag);
7178
7179         if (mbx_cmnd != MBX_HEARTBEAT) {
7180                 if (mboxq->vport) {
7181                         lpfc_debugfs_disc_trc(mboxq->vport,
7182                                 LPFC_DISC_TRC_MBOX_VPORT,
7183                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
7184                                 mbx_cmnd, mqe->un.mb_words[0],
7185                                 mqe->un.mb_words[1]);
7186                 } else {
7187                         lpfc_debugfs_disc_trc(phba->pport,
7188                                 LPFC_DISC_TRC_MBOX,
7189                                 "MBOX Send: cmd:x%x mb:x%x x%x",
7190                                 mbx_cmnd, mqe->un.mb_words[0],
7191                                 mqe->un.mb_words[1]);
7192                 }
7193         }
7194         psli->slistat.mbox_cmd++;
7195
7196         /* Post the mailbox command to the port */
7197         rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
7198         if (rc != MBX_SUCCESS) {
7199                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7200                                 "(%d):2533 Mailbox command x%x (x%x/x%x) "
7201                                 "cannot issue Data: x%x x%x\n",
7202                                 mboxq->vport ? mboxq->vport->vpi : 0,
7203                                 mboxq->u.mb.mbxCommand,
7204                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7205                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7206                                 psli->sli_flag, MBX_NOWAIT);
7207                 goto out_not_finished;
7208         }
7209
7210         return rc;
7211
7212 out_not_finished:
7213         spin_lock_irqsave(&phba->hbalock, iflags);
7214         mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7215         __lpfc_mbox_cmpl_put(phba, mboxq);
7216         /* Release the token */
7217         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7218         phba->sli.mbox_active = NULL;
7219         spin_unlock_irqrestore(&phba->hbalock, iflags);
7220
7221         return MBX_NOT_FINISHED;
7222 }
7223
7224 /**
7225  * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
7226  * @phba: Pointer to HBA context object.
7227  * @pmbox: Pointer to mailbox object.
7228  * @flag: Flag indicating how the mailbox needs to be processed.
7229  *
7230  * This routine wraps the actual SLI-3 or SLI-4 mailbox issuing routine via
7231  * the API jump table function pointer in the lpfc_hba struct.
7232  *
7233  * Mailbox ownership on return follows the rules of the underlying issuing
7234  * routine.
7235  **/
7236 int
7237 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
7238 {
7239         return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
7240 }
7241
7242 /**
7243  * lpfc_mbox_api_table_setup - Set up mbox api function jump table
7244  * @phba: The hba struct for which this call is being executed.
7245  * @dev_grp: The HBA PCI-Device group number.
7246  *
7247  * This routine sets up the mbox interface API function jump table in @phba
7248  * struct.
7249  * Returns: 0 - success, -ENODEV - failure.
7250  **/
7251 int
7252 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7253 {
7254
7255         switch (dev_grp) {
7256         case LPFC_PCI_DEV_LP:
7257                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
7258                 phba->lpfc_sli_handle_slow_ring_event =
7259                                 lpfc_sli_handle_slow_ring_event_s3;
7260                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
7261                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
7262                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
7263                 break;
7264         case LPFC_PCI_DEV_OC:
7265                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
7266                 phba->lpfc_sli_handle_slow_ring_event =
7267                                 lpfc_sli_handle_slow_ring_event_s4;
7268                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
7269                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
7270                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
7271                 break;
7272         default:
7273                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7274                                 "1420 Invalid HBA PCI-device group: 0x%x\n",
7275                                 dev_grp);
7276                 return -ENODEV;
7278         }
7279         return 0;
7280 }
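/*
 * Usage sketch (illustrative only): probe-time wiring followed by a later
 * dispatch. Once the table is set up, generic callers never name an SLI
 * variant:
 *
 *      if (lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC))
 *              return -ENODEV;
 *      ...
 *      rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 *              (resolves to lpfc_sli_issue_mbox_s4 here)
 */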
7281
7282 /**
7283  * __lpfc_sli_ringtx_put - Add an iocb to the txq
7284  * @phba: Pointer to HBA context object.
7285  * @pring: Pointer to driver SLI ring object.
7286  * @piocb: Pointer to the command iocb to be queued.
7287  *
7288  * This function is called with hbalock held to add a command
7289  * iocb to the txq when SLI layer cannot submit the command iocb
7290  * to the ring.
7291  **/
7292 void
7293 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7294                     struct lpfc_iocbq *piocb)
7295 {
7296         /* Insert the caller's iocb in the txq tail for later processing. */
7297         list_add_tail(&piocb->list, &pring->txq);
7298         pring->txq_cnt++;
7299 }
7300
7301 /**
7302  * lpfc_sli_next_iocb - Get the next iocb in the txq
7303  * @phba: Pointer to HBA context object.
7304  * @pring: Pointer to driver SLI ring object.
7305  * @piocb: Pointer to address of newly added command iocb.
7306  *
7307  * This function is called with hbalock held before a new
7308  * iocb is submitted to the firmware. This function drains the
7309  * txq so that iocbs already queued there are flushed to the
7310  * firmware before new iocbs are submitted.
7311  * If there are iocbs in the txq which need to be submitted
7312  * to firmware, lpfc_sli_next_iocb returns the first element
7313  * of the txq after dequeuing it from txq.
7314  * If there is no iocb in the txq then the function returns
7315  * *piocb and sets *piocb to NULL. The caller needs to check
7316  * *piocb to find out if there are more commands in the txq.
7317  **/
7318 static struct lpfc_iocbq *
7319 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7320                    struct lpfc_iocbq **piocb)
7321 {
7322         struct lpfc_iocbq * nextiocb;
7323
7324         nextiocb = lpfc_sli_ringtx_get(phba, pring);
7325         if (!nextiocb) {
7326                 nextiocb = *piocb;
7327                 *piocb = NULL;
7328         }
7329
7330         return nextiocb;
7331 }
7332
7333 /**
7334  * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
7335  * @phba: Pointer to HBA context object.
7336  * @ring_number: SLI ring number to issue iocb on.
7337  * @piocb: Pointer to command iocb.
7338  * @flag: Flag indicating if this command can be put into txq.
7339  *
7340  * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
7341  * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
7342  * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot
7343  * is recovering from an error state, if the HBA is resetting, or if the
7344  * LPFC_STOP_IOCB_EVENT flag is turned on, the function returns
7345  * IOCB_ERROR. When the link is down, only buffer-posting iocbs are
7346  * allowed. Otherwise the command is posted to the next available ring
7347  * slot and the port attention register is written to start processing.
7348  * If no slot is free, the iocb is queued on the txq unless
7349  * flag & SLI_IOCB_RET_IOCB is set, in which case IOCB_BUSY is returned.
7350  *
7351  * This function is called with hbalock held. The function will return
7352  * success after it successfully submits the iocb to firmware or after
7353  * adding it to the txq.
7354  **/
7355 static int
7356 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
7357                     struct lpfc_iocbq *piocb, uint32_t flag)
7358 {
7359         struct lpfc_iocbq *nextiocb;
7360         IOCB_t *iocb;
7361         struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
7362
7363         if (piocb->iocb_cmpl && (!piocb->vport) &&
7364            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
7365            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
7366                 lpfc_printf_log(phba, KERN_ERR,
7367                                 LOG_SLI | LOG_VPORT,
7368                                 "1807 IOCB x%x failed. No vport\n",
7369                                 piocb->iocb.ulpCommand);
7370                 dump_stack();
7371                 return IOCB_ERROR;
7372         }
7373
7375         /* If the PCI channel is in offline state, do not post iocbs. */
7376         if (unlikely(pci_channel_offline(phba->pcidev)))
7377                 return IOCB_ERROR;
7378
7379         /* If HBA has a deferred error attention, fail the iocb. */
7380         if (unlikely(phba->hba_flag & DEFER_ERATT))
7381                 return IOCB_ERROR;
7382
7383         /*
7384          * We should never get an IOCB if we are in a < LINK_DOWN state
7385          */
7386         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
7387                 return IOCB_ERROR;
7388
7389         /*
7390          * Check to see if we are blocking IOCB processing because of a
7391          * Check to see if we are blocking IOCB processing because of an
7392          * outstanding event.
7393         if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
7394                 goto iocb_busy;
7395
7396         if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
7397                 /*
7398                  * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
7399                  * can be issued if the link is not up.
7400                  */
7401                 switch (piocb->iocb.ulpCommand) {
7402                 case CMD_GEN_REQUEST64_CR:
7403                 case CMD_GEN_REQUEST64_CX:
7404                         if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
7405                                 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
7406                                         FC_RCTL_DD_UNSOL_CMD) ||
7407                                 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
7408                                         MENLO_TRANSPORT_TYPE))
7409
7410                                 goto iocb_busy;
7411                         break;
7412                 case CMD_QUE_RING_BUF_CN:
7413                 case CMD_QUE_RING_BUF64_CN:
7414                         /*
7415                          * For IOCBs, like QUE_RING_BUF, that have no rsp ring
7416                          * completion, iocb_cmpl MUST be 0.
7417                          */
7418                         if (piocb->iocb_cmpl)
7419                                 piocb->iocb_cmpl = NULL;
7420                         /*FALLTHROUGH*/
7421                 case CMD_CREATE_XRI_CR:
7422                 case CMD_CLOSE_XRI_CN:
7423                 case CMD_CLOSE_XRI_CX:
7424                         break;
7425                 default:
7426                         goto iocb_busy;
7427                 }
7428
7429         /*
7430          * For FCP commands, we must be in a state where we can process link
7431          * attention events.
7432          */
7433         } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
7434                             !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
7435                 goto iocb_busy;
7436         }
7437
7438         while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
7439                (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
7440                 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
7441
7442         if (iocb)
7443                 lpfc_sli_update_ring(phba, pring);
7444         else
7445                 lpfc_sli_update_full_ring(phba, pring);
7446
7447         if (!piocb)
7448                 return IOCB_SUCCESS;
7449
7450         goto out_busy;
7451
7452  iocb_busy:
7453         pring->stats.iocb_cmd_delay++;
7454
7455  out_busy:
7456
7457         if (!(flag & SLI_IOCB_RET_IOCB)) {
7458                 __lpfc_sli_ringtx_put(phba, pring, piocb);
7459                 return IOCB_SUCCESS;
7460         }
7461
7462         return IOCB_BUSY;
7463 }
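/*
 * Usage sketch (illustrative only; called with hbalock held): the
 * SLI_IOCB_RET_IOCB flag decides who keeps a busy iocb. Without it, a full
 * ring silently queues the iocb on the txq and reports IOCB_SUCCESS; with
 * it, the caller gets the iocb back:
 *
 *      rc = __lpfc_sli_issue_iocb_s3(phba, ringno, piocb,
 *                                    SLI_IOCB_RET_IOCB);
 *      if (rc == IOCB_BUSY)
 *              ... retry later or fail; piocb still belongs to the caller ...
 */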
7464
7465 /**
7466  * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
7467  * @phba: Pointer to HBA context object.
7468  * @piocbq: Pointer to command iocb.
7469  * @sglq: Pointer to the scatter gather queue object.
7470  *
7471  * This routine converts the bpl or bde that is in the IOCB
7472  * to a sgl list for the sli4 hardware. The physical address
7473  * of the bpl/bde is converted back to a virtual address.
7474  * If the IOCB contains a BPL then the list of BDE's is
7475  * converted to sli4_sge's. If the IOCB contains a single
7476  * BDE then it is converted to a single sli4_sge.
7477  * The IOCB is still in CPU endianness so the contents of
7478  * the bpl can be used without byte swapping.
7479  *
7480  * Returns valid XRI = Success, NO_XRI = Failure.
7481  **/
7482 static uint16_t
7483 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
7484                 struct lpfc_sglq *sglq)
7485 {
7486         uint16_t xritag = NO_XRI;
7487         struct ulp_bde64 *bpl = NULL;
7488         struct ulp_bde64 bde;
7489         struct sli4_sge *sgl  = NULL;
7490         IOCB_t *icmd;
7491         int numBdes = 0;
7492         int i = 0;
7493         uint32_t offset = 0; /* accumulated offset in the sg request list */
7494         int inbound = 0; /* number of sg reply entries inbound from firmware */
7495
7496         if (!piocbq || !sglq)
7497                 return xritag;
7498
7499         sgl  = (struct sli4_sge *)sglq->sgl;
7500         icmd = &piocbq->iocb;
7501         if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
7502                 numBdes = icmd->un.genreq64.bdl.bdeSize /
7503                                 sizeof(struct ulp_bde64);
7504                 /* The addrHigh and addrLow fields within the IOCB
7505                  * have not been byteswapped yet so there is no
7506                  * need to swap them back.
7507                  */
7508                 bpl  = (struct ulp_bde64 *)
7509                         ((struct lpfc_dmabuf *)piocbq->context3)->virt;
7510
7511                 if (!bpl)
7512                         return xritag;
7513
7514                 for (i = 0; i < numBdes; i++) {
7515                         /* Should already be byte swapped. */
7516                         sgl->addr_hi = bpl->addrHigh;
7517                         sgl->addr_lo = bpl->addrLow;
7518
7519                         sgl->word2 = le32_to_cpu(sgl->word2);
7520                         if ((i+1) == numBdes)
7521                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
7522                         else
7523                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
7524                         /* swap the size field back to the cpu so we
7525                          * can assign it to the sgl.
7526                          */
7527                         bde.tus.w = le32_to_cpu(bpl->tus.w);
7528                         sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
7529                         /* The offsets in the sgl need to be accumulated
7530                          * separately for the request and reply lists.
7531                          * The request is always first, the reply follows.
7532                          */
7533                         if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
7534                                 /* add up the reply sg entries */
7535                                 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
7536                                         inbound++;
7537                                 /* first inbound? reset the offset */
7538                                 if (inbound == 1)
7539                                         offset = 0;
7540                                 bf_set(lpfc_sli4_sge_offset, sgl, offset);
7541                                 bf_set(lpfc_sli4_sge_type, sgl,
7542                                         LPFC_SGE_TYPE_DATA);
7543                                 offset += bde.tus.f.bdeSize;
7544                         }
7545                         sgl->word2 = cpu_to_le32(sgl->word2);
7546                         bpl++;
7547                         sgl++;
7548                 }
7549         } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
7550                         /* The addrHigh and addrLow fields of the BDE have not
7551                          * been byteswapped yet so they need to be swapped
7552                          * before putting them in the sgl.
7553                          */
7554                         sgl->addr_hi =
7555                                 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
7556                         sgl->addr_lo =
7557                                 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
7558                         sgl->word2 = le32_to_cpu(sgl->word2);
7559                         bf_set(lpfc_sli4_sge_last, sgl, 1);
7560                         sgl->word2 = cpu_to_le32(sgl->word2);
7561                         sgl->sge_len =
7562                                 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
7563         }
7564         return sglq->sli4_xritag;
7565 }
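/*
 * Note on the endianness handling above: addrHigh/addrLow in the BPL are
 * already little-endian and are copied as-is, while the size/flags word
 * must round-trip through CPU byte order:
 *
 *      bde.tus.w    = le32_to_cpu(bpl->tus.w);         (to CPU order)
 *      sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);  (back to LE)
 */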
7566
7567 /**
7568  * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
7569  * @phba: Pointer to HBA context object.
7570  *
7571  * This routine performs a round-robin SCSI command to SLI4 FCP WQ index
7572  * distribution.  This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
7573  * held.
7574  *
7575  * Return: index into SLI4 fast-path FCP queue index.
7576  **/
7577 static uint32_t
7578 lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
7579 {
7580         ++phba->fcp_qidx;
7581         if (phba->fcp_qidx >= phba->cfg_fcp_wq_count)
7582                 phba->fcp_qidx = 0;
7583
7584         return phba->fcp_qidx;
7585 }
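     /*
      * Note: the open-coded compare-and-reset above is a round-robin; it is
      * equivalent to the generic pattern below (an illustrative sketch only),
      * written out so the wrap-around stays explicit and avoids a modulo:
      *
      *      qidx = (qidx + 1) % wq_count;
      *      return qidx;
      */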
7586
7587 /**
7588  * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
7589  * @phba: Pointer to HBA context object.
7590  * @iocbq: Pointer to command iocb.
7591  * @wqe: Pointer to the work queue entry.
7592  *
7593  * This routine converts the iocb command to its Work Queue Entry
7594  * equivalent. The wqe pointer should not have any fields set when
7595  * this routine is called because it will memcpy over them.
7596  * This routine does not set the CQ_ID or the WQEC bits in the
7597  * wqe.
7598  *
7599  * Returns: 0 = Success, IOCB_ERROR = Failure.
7600  **/
7601 static int
7602 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7603                 union lpfc_wqe *wqe)
7604 {
7605         uint32_t xmit_len = 0, total_len = 0;
7606         uint8_t ct = 0;
7607         uint32_t fip;
7608         uint32_t abort_tag;
7609         uint8_t command_type = ELS_COMMAND_NON_FIP;
7610         uint8_t cmnd;
7611         uint16_t xritag;
7612         uint16_t abrt_iotag;
7613         struct lpfc_iocbq *abrtiocbq;
7614         struct ulp_bde64 *bpl = NULL;
7615         uint32_t els_id = LPFC_ELS_ID_DEFAULT;
7616         int numBdes, i;
7617         struct ulp_bde64 bde;
7618         struct lpfc_nodelist *ndlp;
7619
7620         fip = phba->hba_flag & HBA_FIP_SUPPORT;
7621         /* FCP commands will set the command type */
7622         if (iocbq->iocb_flag &  LPFC_IO_FCP)
7623                 command_type = FCP_COMMAND;
7624         else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
7625                 command_type = ELS_COMMAND_FIP;
7626         else
7627                 command_type = ELS_COMMAND_NON_FIP;
7628
7629         /* Some of the fields are in the right position already */
7630         memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
7631         abort_tag = (uint32_t) iocbq->iotag;
7632         xritag = iocbq->sli4_xritag;
7633         wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
7634         /* words0-2 bpl convert bde */
7635         if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
7636                 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
7637                                 sizeof(struct ulp_bde64);
7638                 bpl  = (struct ulp_bde64 *)
7639                         ((struct lpfc_dmabuf *)iocbq->context3)->virt;
7640                 if (!bpl)
7641                         return IOCB_ERROR;
7642
7643                 /* Should already be byte swapped. */
7644                 wqe->generic.bde.addrHigh =  le32_to_cpu(bpl->addrHigh);
7645                 wqe->generic.bde.addrLow =  le32_to_cpu(bpl->addrLow);
7646                 /* swap the size field back to the cpu so we
7647                  * can assign it to the wqe.
7648                  */
7649                 wqe->generic.bde.tus.w  = le32_to_cpu(bpl->tus.w);
7650                 xmit_len = wqe->generic.bde.tus.f.bdeSize;
7651                 total_len = 0;
7652                 for (i = 0; i < numBdes; i++) {
7653                         bde.tus.w  = le32_to_cpu(bpl[i].tus.w);
7654                         total_len += bde.tus.f.bdeSize;
7655                 }
7656         } else
7657                 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
7658
7659         iocbq->iocb.ulpIoTag = iocbq->iotag;
7660         cmnd = iocbq->iocb.ulpCommand;
7661
7662         switch (iocbq->iocb.ulpCommand) {
7663         case CMD_ELS_REQUEST64_CR:
7664                 ndlp = (struct lpfc_nodelist *)iocbq->context1;
7665                 if (!iocbq->iocb.ulpLe) {
7666                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7667                                 "2007 Only Limited Edition cmd Format"
7668                                 " supported 0x%x\n",
7669                                 iocbq->iocb.ulpCommand);
7670                         return IOCB_ERROR;
7671                 }
7672                 wqe->els_req.payload_len = xmit_len;
7673                 /* Els_request64 has a TMO */
7674                 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
7675                         iocbq->iocb.ulpTimeout);
7676                 /* Need a VF for word 4; set the vf bit */
7677                 bf_set(els_req64_vf, &wqe->els_req, 0);
7678                 /* And a VFID for word 12 */
7679                 bf_set(els_req64_vfid, &wqe->els_req, 0);
7680                 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
7681                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
7682                        iocbq->iocb.ulpContext);
7683                 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
7684                 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
7685                 /* CCP CCPE PV PRI in word10 were set in the memcpy */
7686                 if (command_type == ELS_COMMAND_FIP) {
7687                         els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
7688                                         >> LPFC_FIP_ELS_ID_SHIFT);
7689                 }
7690                 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
7691                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
7692                 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
7693                 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
7694                 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
7695                 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
7696                 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
7697                 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
7698                 break;
7699         case CMD_XMIT_SEQUENCE64_CX:
7700                 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
7701                        iocbq->iocb.un.ulpWord[3]);
7702                 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
7703                        iocbq->iocb.unsli3.rcvsli3.ox_id);
7704                 /* The entire sequence is transmitted for this IOCB */
7705                 xmit_len = total_len;
7706                 cmnd = CMD_XMIT_SEQUENCE64_CR;
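                     /* Fall through - the CX case shares the CR setup below */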
7707         case CMD_XMIT_SEQUENCE64_CR:
7708                 /* word3 iocb=io_tag32 wqe=reserved */
7709                 wqe->xmit_sequence.rsvd3 = 0;
7710                 /* word4 relative_offset memcpy */
7711                 /* word5 r_ctl/df_ctl memcpy */
7712                 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
7713                 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
7714                 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
7715                        LPFC_WQE_IOD_WRITE);
7716                 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
7717                        LPFC_WQE_LENLOC_WORD12);
7718                 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
7719                 wqe->xmit_sequence.xmit_len = xmit_len;
7720                 command_type = OTHER_COMMAND;
7721                 break;
7722         case CMD_XMIT_BCAST64_CN:
7723                 /* word3 iocb=iotag32 wqe=seq_payload_len */
7724                 wqe->xmit_bcast64.seq_payload_len = xmit_len;
7725                 /* word4 iocb=rsvd wqe=rsvd */
7726                 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
7727                 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
7728                 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
7729                         ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
7730                 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
7731                 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
7732                 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
7733                        LPFC_WQE_LENLOC_WORD3);
7734                 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
7735                 break;
7736         case CMD_FCP_IWRITE64_CR:
7737                 command_type = FCP_COMMAND_DATA_OUT;
7738                 /* word3 iocb=iotag wqe=payload_offset_len */
7739                 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
7740                 wqe->fcp_iwrite.payload_offset_len =
7741                         xmit_len + sizeof(struct fcp_rsp);
7742                 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
7743                 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
7744                 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
7745                        iocbq->iocb.ulpFCP2Rcvy);
7746                 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
7747                 /* Always open the exchange */
7748                 bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
7749                 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
7750                 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
7751                 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
7752                        LPFC_WQE_LENLOC_WORD4);
7753                 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
7754                 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
7755                 break;
7756         case CMD_FCP_IREAD64_CR:
7757                 /* word3 iocb=iotag wqe=payload_offset_len */
7758                 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
7759                 wqe->fcp_iread.payload_offset_len =
7760                         xmit_len + sizeof(struct fcp_rsp);
7761                 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
7762                 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
7763                 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
7764                        iocbq->iocb.ulpFCP2Rcvy);
7765                 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
7766                 /* Always open the exchange */
7767                 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
7768                 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
7769                 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
7770                 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
7771                        LPFC_WQE_LENLOC_WORD4);
7772                 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
7773                 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
7774                 break;
7775         case CMD_FCP_ICMND64_CR:
7776                 /* word3 iocb=IO_TAG wqe=reserved */
7777                 wqe->fcp_icmd.rsrvd3 = 0;
7778                 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
7779                 /* Always open the exchange */
7780                 bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
7781                 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
7782                 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
7783                 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
7784                 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
7785                        LPFC_WQE_LENLOC_NONE);
7786                 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
7787                 break;
7788         case CMD_GEN_REQUEST64_CR:
7789                 /* For this command calculate the xmit length of the
7790                  * request bde.
7791                  */
7792                 xmit_len = 0;
7793                 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
7794                         sizeof(struct ulp_bde64);
7795                 for (i = 0; i < numBdes; i++) {
7796                         bde.tus.w = le32_to_cpu(bpl[i].tus.w);
7797                         if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
7798                                 break;
7799                         xmit_len += bde.tus.f.bdeSize;
7800                 }
7801                 /* word3 iocb=IO_TAG wqe=request_payload_len */
7802                 wqe->gen_req.request_payload_len = xmit_len;
7803                 /* word4 iocb=parameter wqe=relative_offset memcpy */
7804                 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
7805                 /* word6 context tag copied in memcpy */
7806                 if (iocbq->iocb.ulpCt_h  || iocbq->iocb.ulpCt_l) {
7807                         ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
7808                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7809                                 "2015 Invalid CT %x command 0x%x\n",
7810                                 ct, iocbq->iocb.ulpCommand);
7811                         return IOCB_ERROR;
7812                 }
7813                 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
7814                 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
7815                 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
7816                 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
7817                 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
7818                 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
7819                 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
7820                 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
7821                 command_type = OTHER_COMMAND;
7822                 break;
7823         case CMD_XMIT_ELS_RSP64_CX:
7824                 ndlp = (struct lpfc_nodelist *)iocbq->context1;
7825                 /* words0-2 BDE memcpy */
7826                 /* word3 iocb=iotag32 wqe=response_payload_len */
7827                 wqe->xmit_els_rsp.response_payload_len = xmit_len;
7828                 /* word4 iocb=did wqe=rsvd. */
7829                 wqe->xmit_els_rsp.rsvd4 = 0;
7830                 /* word5 iocb=rsvd wqe=did */
7831                 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
7832                          iocbq->iocb.un.elsreq64.remoteID);
7833                 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
7834                        ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
7835                 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
7836                 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7837                        iocbq->iocb.unsli3.rcvsli3.ox_id);
7838                 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
7839                         bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
7840                                phba->vpi_ids[iocbq->vport->vpi]);
7841                 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
7842                 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
7843                 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
7844                 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
7845                        LPFC_WQE_LENLOC_WORD3);
7846                 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
7847                 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
7848                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
7849                 command_type = OTHER_COMMAND;
7850                 break;
7851         case CMD_CLOSE_XRI_CN:
7852         case CMD_ABORT_XRI_CN:
7853         case CMD_ABORT_XRI_CX:
7854                 /* words 0-2 are reserved; the memcpy should leave them 0 */
7855                 /* port will send abts */
7856                 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
7857                 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
7858                         abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
7859                         fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
7860                 } else
7861                         fip = 0;
7862
7863                 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
7864                         /*
7865                          * The link is down, or the command was ELS_FIP,
7866                          * so the firmware does not need to send an abts
7867                          * on the wire.
7868                          */
7869                         bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
7870                 else
7871                         bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
7872                 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
7873                 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
7874                 wqe->abort_cmd.rsrvd5 = 0;
7875                 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
7876                         ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
7877                 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
7878                 /*
7879                  * The abort handler will send us CMD_ABORT_XRI_CN or
7880                  * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
7881                  */
7882                 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
7883                 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
7884                 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
7885                        LPFC_WQE_LENLOC_NONE);
7886                 cmnd = CMD_ABORT_XRI_CX;
7887                 command_type = OTHER_COMMAND;
7888                 xritag = 0;
7889                 break;
7890         case CMD_XMIT_BLS_RSP64_CX:
7891                 /* Because the BLS ABTS RSP WQE is very different from other
7892                  * WQEs, we reconstruct this WQE here from scratch based on
7893                  * the information in the iocbq.
7894                  */
7895                 memset(wqe, 0, sizeof(union lpfc_wqe));
7896                 /* OX_ID is the same regardless of who sent ABTS to the CT exchange */
7897                 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
7898                        bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
7899                 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
7900                     LPFC_ABTS_UNSOL_INT) {
7901                         /* ABTS sent by initiator to CT exchange, the
7902                          * RX_ID field will be filled with the newly
7903                          * allocated responder XRI.
7904                          */
7905                         bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
7906                                iocbq->sli4_xritag);
7907                 } else {
7908                         /* ABTS sent by responder to CT exchange, the
7909                          * RX_ID field will be filled with the responder
7910                          * RX_ID from ABTS.
7911                          */
7912                         bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
7913                                bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
7914                 }
7915                 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
7916                 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
7917                 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
7918                        iocbq->iocb.ulpContext);
7919                 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
7920                 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
7921                        LPFC_WQE_LENLOC_NONE);
7922                 /* Overwrite the pre-set command type with OTHER_COMMAND */
7923                 command_type = OTHER_COMMAND;
7924                 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
7925                         bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
7926                                bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
7927                         bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
7928                                bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
7929                         bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
7930                                bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
7931                 }
7932
7933                 break;
7934         case CMD_XRI_ABORTED_CX:
7935         case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
7936         case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
7937         case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
7938         case CMD_FCP_TRSP64_CX: /* Target mode rcv */
7939         case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
7940         default:
7941                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7942                                 "2014 Invalid command 0x%x\n",
7943                                 iocbq->iocb.ulpCommand);
7944                 return IOCB_ERROR;
7946         }
7947
7948         bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
7949         bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
7950         wqe->generic.wqe_com.abort_tag = abort_tag;
7951         bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
7952         bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
7953         bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
7954         bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
7955         return 0;
7956 }
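     /*
      * Note: the bf_set()/bf_get() accessors used throughout this routine
      * are the driver's mask-and-shift bitfield helpers. A minimal generic
      * sketch of the idea (an illustration with made-up names, not the
      * real lpfc macro definitions):
      *
      *      word = (word & ~(EX_MASK << EX_SHIFT)) |
      *             ((val & EX_MASK) << EX_SHIFT);      (set)
      *      val  = (word >> EX_SHIFT) & EX_MASK;       (get)
      */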
7957
7958 /**
7959  * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
7960  * @phba: Pointer to HBA context object.
7961  * @ring_number: SLI ring number to issue iocb on.
7962  * @piocb: Pointer to command iocb.
7963  * @flag: Flag indicating if this command can be put into txq.
7964  *
7965  * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
7966  * an iocb command to an HBA with SLI-4 interface spec.
7967  *
7968  * This function is called with the hbalock held. The function returns
7969  * success after it successfully submits the iocb to the firmware or after
7970  * adding it to the txq.
7971  **/
7972 static int
7973 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
7974                          struct lpfc_iocbq *piocb, uint32_t flag)
7975 {
7976         struct lpfc_sglq *sglq;
7977         union lpfc_wqe wqe;
7978         struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
7979
7980         if (piocb->sli4_xritag == NO_XRI) {
7981                 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
7982                     piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
7983                     piocb->iocb.ulpCommand == CMD_XMIT_BLS_RSP64_CX)
7984                         sglq = NULL;
7985                 else {
7986                         if (pring->txq_cnt) {
7987                                 if (!(flag & SLI_IOCB_RET_IOCB)) {
7988                                         __lpfc_sli_ringtx_put(phba,
7989                                                 pring, piocb);
7990                                         return IOCB_SUCCESS;
7991                                 } else {
7992                                         return IOCB_BUSY;
7993                                 }
7994                         } else {
7995                                 sglq = __lpfc_sli_get_sglq(phba, piocb);
7996                                 if (!sglq) {
7997                                         if (!(flag & SLI_IOCB_RET_IOCB)) {
7998                                                 __lpfc_sli_ringtx_put(phba,
7999                                                                 pring,
8000                                                                 piocb);
8001                                                 return IOCB_SUCCESS;
8002                                         } else
8003                                                 return IOCB_BUSY;
8004                                 }
8005                         }
8006                 }
8007         } else if (piocb->iocb_flag &  LPFC_IO_FCP) {
8008                 /* These IOs already have an XRI and a mapped sgl. */
8009                 sglq = NULL;
8010         } else {
8011                 /*
8012                  * This is a continuation of a command (CX), so this
8013                  * sglq is on the active list.
8014                  */
8015                 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
8016                 if (!sglq)
8017                         return IOCB_ERROR;
8018         }
8019
8020         if (sglq) {
8021                 piocb->sli4_lxritag = sglq->sli4_lxritag;
8022                 piocb->sli4_xritag = sglq->sli4_xritag;
8023                 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
8024                         return IOCB_ERROR;
8025         }
8026
8027         if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
8028                 return IOCB_ERROR;
8029
8030         if ((piocb->iocb_flag & LPFC_IO_FCP) ||
8031                 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
8032                 /*
8033                  * For an FCP command IOCB, get a new WQ index to
8034                  * distribute WQEs across the WQs. An abort IOCB, on the
8035                  * other hand, carries the same WQ index as the original
8036                  * command IOCB.
8037                  */
8038                 if (piocb->iocb_flag & LPFC_IO_FCP)
8039                         piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
8040                 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
8041                                      &wqe))
8042                         return IOCB_ERROR;
8043         } else {
8044                 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
8045                         return IOCB_ERROR;
8046         }
8047         lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
8048
8049         return 0;
8050 }
8051
8052 /**
8053  * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
8054  *
8055  * This routine wraps the actual lockless version for issuing an IOCB,
8056  * dispatching through the function pointer in the lpfc_hba struct.
8057  *
8058  * Return codes:
8059  *      IOCB_ERROR - Error
8060  *      IOCB_SUCCESS - Success
8061  *      IOCB_BUSY - Busy
8062  **/
8063 int
8064 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8065                 struct lpfc_iocbq *piocb, uint32_t flag)
8066 {
8067         return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
8068 }
8069
8070 /**
8071  * lpfc_sli_api_table_setup - Set up sli api function jump table
8072  * @phba: The hba struct for which this call is being executed.
8073  * @dev_grp: The HBA PCI-Device group number.
8074  *
8075  * This routine sets up the SLI interface API function jump table in @phba
8076  * struct.
8077  * Returns: 0 - success, -ENODEV - failure.
8078  **/
8079 int
8080 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8081 {
8082
8083         switch (dev_grp) {
8084         case LPFC_PCI_DEV_LP:
8085                 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
8086                 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
8087                 break;
8088         case LPFC_PCI_DEV_OC:
8089                 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
8090                 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
8091                 break;
8092         default:
8093                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8094                                 "1419 Invalid HBA PCI-device group: 0x%x\n",
8095                                 dev_grp);
8096                 return -ENODEV;
8098         }
8099         phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
8100         return 0;
8101 }
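     /*
      * Usage sketch (hypothetical probe path, for illustration): the jump
      * table must be wired up once the PCI device group is known, before
      * any iocbs are issued:
      *
      *      if (lpfc_sli_api_table_setup(phba, LPFC_PCI_DEV_OC))
      *              return -ENODEV;
      */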
8102
8103 /**
8104  * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
8105  * @phba: Pointer to HBA context object.
8106  * @pring: Pointer to driver SLI ring object.
8107  * @piocb: Pointer to command iocb.
8108  * @flag: Flag indicating if this command can be put into txq.
8109  *
8110  * lpfc_sli_issue_iocb is a wrapper around the __lpfc_sli_issue_iocb
8111  * function. This function takes the hbalock, calls
8112  * __lpfc_sli_issue_iocb, and returns the error returned
8113  * by __lpfc_sli_issue_iocb. This wrapper is used by
8114  * functions which do not hold the hbalock.
8115  **/
8116 int
8117 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8118                     struct lpfc_iocbq *piocb, uint32_t flag)
8119 {
8120         unsigned long iflags;
8121         int rc;
8122
8123         spin_lock_irqsave(&phba->hbalock, iflags);
8124         rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
8125         spin_unlock_irqrestore(&phba->hbalock, iflags);
8126
8127         return rc;
8128 }
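     /*
      * Usage sketch (illustrative): a caller that does not already hold the
      * hbalock uses this wrapper; a caller holding the lock must call
      * __lpfc_sli_issue_iocb() directly to avoid a self-deadlock:
      *
      *      rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb, 0);
      *      if (rc == IOCB_ERROR)
      *              release or retry the iocb as appropriate;
      */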
8129
8130 /**
8131  * lpfc_extra_ring_setup - Extra ring setup function
8132  * @phba: Pointer to HBA context object.
8133  *
8134  * This function is called while the driver attaches to the
8135  * HBA to set up the extra ring. The extra ring is used
8136  * only when the driver needs to support target mode
8137  * or IP over FC functionality.
8138  *
8139  * This function is called with no lock held.
8140  **/
8141 static int
8142 lpfc_extra_ring_setup(struct lpfc_hba *phba)
8143 {
8144         struct lpfc_sli *psli;
8145         struct lpfc_sli_ring *pring;
8146
8147         psli = &phba->sli;
8148
8149         /* Adjust cmd/rsp ring iocb entries more evenly */
8150
8151         /* Take some away from the FCP ring */
8152         pring = &psli->ring[psli->fcp_ring];
8153         pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8154         pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8155         pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8156         pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8157
8158         /* and give them to the extra ring */
8159         pring = &psli->ring[psli->extra_ring];
8160
8161         pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8162         pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8163         pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8164         pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8165
8166         /* Setup default profile for this ring */
8167         pring->iotag_max = 4096;
8168         pring->num_mask = 1;
8169         pring->prt[0].profile = 0;      /* Mask 0 */
8170         pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
8171         pring->prt[0].type = phba->cfg_multi_ring_type;
8172         pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
8173         return 0;
8174 }
8175
8176 /**
8177  * lpfc_sli_async_event_handler - ASYNC iocb handler function
8178  * @phba: Pointer to HBA context object.
8179  * @pring: Pointer to driver SLI ring object.
8180  * @iocbq: Pointer to iocb object.
8181  *
8182  * This function is called by the slow ring event handler
8183  * function when there is an ASYNC event iocb in the ring.
8184  * This function is called with no lock held.
8185  * Currently this function handles only temperature related
8186  * ASYNC events. The function decodes the temperature sensor
8187  * event message and posts events for the management applications.
8188  **/
8189 static void
8190 lpfc_sli_async_event_handler(struct lpfc_hba *phba,
8191         struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
8192 {
8193         IOCB_t *icmd;
8194         uint16_t evt_code;
8195         uint16_t temp;
8196         struct temp_event temp_event_data;
8197         struct Scsi_Host *shost;
8198         uint32_t *iocb_w;
8199
8200         icmd = &iocbq->iocb;
8201         evt_code = icmd->un.asyncstat.evt_code;
8202         temp = icmd->ulpContext;
8203
8204         if ((evt_code != ASYNC_TEMP_WARN) &&
8205                 (evt_code != ASYNC_TEMP_SAFE)) {
8206                 iocb_w = (uint32_t *) icmd;
8207                 lpfc_printf_log(phba,
8208                         KERN_ERR,
8209                         LOG_SLI,
8210                         "0346 Ring %d handler: unexpected ASYNC_STATUS"
8211                         " evt_code 0x%x\n"
8212                         "W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
8213                         "W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
8214                         "W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
8215                         "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
8216                         pring->ringno,
8217                         icmd->un.asyncstat.evt_code,
8218                         iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
8219                         iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
8220                         iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
8221                         iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
8222
8223                 return;
8224         }
8225         temp_event_data.data = (uint32_t)temp;
8226         temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
8227         if (evt_code == ASYNC_TEMP_WARN) {
8228                 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
8229                 lpfc_printf_log(phba,
8230                                 KERN_ERR,
8231                                 LOG_TEMP,
8232                                 "0347 Adapter is very hot, please take "
8233                                 "corrective action. temperature : %d Celsius\n",
8234                                 temp);
8235         }
8236         if (evt_code == ASYNC_TEMP_SAFE) {
8237                 temp_event_data.event_code = LPFC_NORMAL_TEMP;
8238                 lpfc_printf_log(phba,
8239                                 KERN_ERR,
8240                                 LOG_TEMP,
8241                                 "0340 Adapter temperature is OK now. "
8242                                 "temperature : %d Celsius\n",
8243                                 temp);
8244         }
8245
8246         /* Send temperature change event to applications */
8247         shost = lpfc_shost_from_vport(phba->pport);
8248         fc_host_post_vendor_event(shost, fc_get_event_number(),
8249                 sizeof(temp_event_data), (char *) &temp_event_data,
8250                 LPFC_NL_VENDOR_ID);
8251
8252 }
8253
8254
8255 /**
8256  * lpfc_sli_setup - SLI ring setup function
8257  * @phba: Pointer to HBA context object.
8258  *
8259  * lpfc_sli_setup sets up rings of the SLI interface with
8260  * number of iocbs per ring and iotags. This function is
8261  * called while the driver attaches to the HBA and before
8262  * interrupts are enabled, so there is no need for locking.
8263  *
8264  * This function always returns 0.
8265  **/
8266 int
8267 lpfc_sli_setup(struct lpfc_hba *phba)
8268 {
8269         int i, totiocbsize = 0;
8270         struct lpfc_sli *psli = &phba->sli;
8271         struct lpfc_sli_ring *pring;
8272
8273         psli->num_rings = MAX_CONFIGURED_RINGS;
8274         psli->sli_flag = 0;
8275         psli->fcp_ring = LPFC_FCP_RING;
8276         psli->next_ring = LPFC_FCP_NEXT_RING;
8277         psli->extra_ring = LPFC_EXTRA_RING;
8278
8279         psli->iocbq_lookup = NULL;
8280         psli->iocbq_lookup_len = 0;
8281         psli->last_iotag = 0;
8282
8283         for (i = 0; i < psli->num_rings; i++) {
8284                 pring = &psli->ring[i];
8285                 switch (i) {
8286                 case LPFC_FCP_RING:     /* ring 0 - FCP */
8287                         /* numCiocb and numRiocb are used in config_port */
8288                         pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
8289                         pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
8290                         pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8291                         pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8292                         pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8293                         pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8294                         pring->sizeCiocb = (phba->sli_rev == 3) ?
8295                                                         SLI3_IOCB_CMD_SIZE :
8296                                                         SLI2_IOCB_CMD_SIZE;
8297                         pring->sizeRiocb = (phba->sli_rev == 3) ?
8298                                                         SLI3_IOCB_RSP_SIZE :
8299                                                         SLI2_IOCB_RSP_SIZE;
8300                         pring->iotag_ctr = 0;
8301                         pring->iotag_max =
8302                             (phba->cfg_hba_queue_depth * 2);
8303                         pring->fast_iotag = pring->iotag_max;
8304                         pring->num_mask = 0;
8305                         break;
8306                 case LPFC_EXTRA_RING:   /* ring 1 - EXTRA */
8307                         /* numCiocb and numRiocb are used in config_port */
8308                         pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
8309                         pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
8310                         pring->sizeCiocb = (phba->sli_rev == 3) ?
8311                                                         SLI3_IOCB_CMD_SIZE :
8312                                                         SLI2_IOCB_CMD_SIZE;
8313                         pring->sizeRiocb = (phba->sli_rev == 3) ?
8314                                                         SLI3_IOCB_RSP_SIZE :
8315                                                         SLI2_IOCB_RSP_SIZE;
8316                         pring->iotag_max = phba->cfg_hba_queue_depth;
8317                         pring->num_mask = 0;
8318                         break;
8319                 case LPFC_ELS_RING:     /* ring 2 - ELS / CT */
8320                         /* numCiocb and numRiocb are used in config_port */
8321                         pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
8322                         pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
8323                         pring->sizeCiocb = (phba->sli_rev == 3) ?
8324                                                         SLI3_IOCB_CMD_SIZE :
8325                                                         SLI2_IOCB_CMD_SIZE;
8326                         pring->sizeRiocb = (phba->sli_rev == 3) ?
8327                                                         SLI3_IOCB_RSP_SIZE :
8328                                                         SLI2_IOCB_RSP_SIZE;
8329                         pring->fast_iotag = 0;
8330                         pring->iotag_ctr = 0;
8331                         pring->iotag_max = 4096;
8332                         pring->lpfc_sli_rcv_async_status =
8333                                 lpfc_sli_async_event_handler;
8334                         pring->num_mask = LPFC_MAX_RING_MASK;
8335                         pring->prt[0].profile = 0;      /* Mask 0 */
8336                         pring->prt[0].rctl = FC_RCTL_ELS_REQ;
8337                         pring->prt[0].type = FC_TYPE_ELS;
8338                         pring->prt[0].lpfc_sli_rcv_unsol_event =
8339                             lpfc_els_unsol_event;
8340                         pring->prt[1].profile = 0;      /* Mask 1 */
8341                         pring->prt[1].rctl = FC_RCTL_ELS_REP;
8342                         pring->prt[1].type = FC_TYPE_ELS;
8343                         pring->prt[1].lpfc_sli_rcv_unsol_event =
8344                             lpfc_els_unsol_event;
8345                         pring->prt[2].profile = 0;      /* Mask 2 */
8346                         /* NameServer Inquiry */
8347                         pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
8348                         /* NameServer */
8349                         pring->prt[2].type = FC_TYPE_CT;
8350                         pring->prt[2].lpfc_sli_rcv_unsol_event =
8351                             lpfc_ct_unsol_event;
8352                         pring->prt[3].profile = 0;      /* Mask 3 */
8353                         /* NameServer response */
8354                         pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
8355                         /* NameServer */
8356                         pring->prt[3].type = FC_TYPE_CT;
8357                         pring->prt[3].lpfc_sli_rcv_unsol_event =
8358                             lpfc_ct_unsol_event;
8359                         /* abort unsolicited sequence */
8360                         pring->prt[4].profile = 0;      /* Mask 4 */
8361                         pring->prt[4].rctl = FC_RCTL_BA_ABTS;
8362                         pring->prt[4].type = FC_TYPE_BLS;
8363                         pring->prt[4].lpfc_sli_rcv_unsol_event =
8364                             lpfc_sli4_ct_abort_unsol_event;
8365                         break;
8366                 }
8367                 totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
8368                                 (pring->numRiocb * pring->sizeRiocb);
8369         }
8370         if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
8371                 /* Too many cmd / rsp ring entries in SLI2 SLIM */
8372                 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
8373                        "SLI2 SLIM Data: x%x x%lx\n",
8374                        phba->brd_no, totiocbsize,
8375                        (unsigned long) MAX_SLIM_IOCB_SIZE);
8376         }
8377         if (phba->cfg_multi_ring_support == 2)
8378                 lpfc_extra_ring_setup(phba);
8379
8380         return 0;
8381 }
8382
8383 /**
8384  * lpfc_sli_queue_setup - Queue initialization function
8385  * @phba: Pointer to HBA context object.
8386  *
8387  * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
8388  * ring. This function also initializes ring indices of each ring.
8389  * This function is called during the initialization of the SLI
8390  * interface of an HBA.
8391  * This function is called with no lock held and always returns
8392  * 1.
8393  **/
8394 int
8395 lpfc_sli_queue_setup(struct lpfc_hba *phba)
8396 {
8397         struct lpfc_sli *psli;
8398         struct lpfc_sli_ring *pring;
8399         int i;
8400
8401         psli = &phba->sli;
8402         spin_lock_irq(&phba->hbalock);
8403         INIT_LIST_HEAD(&psli->mboxq);
8404         INIT_LIST_HEAD(&psli->mboxq_cmpl);
8405         /* Initialize list headers for txq and txcmplq as doubly linked lists */
8406         for (i = 0; i < psli->num_rings; i++) {
8407                 pring = &psli->ring[i];
8408                 pring->ringno = i;
8409                 pring->next_cmdidx  = 0;
8410                 pring->local_getidx = 0;
8411                 pring->cmdidx = 0;
8412                 INIT_LIST_HEAD(&pring->txq);
8413                 INIT_LIST_HEAD(&pring->txcmplq);
8414                 INIT_LIST_HEAD(&pring->iocb_continueq);
8415                 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
8416                 INIT_LIST_HEAD(&pring->postbufq);
8417         }
8418         spin_unlock_irq(&phba->hbalock);
8419         return 1;
8420 }
8421
8422 /**
8423  * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
8424  * @phba: Pointer to HBA context object.
8425  *
8426  * This routine flushes the mailbox command subsystem. It will unconditionally
8427  * flush all the mailbox commands in the three possible stages in the mailbox
8428  * command sub-system: pending mailbox command queue; the outstanding mailbox
8429  * command; and completed mailbox command queue. It is caller's responsibility
8430  * to make sure that the driver is in the proper state to flush the mailbox
8431  * command sub-system. Namely, the posting of mailbox commands into the
8432  * pending mailbox command queue from the various clients must be stopped;
8433  * either the HBA is in a state in which it will never work on the outstanding
8434  * mailbox command (such as in EEH or ERATT conditions) or the outstanding
8435  * mailbox command has been completed.
8436  **/
8437 static void
8438 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
8439 {
8440         LIST_HEAD(completions);
8441         struct lpfc_sli *psli = &phba->sli;
8442         LPFC_MBOXQ_t *pmb;
8443         unsigned long iflag;
8444
8445         /* Flush all the mailbox commands in the mbox system */
8446         spin_lock_irqsave(&phba->hbalock, iflag);
8447         /* The pending mailbox command queue */
8448         list_splice_init(&phba->sli.mboxq, &completions);
8449         /* The outstanding active mailbox command */
8450         if (psli->mbox_active) {
8451                 list_add_tail(&psli->mbox_active->list, &completions);
8452                 psli->mbox_active = NULL;
8453                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8454         }
8455         /* The completed mailbox command queue */
8456         list_splice_init(&phba->sli.mboxq_cmpl, &completions);
8457         spin_unlock_irqrestore(&phba->hbalock, iflag);
8458
8459         /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
8460         while (!list_empty(&completions)) {
8461                 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
8462                 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
8463                 if (pmb->mbox_cmpl)
8464                         pmb->mbox_cmpl(phba, pmb);
8465         }
8466 }
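     /*
      * The flush above uses the driver's usual "collect under lock,
      * complete outside the lock" idiom; a generic sketch of the pattern:
      *
      *      LIST_HEAD(local_list);
      *      spin_lock_irqsave(&lock, flags);
      *      list_splice_init(&shared_list, &local_list);
      *      spin_unlock_irqrestore(&lock, flags);
      *      walk local_list and run the completion handlers lock-free
      */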
8467
8468 /**
8469  * lpfc_sli_host_down - Vport cleanup function
8470  * @vport: Pointer to virtual port object.
8471  *
8472  * lpfc_sli_host_down is called to clean up the resources
8473  * associated with a vport before destroying virtual
8474  * port data structures.
8475  * This function does the following operations:
8476  * - Free discovery resources associated with this virtual
8477  *   port.
8478  * - Free iocbs associated with this virtual port in
8479  *   the txq.
8480  * - Send abort for all iocb commands associated with this
8481  *   vport in txcmplq.
8482  *
8483  * This function is called with no lock held and always returns 1.
8484  **/
8485 int
8486 lpfc_sli_host_down(struct lpfc_vport *vport)
8487 {
8488         LIST_HEAD(completions);
8489         struct lpfc_hba *phba = vport->phba;
8490         struct lpfc_sli *psli = &phba->sli;
8491         struct lpfc_sli_ring *pring;
8492         struct lpfc_iocbq *iocb, *next_iocb;
8493         int i;
8494         unsigned long flags = 0;
8495         uint16_t prev_pring_flag;
8496
8497         lpfc_cleanup_discovery_resources(vport);
8498
8499         spin_lock_irqsave(&phba->hbalock, flags);
8500         for (i = 0; i < psli->num_rings; i++) {
8501                 pring = &psli->ring[i];
8502                 prev_pring_flag = pring->flag;
8503                 /* Only slow rings */
8504                 if (pring->ringno == LPFC_ELS_RING) {
8505                         pring->flag |= LPFC_DEFERRED_RING_EVENT;
8506                         /* Set the lpfc data pending flag */
8507                         set_bit(LPFC_DATA_READY, &phba->data_flags);
8508                 }
8509                 /*
8510                  * Error everything on the txq since these iocbs have not been
8511                  * given to the FW yet.
8512                  */
8513                 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
8514                         if (iocb->vport != vport)
8515                                 continue;
8516                         list_move_tail(&iocb->list, &completions);
8517                         pring->txq_cnt--;
8518                 }
8519
8520                 /* Next issue ABTS for everything on the txcmplq */
8521                 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
8522                                                                         list) {
8523                         if (iocb->vport != vport)
8524                                 continue;
8525                         lpfc_sli_issue_abort_iotag(phba, pring, iocb);
8526                 }
8527
8528                 pring->flag = prev_pring_flag;
8529         }
8530
8531         spin_unlock_irqrestore(&phba->hbalock, flags);
8532
8533         /* Cancel all the IOCBs from the completions list */
8534         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
8535                               IOERR_SLI_DOWN);
8536         return 1;
8537 }
8538
8539 /**
8540  * lpfc_sli_hba_down - Resource cleanup function for the HBA
8541  * @phba: Pointer to HBA context object.
8542  *
8543  * This function cleans up all iocbs, buffers, and mailbox commands
8544  * while shutting down the HBA. This function is called with no
8545  * lock held and always returns 1.
8546  * This function does the following to cleanup driver resources:
8547  * - Free discovery resources for each virtual port
8548  * - Cleanup any pending fabric iocbs
8549  * - Iterate through the iocb txq and free each entry
8550  *   in the list.
8551  * - Free up any buffer posted to the HBA
8552  * - Free mailbox commands in the mailbox queue.
8553  **/
8554 int
8555 lpfc_sli_hba_down(struct lpfc_hba *phba)
8556 {
8557         LIST_HEAD(completions);
8558         struct lpfc_sli *psli = &phba->sli;
8559         struct lpfc_sli_ring *pring;
8560         struct lpfc_dmabuf *buf_ptr;
8561         unsigned long flags = 0;
8562         int i;
8563
8564         /* Shutdown the mailbox command sub-system */
8565         lpfc_sli_mbox_sys_shutdown(phba);
8566
8567         lpfc_hba_down_prep(phba);
8568
8569         lpfc_fabric_abort_hba(phba);
8570
8571         spin_lock_irqsave(&phba->hbalock, flags);
8572         for (i = 0; i < psli->num_rings; i++) {
8573                 pring = &psli->ring[i];
8574                 /* Only slow rings */
8575                 if (pring->ringno == LPFC_ELS_RING) {
8576                         pring->flag |= LPFC_DEFERRED_RING_EVENT;
8577                         /* Set the lpfc data pending flag */
8578                         set_bit(LPFC_DATA_READY, &phba->data_flags);
8579                 }
8580
8581                 /*
8582                  * Error everything on the txq since these iocbs have not been
8583                  * given to the FW yet.
8584                  */
8585                 list_splice_init(&pring->txq, &completions);
8586                 pring->txq_cnt = 0;
8587
8588         }
8589         spin_unlock_irqrestore(&phba->hbalock, flags);
8590
8591         /* Cancel all the IOCBs from the completions list */
8592         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
8593                               IOERR_SLI_DOWN);
8594
8595         spin_lock_irqsave(&phba->hbalock, flags);
8596         list_splice_init(&phba->elsbuf, &completions);
8597         phba->elsbuf_cnt = 0;
8598         phba->elsbuf_prev_cnt = 0;
8599         spin_unlock_irqrestore(&phba->hbalock, flags);
8600
8601         while (!list_empty(&completions)) {
8602                 list_remove_head(&completions, buf_ptr,
8603                         struct lpfc_dmabuf, list);
8604                 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
8605                 kfree(buf_ptr);
8606         }
8607
8608         /* Return any active mbox cmds */
8609         del_timer_sync(&psli->mbox_tmo);
8610
8611         spin_lock_irqsave(&phba->pport->work_port_lock, flags);
8612         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8613         spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
8614
8615         return 1;
8616 }
8617
8618 /**
8619  * lpfc_sli_pcimem_bcopy - SLI memory copy function
8620  * @srcp: Source memory pointer.
8621  * @destp: Destination memory pointer.
8622  * @cnt: Number of bytes to be copied.
8623  *
8624  * This function is used for copying data between driver memory
8625  * and the SLI memory. This function also changes the endianness
8626  * of each word if native endianness is different from SLI
8627  * endianness. This function can be called with or without
8628  * lock.
8629  **/
8630 void
8631 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
8632 {
8633         uint32_t *src = srcp;
8634         uint32_t *dest = destp;
8635         uint32_t ldata;
8636         int i;
8637
8638         for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
8639                 ldata = *src;
8640                 ldata = le32_to_cpu(ldata);
8641                 *dest = ldata;
8642                 src++;
8643                 dest++;
8644         }
8645 }
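     /*
      * Example (illustrative, names hypothetical): since @cnt is a byte
      * count, copying four little-endian words from the SLIM area into
      * host order would look like:
      *
      *      lpfc_sli_pcimem_bcopy(slim_addr, local_words,
      *                            4 * sizeof(uint32_t));
      */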
8646
8647
8648 /**
8649  * lpfc_sli_bemem_bcopy - SLI memory copy function
8650  * @srcp: Source memory pointer.
8651  * @destp: Destination memory pointer.
8652  * @cnt: Number of bytes to be copied.
8653  *
8654  * This function is used for copying data from a data structure
8655  * with big-endian representation to local endianness.
8656  * This function can be called with or without lock.
8657  **/
8658 void
8659 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
8660 {
8661         uint32_t *src = srcp;
8662         uint32_t *dest = destp;
8663         uint32_t ldata;
8664         int i;
8665
8666         for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
8667                 ldata = *src;
8668                 ldata = be32_to_cpu(ldata);
8669                 *dest = ldata;
8670                 src++;
8671                 dest++;
8672         }
8673 }
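     /*
      * Example (illustrative, names hypothetical): pulling a 32-byte
      * big-endian region into host order; as with lpfc_sli_pcimem_bcopy(),
      * @cnt is a byte count:
      *
      *      lpfc_sli_bemem_bcopy(be_region, &local_copy, 32);
      */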
8674
8675 /**
8676  * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
8677  * @phba: Pointer to HBA context object.
8678  * @pring: Pointer to driver SLI ring object.
8679  * @mp: Pointer to driver buffer object.
8680  *
8681  * This function is called with no lock held.
8682  * It always returns zero after adding the buffer to the postbufq
8683  * buffer list.
8684  **/
8685 int
8686 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8687                          struct lpfc_dmabuf *mp)
8688 {
8689         /* Stick struct lpfc_dmabuf at the end of postbufq so the driver
8690          * can look it up later. */
8691         spin_lock_irq(&phba->hbalock);
8692         list_add_tail(&mp->list, &pring->postbufq);
8693         pring->postbufq_cnt++;
8694         spin_unlock_irq(&phba->hbalock);
8695         return 0;
8696 }
8697
8698 /**
8699  * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
8700  * @phba: Pointer to HBA context object.
8701  *
8702  * When HBQ is enabled, buffers are searched based on tags. This function
8703  * allocates a tag for a buffer posted using a CMD_QUE_XRI64_CX iocb. The
8704  * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
8705  * does not conflict with tags of buffers posted for unsolicited events.
8706  * The function returns the allocated tag. The function is called with
8707  * no locks held.
8708  **/
8709 uint32_t
8710 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
8711 {
8712         spin_lock_irq(&phba->hbalock);
8713         phba->buffer_tag_count++;
8714         /*
8715          * Always set the QUE_BUFTAG_BIT to distinguish this tag
8716          * from a tag assigned by HBQ.
8717          */
8718         phba->buffer_tag_count |= QUE_BUFTAG_BIT;
8719         spin_unlock_irq(&phba->hbalock);
8720         return phba->buffer_tag_count;
8721 }
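     /*
      * Usage sketch (hypothetical caller): tag a buffer before posting it
      * with a CMD_QUE_XRI64_CX iocb so it can be found again on completion:
      *
      *      mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
      *      lpfc_sli_ringpostbuf_put(phba, pring, mp);
      */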
8722
8723 /**
8724  * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
8725  * @phba: Pointer to HBA context object.
8726  * @pring: Pointer to driver SLI ring object.
8727  * @tag: Buffer tag.
8728  *
8729  * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
8730  * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
8731  * iocb is posted to the response ring with the tag of the buffer.
8732  * This function searches the pring->postbufq list using the tag
8733  * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
8734  * iocb. If the buffer is found, the lpfc_dmabuf object of the
8735  * buffer is returned to the caller; otherwise NULL is returned.
8736  * This function is called with no lock held.
8737  **/
8738 struct lpfc_dmabuf *
8739 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8740                         uint32_t tag)
8741 {
8742         struct lpfc_dmabuf *mp, *next_mp;
8743         struct list_head *slp = &pring->postbufq;
8744
8745         /* Search postbufq, from the beginning, looking for a match on tag */
8746         spin_lock_irq(&phba->hbalock);
8747         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
8748                 if (mp->buffer_tag == tag) {
8749                         list_del_init(&mp->list);
8750                         pring->postbufq_cnt--;
8751                         spin_unlock_irq(&phba->hbalock);
8752                         return mp;
8753                 }
8754         }
8755
8756         spin_unlock_irq(&phba->hbalock);
8757         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8758                         "0402 Cannot find virtual addr for buffer tag on "
8759                         "ring %d Data x%lx x%p x%p x%x\n",
8760                         pring->ringno, (unsigned long) tag,
8761                         slp->next, slp->prev, pring->postbufq_cnt);
8762
8763         return NULL;
8764 }
8765
8766 /**
8767  * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
8768  * @phba: Pointer to HBA context object.
8769  * @pring: Pointer to driver SLI ring object.
8770  * @phys: DMA address of the buffer.
8771  *
8772  * This function searches the buffer list using the dma_address
8773  * of unsolicited event to find the driver's lpfc_dmabuf object
8774  * corresponding to the dma_address. The function returns the
8775  * lpfc_dmabuf object if a buffer is found; otherwise it returns NULL.
8776  * This function is called by the ct and els unsolicited event
8777  * handlers to get the buffer associated with the unsolicited
8778  * event.
8779  *
8780  * This function is called with no lock held.
8781  **/
8782 struct lpfc_dmabuf *
8783 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8784                          dma_addr_t phys)
8785 {
8786         struct lpfc_dmabuf *mp, *next_mp;
8787         struct list_head *slp = &pring->postbufq;
8788
8789         /* Search postbufq, from the beginning, looking for a match on phys */
8790         spin_lock_irq(&phba->hbalock);
8791         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
8792                 if (mp->phys == phys) {
8793                         list_del_init(&mp->list);
8794                         pring->postbufq_cnt--;
8795                         spin_unlock_irq(&phba->hbalock);
8796                         return mp;
8797                 }
8798         }
8799
8800         spin_unlock_irq(&phba->hbalock);
8801         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8802                         "0410 Cannot find virtual addr for mapped buf on "
8803                         "ring %d Data x%llx x%p x%p x%x\n",
8804                         pring->ringno, (unsigned long long)phys,
8805                         slp->next, slp->prev, pring->postbufq_cnt);
8806         return NULL;
8807 }
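
/*
 * Illustrative sketch only (hypothetical caller): an unsolicited CT/ELS
 * event handler would typically recover the posted buffer from the DMA
 * address carried in the response iocb, e.g.:
 *
 *	struct lpfc_dmabuf *mp;
 *
 *	mp = lpfc_sli_ringpostbuf_get(phba, pring, getPaddr(hi, lo));
 *	if (mp)
 *		(process mp->virt, then free the DMA buffer and mp)
 *
 * where getPaddr() is assumed to reassemble the dma_addr_t from the
 * high/low address words of the iocb.
 */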
8808
8809 /**
8810  * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
8811  * @phba: Pointer to HBA context object.
8812  * @cmdiocb: Pointer to driver command iocb object.
8813  * @rspiocb: Pointer to driver response iocb object.
8814  *
8815  * This function is the completion handler for the abort iocbs for
8816  * ELS commands. This function is called from the ELS ring event
8817  * handler with no lock held. This function frees memory resources
8818  * associated with the abort iocb.
8819  **/
8820 static void
8821 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
8822                         struct lpfc_iocbq *rspiocb)
8823 {
8824         IOCB_t *irsp = &rspiocb->iocb;
8825         uint16_t abort_iotag, abort_context;
8826         struct lpfc_iocbq *abort_iocb;
8827         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
8828
8829         abort_iocb = NULL;
8830
8831         if (irsp->ulpStatus) {
8832                 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
8833                 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
8834
8835                 spin_lock_irq(&phba->hbalock);
8836                 if (phba->sli_rev < LPFC_SLI_REV4) {
8837                         if (abort_iotag != 0 &&
8838                                 abort_iotag <= phba->sli.last_iotag)
8839                                 abort_iocb =
8840                                         phba->sli.iocbq_lookup[abort_iotag];
8841                 } else
8842                         /* For sli4 the abort_tag is the XRI,
8843                          * so the abort routine puts the iotag  of the iocb
8844                          * being aborted in the context field of the abort
8845                          * IOCB.
8846                          */
8847                         abort_iocb = phba->sli.iocbq_lookup[abort_context];
8848
8849                 /*
8850                  *  If the iocb is not found in the firmware queue, the iocb
8851                  *  might have completed already. Do not free it again.
8852                  */
8853                 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
8854                         if (irsp->un.ulpWord[4] != IOERR_NO_XRI) {
8855                                 spin_unlock_irq(&phba->hbalock);
8856                                 lpfc_sli_release_iocbq(phba, cmdiocb);
8857                                 return;
8858                         }
8859                         /* For SLI4 the ulpContext field for abort IOCB
8860                          * holds the iotag of the IOCB being aborted, so
8861                          * the local abort_context needs to be reset to
8862                          * match the aborted IOCB's ulpContext.
8863                          */
8864                         if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4)
8865                                 abort_context = abort_iocb->iocb.ulpContext;
8866                 }
8867
8868                 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
8869                                 "0327 Cannot abort els iocb %p "
8870                                 "with tag %x context %x, abort status %x, "
8871                                 "abort code %x\n",
8872                                 abort_iocb, abort_iotag, abort_context,
8873                                 irsp->ulpStatus, irsp->un.ulpWord[4]);
8874                 /*
8875                  * make sure we have the right iocbq before taking it
8876                  * off the txcmplq and trying to call the completion routine.
8877                  */
8878                 if (!abort_iocb ||
8879                     abort_iocb->iocb.ulpContext != abort_context ||
8880                     (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
8881                         spin_unlock_irq(&phba->hbalock);
8882                 else if (phba->sli_rev < LPFC_SLI_REV4) {
8883                         /*
8884                          * An SLI4 aborted command is left on the txcmplq
8885                          * list; the command-complete WCQE's XB bit tells
8886                          * whether the SGL (XRI) can be released immediately
8887                          * or must go on the aborted SGL list until the
8888                          * subsequent abort-XRI completes from the HBA.
8889                          */
8890                         list_del_init(&abort_iocb->list);
8891                         if (abort_iocb->iocb_flag & LPFC_IO_ON_Q) {
8892                                 abort_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
8893                                 pring->txcmplq_cnt--;
8894                         }
8895
8896                         /* Firmware could still be in the process of DMAing
8897                          * the payload, so don't free the data buffer until
8898                          * after a heartbeat.
8899                          */
8900                         abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
8901                         abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
8902                         spin_unlock_irq(&phba->hbalock);
8903
8904                         abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
8905                         abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
8906                         (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
8907                 } else
8908                         spin_unlock_irq(&phba->hbalock);
8909         }
8910
8911         lpfc_sli_release_iocbq(phba, cmdiocb);
8912         return;
8913 }
8914
8915 /**
8916  * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
8917  * @phba: Pointer to HBA context object.
8918  * @cmdiocb: Pointer to driver command iocb object.
8919  * @rspiocb: Pointer to driver response iocb object.
8920  *
8921  * The function is called from SLI ring event handler with no
8922  * lock held. This function is the completion handler for ELS commands
8923  * which are aborted. The function frees memory resources used for
8924  * the aborted ELS commands.
8925  **/
8926 static void
8927 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
8928                      struct lpfc_iocbq *rspiocb)
8929 {
8930         IOCB_t *irsp = &rspiocb->iocb;
8931
8932         /* ELS cmd tag <ulpIoTag> completes */
8933         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
8934                         "0139 Ignoring ELS cmd tag x%x completion Data: "
8935                         "x%x x%x x%x\n",
8936                         irsp->ulpIoTag, irsp->ulpStatus,
8937                         irsp->un.ulpWord[4], irsp->ulpTimeout);
8938         if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
8939                 lpfc_ct_free_iocb(phba, cmdiocb);
8940         else
8941                 lpfc_els_free_iocb(phba, cmdiocb);
8942         return;
8943 }
8944
8945 /**
8946  * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
8947  * @phba: Pointer to HBA context object.
8948  * @pring: Pointer to driver SLI ring object.
8949  * @cmdiocb: Pointer to driver command iocb object.
8950  *
8951  * This function issues an abort iocb for the provided command iocb down to
8952  * the port. Unless the outstanding command iocb is itself an abort
8953  * request, this function issues the abort unconditionally. This function is
8954  * called with hbalock held. The function returns 0 when it fails due to
8955  * memory allocation failure or when the command iocb is an abort request.
8956  **/
8957 static int
8958 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8959                            struct lpfc_iocbq *cmdiocb)
8960 {
8961         struct lpfc_vport *vport = cmdiocb->vport;
8962         struct lpfc_iocbq *abtsiocbp;
8963         IOCB_t *icmd = NULL;
8964         IOCB_t *iabt = NULL;
8965         int retval;
8966
8967         /*
8968          * There are certain command types we don't want to abort.  And we
8969          * don't want to abort commands that are already in the process of
8970          * being aborted.
8971          */
8972         icmd = &cmdiocb->iocb;
8973         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
8974             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
8975             (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
8976                 return 0;
8977
8978         /* issue ABTS for this IOCB based on iotag */
8979         abtsiocbp = __lpfc_sli_get_iocbq(phba);
8980         if (abtsiocbp == NULL)
8981                 return 0;
8982
8983         /* This flag signals the completion path to set the correct
8984          * status before calling the completion handler.
8985          */
8986         cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
8987
8988         iabt = &abtsiocbp->iocb;
8989         iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
8990         iabt->un.acxri.abortContextTag = icmd->ulpContext;
8991         if (phba->sli_rev == LPFC_SLI_REV4) {
8992                 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
8993                 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
8994         }
8995         else
8996                 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
8997         iabt->ulpLe = 1;
8998         iabt->ulpClass = icmd->ulpClass;
8999
9000         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
9001         abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
9002         if (cmdiocb->iocb_flag & LPFC_IO_FCP)
9003                 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
9004
9005         if (phba->link_state >= LPFC_LINK_UP)
9006                 iabt->ulpCommand = CMD_ABORT_XRI_CN;
9007         else
9008                 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
9009
9010         abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
9011
9012         lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
9013                          "0339 Abort xri x%x, original iotag x%x, "
9014                          "abort cmd iotag x%x\n",
9015                          iabt->un.acxri.abortIoTag,
9016                          iabt->un.acxri.abortContextTag,
9017                          abtsiocbp->iotag);
9018         retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
9019
9020         if (retval)
9021                 __lpfc_sli_release_iocbq(phba, abtsiocbp);
9022
9023         /*
9024          * Caller to this routine should check for IOCB_ERROR
9025          * and handle it properly.  This routine no longer removes the
9026          * iocb from the txcmplq nor calls compl in case of IOCB_ERROR.
9027          */
9028         return retval;
9029 }
9030
9031 /**
9032  * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
9033  * @phba: Pointer to HBA context object.
9034  * @pring: Pointer to driver SLI ring object.
9035  * @cmdiocb: Pointer to driver command iocb object.
9036  *
9037  * This function issues an abort iocb for the provided command iocb. In case
9038  * of unloading, the abort iocb will not be issued to commands on the ELS
9039  * ring. Instead, the completion callback of those commands is changed
9040  * so that nothing happens when they finish. This function is called with
9041  * hbalock held. The function returns 0 when the command iocb is an abort
9042  * request.
9043  **/
9044 int
9045 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9046                            struct lpfc_iocbq *cmdiocb)
9047 {
9048         struct lpfc_vport *vport = cmdiocb->vport;
9049         int retval = IOCB_ERROR;
9050         IOCB_t *icmd = NULL;
9051
9052         /*
9053          * There are certain command types we don't want to abort.  And we
9054          * don't want to abort commands that are already in the process of
9055          * being aborted.
9056          */
9057         icmd = &cmdiocb->iocb;
9058         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
9059             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
9060             (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
9061                 return 0;
9062
9063         /*
9064          * If we're unloading, don't abort iocb on the ELS ring, but change
9065          * the callback so that nothing happens when it finishes.
9066          */
9067         if ((vport->load_flag & FC_UNLOADING) &&
9068             (pring->ringno == LPFC_ELS_RING)) {
9069                 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
9070                         cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
9071                 else
9072                         cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
9073                 goto abort_iotag_exit;
9074         }
9075
9076         /* Now, we try to issue the abort to the cmdiocb out */
9077         retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
9078
9079 abort_iotag_exit:
9080         /*
9081          * Caller to this routine should check for IOCB_ERROR
9082          * and handle it properly.  This routine no longer removes the
9083          * iocb from the txcmplq nor calls compl in case of IOCB_ERROR.
9084          */
9085         return retval;
9086 }
9087
9088 /**
9089  * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
9090  * @phba: Pointer to HBA context object.
9091  * @pring: Pointer to driver SLI ring object.
9092  *
9093  * This function aborts all iocbs in the given ring and frees all the iocb
9094  * objects in txq. This function issues abort iocbs unconditionally for all
9095  * the iocb commands in the txcmplq. The iocbs in the txcmplq are not guaranteed
9096  * to complete before the return of this function. The caller is not required
9097  * to hold any locks.
9098  **/
9099 static void
9100 lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
9101 {
9102         LIST_HEAD(completions);
9103         struct lpfc_iocbq *iocb, *next_iocb;
9104
9105         if (pring->ringno == LPFC_ELS_RING)
9106                 lpfc_fabric_abort_hba(phba);
9107
9108         spin_lock_irq(&phba->hbalock);
9109
9110         /* Take off all the iocbs on txq for cancelling */
9111         list_splice_init(&pring->txq, &completions);
9112         pring->txq_cnt = 0;
9113
9114         /* Next issue ABTS for everything on the txcmplq */
9115         list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
9116                 lpfc_sli_abort_iotag_issue(phba, pring, iocb);
9117
9118         spin_unlock_irq(&phba->hbalock);
9119
9120         /* Cancel all the IOCBs from the completions list */
9121         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9122                               IOERR_SLI_ABORTED);
9123 }
9124
9125 /**
9126  * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
9127  * @phba: pointer to lpfc HBA data structure.
9128  *
9129  * This routine will abort all pending and outstanding iocbs to an HBA.
9130  **/
9131 void
9132 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
9133 {
9134         struct lpfc_sli *psli = &phba->sli;
9135         struct lpfc_sli_ring *pring;
9136         int i;
9137
9138         for (i = 0; i < psli->num_rings; i++) {
9139                 pring = &psli->ring[i];
9140                 lpfc_sli_iocb_ring_abort(phba, pring);
9141         }
9142 }
9143
9144 /**
9145  * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
9146  * @iocbq: Pointer to driver iocb object.
9147  * @vport: Pointer to driver virtual port object.
9148  * @tgt_id: SCSI ID of the target.
9149  * @lun_id: LUN ID of the scsi device.
9150  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
9151  *
9152  * This function acts as an iocb filter for functions which abort or count
9153  * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
9154  * 0 if the filtering criteria are met for the given iocb and will return
9155  * 1 if the filtering criteria are not met.
9156  * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
9157  * given iocb is for the SCSI device specified by vport, tgt_id and
9158  * lun_id parameters.
9159  * If ctx_cmd == LPFC_CTX_TGT,  the function returns 0 only if the
9160  * given iocb is for the SCSI target specified by vport and tgt_id
9161  * parameters.
9162  * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
9163  * given iocb is for the SCSI host associated with the given vport.
9164  * This function is called with no locks held.
9165  **/
9166 static int
9167 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
9168                            uint16_t tgt_id, uint64_t lun_id,
9169                            lpfc_ctx_cmd ctx_cmd)
9170 {
9171         struct lpfc_scsi_buf *lpfc_cmd;
9172         int rc = 1;
9173
9174         if (!(iocbq->iocb_flag &  LPFC_IO_FCP))
9175                 return rc;
9176
9177         if (iocbq->vport != vport)
9178                 return rc;
9179
9180         lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
9181
9182         if (lpfc_cmd->pCmd == NULL)
9183                 return rc;
9184
9185         switch (ctx_cmd) {
9186         case LPFC_CTX_LUN:
9187                 if ((lpfc_cmd->rdata->pnode) &&
9188                     (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
9189                     (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
9190                         rc = 0;
9191                 break;
9192         case LPFC_CTX_TGT:
9193                 if ((lpfc_cmd->rdata->pnode) &&
9194                     (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
9195                         rc = 0;
9196                 break;
9197         case LPFC_CTX_HOST:
9198                 rc = 0;
9199                 break;
9200         default:
9201                 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
9202                         __func__, ctx_cmd);
9203                 break;
9204         }
9205
9206         return rc;
9207 }
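
/*
 * Usage sketch: the callers below, lpfc_sli_sum_iocb() and
 * lpfc_sli_abort_iocb(), walk the iotag lookup array and apply this
 * filter to every outstanding iocb:
 *
 *	for (i = 1; i <= phba->sli.last_iotag; i++) {
 *		iocbq = phba->sli.iocbq_lookup[i];
 *		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id,
 *					       lun_id, ctx_cmd) == 0)
 *			(iocb matches the filter; count or abort it)
 *	}
 */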
9208
9209 /**
9210  * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
9211  * @vport: Pointer to virtual port.
9212  * @tgt_id: SCSI ID of the target.
9213  * @lun_id: LUN ID of the scsi device.
9214  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
9215  *
9216  * This function returns the number of FCP commands pending for the vport.
9217  * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
9218  * commands pending on the vport for the SCSI device specified by the
9219  * tgt_id and lun_id parameters.
9220  * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
9221  * commands pending on the vport for the SCSI target specified by the
9222  * tgt_id parameter.
9223  * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
9224  * commands pending on the vport.
9225  * This function returns the number of iocbs which satisfy the filter.
9226  * This function is called without any lock held.
9227  **/
9228 int
9229 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
9230                   lpfc_ctx_cmd ctx_cmd)
9231 {
9232         struct lpfc_hba *phba = vport->phba;
9233         struct lpfc_iocbq *iocbq;
9234         int sum, i;
9235
9236         for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
9237                 iocbq = phba->sli.iocbq_lookup[i];
9238
9239                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
9240                                                ctx_cmd) == 0)
9241                         sum++;
9242         }
9243
9244         return sum;
9245 }
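
/*
 * Illustrative sketch only (hypothetical caller): a reset handler might
 * poll this count while waiting for outstanding commands to drain, e.g.:
 *
 *	while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN) &&
 *	       time_before(jiffies, wait_end))
 *		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
 *
 * where wait_end is an assumed caller-chosen deadline in jiffies.
 */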
9246
9247 /**
9248  * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
9249  * @phba: Pointer to HBA context object
9250  * @cmdiocb: Pointer to command iocb object.
9251  * @rspiocb: Pointer to response iocb object.
9252  *
9253  * This function is called when an aborted FCP iocb completes. This
9254  * function is called by the ring event handler with no lock held.
9255  * This function frees the iocb.
9256  **/
9257 void
9258 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9259                         struct lpfc_iocbq *rspiocb)
9260 {
9261         lpfc_sli_release_iocbq(phba, cmdiocb);
9262         return;
9263 }
9264
9265 /**
9266  * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
9267  * @vport: Pointer to virtual port.
9268  * @pring: Pointer to driver SLI ring object.
9269  * @tgt_id: SCSI ID of the target.
9270  * @lun_id: LUN ID of the scsi device.
9271  * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
9272  *
9273  * This function sends an abort command for every SCSI command
9274  * associated with the given virtual port pending on the ring
9275  * filtered by lpfc_sli_validate_fcp_iocb function.
9276  * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
9277  * FCP iocbs associated with the LUN specified by the tgt_id and lun_id
9278  * parameters.
9279  * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
9280  * FCP iocbs associated with SCSI target specified by tgt_id parameter.
9281  * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
9282  * FCP iocbs associated with the virtual port.
9283  * This function returns number of iocbs it failed to abort.
9284  * This function is called with no locks held.
9285  **/
9286 int
9287 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
9288                     uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
9289 {
9290         struct lpfc_hba *phba = vport->phba;
9291         struct lpfc_iocbq *iocbq;
9292         struct lpfc_iocbq *abtsiocb;
9293         IOCB_t *cmd = NULL;
9294         int errcnt = 0, ret_val = 0;
9295         int i;
9296
9297         for (i = 1; i <= phba->sli.last_iotag; i++) {
9298                 iocbq = phba->sli.iocbq_lookup[i];
9299
9300                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
9301                                                abort_cmd) != 0)
9302                         continue;
9303
9304                 /* issue ABTS for this IOCB based on iotag */
9305                 abtsiocb = lpfc_sli_get_iocbq(phba);
9306                 if (abtsiocb == NULL) {
9307                         errcnt++;
9308                         continue;
9309                 }
9310
9311                 cmd = &iocbq->iocb;
9312                 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
9313                 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
9314                 if (phba->sli_rev == LPFC_SLI_REV4)
9315                         abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
9316                 else
9317                         abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
9318                 abtsiocb->iocb.ulpLe = 1;
9319                 abtsiocb->iocb.ulpClass = cmd->ulpClass;
9320                 abtsiocb->vport = phba->pport;
9321
9322                 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
9323                 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
9324                 if (iocbq->iocb_flag & LPFC_IO_FCP)
9325                         abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
9326
9327                 if (lpfc_is_link_up(phba))
9328                         abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
9329                 else
9330                         abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
9331
9332                 /* Setup callback routine and issue the command. */
9333                 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
9334                 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
9335                                               abtsiocb, 0);
9336                 if (ret_val == IOCB_ERROR) {
9337                         lpfc_sli_release_iocbq(phba, abtsiocb);
9338                         errcnt++;
9339                         continue;
9340                 }
9341         }
9342
9343         return errcnt;
9344 }
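
/*
 * Illustrative sketch only (hypothetical caller): a SCSI target reset
 * handler would typically abort everything queued to the target on the
 * FCP ring, e.g.:
 *
 *	failed = lpfc_sli_abort_iocb(vport,
 *				     &phba->sli.ring[phba->sli.fcp_ring],
 *				     tgt_id, 0, LPFC_CTX_TGT);
 *
 * A non-zero return is the number of iocbs for which no abort could be
 * issued; the caller may retry or escalate the recovery.
 */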
9345
9346 /**
9347  * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
9348  * @phba: Pointer to HBA context object.
9349  * @cmdiocbq: Pointer to command iocb.
9350  * @rspiocbq: Pointer to response iocb.
9351  *
9352  * This function is the completion handler for iocbs issued using
9353  * lpfc_sli_issue_iocb_wait function. This function is called by the
9354  * ring event handler function without any lock held. This function
9355  * can be called from both worker thread context and interrupt
9356  * context. This function can also be called from any other thread that
9357  * cleans up the SLI layer objects.
9358  * This function copies the contents of the response iocb to the
9359  * response iocb memory object provided by the caller of
9360  * lpfc_sli_issue_iocb_wait and then wakes up the thread which
9361  * sleeps for the iocb completion.
9362  **/
9363 static void
9364 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
9365                         struct lpfc_iocbq *cmdiocbq,
9366                         struct lpfc_iocbq *rspiocbq)
9367 {
9368         wait_queue_head_t *pdone_q;
9369         unsigned long iflags;
9370         struct lpfc_scsi_buf *lpfc_cmd;
9371
9372         spin_lock_irqsave(&phba->hbalock, iflags);
9373         cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
9374         if (cmdiocbq->context2 && rspiocbq)
9375                 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
9376                        &rspiocbq->iocb, sizeof(IOCB_t));
9377
9378         /* Set the exchange busy flag for task management commands */
9379         if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
9380                 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
9381                 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
9382                         cur_iocbq);
9383                 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
9384         }
9385
9386         pdone_q = cmdiocbq->context_un.wait_queue;
9387         if (pdone_q)
9388                 wake_up(pdone_q);
9389         spin_unlock_irqrestore(&phba->hbalock, iflags);
9390         return;
9391 }
9392
9393 /**
9394  * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
9395  * @phba: Pointer to HBA context object.
9396  * @piocbq: Pointer to command iocb.
9397  * @flag: Flag to test.
9398  *
9399  * This routine grabs the hbalock and then tests the iocb_flag to
9400  * see if the passed in flag is set.
9401  * Returns:
9402  * 1 if flag is set.
9403  * 0 if flag is not set.
9404  **/
9405 static int
9406 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
9407                  struct lpfc_iocbq *piocbq, uint32_t flag)
9408 {
9409         unsigned long iflags;
9410         int ret;
9411
9412         spin_lock_irqsave(&phba->hbalock, iflags);
9413         ret = piocbq->iocb_flag & flag;
9414         spin_unlock_irqrestore(&phba->hbalock, iflags);
9415         return ret;
9416
9417 }
9418
9419 /**
9420  * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
9421  * @phba: Pointer to HBA context object.
9422  * @pring: Pointer to sli ring.
9423  * @piocb: Pointer to command iocb.
9424  * @prspiocbq: Pointer to response iocb.
9425  * @timeout: Timeout in number of seconds.
9426  *
9427  * This function issues the iocb to firmware and waits for the
9428  * iocb to complete. If the iocb command is not
9429  * completed within timeout seconds, it returns IOCB_TIMEDOUT.
9430  * Caller should not free the iocb resources if this function
9431  * returns IOCB_TIMEDOUT.
9432  * The function waits for the iocb completion using a
9433  * non-interruptible wait.
9434  * This function will sleep while waiting for iocb completion.
9435  * So, this function should not be called from any context which
9436  * does not allow sleeping. For the same reason, this function
9437  * cannot be called with interrupts disabled.
9438  * This function assumes that the iocb completions occur while
9439  * this function sleeps. So, this function cannot be called from
9440  * the thread which processes iocb completion for this ring.
9441  * This function clears the iocb_flag of the iocb object before
9442  * issuing the iocb; the iocb completion handler sets this
9443  * flag and wakes this thread when the iocb completes.
9444  * The contents of the response iocb will be copied to prspiocbq
9445  * by the completion handler when the command completes.
9446  * This function returns IOCB_SUCCESS when success.
9447  * This function is called with no lock held.
9448  **/
9449 int
9450 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
9451                          uint32_t ring_number,
9452                          struct lpfc_iocbq *piocb,
9453                          struct lpfc_iocbq *prspiocbq,
9454                          uint32_t timeout)
9455 {
9456         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
9457         long timeleft, timeout_req = 0;
9458         int retval = IOCB_SUCCESS;
9459         uint32_t creg_val;
9460         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
9461         /*
9462          * If the caller has provided a response iocbq buffer, then context2
9463          * must be NULL or it is an error.
9464          */
9465         if (prspiocbq) {
9466                 if (piocb->context2)
9467                         return IOCB_ERROR;
9468                 piocb->context2 = prspiocbq;
9469         }
9470
9471         piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
9472         piocb->context_un.wait_queue = &done_q;
9473         piocb->iocb_flag &= ~LPFC_IO_WAKE;
9474
9475         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9476                 if (lpfc_readl(phba->HCregaddr, &creg_val))
9477                         return IOCB_ERROR;
9478                 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
9479                 writel(creg_val, phba->HCregaddr);
9480                 readl(phba->HCregaddr); /* flush */
9481         }
9482
9483         retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
9484                                      SLI_IOCB_RET_IOCB);
9485         if (retval == IOCB_SUCCESS) {
9486                 timeout_req = timeout * HZ;
9487                 timeleft = wait_event_timeout(done_q,
9488                                 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
9489                                 timeout_req);
9490
9491                 if (piocb->iocb_flag & LPFC_IO_WAKE) {
9492                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9493                                         "0331 IOCB wake signaled\n");
9494                 } else if (timeleft == 0) {
9495                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9496                                         "0338 IOCB wait timeout error - no "
9497                                         "wake response Data x%x\n", timeout);
9498                         retval = IOCB_TIMEDOUT;
9499                 } else {
9500                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9501                                         "0330 IOCB wake NOT set, "
9502                                         "Data x%x x%lx\n",
9503                                         timeout, (timeleft / jiffies));
9504                         retval = IOCB_TIMEDOUT;
9505                 }
9506         } else if (retval == IOCB_BUSY) {
9507                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9508                         "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
9509                         phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
9510                 return retval;
9511         } else {
9512                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9513                                 "0332 IOCB wait issue failed, Data x%x\n",
9514                                 retval);
9515                 retval = IOCB_ERROR;
9516         }
9517
9518         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9519                 if (lpfc_readl(phba->HCregaddr, &creg_val))
9520                         return IOCB_ERROR;
9521                 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
9522                 writel(creg_val, phba->HCregaddr);
9523                 readl(phba->HCregaddr); /* flush */
9524         }
9525
9526         if (prspiocbq)
9527                 piocb->context2 = NULL;
9528
9529         piocb->context_un.wait_queue = NULL;
9530         piocb->iocb_cmpl = NULL;
9531         return retval;
9532 }
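
/*
 * Illustrative sketch only (assumed caller): a typical synchronous issue
 * path allocates a response iocb and sleeps here until completion:
 *
 *	rspiocbq = lpfc_sli_get_iocbq(phba);
 *	ret = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
 *				       rspiocbq, timeout);
 *	if (ret == IOCB_SUCCESS)
 *		(inspect rspiocbq->iocb.ulpStatus for the command result)
 *	else if (ret == IOCB_TIMEDOUT)
 *		(do not free cmdiocbq; the completion handler still owns it)
 */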
9533
9534 /**
9535  * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
9536  * @phba: Pointer to HBA context object.
9537  * @pmboxq: Pointer to driver mailbox object.
9538  * @timeout: Timeout in number of seconds.
9539  *
9540  * This function issues the mailbox to firmware and waits for the
9541  * mailbox command to complete. If the mailbox command is not
9542  * completed within timeout seconds, it returns MBX_TIMEOUT.
9543  * The function waits for the mailbox completion using an
9544  * interruptible wait. If the thread is woken up due to a
9545  * signal, MBX_TIMEOUT error is returned to the caller. Caller
9546  * should not free the mailbox resources, if this function returns
9547  * MBX_TIMEOUT.
9548  * This function will sleep while waiting for mailbox completion.
9549  * So, this function should not be called from any context which
9550  * does not allow sleeping. For the same reason, this function
9551  * cannot be called with interrupts disabled.
9552  * This function assumes that the mailbox completion occurs while
9553  * this function sleeps. So, this function cannot be called from
9554  * the worker thread which processes mailbox completion.
9555  * This function is called in the context of HBA management
9556  * applications.
9557  * This function returns MBX_SUCCESS when successful.
9558  * This function is called with no lock held.
9559  **/
9560 int
9561 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
9562                          uint32_t timeout)
9563 {
9564         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
9565         int retval;
9566         unsigned long flag;
9567
9568         /* The caller must leave context1 empty. */
9569         if (pmboxq->context1)
9570                 return MBX_NOT_FINISHED;
9571
9572         pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
9573         /* setup wake call as IOCB callback */
9574         pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
9575         /* setup context field to pass wait_queue pointer to wake function  */
9576         pmboxq->context1 = &done_q;
9577
9578         /* now issue the command */
9579         retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
9580         if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
9581                 wait_event_interruptible_timeout(done_q,
9582                                 pmboxq->mbox_flag & LPFC_MBX_WAKE,
9583                                 timeout * HZ);
9584
9585                 spin_lock_irqsave(&phba->hbalock, flag);
9586                 pmboxq->context1 = NULL;
9587                 /*
9588                  * if LPFC_MBX_WAKE flag is set the mailbox is completed
9589                  * else do not free the resources.
9590                  */
9591                 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
9592                         retval = MBX_SUCCESS;
9593                         lpfc_sli4_swap_str(phba, pmboxq);
9594                 } else {
9595                         retval = MBX_TIMEOUT;
9596                         pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9597                 }
9598                 spin_unlock_irqrestore(&phba->hbalock, flag);
9599         }
9600
9601         return retval;
9602 }
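
/*
 * Illustrative sketch only (assumed caller): management paths typically
 * allocate a mailbox, build the command, and block here, e.g.:
 *
 *	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	(build the mailbox command in pmboxq->u.mb)
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(pmboxq, phba->mbox_mem_pool);
 *
 * On MBX_TIMEOUT the mailbox must not be freed; the default completion
 * handler installed above will release it when it finally completes.
 */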
9603
9604 /**
9605  * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
9606  * @phba: Pointer to HBA context.
9607  *
9608  * This function is called to shutdown the driver's mailbox sub-system.
9609  * It first marks the mailbox sub-system as in a blocked state to prevent
9610  * the asynchronous mailbox command from being issued off the pending mailbox
9611  * command queue. If the mailbox command sub-system shutdown is due to
9612  * HBA error conditions such as EEH or ERATT, this routine shall invoke
9613  * the mailbox sub-system flush routine to forcefully bring down the
9614  * mailbox sub-system. Otherwise, if it is due to normal condition (such
9615  * as with offline or HBA function reset), this routine will wait for the
9616  * outstanding mailbox command to complete before invoking the mailbox
9617  * sub-system flush routine to gracefully bring down mailbox sub-system.
9618  **/
9619 void
9620 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
9621 {
9622         struct lpfc_sli *psli = &phba->sli;
9623         unsigned long timeout;
9624
9625         timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
9626         spin_lock_irq(&phba->hbalock);
9627         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9628         spin_unlock_irq(&phba->hbalock);
9629
9630         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9631                 spin_lock_irq(&phba->hbalock);
9632                 /* Determine how long we might wait for the active mailbox
9633                  * command to be gracefully completed by firmware.
9634                  */
9635                 if (phba->sli.mbox_active)
9636                         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
9637                                                 phba->sli.mbox_active) *
9638                                                 1000) + jiffies;
9639                 spin_unlock_irq(&phba->hbalock);
9640
9641                 while (phba->sli.mbox_active) {
9642                         /* Check active mailbox complete status every 2ms */
9643                         msleep(2);
9644                         if (time_after(jiffies, timeout))
9645                                 /* Timeout, let the mailbox flush routine
9646                                  * forcefully release the active mailbox command
9647                                  */
9648                                 break;
9649                 }
9650         }
9651         lpfc_sli_mbox_sys_flush(phba);
9652 }
9653
9654 /**
9655  * lpfc_sli_eratt_read - read sli-3 error attention events
9656  * @phba: Pointer to HBA context.
9657  *
9658  * This function is called to read the SLI3 device error attention registers
9659  * for possible error attention events. The caller must hold the hbalock
9660  * with spin_lock_irq().
9661  *
9662  * This function returns 1 when there is Error Attention in the Host Attention
9663  * Register and returns 0 otherwise.
9664  **/
9665 static int
9666 lpfc_sli_eratt_read(struct lpfc_hba *phba)
9667 {
9668         uint32_t ha_copy;
9669
9670         /* Read chip Host Attention (HA) register */
9671         if (lpfc_readl(phba->HAregaddr, &ha_copy))
9672                 goto unplug_err;
9673
9674         if (ha_copy & HA_ERATT) {
9675                 /* Read host status register to retrieve error event */
9676                 if (lpfc_sli_read_hs(phba))
9677                         goto unplug_err;
9678
9679                 /* Check if a deferred error condition is active */
9680                 if ((HS_FFER1 & phba->work_hs) &&
9681                     ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
9682                       HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
9683                         phba->hba_flag |= DEFER_ERATT;
9684                         /* Clear all interrupt enable conditions */
9685                         writel(0, phba->HCregaddr);
9686                         readl(phba->HCregaddr);
9687                 }
9688
9689                 /* Set the driver HA work bitmap */
9690                 phba->work_ha |= HA_ERATT;
9691                 /* Indicate polling handles this ERATT */
9692                 phba->hba_flag |= HBA_ERATT_HANDLED;
9693                 return 1;
9694         }
9695         return 0;
9696
9697 unplug_err:
9698         /* Set the driver HS work bitmap */
9699         phba->work_hs |= UNPLUG_ERR;
9700         /* Set the driver HA work bitmap */
9701         phba->work_ha |= HA_ERATT;
9702         /* Indicate polling handles this ERATT */
9703         phba->hba_flag |= HBA_ERATT_HANDLED;
9704         return 1;
9705 }
9706
9707 /**
9708  * lpfc_sli4_eratt_read - read sli-4 error attention events
9709  * @phba: Pointer to HBA context.
9710  *
9711  * This function is called to read the SLI4 device error attention registers
9712  * for possible error attention events. The caller must hold the hbalock
9713  * with spin_lock_irq().
9714  *
9715  * This function returns 1 when there is Error Attention in the Host Attention
9716  * Register and returns 0 otherwise.
9717  **/
9718 static int
9719 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
9720 {
9721         uint32_t uerr_sta_hi, uerr_sta_lo;
9722         uint32_t if_type, portsmphr;
9723         struct lpfc_register portstat_reg;
9724
9725         /*
9726          * For now, use the SLI4 device internal unrecoverable error
9727          * registers for error attention. This can be changed later.
9728          */
9729         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
9730         switch (if_type) {
9731         case LPFC_SLI_INTF_IF_TYPE_0:
9732                 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
9733                         &uerr_sta_lo) ||
9734                         lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
9735                         &uerr_sta_hi)) {
9736                         phba->work_hs |= UNPLUG_ERR;
9737                         phba->work_ha |= HA_ERATT;
9738                         phba->hba_flag |= HBA_ERATT_HANDLED;
9739                         return 1;
9740                 }
9741                 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
9742                     (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
9743                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9744                                         "1423 HBA Unrecoverable error: "
9745                                         "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
9746                                         "ue_mask_lo_reg=0x%x, "
9747                                         "ue_mask_hi_reg=0x%x\n",
9748                                         uerr_sta_lo, uerr_sta_hi,
9749                                         phba->sli4_hba.ue_mask_lo,
9750                                         phba->sli4_hba.ue_mask_hi);
9751                         phba->work_status[0] = uerr_sta_lo;
9752                         phba->work_status[1] = uerr_sta_hi;
9753                         phba->work_ha |= HA_ERATT;
9754                         phba->hba_flag |= HBA_ERATT_HANDLED;
9755                         return 1;
9756                 }
9757                 break;
9758         case LPFC_SLI_INTF_IF_TYPE_2:
9759                 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9760                         &portstat_reg.word0) ||
9761                         lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
9762                         &portsmphr)){
9763                         phba->work_hs |= UNPLUG_ERR;
9764                         phba->work_ha |= HA_ERATT;
9765                         phba->hba_flag |= HBA_ERATT_HANDLED;
9766                         return 1;
9767                 }
9768                 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
9769                         phba->work_status[0] =
9770                                 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
9771                         phba->work_status[1] =
9772                                 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
9773                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9774                                         "2885 Port Error Detected: "
9775                                         "port status reg 0x%x, "
9776                                         "port smphr reg 0x%x, "
9777                                         "error 1=0x%x, error 2=0x%x\n",
9778                                         portstat_reg.word0,
9779                                         portsmphr,
9780                                         phba->work_status[0],
9781                                         phba->work_status[1]);
9782                         phba->work_ha |= HA_ERATT;
9783                         phba->hba_flag |= HBA_ERATT_HANDLED;
9784                         return 1;
9785                 }
9786                 break;
9787         case LPFC_SLI_INTF_IF_TYPE_1:
9788         default:
9789                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9790                                 "2886 HBA Error Attention on unsupported "
9791                                 "if type %d.", if_type);
9792                 return 1;
9793         }
9794
9795         return 0;
9796 }
9797
9798 /**
9799  * lpfc_sli_check_eratt - check error attention events
9800  * @phba: Pointer to HBA context.
9801  *
9802  * This function is called from timer soft interrupt context to check HBA's
9803  * error attention register bit for error attention events.
9804  *
9805  * This function returns 1 when there is Error Attention in the Host Attention
9806  * Register and returns 0 otherwise.
9807  **/
9808 int
9809 lpfc_sli_check_eratt(struct lpfc_hba *phba)
9810 {
9811         uint32_t ha_copy;
9812
9813         /* If somebody is waiting to handle an eratt, don't process it
9814          * here. The brdkill function will do this.
9815          */
9816         if (phba->link_flag & LS_IGNORE_ERATT)
9817                 return 0;
9818
9819         /* Check if interrupt handler handles this ERATT */
9820         spin_lock_irq(&phba->hbalock);
9821         if (phba->hba_flag & HBA_ERATT_HANDLED) {
9822                 /* Interrupt handler has handled ERATT */
9823                 spin_unlock_irq(&phba->hbalock);
9824                 return 0;
9825         }
9826
9827         /*
9828          * If there is deferred error attention, do not check for error
9829          * attention
9830          */
9831         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
9832                 spin_unlock_irq(&phba->hbalock);
9833                 return 0;
9834         }
9835
9836         /* If PCI channel is offline, don't process it */
9837         if (unlikely(pci_channel_offline(phba->pcidev))) {
9838                 spin_unlock_irq(&phba->hbalock);
9839                 return 0;
9840         }
9841
9842         switch (phba->sli_rev) {
9843         case LPFC_SLI_REV2:
9844         case LPFC_SLI_REV3:
9845                 /* Read chip Host Attention (HA) register */
9846                 ha_copy = lpfc_sli_eratt_read(phba);
9847                 break;
9848         case LPFC_SLI_REV4:
9849                 /* Read device Unrecoverable Error (UERR) registers */
9850                 ha_copy = lpfc_sli4_eratt_read(phba);
9851                 break;
9852         default:
9853                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9854                                 "0299 Invalid SLI revision (%d)\n",
9855                                 phba->sli_rev);
9856                 ha_copy = 0;
9857                 break;
9858         }
9859         spin_unlock_irq(&phba->hbalock);
9860
9861         return ha_copy;
9862 }
9863
9864 /**
9865  * lpfc_intr_state_check - Check device state for interrupt handling
9866  * @phba: Pointer to HBA context.
9867  *
9868  * This inline routine checks whether the device or its PCI slot is in a
9869  * state in which the interrupt should be handled.
9870  *
9871  * This function returns 0 if the device or the PCI slot is in a state that
9872  * interrupt should be handled, otherwise -EIO.
9873  **/
9874 static inline int
9875 lpfc_intr_state_check(struct lpfc_hba *phba)
9876 {
9877         /* If the pci channel is offline, ignore all the interrupts */
9878         if (unlikely(pci_channel_offline(phba->pcidev)))
9879                 return -EIO;
9880
9881         /* Update device level interrupt statistics */
9882         phba->sli.slistat.sli_intr++;
9883
9884         /* Ignore all interrupts during initialization. */
9885         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
9886                 return -EIO;
9887
9888         return 0;
9889 }
9890
9891 /**
9892  * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
9893  * @irq: Interrupt number.
9894  * @dev_id: The device context pointer.
9895  *
9896  * This function is directly called from the PCI layer as an interrupt
9897  * service routine when device with SLI-3 interface spec is enabled with
9898  * MSI-X multi-message interrupt mode and there are slow-path events in
9899  * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
9900  * interrupt mode, this function is called as part of the device-level
9901  * interrupt handler. When the PCI slot is in error recovery or the HBA
9902  * is undergoing initialization, the interrupt handler will not process
9903  * the interrupt. The link attention and ELS ring attention events are
9904  * handled by the worker thread. The interrupt handler signals the worker
9905  * thread and returns for these events. This function is called without
9906  * any lock held. It gets the hbalock to access and update SLI data
9907  * structures.
9908  *
9909  * This function returns IRQ_HANDLED when interrupt is handled else it
9910  * returns IRQ_NONE.
9911  **/
9912 irqreturn_t
9913 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
9914 {
9915         struct lpfc_hba  *phba;
9916         uint32_t ha_copy, hc_copy;
9917         uint32_t work_ha_copy;
9918         unsigned long status;
9919         unsigned long iflag;
9920         uint32_t control;
9921
9922         MAILBOX_t *mbox, *pmbox;
9923         struct lpfc_vport *vport;
9924         struct lpfc_nodelist *ndlp;
9925         struct lpfc_dmabuf *mp;
9926         LPFC_MBOXQ_t *pmb;
9927         int rc;
9928
9929         /*
9930          * Get the driver's phba structure from the dev_id and
9931          * assume the HBA is not interrupting.
9932          */
9933         phba = (struct lpfc_hba *)dev_id;
9934
9935         if (unlikely(!phba))
9936                 return IRQ_NONE;
9937
9938         /*
9939          * Stuff needs to be attended to when this function is invoked as an
9940          * individual interrupt handler in MSI-X multi-message interrupt mode
9941          */
9942         if (phba->intr_type == MSIX) {
9943                 /* Check device state for handling interrupt */
9944                 if (lpfc_intr_state_check(phba))
9945                         return IRQ_NONE;
9946                 /* Need to read HA REG for slow-path events */
9947                 spin_lock_irqsave(&phba->hbalock, iflag);
9948                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
9949                         goto unplug_error;
9950                 /* If somebody is waiting to handle an eratt, don't process it
9951                  * here. The brdkill function will do this.
9952                  */
9953                 if (phba->link_flag & LS_IGNORE_ERATT)
9954                         ha_copy &= ~HA_ERATT;
9955                 /* Check the need for handling ERATT in interrupt handler */
9956                 if (ha_copy & HA_ERATT) {
9957                         if (phba->hba_flag & HBA_ERATT_HANDLED)
9958                                 /* ERATT polling has handled ERATT */
9959                                 ha_copy &= ~HA_ERATT;
9960                         else
9961                                 /* Indicate interrupt handler handles ERATT */
9962                                 phba->hba_flag |= HBA_ERATT_HANDLED;
9963                 }
9964
9965                 /*
9966                  * If there is deferred error attention, do not check for any
9967                  * interrupt.
9968                  */
9969                 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
9970                         spin_unlock_irqrestore(&phba->hbalock, iflag);
9971                         return IRQ_NONE;
9972                 }
9973
9974                 /* Clear up only attention source related to slow-path */
9975                 if (lpfc_readl(phba->HCregaddr, &hc_copy))
9976                         goto unplug_error;
9977
9978                 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
9979                         HC_LAINT_ENA | HC_ERINT_ENA),
9980                         phba->HCregaddr);
9981                 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
9982                         phba->HAregaddr);
9983                 writel(hc_copy, phba->HCregaddr);
9984                 readl(phba->HAregaddr); /* flush */
9985                 spin_unlock_irqrestore(&phba->hbalock, iflag);
9986         } else
9987                 ha_copy = phba->ha_copy;
9988
9989         work_ha_copy = ha_copy & phba->work_ha_mask;
9990
9991         if (work_ha_copy) {
9992                 if (work_ha_copy & HA_LATT) {
9993                         if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
9994                                 /*
9995                                  * Turn off Link Attention interrupts
9996                                  * until CLEAR_LA done
9997                                  */
9998                                 spin_lock_irqsave(&phba->hbalock, iflag);
9999                                 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
10000                                 if (lpfc_readl(phba->HCregaddr, &control))
10001                                         goto unplug_error;
10002                                 control &= ~HC_LAINT_ENA;
10003                                 writel(control, phba->HCregaddr);
10004                                 readl(phba->HCregaddr); /* flush */
10005                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
10006                         }
10007                         else
10008                                 work_ha_copy &= ~HA_LATT;
10009                 }
10010
10011                 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
10012                         /*
10013                          * Turn off Slow Rings interrupts, LPFC_ELS_RING is
10014                          * the only slow ring.
10015                          */
10016                         status = (work_ha_copy &
10017                                 (HA_RXMASK  << (4*LPFC_ELS_RING)));
10018                         status >>= (4*LPFC_ELS_RING);
10019                         if (status & HA_RXMASK) {
10020                                 spin_lock_irqsave(&phba->hbalock, iflag);
10021                                 if (lpfc_readl(phba->HCregaddr, &control))
10022                                         goto unplug_error;
10023
10024                                 lpfc_debugfs_slow_ring_trc(phba,
10025                                 "ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
10026                                 control, status,
10027                                 (uint32_t)phba->sli.slistat.sli_intr);
10028
10029                                 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
10030                                         lpfc_debugfs_slow_ring_trc(phba,
10031                                                 "ISR Disable ring:"
10032                                                 "pwork:x%x hawork:x%x wait:x%x",
10033                                                 phba->work_ha, work_ha_copy,
10034                                                 (uint32_t)((unsigned long)
10035                                                 &phba->work_waitq));
10036
10037                                         control &=
10038                                             ~(HC_R0INT_ENA << LPFC_ELS_RING);
10039                                         writel(control, phba->HCregaddr);
10040                                         readl(phba->HCregaddr); /* flush */
10041                                 } else {
10043                                         lpfc_debugfs_slow_ring_trc(phba,
10044                                                 "ISR slow ring:   pwork:"
10045                                                 "x%x hawork:x%x wait:x%x",
10046                                                 phba->work_ha, work_ha_copy,
10047                                                 (uint32_t)((unsigned long)
10048                                                 &phba->work_waitq));
10049                                 }
10050                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
10051                         }
10052                 }
10053                 spin_lock_irqsave(&phba->hbalock, iflag);
10054                 if (work_ha_copy & HA_ERATT) {
10055                         if (lpfc_sli_read_hs(phba))
10056                                 goto unplug_error;
10057                         /*
10058                          * Check if a deferred error condition
10059                          * is active
10060                          */
10061                         if ((HS_FFER1 & phba->work_hs) &&
10062                                 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
10063                                   HS_FFER6 | HS_FFER7 | HS_FFER8) &
10064                                   phba->work_hs)) {
10065                                 phba->hba_flag |= DEFER_ERATT;
10066                                 /* Clear all interrupt enable conditions */
10067                                 writel(0, phba->HCregaddr);
10068                                 readl(phba->HCregaddr);
10069                         }
10070                 }
10071
10072                 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
10073                         pmb = phba->sli.mbox_active;
10074                         pmbox = &pmb->u.mb;
10075                         mbox = phba->mbox;
10076                         vport = pmb->vport;
10077
10078                         /* First check out the status word */
10079                         lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
10080                         if (pmbox->mbxOwner != OWN_HOST) {
10081                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
10082                                 /*
10083                                  * Stray Mailbox Interrupt, mbxCommand <cmd>
10084                                  * mbxStatus <status>
10085                                  */
10086                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
10087                                                 LOG_SLI,
10088                                                 "(%d):0304 Stray Mailbox "
10089                                                 "Interrupt mbxCommand x%x "
10090                                                 "mbxStatus x%x\n",
10091                                                 (vport ? vport->vpi : 0),
10092                                                 pmbox->mbxCommand,
10093                                                 pmbox->mbxStatus);
10094                                 /* clear mailbox attention bit */
10095                                 work_ha_copy &= ~HA_MBATT;
10096                         } else {
10097                                 phba->sli.mbox_active = NULL;
10098                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
10099                                 phba->last_completion_time = jiffies;
10100                                 del_timer(&phba->sli.mbox_tmo);
10101                                 if (pmb->mbox_cmpl) {
10102                                         lpfc_sli_pcimem_bcopy(mbox, pmbox,
10103                                                         MAILBOX_CMD_SIZE);
10104                                         if (pmb->out_ext_byte_len &&
10105                                                 pmb->context2)
10106                                                 lpfc_sli_pcimem_bcopy(
10107                                                 phba->mbox_ext,
10108                                                 pmb->context2,
10109                                                 pmb->out_ext_byte_len);
10110                                 }
10111                                 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
10112                                         pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
10113
10114                                         lpfc_debugfs_disc_trc(vport,
10115                                                 LPFC_DISC_TRC_MBOX_VPORT,
10116                                                 "MBOX dflt rpi: "
10117                                                 "status:x%x rpi:x%x",
10118                                                 (uint32_t)pmbox->mbxStatus,
10119                                                 pmbox->un.varWords[0], 0);
10120
10121                                         if (!pmbox->mbxStatus) {
10122                                                 mp = (struct lpfc_dmabuf *)
10123                                                         (pmb->context1);
10124                                                 ndlp = (struct lpfc_nodelist *)
10125                                                         pmb->context2;
10126
10127                                                 /* Reg_LOGIN of dflt RPI was
10128                                                  * successful. Now let's get
10129                                                  * rid of the RPI using the
10130                                                  * same mbox buffer.
10131                                                  */
10132                                                 lpfc_unreg_login(phba,
10133                                                         vport->vpi,
10134                                                         pmbox->un.varWords[0],
10135                                                         pmb);
10136                                                 pmb->mbox_cmpl =
10137                                                         lpfc_mbx_cmpl_dflt_rpi;
10138                                                 pmb->context1 = mp;
10139                                                 pmb->context2 = ndlp;
10140                                                 pmb->vport = vport;
10141                                                 rc = lpfc_sli_issue_mbox(phba,
10142                                                                 pmb,
10143                                                                 MBX_NOWAIT);
10144                                                 if (rc != MBX_BUSY)
10145                                                         lpfc_printf_log(phba,
10146                                                         KERN_ERR,
10147                                                         LOG_MBOX | LOG_SLI,
10148                                                         "0350 rc should have "
10149                                                         "been MBX_BUSY\n");
10150                                                 if (rc != MBX_NOT_FINISHED)
10151                                                         goto send_current_mbox;
10152                                         }
10153                                 }
10154                                 spin_lock_irqsave(
10155                                                 &phba->pport->work_port_lock,
10156                                                 iflag);
10157                                 phba->pport->work_port_events &=
10158                                         ~WORKER_MBOX_TMO;
10159                                 spin_unlock_irqrestore(
10160                                                 &phba->pport->work_port_lock,
10161                                                 iflag);
10162                                 lpfc_mbox_cmpl_put(phba, pmb);
10163                         }
10164                 } else
10165                         spin_unlock_irqrestore(&phba->hbalock, iflag);
10166
10167                 if ((work_ha_copy & HA_MBATT) &&
10168                     (phba->sli.mbox_active == NULL)) {
10169 send_current_mbox:
10170                         /* Process next mailbox command if there is one */
10171                         do {
10172                                 rc = lpfc_sli_issue_mbox(phba, NULL,
10173                                                          MBX_NOWAIT);
10174                         } while (rc == MBX_NOT_FINISHED);
10175                         if (rc != MBX_SUCCESS)
10176                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
10177                                                 LOG_SLI, "0349 rc should be "
10178                                                 "MBX_SUCCESS\n");
10179                 }
10180
10181                 spin_lock_irqsave(&phba->hbalock, iflag);
10182                 phba->work_ha |= work_ha_copy;
10183                 spin_unlock_irqrestore(&phba->hbalock, iflag);
10184                 lpfc_worker_wake_up(phba);
10185         }
10186         return IRQ_HANDLED;
10187 unplug_error:
10188         spin_unlock_irqrestore(&phba->hbalock, iflag);
10189         return IRQ_HANDLED;
10190
10191 } /* lpfc_sli_sp_intr_handler */
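
/*
 * Illustrative sketch, not part of the driver: the Host Attention (HA)
 * register consumed above packs four attention bits per ring, so ring N's
 * receive/response status lives in bits [4N+3:4N].  Extracting it is the
 * same mask-and-shift the handlers apply to LPFC_ELS_RING here and to
 * LPFC_FCP_RING and LPFC_EXTRA_RING below (helper name hypothetical):
 *
 *	static inline uint32_t lpfc_ha_ring_status(uint32_t ha, int ring)
 *	{
 *		return (ha >> (4 * ring)) & HA_RXMASK;
 *	}
 */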
10192
10193 /**
10194  * lpfc_sli_fp_intr_handler - Fast-path interrupt handler for an SLI-3 device.
10195  * @irq: Interrupt number.
10196  * @dev_id: The device context pointer.
10197  *
10198  * This function is directly called from the PCI layer as an interrupt
10199  * service routine when a device with the SLI-3 interface spec is enabled with
10200  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
10201  * ring event in the HBA. However, when the device is enabled with either
10202  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
10203  * device-level interrupt handler. When the PCI slot is in error recovery
10204  * or the HBA is undergoing initialization, the interrupt handler will not
10205  * process the interrupt. SCSI FCP fast-path ring events are handled in
10206  * the interrupt context. This function is called without any lock held.
10207  * It gets the hbalock to access and update SLI data structures.
10208  *
10209  * This function returns IRQ_HANDLED when interrupt is handled else it
10210  * returns IRQ_NONE.
10211  **/
10212 irqreturn_t
10213 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
10214 {
10215         struct lpfc_hba  *phba;
10216         uint32_t ha_copy;
10217         unsigned long status;
10218         unsigned long iflag;
10219
10220         /* Get the driver's phba structure from the dev_id and
10221          * assume the HBA is not interrupting.
10222          */
10223         phba = (struct lpfc_hba *) dev_id;
10224
10225         if (unlikely(!phba))
10226                 return IRQ_NONE;
10227
10228         /*
10229          * Stuff needs to be attended to when this function is invoked as an
10230          * individual interrupt handler in MSI-X multi-message interrupt mode
10231          */
10232         if (phba->intr_type == MSIX) {
10233                 /* Check device state for handling interrupt */
10234                 if (lpfc_intr_state_check(phba))
10235                         return IRQ_NONE;
10236                 /* Need to read HA REG for FCP ring and other ring events */
10237                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
10238                         return IRQ_HANDLED;
10239                 /* Clear up only attention source related to fast-path */
10240                 spin_lock_irqsave(&phba->hbalock, iflag);
10241                 /*
10242                  * If there is deferred error attention, do not check for
10243                  * any interrupt.
10244                  */
10245                 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
10246                         spin_unlock_irqrestore(&phba->hbalock, iflag);
10247                         return IRQ_NONE;
10248                 }
10249                 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
10250                         phba->HAregaddr);
10251                 readl(phba->HAregaddr); /* flush */
10252                 spin_unlock_irqrestore(&phba->hbalock, iflag);
10253         } else
10254                 ha_copy = phba->ha_copy;
10255
10256         /*
10257          * Process all events on FCP ring. Take the optimized path for FCP IO.
10258          */
10259         ha_copy &= ~(phba->work_ha_mask);
10260
10261         status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
10262         status >>= (4*LPFC_FCP_RING);
10263         if (status & HA_RXMASK)
10264                 lpfc_sli_handle_fast_ring_event(phba,
10265                                                 &phba->sli.ring[LPFC_FCP_RING],
10266                                                 status);
10267
10268         if (phba->cfg_multi_ring_support == 2) {
10269                 /*
10270                  * Process all events on extra ring. Take the optimized path
10271                  * for extra ring IO.
10272                  */
10273                 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
10274                 status >>= (4*LPFC_EXTRA_RING);
10275                 if (status & HA_RXMASK) {
10276                         lpfc_sli_handle_fast_ring_event(phba,
10277                                         &phba->sli.ring[LPFC_EXTRA_RING],
10278                                         status);
10279                 }
10280         }
10281         return IRQ_HANDLED;
10282 }  /* lpfc_sli_fp_intr_handler */
10283
10284 /**
10285  * lpfc_sli_intr_handler - Device-level interrupt handler for an SLI-3 device
10286  * @irq: Interrupt number.
10287  * @dev_id: The device context pointer.
10288  *
10289  * This function is the HBA device-level interrupt handler for a device with
10290  * SLI-3 interface spec, called from the PCI layer when either MSI or
10291  * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
10292  * requires driver attention. This function invokes the slow-path interrupt
10293  * attention handling function and fast-path interrupt attention handling
10294  * function in turn to process the relevant HBA attention events. This
10295  * function is called without any lock held. It gets the hbalock to access
10296  * and update SLI data structures.
10297  *
10298  * This function returns IRQ_HANDLED when interrupt is handled, else it
10299  * returns IRQ_NONE.
10300  **/
10301 irqreturn_t
10302 lpfc_sli_intr_handler(int irq, void *dev_id)
10303 {
10304         struct lpfc_hba  *phba;
10305         irqreturn_t sp_irq_rc, fp_irq_rc;
10306         unsigned long status1, status2;
10307         uint32_t hc_copy;
10308
10309         /*
10310          * Get the driver's phba structure from the dev_id and
10311          * assume the HBA is not interrupting.
10312          */
10313         phba = (struct lpfc_hba *) dev_id;
10314
10315         if (unlikely(!phba))
10316                 return IRQ_NONE;
10317
10318         /* Check device state for handling interrupt */
10319         if (lpfc_intr_state_check(phba))
10320                 return IRQ_NONE;
10321
10322         spin_lock(&phba->hbalock);
10323         if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
10324                 spin_unlock(&phba->hbalock);
10325                 return IRQ_HANDLED;
10326         }
10327
10328         if (unlikely(!phba->ha_copy)) {
10329                 spin_unlock(&phba->hbalock);
10330                 return IRQ_NONE;
10331         } else if (phba->ha_copy & HA_ERATT) {
10332                 if (phba->hba_flag & HBA_ERATT_HANDLED)
10333                         /* ERATT polling has handled ERATT */
10334                         phba->ha_copy &= ~HA_ERATT;
10335                 else
10336                         /* Indicate interrupt handler handles ERATT */
10337                         phba->hba_flag |= HBA_ERATT_HANDLED;
10338         }
10339
10340         /*
10341          * If there is deferred error attention, do not check for any interrupt.
10342          */
10343         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
10344                 spin_unlock(&phba->hbalock);
10345                 return IRQ_NONE;
10346         }
10347
10348         /* Clear attention sources except link and error attentions */
10349         if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
10350                 spin_unlock(&phba->hbalock);
10351                 return IRQ_HANDLED;
10352         }
10353         writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
10354                 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
10355                 phba->HCregaddr);
10356         writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
10357         writel(hc_copy, phba->HCregaddr);
10358         readl(phba->HAregaddr); /* flush */
10359         spin_unlock(&phba->hbalock);
10360
10361         /*
10362          * Invokes slow-path host attention interrupt handling as appropriate.
10363          */
10364
10365         /* status of events with mailbox and link attention */
10366         status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
10367
10368         /* status of events with ELS ring */
10369         status2 = (phba->ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
10370         status2 >>= (4*LPFC_ELS_RING);
10371
10372         if (status1 || (status2 & HA_RXMASK))
10373                 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
10374         else
10375                 sp_irq_rc = IRQ_NONE;
10376
10377         /*
10378          * Invoke fast-path host attention interrupt handling as appropriate.
10379          */
10380
10381         /* status of events with FCP ring */
10382         status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
10383         status1 >>= (4*LPFC_FCP_RING);
10384
10385         /* status of events with extra ring */
10386         if (phba->cfg_multi_ring_support == 2) {
10387                 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
10388                 status2 >>= (4*LPFC_EXTRA_RING);
10389         } else
10390                 status2 = 0;
10391
10392         if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
10393                 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
10394         else
10395                 fp_irq_rc = IRQ_NONE;
10396
10397         /* Return device-level interrupt handling status */
10398         return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
10399 }  /* lpfc_sli_intr_handler */
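
/*
 * Illustrative sketch, not part of the driver: with a single INTx/MSI
 * vector the device-level ISR above runs both sub-handlers and claims the
 * interrupt if either one did.  The ternary used above reduces to
 *
 *	return (sp_irq_rc == IRQ_HANDLED || fp_irq_rc == IRQ_HANDLED) ?
 *		IRQ_HANDLED : IRQ_NONE;
 *
 * because each sub-handler can only return IRQ_HANDLED or IRQ_NONE here.
 */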
10400
10401 /**
10402  * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
10403  * @phba: pointer to lpfc hba data structure.
10404  *
10405  * This routine is invoked by the worker thread to process all the pending
10406  * SLI4 FCP abort XRI events.
10407  **/
10408 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
10409 {
10410         struct lpfc_cq_event *cq_event;
10411
10412         /* First, declare the fcp xri abort event has been handled */
10413         spin_lock_irq(&phba->hbalock);
10414         phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
10415         spin_unlock_irq(&phba->hbalock);
10416         /* Now, handle all the fcp xri abort events */
10417         while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
10418                 /* Get the first event from the head of the event queue */
10419                 spin_lock_irq(&phba->hbalock);
10420                 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
10421                                  cq_event, struct lpfc_cq_event, list);
10422                 spin_unlock_irq(&phba->hbalock);
10423                 /* Notify aborted XRI for FCP work queue */
10424                 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
10425                 /* Free the event processed back to the free pool */
10426                 lpfc_sli4_cq_event_release(phba, cq_event);
10427         }
10428 }
10429
10430 /**
10431  * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
10432  * @phba: pointer to lpfc hba data structure.
10433  *
10434  * This routine is invoked by the worker thread to process all the pending
10435  * SLI4 els abort xri events.
10436  **/
10437 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
10438 {
10439         struct lpfc_cq_event *cq_event;
10440
10441         /* First, declare the els xri abort event has been handled */
10442         spin_lock_irq(&phba->hbalock);
10443         phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
10444         spin_unlock_irq(&phba->hbalock);
10445         /* Now, handle all the els xri abort events */
10446         while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
10447                 /* Get the first event from the head of the event queue */
10448                 spin_lock_irq(&phba->hbalock);
10449                 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
10450                                  cq_event, struct lpfc_cq_event, list);
10451                 spin_unlock_irq(&phba->hbalock);
10452                 /* Notify aborted XRI for ELS work queue */
10453                 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
10454                 /* Free the event processed back to the free pool */
10455                 lpfc_sli4_cq_event_release(phba, cq_event);
10456         }
10457 }
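
/*
 * Illustrative sketch, not part of the driver: both event-proc routines
 * above use the same drain idiom.  The hba_flag bit is cleared *before*
 * draining, so an event queued while the loop runs either gets picked up
 * by this pass or re-raises the flag for a later one; hbalock is held
 * only around each list removal so the notify call runs unlocked:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	phba->hba_flag &= ~<per-type event flag>;
 *	spin_unlock_irq(&phba->hbalock);
 *	while (!list_empty(<per-type work queue>)) {
 *		... lock, list_remove_head(), unlock ...
 *		<notify aborted XRI>(phba, &cq_event->cqe.wcqe_axri);
 *		lpfc_sli4_cq_event_release(phba, cq_event);
 *	}
 */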
10458
10459 /**
10460  * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
10461  * @phba: pointer to lpfc hba data structure
10462  * @pIocbIn: pointer to the rspiocbq
10463  * @pIocbOut: pointer to the cmdiocbq
10464  * @wcqe: pointer to the complete wcqe
10465  *
10466  * This routine transfers the fields of a command iocbq to a response iocbq
10467  * by copying all the IOCB fields from command iocbq and transferring the
10468  * completion status information from the complete wcqe.
10469  **/
10470 static void
10471 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
10472                               struct lpfc_iocbq *pIocbIn,
10473                               struct lpfc_iocbq *pIocbOut,
10474                               struct lpfc_wcqe_complete *wcqe)
10475 {
10476         unsigned long iflags;
10477         size_t offset = offsetof(struct lpfc_iocbq, iocb);
10478
10479         memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
10480                sizeof(struct lpfc_iocbq) - offset);
10481         /* Map WCQE parameters into irspiocb parameters */
10482         pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
10483         if (pIocbOut->iocb_flag & LPFC_IO_FCP)
10484                 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
10485                         pIocbIn->iocb.un.fcpi.fcpi_parm =
10486                                         pIocbOut->iocb.un.fcpi.fcpi_parm -
10487                                         wcqe->total_data_placed;
10488                 else
10489                         pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
10490         else {
10491                 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
10492                 pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
10493         }
10494
10495         /* Pick up HBA exchange busy condition */
10496         if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
10497                 spin_lock_irqsave(&phba->hbalock, iflags);
10498                 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
10499                 spin_unlock_irqrestore(&phba->hbalock, iflags);
10500         }
10501 }
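
/*
 * Illustrative sketch, not part of the driver: the offsetof() arithmetic
 * above leaves every field that precedes the embedded IOCB (list linkage,
 * iotag, etc.) untouched in the response iocbq, while the IOCB payload
 * and all trailing fields are inherited from the command side:
 *
 *	size_t off = offsetof(struct lpfc_iocbq, iocb);
 *
 *	memcpy((char *)pIocbIn + off, (char *)pIocbOut + off,
 *	       sizeof(struct lpfc_iocbq) - off);
 *
 * so the response iocbq keeps its own identity but carries the command
 * context that the completion handlers expect to find.
 */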
10502
10503 /**
10504  * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
10505  * @phba: Pointer to HBA context object.
10506  * @irspiocbq: Pointer to the response IOCBQ carrying the ELS WCQE.
10507  *
10508  * This routine handles an ELS work-queue completion event and constructs
10509  * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
10510  * discovery engine to handle.
10511  *
10512  * Return: Pointer to the response IOCBQ, NULL otherwise.
10513  **/
10514 static struct lpfc_iocbq *
10515 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
10516                                struct lpfc_iocbq *irspiocbq)
10517 {
10518         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
10519         struct lpfc_iocbq *cmdiocbq;
10520         struct lpfc_wcqe_complete *wcqe;
10521         unsigned long iflags;
10522
10523         wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
10524         spin_lock_irqsave(&phba->hbalock, iflags);
10525         pring->stats.iocb_event++;
10526         /* Look up the ELS command IOCB and create pseudo response IOCB */
10527         cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
10528                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
10529         spin_unlock_irqrestore(&phba->hbalock, iflags);
10530
10531         if (unlikely(!cmdiocbq)) {
10532                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10533                                 "0386 ELS complete with no corresponding "
10534                                 "cmdiocb: iotag (%d)\n",
10535                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
10536                 lpfc_sli_release_iocbq(phba, irspiocbq);
10537                 return NULL;
10538         }
10539
10540         /* Fake the irspiocbq and copy necessary response information */
10541         lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
10542
10543         return irspiocbq;
10544 }
10545
10546 /**
10547  * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
10548  * @phba: Pointer to HBA context object.
10549  * @mcqe: Pointer to mailbox completion queue entry.
10550  *
10551  * This routine processes a mailbox completion queue entry carrying an
10552  * asynchronous event.
10553  *
10554  * Return: true if work posted to worker thread, otherwise false.
10555  **/
10556 static bool
10557 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
10558 {
10559         struct lpfc_cq_event *cq_event;
10560         unsigned long iflags;
10561
10562         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10563                         "0392 Async Event: word0:x%x, word1:x%x, "
10564                         "word2:x%x, word3:x%x\n", mcqe->word0,
10565                         mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
10566
10567         /* Allocate a new internal CQ_EVENT entry */
10568         cq_event = lpfc_sli4_cq_event_alloc(phba);
10569         if (!cq_event) {
10570                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10571                                 "0394 Failed to allocate CQ_EVENT entry\n");
10572                 return false;
10573         }
10574
10575         /* Move the CQE into an asynchronous event entry */
10576         memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
10577         spin_lock_irqsave(&phba->hbalock, iflags);
10578         list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
10579         /* Set the async event flag */
10580         phba->hba_flag |= ASYNC_EVENT;
10581         spin_unlock_irqrestore(&phba->hbalock, iflags);
10582
10583         return true;
10584 }
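
/*
 * Illustrative sketch, not part of the driver: the queue-and-flag handoff
 * above must happen under a single hbalock hold so the worker thread can
 * never observe ASYNC_EVENT set while sp_asynce_work_queue is empty (or
 * the reverse).  The other slow-path handlers repeat the same pattern:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	list_add_tail(&event->list, <per-type work queue>);
 *	phba->hba_flag |= <per-type event flag>;
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 */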
10585
10586 /**
10587  * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
10588  * @phba: Pointer to HBA context object.
10589  * @mcqe: Pointer to mailbox completion queue entry.
10590  *
10591  * This routine processes a mailbox completion queue entry carrying a
10592  * mailbox completion event.
10593  *
10594  * Return: true if work posted to worker thread, otherwise false.
10595  **/
10596 static bool
10597 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
10598 {
10599         uint32_t mcqe_status;
10600         MAILBOX_t *mbox, *pmbox;
10601         struct lpfc_mqe *mqe;
10602         struct lpfc_vport *vport;
10603         struct lpfc_nodelist *ndlp;
10604         struct lpfc_dmabuf *mp;
10605         unsigned long iflags;
10606         LPFC_MBOXQ_t *pmb;
10607         bool workposted = false;
10608         int rc;
10609
10610         /* If not a mailbox-complete MCQE, bail out after checking mailbox consume */
10611         if (!bf_get(lpfc_trailer_completed, mcqe))
10612                 goto out_no_mqe_complete;
10613
10614         /* Get the reference to the active mbox command */
10615         spin_lock_irqsave(&phba->hbalock, iflags);
10616         pmb = phba->sli.mbox_active;
10617         if (unlikely(!pmb)) {
10618                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
10619                                 "1832 No pending MBOX command to handle\n");
10620                 spin_unlock_irqrestore(&phba->hbalock, iflags);
10621                 goto out_no_mqe_complete;
10622         }
10623         spin_unlock_irqrestore(&phba->hbalock, iflags);
10624         mqe = &pmb->u.mqe;
10625         pmbox = (MAILBOX_t *)&pmb->u.mqe;
10626         mbox = phba->mbox;
10627         vport = pmb->vport;
10628
10629         /* Reset heartbeat timer */
10630         phba->last_completion_time = jiffies;
10631         del_timer(&phba->sli.mbox_tmo);
10632
10633         /* Move mbox data to caller's mailbox region, do endian swapping */
10634         if (pmb->mbox_cmpl && mbox)
10635                 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
10636
10637         /*
10638          * For mcqe errors, conditionally move a modified error code to
10639          * the mbox so that the error will not be missed.
10640          */
10641         mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
10642         if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
10643                 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
10644                         bf_set(lpfc_mqe_status, mqe,
10645                                (LPFC_MBX_ERROR_RANGE | mcqe_status));
10646         }
10647         if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
10648                 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
10649                 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
10650                                       "MBOX dflt rpi: status:x%x rpi:x%x",
10651                                       mcqe_status,
10652                                       pmbox->un.varWords[0], 0);
10653                 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
10654                         mp = (struct lpfc_dmabuf *)(pmb->context1);
10655                         ndlp = (struct lpfc_nodelist *)pmb->context2;
10656                         /* Reg_LOGIN of dflt RPI was successful. Now let's get
10657                          * rid of the RPI using the same mbox buffer.
10658                          */
10659                         lpfc_unreg_login(phba, vport->vpi,
10660                                          pmbox->un.varWords[0], pmb);
10661                         pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
10662                         pmb->context1 = mp;
10663                         pmb->context2 = ndlp;
10664                         pmb->vport = vport;
10665                         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
10666                         if (rc != MBX_BUSY)
10667                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
10668                                                 LOG_SLI, "0385 rc should "
10669                                                 "have been MBX_BUSY\n");
10670                         if (rc != MBX_NOT_FINISHED)
10671                                 goto send_current_mbox;
10672                 }
10673         }
10674         spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
10675         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
10676         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
10677
10678         /* There is mailbox completion work to do */
10679         spin_lock_irqsave(&phba->hbalock, iflags);
10680         __lpfc_mbox_cmpl_put(phba, pmb);
10681         phba->work_ha |= HA_MBATT;
10682         spin_unlock_irqrestore(&phba->hbalock, iflags);
10683         workposted = true;
10684
10685 send_current_mbox:
10686         spin_lock_irqsave(&phba->hbalock, iflags);
10687         /* Release the mailbox command posting token */
10688         phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10689         /* Setting the active mailbox pointer must be in sync with the flag clear */
10690         phba->sli.mbox_active = NULL;
10691         spin_unlock_irqrestore(&phba->hbalock, iflags);
10692         /* Wake up worker thread to post the next pending mailbox command */
10693         lpfc_worker_wake_up(phba);
10694 out_no_mqe_complete:
10695         if (bf_get(lpfc_trailer_consumed, mcqe))
10696                 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
10697         return workposted;
10698 }
10699
10700 /**
10701  * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
10702  * @phba: Pointer to HBA context object.
10703  * @cqe: Pointer to mailbox completion queue entry.
10704  *
10705  * This routine processes a mailbox completion queue entry; it invokes the
10706  * proper mailbox-complete handling or asynchronous event handling routine
10707  * according to the MCQE's async bit.
10708  *
10709  * Return: true if work posted to worker thread, otherwise false.
10710  **/
10711 static bool
10712 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
10713 {
10714         struct lpfc_mcqe mcqe;
10715         bool workposted;
10716
10717         /* Copy the mailbox MCQE and convert endian order as needed */
10718         lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
10719
10720         /* Invoke the proper event handling routine */
10721         if (!bf_get(lpfc_trailer_async, &mcqe))
10722                 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
10723         else
10724                 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
10725         return workposted;
10726 }
10727
10728 /**
10729  * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
10730  * @phba: Pointer to HBA context object.
10731  * @wcqe: Pointer to work-queue completion queue entry.
10732  *
10733  * This routine handles an ELS work-queue completion event.
10734  *
10735  * Return: true if work posted to worker thread, otherwise false.
10736  **/
10737 static bool
10738 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
10739                              struct lpfc_wcqe_complete *wcqe)
10740 {
10741         struct lpfc_iocbq *irspiocbq;
10742         unsigned long iflags;
10743         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
10744
10745         /* Get an irspiocbq for later ELS response processing use */
10746         irspiocbq = lpfc_sli_get_iocbq(phba);
10747         if (!irspiocbq) {
10748                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10749                         "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
10750                         "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
10751                         pring->txq_cnt, phba->iocb_cnt,
10752                         phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
10753                         phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
10754                 return false;
10755         }
10756
10757         /* Save off the slow-path queue event for the worker thread to process */
10758         memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
10759         spin_lock_irqsave(&phba->hbalock, iflags);
10760         list_add_tail(&irspiocbq->cq_event.list,
10761                       &phba->sli4_hba.sp_queue_event);
10762         phba->hba_flag |= HBA_SP_QUEUE_EVT;
10763         spin_unlock_irqrestore(&phba->hbalock, iflags);
10764
10765         return true;
10766 }
10767
10768 /**
10769  * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
10770  * @phba: Pointer to HBA context object.
10771  * @wcqe: Pointer to work-queue completion queue entry.
10772  *
10773  * This routine handles a slow-path WQ entry consumed event by invoking the
10774  * proper WQ release routine on the slow-path WQ.
10775  **/
10776 static void
10777 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
10778                              struct lpfc_wcqe_release *wcqe)
10779 {
10780         /* Check for the slow-path ELS work queue */
10781         if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
10782                 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
10783                                      bf_get(lpfc_wcqe_r_wqe_index, wcqe));
10784         else
10785                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10786                                 "2579 Slow-path wqe consume event carries "
10787                                 "mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
10788                                 bf_get(lpfc_wcqe_r_wq_id, wcqe),
10789                                 phba->sli4_hba.els_wq->queue_id);
10790 }
10791
10792 /**
10793  * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an XRI abort event
10794  * @phba: Pointer to HBA context object.
10795  * @cq: Pointer to a WQ completion queue.
10796  * @wcqe: Pointer to work-queue completion queue entry.
10797  *
10798  * This routine handles an XRI abort event.
10799  *
10800  * Return: true if work posted to worker thread, otherwise false.
10801  **/
10802 static bool
10803 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
10804                                    struct lpfc_queue *cq,
10805                                    struct sli4_wcqe_xri_aborted *wcqe)
10806 {
10807         bool workposted = false;
10808         struct lpfc_cq_event *cq_event;
10809         unsigned long iflags;
10810
10811         /* Allocate a new internal CQ_EVENT entry */
10812         cq_event = lpfc_sli4_cq_event_alloc(phba);
10813         if (!cq_event) {
10814                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10815                                 "0602 Failed to allocate CQ_EVENT entry\n");
10816                 return false;
10817         }
10818
10819         /* Move the CQE into the proper xri abort event list */
10820         memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
10821         switch (cq->subtype) {
10822         case LPFC_FCP:
10823                 spin_lock_irqsave(&phba->hbalock, iflags);
10824                 list_add_tail(&cq_event->list,
10825                               &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
10826                 /* Set the fcp xri abort event flag */
10827                 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
10828                 spin_unlock_irqrestore(&phba->hbalock, iflags);
10829                 workposted = true;
10830                 break;
10831         case LPFC_ELS:
10832                 spin_lock_irqsave(&phba->hbalock, iflags);
10833                 list_add_tail(&cq_event->list,
10834                               &phba->sli4_hba.sp_els_xri_aborted_work_queue);
10835                 /* Set the els xri abort event flag */
10836                 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
10837                 spin_unlock_irqrestore(&phba->hbalock, iflags);
10838                 workposted = true;
10839                 break;
10840         default:
10841                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10842                                 "0603 Invalid work queue CQE subtype (x%x)\n",
10843                                 cq->subtype);
10844                 workposted = false;
10845                 break;
10846         }
10847         return workposted;
10848 }
10849
10850 /**
10851  * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
10852  * @phba: Pointer to HBA context object.
10853  * @rcqe: Pointer to receive-queue completion queue entry.
10854  *
10855  * This routine processes a receive-queue completion queue entry.
10856  *
10857  * Return: true if work posted to worker thread, otherwise false.
10858  **/
10859 static bool
10860 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
10861 {
10862         bool workposted = false;
10863         struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
10864         struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
10865         struct hbq_dmabuf *dma_buf;
10866         uint32_t status, rq_id;
10867         unsigned long iflags;
10868
10869         if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
10870                 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
10871         else
10872                 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
10873         if (rq_id != hrq->queue_id)
10874                 goto out;
10875
10876         status = bf_get(lpfc_rcqe_status, rcqe);
10877         switch (status) {
10878         case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
10879                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10880                                 "2537 Receive Frame Truncated!!\n");
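                /* Deliberate fall-through: a truncated frame is still
                 * queued below for the worker thread to process.
                 */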
10881         case FC_STATUS_RQ_SUCCESS:
10882                 lpfc_sli4_rq_release(hrq, drq);
10883                 spin_lock_irqsave(&phba->hbalock, iflags);
10884                 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
10885                 if (!dma_buf) {
10886                         spin_unlock_irqrestore(&phba->hbalock, iflags);
10887                         goto out;
10888                 }
10889                 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
10890                 /* save off the frame for the worker thread to process */
10891                 list_add_tail(&dma_buf->cq_event.list,
10892                               &phba->sli4_hba.sp_queue_event);
10893                 /* Frame received */
10894                 phba->hba_flag |= HBA_SP_QUEUE_EVT;
10895                 spin_unlock_irqrestore(&phba->hbalock, iflags);
10896                 workposted = true;
10897                 break;
10898         case FC_STATUS_INSUFF_BUF_NEED_BUF:
10899         case FC_STATUS_INSUFF_BUF_FRM_DISC:
10900                 /* Post more buffers if possible */
10901                 spin_lock_irqsave(&phba->hbalock, iflags);
10902                 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
10903                 spin_unlock_irqrestore(&phba->hbalock, iflags);
10904                 workposted = true;
10905                 break;
10906         }
10907 out:
10908         return workposted;
10909 }
10910
10911 /**
10912  * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
10913  * @phba: Pointer to HBA context object.
10914  * @cq: Pointer to the completion queue.
10915  * @wcqe: Pointer to a completion queue entry.
10916  *
10917  * This routine processes a slow-path work-queue or receive-queue completion
10918  * queue entry.
10919  *
10920  * Return: true if work posted to worker thread, otherwise false.
10921  **/
10922 static bool
10923 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
10924                          struct lpfc_cqe *cqe)
10925 {
10926         struct lpfc_cqe cqevt;
10927         bool workposted = false;
10928
10929         /* Copy the work queue CQE and convert endian order if needed */
10930         lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
10931
10932         /* Check for the different types of WCQE and dispatch accordingly */
10933         switch (bf_get(lpfc_cqe_code, &cqevt)) {
10934         case CQE_CODE_COMPL_WQE:
10935                 /* Process the WQ/RQ complete event */
10936                 phba->last_completion_time = jiffies;
10937                 workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
10938                                 (struct lpfc_wcqe_complete *)&cqevt);
10939                 break;
10940         case CQE_CODE_RELEASE_WQE:
10941                 /* Process the WQ release event */
10942                 lpfc_sli4_sp_handle_rel_wcqe(phba,
10943                                 (struct lpfc_wcqe_release *)&cqevt);
10944                 break;
10945         case CQE_CODE_XRI_ABORTED:
10946                 /* Process the WQ XRI abort event */
10947                 phba->last_completion_time = jiffies;
10948                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
10949                                 (struct sli4_wcqe_xri_aborted *)&cqevt);
10950                 break;
10951         case CQE_CODE_RECEIVE:
10952         case CQE_CODE_RECEIVE_V1:
10953                 /* Process the RQ event */
10954                 phba->last_completion_time = jiffies;
10955                 workposted = lpfc_sli4_sp_handle_rcqe(phba,
10956                                 (struct lpfc_rcqe *)&cqevt);
10957                 break;
10958         default:
10959                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10960                                 "0388 Not a valid WCQE code: x%x\n",
10961                                 bf_get(lpfc_cqe_code, &cqevt));
10962                 break;
10963         }
10964         return workposted;
10965 }
10966
10967 /**
10968  * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
10969  * @phba: Pointer to HBA context object.
10970  * @eqe: Pointer to slow-path event queue entry.
10971  *
10972  * This routine processes an event queue entry from the slow-path event
10973  * queue. It checks the MajorCode and MinorCode to determine whether this
10974  * is a completion event on a completion queue; if not, an error is logged
10975  * and the routine just returns. Otherwise, it finds the corresponding
10976  * completion queue, processes all the entries on that completion queue,
10977  * rearms the completion queue, and then returns.
10978  *
10979  **/
10980 static void
10981 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
10982 {
10983         struct lpfc_queue *cq = NULL, *childq, *speq;
10984         struct lpfc_cqe *cqe;
10985         bool workposted = false;
10986         int ecount = 0;
10987         uint16_t cqid;
10988
10989         if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) {
10990                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10991                                 "0359 Not a valid slow-path completion "
10992                                 "event: majorcode=x%x, minorcode=x%x\n",
10993                                 bf_get_le32(lpfc_eqe_major_code, eqe),
10994                                 bf_get_le32(lpfc_eqe_minor_code, eqe));
10995                 return;
10996         }
10997
10998         /* Get the reference to the corresponding CQ */
10999         cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
11000
11001         /* Search for completion queue pointer matching this cqid */
11002         speq = phba->sli4_hba.sp_eq;
11003         list_for_each_entry(childq, &speq->child_list, list) {
11004                 if (childq->queue_id == cqid) {
11005                         cq = childq;
11006                         break;
11007                 }
11008         }
11009         if (unlikely(!cq)) {
11010                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
11011                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11012                                         "0365 Slow-path CQ identifier "
11013                                         "(%d) does not exist\n", cqid);
11014                 return;
11015         }
11016
11017         /* Process all the entries to the CQ */
11018         switch (cq->type) {
11019         case LPFC_MCQ:
11020                 while ((cqe = lpfc_sli4_cq_get(cq))) {
11021                         workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
11022                         if (!(++ecount % cq->entry_repost))
11023                                 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11024                 }
11025                 break;
11026         case LPFC_WCQ:
11027                 while ((cqe = lpfc_sli4_cq_get(cq))) {
11028                         if (cq->subtype == LPFC_FCP)
11029                                 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
11030                                                                        cqe);
11031                         else
11032                                 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
11033                                                                       cqe);
11034                         if (!(++ecount % cq->entry_repost))
11035                                 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11036                 }
11037                 break;
11038         default:
11039                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11040                                 "0370 Invalid completion queue type (%d)\n",
11041                                 cq->type);
11042                 return;
11043         }
11044
11045         /* Catch the no cq entry condition, log an error */
11046         if (unlikely(ecount == 0))
11047                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11048                                 "0371 No entry from the CQ: identifier "
11049                                 "(x%x), type (%d)\n", cq->queue_id, cq->type);
11050
11051         /* In any case, flush and re-arm the CQ */
11052         lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
11053
11054         /* wake up worker thread if there is work to be done */
11055         if (workposted)
11056                 lpfc_worker_wake_up(phba);
11057 }
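
/*
 * Illustrative sketch, not part of the driver: the "!(++ecount %
 * cq->entry_repost)" test above hands consumed CQEs back to the hardware
 * in batches rather than with one doorbell write per entry.  For
 * illustration, if entry_repost were 64:
 *
 *	entries 1..63	-> no doorbell write
 *	entry 64	-> lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM)
 *	loop exit	-> lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM)
 *
 * so free queue entries are returned promptly while the interrupt is
 * re-armed only once, after the queue has been drained.
 */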
11058
11059 /**
11060  * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
11061  * @wcqe: Pointer to work-queue completion queue entry.
11062  *
11063  * This routine processes a fast-path work queue completion entry from the
11064  * fast-path event queue for FCP command response completion.
11065  **/
11066 static void
11067 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
11068                              struct lpfc_wcqe_complete *wcqe)
11069 {
11070         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
11071         struct lpfc_iocbq *cmdiocbq;
11072         struct lpfc_iocbq irspiocbq;
11073         unsigned long iflags;
11074
11075         spin_lock_irqsave(&phba->hbalock, iflags);
11076         pring->stats.iocb_event++;
11077         spin_unlock_irqrestore(&phba->hbalock, iflags);
11078
11079         /* Check for response status */
11080         if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
11081                 /* If resource errors reported from HBA, reduce queue
11082                  * depth of the SCSI device.
11083                  */
11084                 if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
11085                      IOSTAT_LOCAL_REJECT) &&
11086                     (wcqe->parameter == IOERR_NO_RESOURCES)) {
11087                         phba->lpfc_rampdown_queue_depth(phba);
11088                 }
11089                 /* Log the error status */
11090                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11091                                 "0373 FCP complete error: status=x%x, "
11092                                 "hw_status=x%x, total_data_specified=%d, "
11093                                 "parameter=x%x, word3=x%x\n",
11094                                 bf_get(lpfc_wcqe_c_status, wcqe),
11095                                 bf_get(lpfc_wcqe_c_hw_status, wcqe),
11096                                 wcqe->total_data_placed, wcqe->parameter,
11097                                 wcqe->word3);
11098         }
11099
11100         /* Look up the FCP command IOCB and create pseudo response IOCB */
11101         spin_lock_irqsave(&phba->hbalock, iflags);
11102         cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
11103                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11104         spin_unlock_irqrestore(&phba->hbalock, iflags);
11105         if (unlikely(!cmdiocbq)) {
11106                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11107                                 "0374 FCP complete with no corresponding "
11108                                 "cmdiocb: iotag (%d)\n",
11109                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11110                 return;
11111         }
11112         if (unlikely(!cmdiocbq->iocb_cmpl)) {
11113                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11114                                 "0375 FCP cmdiocb has no callback function, "
11115                                 "iotag: (%d)\n",
11116                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11117                 return;
11118         }
11119
11120         /* Fake the irspiocb and copy necessary response information */
11121         lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
11122
11123         if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
11124                 spin_lock_irqsave(&phba->hbalock, iflags);
11125                 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
11126                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11127         }
11128
11129         /* Pass the cmd_iocb and the rsp state to the upper layer */
11130         (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
11131 }
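
/*
 * Illustrative sketch, not part of the driver: unlike the ELS path, which
 * allocates an iocbq and defers completion to the worker thread, the FCP
 * fast path above fakes the response iocb on the stack and completes the
 * command directly in interrupt context:
 *
 *	struct lpfc_iocbq irspiocbq;	(stack copy, no allocation)
 *
 *	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
 *	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
 */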
11132
11133 /**
11134  * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
11135  * @phba: Pointer to HBA context object.
11136  * @cq: Pointer to completion queue.
11137  * @wcqe: Pointer to work-queue completion queue entry.
11138  *
11139  * This routine handles a fast-path WQ entry consumed event by invoking the
11140  * proper WQ release routine on the matching fast-path WQ.
11141  **/
11142 static void
11143 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11144                              struct lpfc_wcqe_release *wcqe)
11145 {
11146         struct lpfc_queue *childwq;
11147         bool wqid_matched = false;
11148         uint16_t fcp_wqid;
11149
11150         /* Check for fast-path FCP work queue release */
11151         fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
11152         list_for_each_entry(childwq, &cq->child_list, list) {
11153                 if (childwq->queue_id == fcp_wqid) {
11154                         lpfc_sli4_wq_release(childwq,
11155                                         bf_get(lpfc_wcqe_r_wqe_index, wcqe));
11156                         wqid_matched = true;
11157                         break;
11158                 }
11159         }
11160         /* Report warning log message if no match found */
11161         if (!wqid_matched)
11162                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11163                                 "2580 Fast-path wqe consume event carries "
11164                                 "mismatched qid: wcqe-qid=x%x\n", fcp_wqid);
11165 }
11166
11167 /**
11168  * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
11169  * @cq: Pointer to the completion queue.
11170  * @cqe: Pointer to work-queue completion queue entry.
11171  *
11172  * This routine processes a fast-path work queue completion entry from the
11173  * fast-path event queue for FCP command response completion.
11174  **/
11175 static int
11176 lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11177                          struct lpfc_cqe *cqe)
11178 {
11179         struct lpfc_wcqe_release wcqe;
11180         bool workposted = false;
11181
11182         /* Copy the work queue CQE and convert endian order if needed */
11183         lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
11184
11185         /* Check for the different types of WCQE and dispatch accordingly */
11186         switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
11187         case CQE_CODE_COMPL_WQE:
11188                 /* Process the WQ complete event */
11189                 phba->last_completion_time = jiffies;
11190                 lpfc_sli4_fp_handle_fcp_wcqe(phba,
11191                                 (struct lpfc_wcqe_complete *)&wcqe);
11192                 break;
11193         case CQE_CODE_RELEASE_WQE:
11194                 /* Process the WQ release event */
11195                 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
11196                                 (struct lpfc_wcqe_release *)&wcqe);
11197                 break;
11198         case CQE_CODE_XRI_ABORTED:
11199                 /* Process the WQ XRI abort event */
11200                 phba->last_completion_time = jiffies;
11201                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
11202                                 (struct sli4_wcqe_xri_aborted *)&wcqe);
11203                 break;
11204         default:
11205                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11206                                 "0144 Not a valid WCQE code: x%x\n",
11207                                 bf_get(lpfc_wcqe_c_code, &wcqe));
11208                 break;
11209         }
11210         return workposted;
11211 }
11212
11213 /**
11214  * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
11215  * @phba: Pointer to HBA context object.
11216  * @eqe: Pointer to fast-path event queue entry.
11217  * @fcp_cqidx: Index of the fast-path completion queue to service.
11218  *
11219  * This routine processes an event queue entry from the fast-path event queue.
11220  * It checks the MajorCode and MinorCode to determine whether this is a
11221  * completion event on a completion queue; if not, an error is logged and the
11222  * routine returns. Otherwise, it gets the corresponding completion queue,
11223  * processes all the entries on it, re-arms the completion queue, and returns.
11224  **/
11225 static void
11226 lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11227                         uint32_t fcp_cqidx)
11228 {
11229         struct lpfc_queue *cq;
11230         struct lpfc_cqe *cqe;
11231         bool workposted = false;
11232         uint16_t cqid;
11233         int ecount = 0;
11234
11235         if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
11236                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11237                                 "0366 Not a valid fast-path completion "
11238                                 "event: majorcode=x%x, minorcode=x%x\n",
11239                                 bf_get_le32(lpfc_eqe_major_code, eqe),
11240                                 bf_get_le32(lpfc_eqe_minor_code, eqe));
11241                 return;
11242         }
11243
11244         cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
11245         if (unlikely(!cq)) {
11246                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
11247                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11248                                         "0367 Fast-path completion queue "
11249                                         "does not exist\n");
11250                 return;
11251         }
11252
11253         /* Get the reference to the corresponding CQ */
11254         cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
11255         if (unlikely(cqid != cq->queue_id)) {
11256                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11257                                 "0368 Mismatched fast-path completion "
11258                                 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
11259                                 cqid, cq->queue_id);
11260                 return;
11261         }
11262
11263         /* Process all the entries to the CQ */
11264         while ((cqe = lpfc_sli4_cq_get(cq))) {
11265                 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
11266                 if (!(++ecount % cq->entry_repost))
11267                         lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11268         }
11269
11270         /* Catch the no cq entry condition */
11271         if (unlikely(ecount == 0))
11272                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11273                                 "0369 No entry from fast-path completion "
11274                                 "queue fcpcqid=%d\n", cq->queue_id);
11275
11276         /* In any case, flush and re-arm the CQ */
11277         lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
11278
11279         /* wake up worker thread if there is work to be done */
11280         if (workposted)
11281                 lpfc_worker_wake_up(phba);
11282 }
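
/*
 * Worked example (editorial): with a CQ entry_count of 256,
 * lpfc_sli4_queue_alloc() sets entry_repost to 256 >> 3 = 32, so the loop
 * above hands consumed CQEs back to the port (without re-arming) after
 * every 32 entries; the final LPFC_QUEUE_REARM release then returns any
 * remainder and re-enables interrupts for the queue.
 */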
11283
11284 static void
11285 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
11286 {
11287         struct lpfc_eqe *eqe;
11288
11289         /* walk all the EQ entries and drop on the floor */
11290         while ((eqe = lpfc_sli4_eq_get(eq)))
11291                 ;
11292
11293         /* Clear and re-arm the EQ */
11294         lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
11295 }
11296
11297 /**
11298  * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device
11299  * @irq: Interrupt number.
11300  * @dev_id: The device context pointer.
11301  *
11302  * This function is directly called from the PCI layer as an interrupt
11303  * service routine when a device with the SLI-4 interface spec is enabled with
11304  * MSI-X multi-message interrupt mode and there are slow-path events in
11305  * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
11306  * interrupt mode, this function is called as part of the device-level
11307  * interrupt handler. When the PCI slot is in error recovery or the HBA is
11308  * undergoing initialization, the interrupt handler will not process the
11309  * interrupt. The link attention and ELS ring attention events are handled
11310  * by the worker thread. The interrupt handler signals the worker thread
11311  * and returns for these events. This function is called without any lock
11312  * held. It gets the hbalock to access and update SLI data structures.
11313  *
11314  * This function returns IRQ_HANDLED when interrupt is handled else it
11315  * returns IRQ_NONE.
11316  **/
11317 irqreturn_t
11318 lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
11319 {
11320         struct lpfc_hba *phba;
11321         struct lpfc_queue *speq;
11322         struct lpfc_eqe *eqe;
11323         unsigned long iflag;
11324         int ecount = 0;
11325
11326         /*
11327          * Get the driver's phba structure from the dev_id
11328          */
11329         phba = (struct lpfc_hba *)dev_id;
11330
11331         if (unlikely(!phba))
11332                 return IRQ_NONE;
11333
11334         /* Get to the EQ struct associated with this vector */
11335         speq = phba->sli4_hba.sp_eq;
11336         if (unlikely(!speq))
11337                 return IRQ_NONE;
11338
11339         /* Check device state for handling interrupt */
11340         if (unlikely(lpfc_intr_state_check(phba))) {
11341                 /* Check again for link_state with lock held */
11342                 spin_lock_irqsave(&phba->hbalock, iflag);
11343                 if (phba->link_state < LPFC_LINK_DOWN)
11344                         /* Flush, clear interrupt, and rearm the EQ */
11345                         lpfc_sli4_eq_flush(phba, speq);
11346                 spin_unlock_irqrestore(&phba->hbalock, iflag);
11347                 return IRQ_NONE;
11348         }
11349
11350         /*
11351          * Process all the events on the slow-path EQ
11352          */
11353         while ((eqe = lpfc_sli4_eq_get(speq))) {
11354                 lpfc_sli4_sp_handle_eqe(phba, eqe);
11355                 if (!(++ecount % speq->entry_repost))
11356                         lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
11357         }
11358
11359         /* Always clear and re-arm the slow-path EQ */
11360         lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
11361
11362         /* Catch the no EQ entry condition */
11363         if (unlikely(ecount == 0)) {
11364                 if (phba->intr_type == MSIX)
11365                         /* MSI-X vector: an interrupt with no EQE is unexpected */
11366                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11367                                         "0357 MSI-X interrupt with no EQE\n");
11368                 else
11369                         /* Non MSI-X: treat as an interrupt shared with another device */
11370                         return IRQ_NONE;
11371         }
11372
11373         return IRQ_HANDLED;
11374 } /* lpfc_sli4_sp_intr_handler */
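
/*
 * Illustrative sketch (editorial, not part of the driver): under MSI-X this
 * handler is registered on its own vector with the phba as dev_id, along
 * the lines of the setup done in lpfc_init.c:
 *
 *	rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
 *			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
 *			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
 *
 * The vector index shown here is illustrative of that setup.
 */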
11375
11376 /**
11377  * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
11378  * @irq: Interrupt number.
11379  * @dev_id: The device context pointer.
11380  *
11381  * This function is directly called from the PCI layer as an interrupt
11382  * service routine when device with SLI-4 interface spec is enabled with
11383  * service routine when a device with the SLI-4 interface spec is enabled with
11384  * ring event in the HBA. However, when the device is enabled with either
11385  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
11386  * device-level interrupt handler. When the PCI slot is in error recovery
11387  * or the HBA is undergoing initialization, the interrupt handler will not
11388  * process the interrupt. The SCSI FCP fast-path ring events are handled in
11389  * the interrupt context. This function is called without any lock held.
11390  * It gets the hbalock to access and update SLI data structures. Note that
11391  * the FCP EQs and FCP CQs are mapped one-to-one, so the FCP EQ index is
11392  * equal to the corresponding FCP CQ index.
11393  *
11394  * This function returns IRQ_HANDLED when interrupt is handled else it
11395  * returns IRQ_NONE.
11396  **/
11397 irqreturn_t
11398 lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
11399 {
11400         struct lpfc_hba *phba;
11401         struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
11402         struct lpfc_queue *fpeq;
11403         struct lpfc_eqe *eqe;
11404         unsigned long iflag;
11405         int ecount = 0;
11406         uint32_t fcp_eqidx;
11407
11408         /* Get the driver's phba structure from the dev_id */
11409         fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
11410         phba = fcp_eq_hdl->phba;
11411         fcp_eqidx = fcp_eq_hdl->idx;
11412
11413         if (unlikely(!phba))
11414                 return IRQ_NONE;
11415         if (unlikely(!phba->sli4_hba.fp_eq))
11416                 return IRQ_NONE;
11417
11418         /* Get to the EQ struct associated with this vector */
11419         fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
11420
11421         /* Check device state for handling interrupt */
11422         if (unlikely(lpfc_intr_state_check(phba))) {
11423                 /* Check again for link_state with lock held */
11424                 spin_lock_irqsave(&phba->hbalock, iflag);
11425                 if (phba->link_state < LPFC_LINK_DOWN)
11426                         /* Flush, clear interrupt, and rearm the EQ */
11427                         lpfc_sli4_eq_flush(phba, fpeq);
11428                 spin_unlock_irqrestore(&phba->hbalock, iflag);
11429                 return IRQ_NONE;
11430         }
11431
11432         /*
11433          * Process all the event on FCP fast-path EQ
11434          */
11435         while ((eqe = lpfc_sli4_eq_get(fpeq))) {
11436                 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
11437                 if (!(++ecount % fpeq->entry_repost))
11438                         lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
11439         }
11440
11441         /* Always clear and re-arm the fast-path EQ */
11442         lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
11443
11444         if (unlikely(ecount == 0)) {
11445                 if (phba->intr_type == MSIX)
11446                         /* MSI-X vector: an interrupt with no EQE is unexpected */
11447                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11448                                         "0358 MSI-X interrupt with no EQE\n");
11449                 else
11450                         /* Non MSI-X: treat as an interrupt shared with another device */
11451                         return IRQ_NONE;
11452         }
11453
11454         return IRQ_HANDLED;
11455 } /* lpfc_sli4_fp_intr_handler */
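
/*
 * Illustrative sketch (editorial): each fast-path vector is registered with
 * its own lpfc_fcp_eq_hdl as dev_id, so the handler can recover both the
 * phba and the EQ index it services, roughly as lpfc_init.c does:
 *
 *	phba->sli4_hba.fcp_eq_hdl[index].idx = index;
 *	phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
 *	rc = request_irq(phba->sli4_hba.msix_entries[index + 1].vector,
 *			 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
 *			 LPFC_FP_DRIVER_HANDLER_NAME,
 *			 &phba->sli4_hba.fcp_eq_hdl[index]);
 */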
11456
11457 /**
11458  * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
11459  * @irq: Interrupt number.
11460  * @dev_id: The device context pointer.
11461  *
11462  * This function is the device-level interrupt handler for a device with the SLI-4
11463  * interface spec, called from the PCI layer when either MSI or Pin-IRQ
11464  * interrupt mode is enabled and there is an event in the HBA which requires
11465  * driver attention. This function invokes the slow-path interrupt attention
11466  * handling function and fast-path interrupt attention handling function in
11467  * turn to process the relevant HBA attention events. This function is called
11468  * without any lock held. It gets the hbalock to access and update SLI data
11469  * structures.
11470  *
11471  * This function returns IRQ_HANDLED when interrupt is handled, else it
11472  * returns IRQ_NONE.
11473  **/
11474 irqreturn_t
11475 lpfc_sli4_intr_handler(int irq, void *dev_id)
11476 {
11477         struct lpfc_hba  *phba;
11478         irqreturn_t sp_irq_rc, fp_irq_rc;
11479         bool fp_handled = false;
11480         uint32_t fcp_eqidx;
11481
11482         /* Get the driver's phba structure from the dev_id */
11483         phba = (struct lpfc_hba *)dev_id;
11484
11485         if (unlikely(!phba))
11486                 return IRQ_NONE;
11487
11488         /*
11489          * Invoke slow-path host attention interrupt handling as appropriate.
11490          */
11491         sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
11492
11493         /*
11494          * Invoke fast-path host attention interrupt handling as appropriate.
11495          */
11496         for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
11497                 fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
11498                                         &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
11499                 if (fp_irq_rc == IRQ_HANDLED)
11500                         fp_handled = true;
11501         }
11502
11503         return fp_handled ? IRQ_HANDLED : sp_irq_rc;
11504 } /* lpfc_sli4_intr_handler */
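
/*
 * Illustrative sketch (editorial): in MSI or INTx mode a single interrupt
 * line carries all events, so this device-level handler is the one passed
 * to request_irq(), e.g.:
 *
 *	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
 *			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
 */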
11505
11506 /**
11507  * lpfc_sli4_queue_free - free a queue structure and associated memory
11508  * @queue: The queue structure to free.
11509  *
11510  * This function frees a queue structure and the DMAable memory used for
11511  * the host resident queue. This function must be called after destroying the
11512  * queue on the HBA.
11513  **/
11514 void
11515 lpfc_sli4_queue_free(struct lpfc_queue *queue)
11516 {
11517         struct lpfc_dmabuf *dmabuf;
11518
11519         if (!queue)
11520                 return;
11521
11522         while (!list_empty(&queue->page_list)) {
11523                 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
11524                                  list);
11525                 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
11526                                   dmabuf->virt, dmabuf->phys);
11527                 kfree(dmabuf);
11528         }
11529         kfree(queue);
11530         return;
11531 }
11532
11533 /**
11534  * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
11535  * @phba: The HBA that this queue is being created on.
11536  * @entry_size: The size of each queue entry for this queue.
11537  * @entry_count: The number of entries that this queue will handle.
11538  *
11539  * This function allocates a queue structure and the DMAable memory used for
11540  * the host resident queue. This function must be called before creating the
11541  * queue on the HBA.
11542  **/
11543 struct lpfc_queue *
11544 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
11545                       uint32_t entry_count)
11546 {
11547         struct lpfc_queue *queue;
11548         struct lpfc_dmabuf *dmabuf;
11549         int x, total_qe_count;
11550         void *dma_pointer;
11551         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
11552
11553         if (!phba->sli4_hba.pc_sli4_params.supported)
11554                 hw_page_size = SLI4_PAGE_SIZE;
11555
11556         queue = kzalloc(sizeof(struct lpfc_queue) +
11557                         (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
11558         if (!queue)
11559                 return NULL;
11560         queue->page_count = (ALIGN(entry_size * entry_count,
11561                         hw_page_size))/hw_page_size;
11562         INIT_LIST_HEAD(&queue->list);
11563         INIT_LIST_HEAD(&queue->page_list);
11564         INIT_LIST_HEAD(&queue->child_list);
11565         for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
11566                 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
11567                 if (!dmabuf)
11568                         goto out_fail;
11569                 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
11570                                                   hw_page_size, &dmabuf->phys,
11571                                                   GFP_KERNEL);
11572                 if (!dmabuf->virt) {
11573                         kfree(dmabuf);
11574                         goto out_fail;
11575                 }
11576                 memset(dmabuf->virt, 0, hw_page_size);
11577                 dmabuf->buffer_tag = x;
11578                 list_add_tail(&dmabuf->list, &queue->page_list);
11579                 /* initialize queue's entry array */
11580                 dma_pointer = dmabuf->virt;
11581                 for (; total_qe_count < entry_count &&
11582                      dma_pointer < (hw_page_size + dmabuf->virt);
11583                      total_qe_count++, dma_pointer += entry_size) {
11584                         queue->qe[total_qe_count].address = dma_pointer;
11585                 }
11586         }
11587         queue->entry_size = entry_size;
11588         queue->entry_count = entry_count;
11589
11590         /*
11591          * entry_repost is calculated based on the number of entries in the
11592          * queue. This works out except for RQs. If buffers are NOT initially
11593          * posted for every RQE, entry_repost should be adjusted accordingly.
11594          */
11595         queue->entry_repost = (entry_count >> 3);
11596         if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
11597                 queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
11598         queue->phba = phba;
11599
11600         return queue;
11601 out_fail:
11602         lpfc_sli4_queue_free(queue);
11603         return NULL;
11604 }
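
/*
 * Illustrative sketch (editorial): callers size a queue from the
 * pre-computed entry sizes/counts in sli4_hba and must destroy the queue
 * on the port before freeing the host memory, e.g.:
 *
 *	struct lpfc_queue *eq;
 *
 *	eq = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
 *				   phba->sli4_hba.eq_ecount);
 *	if (!eq)
 *		return -ENOMEM;
 *
 * and on teardown, lpfc_eq_destroy(phba, eq) followed by
 * lpfc_sli4_queue_free(eq).
 */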
11605
11606 /**
11607  * lpfc_eq_create - Create an Event Queue on the HBA
11608  * @phba: HBA structure that indicates port to create a queue on.
11609  * @eq: The queue structure to use to create the event queue.
11610  * @imax: The maximum interrupts per second limit.
11611  *
11612  * This function creates an event queue, as detailed in @eq, on a port,
11613  * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
11614  *
11615  * The @phba struct is used to send mailbox command to HBA. The @eq struct
11616  * is used to get the entry count and entry size that are necessary to
11617  * determine the number of pages to allocate and use for this queue. This
11618  * function will send the EQ_CREATE mailbox command to the HBA to setup the
11619  * event queue. This function is synchronous; it polls for the mailbox
11620  * command to finish before continuing.
11621  *
11622  * On success this function will return a zero. If unable to allocate enough
11623  * memory this function will return -ENOMEM. If the queue create mailbox command
11624  * fails this function will return -ENXIO.
11625  **/
11626 uint32_t
11627 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
11628 {
11629         struct lpfc_mbx_eq_create *eq_create;
11630         LPFC_MBOXQ_t *mbox;
11631         int rc, length, status = 0;
11632         struct lpfc_dmabuf *dmabuf;
11633         uint32_t shdr_status, shdr_add_status;
11634         union lpfc_sli4_cfg_shdr *shdr;
11635         uint16_t dmult;
11636         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
11637
11638         if (!phba->sli4_hba.pc_sli4_params.supported)
11639                 hw_page_size = SLI4_PAGE_SIZE;
11640
11641         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11642         if (!mbox)
11643                 return -ENOMEM;
11644         length = (sizeof(struct lpfc_mbx_eq_create) -
11645                   sizeof(struct lpfc_sli4_cfg_mhdr));
11646         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
11647                          LPFC_MBOX_OPCODE_EQ_CREATE,
11648                          length, LPFC_SLI4_MBX_EMBED);
11649         eq_create = &mbox->u.mqe.un.eq_create;
11650         bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
11651                eq->page_count);
11652         bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
11653                LPFC_EQE_SIZE);
11654         bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
11655         /* Calculate delay multiplier from maximum interrupts per second */
11656         dmult = LPFC_DMULT_CONST/imax - 1;
11657         bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
11658                dmult);
11659         switch (eq->entry_count) {
11660         default:
11661                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11662                                 "0360 Unsupported EQ count. (%d)\n",
11663                                 eq->entry_count);
                if (eq->entry_count < 256) {
                        mempool_free(mbox, phba->mbox_mem_pool);
                        return -EINVAL;
                }
                /* otherwise default to smallest count (fall through) */
11667         case 256:
11668                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
11669                        LPFC_EQ_CNT_256);
11670                 break;
11671         case 512:
11672                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
11673                        LPFC_EQ_CNT_512);
11674                 break;
11675         case 1024:
11676                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
11677                        LPFC_EQ_CNT_1024);
11678                 break;
11679         case 2048:
11680                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
11681                        LPFC_EQ_CNT_2048);
11682                 break;
11683         case 4096:
11684                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
11685                        LPFC_EQ_CNT_4096);
11686                 break;
11687         }
11688         list_for_each_entry(dmabuf, &eq->page_list, list) {
11689                 memset(dmabuf->virt, 0, hw_page_size);
11690                 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
11691                                         putPaddrLow(dmabuf->phys);
11692                 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
11693                                         putPaddrHigh(dmabuf->phys);
11694         }
11695         mbox->vport = phba->pport;
11696         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
11697         mbox->context1 = NULL;
11698         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
11699         shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
11700         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11701         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11702         if (shdr_status || shdr_add_status || rc) {
11703                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11704                                 "2500 EQ_CREATE mailbox failed with "
11705                                 "status x%x add_status x%x, mbx status x%x\n",
11706                                 shdr_status, shdr_add_status, rc);
11707                 status = -ENXIO;
11708         }
11709         eq->type = LPFC_EQ;
11710         eq->subtype = LPFC_NONE;
11711         eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
11712         if (eq->queue_id == 0xFFFF)
11713                 status = -ENXIO;
11714         eq->host_index = 0;
11715         eq->hba_index = 0;
11716
11717         mempool_free(mbox, phba->mbox_mem_pool);
11718         return status;
11719 }
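
/*
 * Illustrative sketch (editorial): lpfc_sli4_queue_setup() creates the
 * slow-path EQ against the configured interrupt ceiling, roughly:
 *
 *	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq, phba->cfg_fcp_imax);
 *
 * where cfg_fcp_imax bounds the interrupt rate through the delay
 * multiplier computed above.
 */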
11720
11721 /**
11722  * lpfc_cq_create - Create a Completion Queue on the HBA
11723  * @phba: HBA structure that indicates port to create a queue on.
11724  * @cq: The queue structure to use to create the completion queue.
11725  * @eq: The event queue to bind this completion queue to.
11726  *
11727  * This function creates a completion queue, as detailed in @cq, on a port,
11728  * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
11729  *
11730  * The @phba struct is used to send mailbox command to HBA. The @cq struct
11731  * is used to get the entry count and entry size that are necessary to
11732  * determine the number of pages to allocate and use for this queue. The @eq
11733  * is used to indicate which event queue to bind this completion queue to. This
11734  * function will send the CQ_CREATE mailbox command to the HBA to setup the
11735  * completion queue. This function is asynchronous and will wait for the mailbox
11736  * completion queue. This function is synchronous; it polls for the mailbox
11737  *
11738  * On success this function will return a zero. If unable to allocate enough
11739  * memory this function will return -ENOMEM. If the queue create mailbox command
11740  * fails this function will return -ENXIO.
11741  **/
11742 uint32_t
11743 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
11744                struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
11745 {
11746         struct lpfc_mbx_cq_create *cq_create;
11747         struct lpfc_dmabuf *dmabuf;
11748         LPFC_MBOXQ_t *mbox;
11749         int rc, length, status = 0;
11750         uint32_t shdr_status, shdr_add_status;
11751         union lpfc_sli4_cfg_shdr *shdr;
11752         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
11753
11754         if (!phba->sli4_hba.pc_sli4_params.supported)
11755                 hw_page_size = SLI4_PAGE_SIZE;
11756
11757         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11758         if (!mbox)
11759                 return -ENOMEM;
11760         length = (sizeof(struct lpfc_mbx_cq_create) -
11761                   sizeof(struct lpfc_sli4_cfg_mhdr));
11762         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
11763                          LPFC_MBOX_OPCODE_CQ_CREATE,
11764                          length, LPFC_SLI4_MBX_EMBED);
11765         cq_create = &mbox->u.mqe.un.cq_create;
11766         shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
11767         bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
11768                     cq->page_count);
11769         bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
11770         bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
11771         bf_set(lpfc_mbox_hdr_version, &shdr->request,
11772                phba->sli4_hba.pc_sli4_params.cqv);
11773         if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
11774                 /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
11775                 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
11776                 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
11777                        eq->queue_id);
11778         } else {
11779                 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
11780                        eq->queue_id);
11781         }
11782         switch (cq->entry_count) {
11783         default:
11784                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11785                                 "0361 Unsupported CQ count. (%d)\n",
11786                                 cq->entry_count);
                if (cq->entry_count < 256) {
                        status = -EINVAL;
                        goto out;
                }
                /* otherwise default to smallest count (fall through) */
11790         case 256:
11791                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
11792                        LPFC_CQ_CNT_256);
11793                 break;
11794         case 512:
11795                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
11796                        LPFC_CQ_CNT_512);
11797                 break;
11798         case 1024:
11799                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
11800                        LPFC_CQ_CNT_1024);
11801                 break;
11802         }
11803         list_for_each_entry(dmabuf, &cq->page_list, list) {
11804                 memset(dmabuf->virt, 0, hw_page_size);
11805                 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
11806                                         putPaddrLow(dmabuf->phys);
11807                 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
11808                                         putPaddrHigh(dmabuf->phys);
11809         }
11810         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
11811
11812         /* The IOCTL status is embedded in the mailbox subheader. */
11813         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11814         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11815         if (shdr_status || shdr_add_status || rc) {
11816                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11817                                 "2501 CQ_CREATE mailbox failed with "
11818                                 "status x%x add_status x%x, mbx status x%x\n",
11819                                 shdr_status, shdr_add_status, rc);
11820                 status = -ENXIO;
11821                 goto out;
11822         }
11823         cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
11824         if (cq->queue_id == 0xFFFF) {
11825                 status = -ENXIO;
11826                 goto out;
11827         }
11828         /* link the cq onto the parent eq child list */
11829         list_add_tail(&cq->list, &eq->child_list);
11830         /* Set up completion queue's type and subtype */
11831         cq->type = type;
11832         cq->subtype = subtype;
11833         cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
11834         cq->assoc_qid = eq->queue_id;
11835         cq->host_index = 0;
11836         cq->hba_index = 0;
11837
11838 out:
11839         mempool_free(mbox, phba->mbox_mem_pool);
11840         return status;
11841 }
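
/*
 * Illustrative sketch (editorial): a CQ is always created against the EQ
 * whose child_list it joins; the slow-path ELS CQ, for instance, is set up
 * roughly as:
 *
 *	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
 *			    phba->sli4_hba.sp_eq, LPFC_WCQ, LPFC_ELS);
 */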
11842
11843 /**
11844  * lpfc_mq_create_fb_init - Send MQ_CREATE without async events registration
11845  * @phba: HBA structure that indicates port to create a queue on.
11846  * @mq: The queue structure to use to create the mailbox queue.
11847  * @mbox: An allocated pointer to type LPFC_MBOXQ_t
11848  * @cq: The completion queue to associate with this mq.
11849  *
11850  * This function provides fallback (fb) functionality when the
11851  * mq_create_ext fails on older FW generations.  Its purpose is otherwise
11852  * identical to mq_create_ext.
11853  *
11854  * This routine cannot fail as all attributes were previously accessed and
11855  * initialized in mq_create_ext.
11856  **/
11857 static void
11858 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
11859                        LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
11860 {
11861         struct lpfc_mbx_mq_create *mq_create;
11862         struct lpfc_dmabuf *dmabuf;
11863         int length;
11864
11865         length = (sizeof(struct lpfc_mbx_mq_create) -
11866                   sizeof(struct lpfc_sli4_cfg_mhdr));
11867         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
11868                          LPFC_MBOX_OPCODE_MQ_CREATE,
11869                          length, LPFC_SLI4_MBX_EMBED);
11870         mq_create = &mbox->u.mqe.un.mq_create;
11871         bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
11872                mq->page_count);
11873         bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
11874                cq->queue_id);
11875         bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
11876         switch (mq->entry_count) {
11877         case 16:
11878                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
11879                        LPFC_MQ_RING_SIZE_16);
11880                 break;
11881         case 32:
11882                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
11883                        LPFC_MQ_RING_SIZE_32);
11884                 break;
11885         case 64:
11886                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
11887                        LPFC_MQ_RING_SIZE_64);
11888                 break;
11889         case 128:
11890                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
11891                        LPFC_MQ_RING_SIZE_128);
11892                 break;
11893         }
11894         list_for_each_entry(dmabuf, &mq->page_list, list) {
11895                 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
11896                         putPaddrLow(dmabuf->phys);
11897                 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
11898                         putPaddrHigh(dmabuf->phys);
11899         }
11900 }
11901
11902 /**
11903  * lpfc_mq_create - Create a mailbox Queue on the HBA
11904  * @phba: HBA structure that indicates port to create a queue on.
11905  * @mq: The queue structure to use to create the mailbox queue.
11906  * @cq: The completion queue to associate with this mq.
11907  * @subtype: The queue's subtype.
11908  *
11909  * This function creates a mailbox queue, as detailed in @mq, on a port,
11910  * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
11911  *
11912  * The @phba struct is used to send mailbox command to HBA. The @cq struct
11913  * is used to get the entry count and entry size that are necessary to
11914  * determine the number of pages to allocate and use for this queue. This
11915  * function will send the MQ_CREATE mailbox command to the HBA to setup the
11916  * mailbox queue. This function is synchronous; it polls for the mailbox
11917  * command to finish before continuing.
11918  *
11919  * On success this function will return a zero. If unable to allocate enough
11920  * memory this function will return -ENOMEM. If the queue create mailbox command
11921  * fails this function will return -ENXIO.
11922  **/
11923 int32_t
11924 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
11925                struct lpfc_queue *cq, uint32_t subtype)
11926 {
11927         struct lpfc_mbx_mq_create *mq_create;
11928         struct lpfc_mbx_mq_create_ext *mq_create_ext;
11929         struct lpfc_dmabuf *dmabuf;
11930         LPFC_MBOXQ_t *mbox;
11931         int rc, length, status = 0;
11932         uint32_t shdr_status, shdr_add_status;
11933         union lpfc_sli4_cfg_shdr *shdr;
11934         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
11935
11936         if (!phba->sli4_hba.pc_sli4_params.supported)
11937                 hw_page_size = SLI4_PAGE_SIZE;
11938
11939         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11940         if (!mbox)
11941                 return -ENOMEM;
11942         length = (sizeof(struct lpfc_mbx_mq_create_ext) -
11943                   sizeof(struct lpfc_sli4_cfg_mhdr));
11944         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
11945                          LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
11946                          length, LPFC_SLI4_MBX_EMBED);
11947
11948         mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
11949         shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
11950         bf_set(lpfc_mbx_mq_create_ext_num_pages,
11951                &mq_create_ext->u.request, mq->page_count);
11952         bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
11953                &mq_create_ext->u.request, 1);
11954         bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
11955                &mq_create_ext->u.request, 1);
11956         bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
11957                &mq_create_ext->u.request, 1);
11958         bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
11959                &mq_create_ext->u.request, 1);
11960         bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
11961                &mq_create_ext->u.request, 1);
11962         bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
11963         bf_set(lpfc_mbox_hdr_version, &shdr->request,
11964                phba->sli4_hba.pc_sli4_params.mqv);
11965         if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
11966                 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
11967                        cq->queue_id);
11968         else
11969                 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
11970                        cq->queue_id);
11971         switch (mq->entry_count) {
11972         default:
11973                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11974                                 "0362 Unsupported MQ count. (%d)\n",
11975                                 mq->entry_count);
                if (mq->entry_count < 16) {
                        status = -EINVAL;
                        goto out;
                }
                /* otherwise default to smallest count (fall through) */
11979         case 16:
11980                 bf_set(lpfc_mq_context_ring_size,
11981                        &mq_create_ext->u.request.context,
11982                        LPFC_MQ_RING_SIZE_16);
11983                 break;
11984         case 32:
11985                 bf_set(lpfc_mq_context_ring_size,
11986                        &mq_create_ext->u.request.context,
11987                        LPFC_MQ_RING_SIZE_32);
11988                 break;
11989         case 64:
11990                 bf_set(lpfc_mq_context_ring_size,
11991                        &mq_create_ext->u.request.context,
11992                        LPFC_MQ_RING_SIZE_64);
11993                 break;
11994         case 128:
11995                 bf_set(lpfc_mq_context_ring_size,
11996                        &mq_create_ext->u.request.context,
11997                        LPFC_MQ_RING_SIZE_128);
11998                 break;
11999         }
12000         list_for_each_entry(dmabuf, &mq->page_list, list) {
12001                 memset(dmabuf->virt, 0, hw_page_size);
12002                 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
12003                                         putPaddrLow(dmabuf->phys);
12004                 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
12005                                         putPaddrHigh(dmabuf->phys);
12006         }
12007         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12008         mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
12009                               &mq_create_ext->u.response);
12010         if (rc != MBX_SUCCESS) {
12011                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12012                                 "2795 MQ_CREATE_EXT failed with "
12013                                 "status x%x. Falling back to MQ_CREATE.\n",
12014                                 rc);
12015                 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
12016                 mq_create = &mbox->u.mqe.un.mq_create;
12017                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12018                 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
12019                 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
12020                                       &mq_create->u.response);
12021         }
12022
12023         /* The IOCTL status is embedded in the mailbox subheader. */
12024         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12025         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12026         if (shdr_status || shdr_add_status || rc) {
12027                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12028                                 "2502 MQ_CREATE mailbox failed with "
12029                                 "status x%x add_status x%x, mbx status x%x\n",
12030                                 shdr_status, shdr_add_status, rc);
12031                 status = -ENXIO;
12032                 goto out;
12033         }
12034         if (mq->queue_id == 0xFFFF) {
12035                 status = -ENXIO;
12036                 goto out;
12037         }
12038         mq->type = LPFC_MQ;
12039         mq->assoc_qid = cq->queue_id;
12040         mq->subtype = subtype;
12041         mq->host_index = 0;
12042         mq->hba_index = 0;
12043
12044         /* link the mq onto the parent cq child list */
12045         list_add_tail(&mq->list, &cq->child_list);
12046 out:
12047         mempool_free(mbox, phba->mbox_mem_pool);
12048         return status;
12049 }
12050
12051 /**
12052  * lpfc_wq_create - Create a Work Queue on the HBA
12053  * @phba: HBA structure that indicates port to create a queue on.
12054  * @wq: The queue structure to use to create the work queue.
12055  * @cq: The completion queue to bind this work queue to.
12056  * @subtype: The subtype of the work queue indicating its functionality.
12057  *
12058  * This function creates a work queue, as detailed in @wq, on a port, described
12059  * by @phba by sending a WQ_CREATE mailbox command to the HBA.
12060  *
12061  * The @phba struct is used to send mailbox command to HBA. The @wq struct
12062  * is used to get the entry count and entry size that are necessary to
12063  * determine the number of pages to allocate and use for this queue. The @cq
12064  * is used to indicate which completion queue to bind this work queue to. This
12065  * function will send the WQ_CREATE mailbox command to the HBA to setup the
12066  * work queue. This function is synchronous; it polls for the mailbox
12067  * command to finish before continuing.
12068  *
12069  * On success this function will return a zero. If unable to allocate enough
12070  * memory this function will return -ENOMEM. If the queue create mailbox command
12071  * fails this function will return -ENXIO.
12072  **/
12073 uint32_t
12074 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12075                struct lpfc_queue *cq, uint32_t subtype)
12076 {
12077         struct lpfc_mbx_wq_create *wq_create;
12078         struct lpfc_dmabuf *dmabuf;
12079         LPFC_MBOXQ_t *mbox;
12080         int rc, length, status = 0;
12081         uint32_t shdr_status, shdr_add_status;
12082         union lpfc_sli4_cfg_shdr *shdr;
12083         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12084         struct dma_address *page;
12085
12086         if (!phba->sli4_hba.pc_sli4_params.supported)
12087                 hw_page_size = SLI4_PAGE_SIZE;
12088
12089         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12090         if (!mbox)
12091                 return -ENOMEM;
12092         length = (sizeof(struct lpfc_mbx_wq_create) -
12093                   sizeof(struct lpfc_sli4_cfg_mhdr));
12094         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12095                          LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
12096                          length, LPFC_SLI4_MBX_EMBED);
12097         wq_create = &mbox->u.mqe.un.wq_create;
12098         shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
12099         bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
12100                     wq->page_count);
12101         bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
12102                     cq->queue_id);
12103         bf_set(lpfc_mbox_hdr_version, &shdr->request,
12104                phba->sli4_hba.pc_sli4_params.wqv);
12105         if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
12106                 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
12107                        wq->entry_count);
12108                 switch (wq->entry_size) {
12109                 default:
12110                 case 64:
12111                         bf_set(lpfc_mbx_wq_create_wqe_size,
12112                                &wq_create->u.request_1,
12113                                LPFC_WQ_WQE_SIZE_64);
12114                         break;
12115                 case 128:
12116                         bf_set(lpfc_mbx_wq_create_wqe_size,
12117                                &wq_create->u.request_1,
12118                                LPFC_WQ_WQE_SIZE_128);
12119                         break;
12120                 }
12121                 bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
12122                        (PAGE_SIZE/SLI4_PAGE_SIZE));
12123                 page = wq_create->u.request_1.page;
12124         } else {
12125                 page = wq_create->u.request.page;
12126         }
12127         list_for_each_entry(dmabuf, &wq->page_list, list) {
12128                 memset(dmabuf->virt, 0, hw_page_size);
12129                 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
12130                 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
12131         }
12132         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12133         /* The IOCTL status is embedded in the mailbox subheader. */
12134         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12135         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12136         if (shdr_status || shdr_add_status || rc) {
12137                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12138                                 "2503 WQ_CREATE mailbox failed with "
12139                                 "status x%x add_status x%x, mbx status x%x\n",
12140                                 shdr_status, shdr_add_status, rc);
12141                 status = -ENXIO;
12142                 goto out;
12143         }
12144         wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
12145         if (wq->queue_id == 0xFFFF) {
12146                 status = -ENXIO;
12147                 goto out;
12148         }
12149         wq->type = LPFC_WQ;
12150         wq->assoc_qid = cq->queue_id;
12151         wq->subtype = subtype;
12152         wq->host_index = 0;
12153         wq->hba_index = 0;
12154
12155         /* link the wq onto the parent cq child list */
12156         list_add_tail(&wq->list, &cq->child_list);
12157 out:
12158         mempool_free(mbox, phba->mbox_mem_pool);
12159         return status;
12160 }
12161
12162 /**
12163  * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
12164  * @phba: HBA structure that indicates port to create a queue on.
12165  * @rq:   The queue structure to use for the receive queue.
12166  * @qno:  The associated HBQ number
12167  *
12169  * For SLI4 we need to adjust the RQ repost value based on
12170  * the number of buffers that are initially posted to the RQ.
12171  **/
12172 void
12173 lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
12174 {
12175         uint32_t cnt;
12176
12177         cnt = lpfc_hbq_defs[qno]->entry_count;
12178
12179         /* Recalc repost for RQs based on buffers initially posted */
12180         cnt = (cnt >> 3);
12181         if (cnt < LPFC_QUEUE_MIN_REPOST)
12182                 cnt = LPFC_QUEUE_MIN_REPOST;
12183
12184         rq->entry_repost = cnt;
12185 }
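
/*
 * Worked example (editorial): an RQ created with 512 entries but matched to
 * an HBQ that initially posts only 256 buffers gets entry_repost
 * recalculated as 256 >> 3 = 32, i.e. one-eighth of the buffers actually
 * posted rather than one-eighth of the ring size, with
 * LPFC_QUEUE_MIN_REPOST as the floor.
 */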
12186
12187 /**
12188  * lpfc_rq_create - Create a Receive Queue on the HBA
12189  * @phba: HBA structure that indicates port to create a queue on.
12190  * @hrq: The queue structure to use to create the header receive queue.
12191  * @drq: The queue structure to use to create the data receive queue.
12192  * @cq: The completion queue to bind these receive queues to.
12193  *
12194  * This function creates a receive buffer queue pair, as detailed in @hrq and
12195  * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
12196  * to the HBA.
12197  *
12198  * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
12199  * structs are used to get the entry count that is necessary to determine the
12200  * number of pages to use for this queue. The @cq indicates the completion
12201  * queue to which buffers posted to these queues are bound.
12202  * This function will send the RQ_CREATE mailbox command to the HBA to setup the
12203  * receive queue pair. This function is synchronous; it polls for the
12204  * mailbox command to finish before continuing.
12205  *
12206  * On success this function will return a zero. If unable to allocate enough
12207  * memory this function will return -ENOMEM. If the queue create mailbox command
12208  * fails this function will return -ENXIO.
12209  **/
12210 uint32_t
12211 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
12212                struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
12213 {
12214         struct lpfc_mbx_rq_create *rq_create;
12215         struct lpfc_dmabuf *dmabuf;
12216         LPFC_MBOXQ_t *mbox;
12217         int rc, length, status = 0;
12218         uint32_t shdr_status, shdr_add_status;
12219         union lpfc_sli4_cfg_shdr *shdr;
12220         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12221
12222         if (!phba->sli4_hba.pc_sli4_params.supported)
12223                 hw_page_size = SLI4_PAGE_SIZE;
12224
12225         if (hrq->entry_count != drq->entry_count)
12226                 return -EINVAL;
12227         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12228         if (!mbox)
12229                 return -ENOMEM;
12230         length = (sizeof(struct lpfc_mbx_rq_create) -
12231                   sizeof(struct lpfc_sli4_cfg_mhdr));
12232         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12233                          LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
12234                          length, LPFC_SLI4_MBX_EMBED);
12235         rq_create = &mbox->u.mqe.un.rq_create;
12236         shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
12237         bf_set(lpfc_mbox_hdr_version, &shdr->request,
12238                phba->sli4_hba.pc_sli4_params.rqv);
12239         if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
12240                 bf_set(lpfc_rq_context_rqe_count_1,
12241                        &rq_create->u.request.context,
12242                        hrq->entry_count);
12243                 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
12244                 bf_set(lpfc_rq_context_rqe_size,
12245                        &rq_create->u.request.context,
12246                        LPFC_RQE_SIZE_8);
12247                 bf_set(lpfc_rq_context_page_size,
12248                        &rq_create->u.request.context,
12249                        (PAGE_SIZE/SLI4_PAGE_SIZE));
12250         } else {
12251                 switch (hrq->entry_count) {
12252                 default:
12253                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12254                                         "2535 Unsupported RQ count. (%d)\n",
12255                                         hrq->entry_count);
                        if (hrq->entry_count < 512) {
                                status = -EINVAL;
                                goto out;
                        }
                        /* otherwise default to smallest count (fall through) */
12259                 case 512:
12260                         bf_set(lpfc_rq_context_rqe_count,
12261                                &rq_create->u.request.context,
12262                                LPFC_RQ_RING_SIZE_512);
12263                         break;
12264                 case 1024:
12265                         bf_set(lpfc_rq_context_rqe_count,
12266                                &rq_create->u.request.context,
12267                                LPFC_RQ_RING_SIZE_1024);
12268                         break;
12269                 case 2048:
12270                         bf_set(lpfc_rq_context_rqe_count,
12271                                &rq_create->u.request.context,
12272                                LPFC_RQ_RING_SIZE_2048);
12273                         break;
12274                 case 4096:
12275                         bf_set(lpfc_rq_context_rqe_count,
12276                                &rq_create->u.request.context,
12277                                LPFC_RQ_RING_SIZE_4096);
12278                         break;
12279                 }
12280                 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
12281                        LPFC_HDR_BUF_SIZE);
12282         }
12283         bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
12284                cq->queue_id);
12285         bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
12286                hrq->page_count);
12287         list_for_each_entry(dmabuf, &hrq->page_list, list) {
12288                 memset(dmabuf->virt, 0, hw_page_size);
12289                 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12290                                         putPaddrLow(dmabuf->phys);
12291                 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12292                                         putPaddrHigh(dmabuf->phys);
12293         }
12294         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12295         /* The IOCTL status is embedded in the mailbox subheader. */
12296         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12297         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12298         if (shdr_status || shdr_add_status || rc) {
12299                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12300                                 "2504 RQ_CREATE mailbox failed with "
12301                                 "status x%x add_status x%x, mbx status x%x\n",
12302                                 shdr_status, shdr_add_status, rc);
12303                 status = -ENXIO;
12304                 goto out;
12305         }
12306         hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
12307         if (hrq->queue_id == 0xFFFF) {
12308                 status = -ENXIO;
12309                 goto out;
12310         }
12311         hrq->type = LPFC_HRQ;
12312         hrq->assoc_qid = cq->queue_id;
12313         hrq->subtype = subtype;
12314         hrq->host_index = 0;
12315         hrq->hba_index = 0;
12316
12317         /* now create the data queue */
12318         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12319                          LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
12320                          length, LPFC_SLI4_MBX_EMBED);
12321         bf_set(lpfc_mbox_hdr_version, &shdr->request,
12322                phba->sli4_hba.pc_sli4_params.rqv);
12323         if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
12324                 bf_set(lpfc_rq_context_rqe_count_1,
12325                        &rq_create->u.request.context, hrq->entry_count);
12326                 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
12327                 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
12328                        LPFC_RQE_SIZE_8);
12329                 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
12330                        (PAGE_SIZE/SLI4_PAGE_SIZE));
12331         } else {
12332                 switch (drq->entry_count) {
12333                 default:
12334                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12335                                         "2536 Unsupported RQ count. (%d)\n",
12336                                         drq->entry_count);
                        if (drq->entry_count < 512) {
                                status = -EINVAL;
                                goto out;
                        }
                        /* otherwise default to smallest count (fall through) */
12340                 case 512:
12341                         bf_set(lpfc_rq_context_rqe_count,
12342                                &rq_create->u.request.context,
12343                                LPFC_RQ_RING_SIZE_512);
12344                         break;
12345                 case 1024:
12346                         bf_set(lpfc_rq_context_rqe_count,
12347                                &rq_create->u.request.context,
12348                                LPFC_RQ_RING_SIZE_1024);
12349                         break;
12350                 case 2048:
12351                         bf_set(lpfc_rq_context_rqe_count,
12352                                &rq_create->u.request.context,
12353                                LPFC_RQ_RING_SIZE_2048);
12354                         break;
12355                 case 4096:
12356                         bf_set(lpfc_rq_context_rqe_count,
12357                                &rq_create->u.request.context,
12358                                LPFC_RQ_RING_SIZE_4096);
12359                         break;
12360                 }
12361                 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
12362                        LPFC_DATA_BUF_SIZE);
12363         }
12364         bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
12365                cq->queue_id);
12366         bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
12367                drq->page_count);
12368         list_for_each_entry(dmabuf, &drq->page_list, list) {
12369                 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12370                                         putPaddrLow(dmabuf->phys);
12371                 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12372                                         putPaddrHigh(dmabuf->phys);
12373         }
12374         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12375         /* The IOCTL status is embedded in the mailbox subheader. */
12376         shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
12377         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12378         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12379         if (shdr_status || shdr_add_status || rc) {
12380                 status = -ENXIO;
12381                 goto out;
12382         }
12383         drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
12384         if (drq->queue_id == 0xFFFF) {
12385                 status = -ENXIO;
12386                 goto out;
12387         }
12388         drq->type = LPFC_DRQ;
12389         drq->assoc_qid = cq->queue_id;
12390         drq->subtype = subtype;
12391         drq->host_index = 0;
12392         drq->hba_index = 0;
12393
12394         /* link the header and data RQs onto the parent cq child list */
12395         list_add_tail(&hrq->list, &cq->child_list);
12396         list_add_tail(&drq->list, &cq->child_list);
12397
12398 out:
12399         mempool_free(mbox, phba->mbox_mem_pool);
12400         return status;
12401 }
12402
12403 /**
12404  * lpfc_eq_destroy - Destroy an event Queue on the HBA
12405  * @eq: The queue structure associated with the queue to destroy.
12406  *
12407  * This function destroys a queue, as detailed in @eq, by sending a mailbox
12408  * command specific to the type of queue to the HBA.
12409  *
12410  * The @eq struct is used to get the queue ID of the queue to destroy.
12411  *
12412  * On success this function will return a zero. If the queue destroy mailbox
12413  * command fails this function will return -ENXIO.
12414  **/
12415 uint32_t
12416 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
12417 {
12418         LPFC_MBOXQ_t *mbox;
12419         int rc, length, status = 0;
12420         uint32_t shdr_status, shdr_add_status;
12421         union lpfc_sli4_cfg_shdr *shdr;
12422
12423         if (!eq)
12424                 return -ENODEV;
12425         mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
12426         if (!mbox)
12427                 return -ENOMEM;
12428         length = (sizeof(struct lpfc_mbx_eq_destroy) -
12429                   sizeof(struct lpfc_sli4_cfg_mhdr));
12430         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12431                          LPFC_MBOX_OPCODE_EQ_DESTROY,
12432                          length, LPFC_SLI4_MBX_EMBED);
12433         bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
12434                eq->queue_id);
12435         mbox->vport = eq->phba->pport;
12436         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12437
12438         rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
12439         /* The IOCTL status is embedded in the mailbox subheader. */
12440         shdr = (union lpfc_sli4_cfg_shdr *)
12441                 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
12442         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12443         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12444         if (shdr_status || shdr_add_status || rc) {
12445                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12446                                 "2505 EQ_DESTROY mailbox failed with "
12447                                 "status x%x add_status x%x, mbx status x%x\n",
12448                                 shdr_status, shdr_add_status, rc);
12449                 status = -ENXIO;
12450         }
12451
12452         /* Remove eq from any list */
12453         list_del_init(&eq->list);
12454         mempool_free(mbox, eq->phba->mbox_mem_pool);
12455         return status;
12456 }
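/*
 * Editor's note: an illustrative teardown sketch, not part of the driver.
 * The destroy routines in this block are expected to run in child-before-
 * parent order, since WQs and RQ pairs reference their completion queue
 * and CQs reference their event queue.  The queue pointers below (some_wq,
 * some_cq, some_eq) are hypothetical placeholders for caller-owned queues:
 *
 *	lpfc_wq_destroy(phba, some_wq);		WQs (and RQ pairs) first
 *	lpfc_cq_destroy(phba, some_cq);		then the parent CQ
 *	lpfc_eq_destroy(phba, some_eq);		finally the parent EQ
 */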
12457
12458 /**
12459  * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
12460  * @cq: The queue structure associated with the queue to destroy.
12461  *
12462  * This function destroys a queue, as detailed in @cq, by sending a mailbox
12463  * command specific to the type of queue to the HBA.
12464  *
12465  * The @cq struct is used to get the queue ID of the queue to destroy.
12466  *
12467  * On success this function will return a zero. If the queue destroy mailbox
12468  * command fails this function will return -ENXIO.
12469  **/
12470 uint32_t
12471 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
12472 {
12473         LPFC_MBOXQ_t *mbox;
12474         int rc, length, status = 0;
12475         uint32_t shdr_status, shdr_add_status;
12476         union lpfc_sli4_cfg_shdr *shdr;
12477
12478         if (!cq)
12479                 return -ENODEV;
12480         mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
12481         if (!mbox)
12482                 return -ENOMEM;
12483         length = (sizeof(struct lpfc_mbx_cq_destroy) -
12484                   sizeof(struct lpfc_sli4_cfg_mhdr));
12485         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12486                          LPFC_MBOX_OPCODE_CQ_DESTROY,
12487                          length, LPFC_SLI4_MBX_EMBED);
12488         bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
12489                cq->queue_id);
12490         mbox->vport = cq->phba->pport;
12491         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12492         rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
12493         /* The IOCTL status is embedded in the mailbox subheader. */
12494         shdr = (union lpfc_sli4_cfg_shdr *)
12495                 &mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
12496         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12497         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12498         if (shdr_status || shdr_add_status || rc) {
12499                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12500                                 "2506 CQ_DESTROY mailbox failed with "
12501                                 "status x%x add_status x%x, mbx status x%x\n",
12502                                 shdr_status, shdr_add_status, rc);
12503                 status = -ENXIO;
12504         }
12505         /* Remove cq from any list */
12506         list_del_init(&cq->list);
12507         mempool_free(mbox, cq->phba->mbox_mem_pool);
12508         return status;
12509 }
12510
12511 /**
12512  * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
12513  * @mq: The queue structure associated with the queue to destroy.
12514  *
12515  * This function destroys a queue, as detailed in @mq, by sending a mailbox
12516  * command specific to the type of queue to the HBA.
12517  *
12518  * The @mq struct is used to get the queue ID of the queue to destroy.
12519  *
12520  * On success this function will return a zero. If the queue destroy mailbox
12521  * command fails this function will return -ENXIO.
12522  **/
12523 uint32_t
12524 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
12525 {
12526         LPFC_MBOXQ_t *mbox;
12527         int rc, length, status = 0;
12528         uint32_t shdr_status, shdr_add_status;
12529         union lpfc_sli4_cfg_shdr *shdr;
12530
12531         if (!mq)
12532                 return -ENODEV;
12533         mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
12534         if (!mbox)
12535                 return -ENOMEM;
12536         length = (sizeof(struct lpfc_mbx_mq_destroy) -
12537                   sizeof(struct lpfc_sli4_cfg_mhdr));
12538         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12539                          LPFC_MBOX_OPCODE_MQ_DESTROY,
12540                          length, LPFC_SLI4_MBX_EMBED);
12541         bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
12542                mq->queue_id);
12543         mbox->vport = mq->phba->pport;
12544         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12545         rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
12546         /* The IOCTL status is embedded in the mailbox subheader. */
12547         shdr = (union lpfc_sli4_cfg_shdr *)
12548                 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
12549         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12550         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12551         if (shdr_status || shdr_add_status || rc) {
12552                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12553                                 "2507 MQ_DESTROY mailbox failed with "
12554                                 "status x%x add_status x%x, mbx status x%x\n",
12555                                 shdr_status, shdr_add_status, rc);
12556                 status = -ENXIO;
12557         }
12558         /* Remove mq from any list */
12559         list_del_init(&mq->list);
12560         mempool_free(mbox, mq->phba->mbox_mem_pool);
12561         return status;
12562 }
12563
12564 /**
12565  * lpfc_wq_destroy - Destroy a Work Queue on the HBA
12566  * @wq: The queue structure associated with the queue to destroy.
12567  *
12568  * This function destroys a queue, as detailed in @wq, by sending a mailbox
12569  * command specific to the type of queue to the HBA.
12570  *
12571  * The @wq struct is used to get the queue ID of the queue to destroy.
12572  *
12573  * On success this function will return a zero. If the queue destroy mailbox
12574  * command fails this function will return -ENXIO.
12575  **/
12576 uint32_t
12577 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
12578 {
12579         LPFC_MBOXQ_t *mbox;
12580         int rc, length, status = 0;
12581         uint32_t shdr_status, shdr_add_status;
12582         union lpfc_sli4_cfg_shdr *shdr;
12583
12584         if (!wq)
12585                 return -ENODEV;
12586         mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
12587         if (!mbox)
12588                 return -ENOMEM;
12589         length = (sizeof(struct lpfc_mbx_wq_destroy) -
12590                   sizeof(struct lpfc_sli4_cfg_mhdr));
12591         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12592                          LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
12593                          length, LPFC_SLI4_MBX_EMBED);
12594         bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
12595                wq->queue_id);
12596         mbox->vport = wq->phba->pport;
12597         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12598         rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
12599         shdr = (union lpfc_sli4_cfg_shdr *)
12600                 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
12601         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12602         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12603         if (shdr_status || shdr_add_status || rc) {
12604                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12605                                 "2508 WQ_DESTROY mailbox failed with "
12606                                 "status x%x add_status x%x, mbx status x%x\n",
12607                                 shdr_status, shdr_add_status, rc);
12608                 status = -ENXIO;
12609         }
12610         /* Remove wq from any list */
12611         list_del_init(&wq->list);
12612         mempool_free(mbox, wq->phba->mbox_mem_pool);
12613         return status;
12614 }
12615
12616 /**
12617  * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
12618  * @hrq: The queue structure associated with the header receive queue.
12619  * @drq: The queue structure associated with the data receive queue.
12620  *
12621  * This function destroys the receive queues detailed in @hrq and @drq by
12622  * sending queue-destroy mailbox commands to the HBA.  The @hrq and @drq
12623  * structs supply the queue IDs of the queues to destroy.
12624  *
12625  * On success this function will return a zero. If a queue destroy mailbox
12626  * command fails this function will return -ENXIO.
12627  **/
12628 uint32_t
12629 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
12630                 struct lpfc_queue *drq)
12631 {
12632         LPFC_MBOXQ_t *mbox;
12633         int rc, length, status = 0;
12634         uint32_t shdr_status, shdr_add_status;
12635         union lpfc_sli4_cfg_shdr *shdr;
12636
12637         if (!hrq || !drq)
12638                 return -ENODEV;
12639         mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
12640         if (!mbox)
12641                 return -ENOMEM;
12642         length = (sizeof(struct lpfc_mbx_rq_destroy) -
12643                   sizeof(struct lpfc_sli4_cfg_mhdr));
12644         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12645                          LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
12646                          length, LPFC_SLI4_MBX_EMBED);
12647         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
12648                hrq->queue_id);
12649         mbox->vport = hrq->phba->pport;
12650         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12651         rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
12652         /* The IOCTL status is embedded in the mailbox subheader. */
12653         shdr = (union lpfc_sli4_cfg_shdr *)
12654                 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
12655         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12656         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12657         if (shdr_status || shdr_add_status || rc) {
12658                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12659                                 "2509 RQ_DESTROY mailbox failed with "
12660                                 "status x%x add_status x%x, mbx status x%x\n",
12661                                 shdr_status, shdr_add_status, rc);
12662                 if (rc != MBX_TIMEOUT)
12663                         mempool_free(mbox, hrq->phba->mbox_mem_pool);
12664                 return -ENXIO;
12665         }
12666         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
12667                drq->queue_id);
12668         rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
12669         shdr = (union lpfc_sli4_cfg_shdr *)
12670                 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
12671         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12672         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12673         if (shdr_status || shdr_add_status || rc) {
12674                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12675                                 "2510 RQ_DESTROY mailbox failed with "
12676                                 "status x%x add_status x%x, mbx status x%x\n",
12677                                 shdr_status, shdr_add_status, rc);
12678                 status = -ENXIO;
12679         }
12680         list_del_init(&hrq->list);
12681         list_del_init(&drq->list);
12682         mempool_free(mbox, hrq->phba->mbox_mem_pool);
12683         return status;
12684 }
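/*
 * Editor's note: a usage sketch, not part of the driver.  Header and data
 * receive queues are created and destroyed as a pair on one parent CQ; the
 * hrq/drq/cq names below are hypothetical placeholders for caller-owned
 * queues, and LPFC_USOL is assumed to be the unsolicited-receive subtype:
 *
 *	if (lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL))
 *		... handle receive queue create failure ...
 *	...
 *	if (lpfc_rq_destroy(phba, hrq, drq))
 *		... a queue destroy mailbox failed ...
 */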
12685
12686 /**
12687  * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
12688  * @phba: pointer to lpfc hba data structure.
12689  * @pdma_phys_addr0: Physical address of the 1st SGL page.
12690  * @pdma_phys_addr1: Physical address of the 2nd SGL page.
12691  * @xritag: the xritag that ties this io to the SGL pages.
12692  *
12693  * This routine will post the sgl pages for the IO that has the xritag
12694  * that is in the iocbq structure. The xritag is assigned during iocbq
12695  * creation and persists for as long as the driver is loaded.
12696  * If the caller has fewer than 256 scatter-gather segments to map then
12697  * pdma_phys_addr1 should be 0.
12698  * If the caller needs to map more than 256 scatter-gather segments then
12699  * pdma_phys_addr1 should be a valid physical address.
12700  * Physical addresses for SGLs must be 64-byte aligned.
12701  * If two SGL pages are mapped, the first must have 256 entries; the
12702  * second can have between 1 and 256 entries.
12703  *
12704  * Return codes:
12705  *      0 - Success
12706  *      -ENXIO, -ENOMEM - Failure
12707  **/
12708 int
12709 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
12710                 dma_addr_t pdma_phys_addr0,
12711                 dma_addr_t pdma_phys_addr1,
12712                 uint16_t xritag)
12713 {
12714         struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
12715         LPFC_MBOXQ_t *mbox;
12716         int rc;
12717         uint32_t shdr_status, shdr_add_status;
12718         uint32_t mbox_tmo;
12719         union lpfc_sli4_cfg_shdr *shdr;
12720
12721         if (xritag == NO_XRI) {
12722                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12723                                 "0364 Invalid param: xritag is NO_XRI\n");
12724                 return -EINVAL;
12725         }
12726
12727         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12728         if (!mbox)
12729                 return -ENOMEM;
12730
12731         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12732                         LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
12733                         sizeof(struct lpfc_mbx_post_sgl_pages) -
12734                         sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
12735
12736         post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
12737                                 &mbox->u.mqe.un.post_sgl_pages;
12738         bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
12739         bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
12740
12741         post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
12742                                 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
12743         post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
12744                                 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
12745
12746         post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
12747                                 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
12748         post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
12749                                 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
12750         if (!phba->sli4_hba.intr_enable)
12751                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12752         else {
12753                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
12754                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
12755         }
12756         /* The IOCTL status is embedded in the mailbox subheader. */
12757         shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
12758         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12759         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12760         if (rc != MBX_TIMEOUT)
12761                 mempool_free(mbox, phba->mbox_mem_pool);
12762         if (shdr_status || shdr_add_status || rc) {
12763                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12764                                 "2511 POST_SGL mailbox failed with "
12765                                 "status x%x add_status x%x, mbx status x%x\n",
12766                                 shdr_status, shdr_add_status, rc);
12767                 rc = -ENXIO;
12768         }
12769         return rc;
12770 }
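/*
 * Editor's note: a usage sketch, not part of the driver.  Posting the SGL
 * pages for one XRI; a one-page SGL passes 0 for the second address, and
 * both addresses must be 64-byte aligned.  'sgl_phys' is a hypothetical
 * placeholder for the caller's DMA-mapped SGL page:
 *
 *	rc = lpfc_sli4_post_sgl(phba, sgl_phys, 0, iocbq->sli4_xritag);
 *	if (rc)
 *		... POST_SGL mailbox failed, SGL not registered ...
 */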
12771
12772 /**
12773  * lpfc_sli4_alloc_xri - Get an available xri in the device's range
12774  * @phba: pointer to lpfc hba data structure.
12775  *
12776  * This routine is invoked to allocate the next available logical xri
12777  * from the driver's xri bitmask, consistent with the SLI-4 interface
12778  * spec.  Because the index is logical, the search starts at 0 on each
12779  * call.
12780  *
12781  * Returns
12782  *      An available xri in the range 0 <= xri < max_xri if successful,
12783  *      NO_XRI if no xris are available.
12784  **/
12785 uint16_t
12786 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
12787 {
12788         unsigned long xri;
12789
12790         /*
12791          * Fetch the next logical xri.  Because this index is logical,
12792          * the driver starts at 0 each time.
12793          */
12794         spin_lock_irq(&phba->hbalock);
12795         xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
12796                                  phba->sli4_hba.max_cfg_param.max_xri, 0);
12797         if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
12798                 spin_unlock_irq(&phba->hbalock);
12799                 return NO_XRI;
12800         } else {
12801                 set_bit(xri, phba->sli4_hba.xri_bmask);
12802                 phba->sli4_hba.max_cfg_param.xri_used++;
12803                 phba->sli4_hba.xri_count++;
12804         }
12805
12806         spin_unlock_irq(&phba->hbalock);
12807         return xri;
12808 }
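/*
 * Editor's note: a usage sketch, not part of the driver.  Allocation and
 * release of a logical xri are paired; lpfc_sli4_free_xri() below takes
 * the hbalock itself, while __lpfc_sli4_free_xri() expects the caller to
 * hold it:
 *
 *	uint16_t xri = lpfc_sli4_alloc_xri(phba);
 *
 *	if (xri == NO_XRI)
 *		... xri resources exhausted ...
 *	...
 *	lpfc_sli4_free_xri(phba, xri);
 */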
12809
12810 /**
12811  * __lpfc_sli4_free_xri - Release an xri for reuse.
12812  * @phba: pointer to lpfc hba data structure.
12813  *
12814  * This routine is invoked to release an xri to the pool of available
12815  * xris maintained by the driver.  The caller must hold the hbalock.
12816  **/
12817 void
12818 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
12819 {
12820         if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
12821                 phba->sli4_hba.xri_count--;
12822                 phba->sli4_hba.max_cfg_param.xri_used--;
12823         }
12824 }
12825
12826 /**
12827  * lpfc_sli4_free_xri - Release an xri for reuse.
12828  * @phba: pointer to lpfc hba data structure.
12829  *
12830  * This routine is invoked to release an xri to the pool of
12831  * available xris maintained by the driver.
12832  **/
12833 void
12834 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
12835 {
12836         spin_lock_irq(&phba->hbalock);
12837         __lpfc_sli4_free_xri(phba, xri);
12838         spin_unlock_irq(&phba->hbalock);
12839 }
12840
12841 /**
12842  * lpfc_sli4_next_xritag - Get an xritag for the io
12843  * @phba: Pointer to HBA context object.
12844  *
12845  * This function gets an xritag for the iocb. If there is no unused xritag
12846  * it will return NO_XRI (0xffff).
12847  * The function returns the allocated xritag if successful, else returns
12848  * NO_XRI; NO_XRI is never a valid xritag.
12849  * The caller is not required to hold any lock.
12850  **/
12851 uint16_t
12852 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
12853 {
12854         uint16_t xri_index;
12855
12856         xri_index = lpfc_sli4_alloc_xri(phba);
12857         if (xri_index != NO_XRI)
12858                 return xri_index;
12859
12860         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12861                         "2004 Failed to allocate XRI. Last XRITAG is %d"
12862                         " Max XRI is %d, Used XRI is %d\n",
12863                         xri_index,
12864                         phba->sli4_hba.max_cfg_param.max_xri,
12865                         phba->sli4_hba.max_cfg_param.xri_used);
12866         return NO_XRI;
12867 }
12868
12869 /**
12870  * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
12871  * @phba: pointer to lpfc hba data structure.
12872  *
12873  * This routine is invoked to post a block of driver's sgl pages to the
12874  * HBA using non-embedded mailbox command. No Lock is held. This routine
12875  * is only called when the driver is loading and after all IO has been
12876  * stopped.
12877  **/
12878 int
12879 lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
12880 {
12881         struct lpfc_sglq *sglq_entry;
12882         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
12883         struct sgl_page_pairs *sgl_pg_pairs;
12884         void *viraddr;
12885         LPFC_MBOXQ_t *mbox;
12886         uint32_t reqlen, alloclen, pg_pairs;
12887         uint32_t mbox_tmo;
12888         uint16_t xritag_start = 0, lxri = 0;
12889         int els_xri_cnt, rc = 0;
12890         uint32_t shdr_status, shdr_add_status;
12891         union lpfc_sli4_cfg_shdr *shdr;
12892
12893         /* The number of sgls to be posted */
12894         els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
12895
12896         reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
12897                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
12898         if (reqlen > SLI4_PAGE_SIZE) {
12899                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12900                                 "2559 Block sgl registration required DMA "
12901                                 "size (%d) greater than a page\n", reqlen);
12902                 return -ENOMEM;
12903         }
12904         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12905         if (!mbox)
12906                 return -ENOMEM;
12907
12908         /* Allocate DMA memory and set up the non-embedded mailbox command */
12909         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12910                          LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
12911                          LPFC_SLI4_MBX_NEMBED);
12912
12913         if (alloclen < reqlen) {
12914                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12915                                 "0285 Allocated DMA memory size (%d) is "
12916                                 "less than the requested DMA memory "
12917                                 "size (%d)\n", alloclen, reqlen);
12918                 lpfc_sli4_mbox_cmd_free(phba, mbox);
12919                 return -ENOMEM;
12920         }
12921         /* Set up the SGL pages in the non-embedded DMA pages */
12922         viraddr = mbox->sge_array->addr[0];
12923         sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
12924         sgl_pg_pairs = &sgl->sgl_pg_pairs;
12925
12926         for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
12927                 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
12928
12929                 /*
12930                  * Assign the sglq a physical xri only if the driver has not
12931                  * initialized those resources.  A port reset only needs
12932                  * the sglq's posted.
12933                  */
12934                 if (bf_get(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
12935                     LPFC_XRI_RSRC_RDY) {
12936                         lxri = lpfc_sli4_next_xritag(phba);
12937                         if (lxri == NO_XRI) {
12938                                 lpfc_sli4_mbox_cmd_free(phba, mbox);
12939                                 return -ENOMEM;
12940                         }
12941                         sglq_entry->sli4_lxritag = lxri;
12942                         sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
12943                 }
12944
12945                 /* Set up the sge entry */
12946                 sgl_pg_pairs->sgl_pg0_addr_lo =
12947                                 cpu_to_le32(putPaddrLow(sglq_entry->phys));
12948                 sgl_pg_pairs->sgl_pg0_addr_hi =
12949                                 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
12950                 sgl_pg_pairs->sgl_pg1_addr_lo =
12951                                 cpu_to_le32(putPaddrLow(0));
12952                 sgl_pg_pairs->sgl_pg1_addr_hi =
12953                                 cpu_to_le32(putPaddrHigh(0));
12954
12955                 /* Keep the first xritag on the list */
12956                 if (pg_pairs == 0)
12957                         xritag_start = sglq_entry->sli4_xritag;
12958                 sgl_pg_pairs++;
12959         }
12960
12961         /* Complete initialization and perform endian conversion. */
12962         bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
12963         bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
12964         sgl->word0 = cpu_to_le32(sgl->word0);
12965         if (!phba->sli4_hba.intr_enable)
12966                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12967         else {
12968                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
12969                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
12970         }
12971         shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
12972         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12973         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12974         if (rc != MBX_TIMEOUT)
12975                 lpfc_sli4_mbox_cmd_free(phba, mbox);
12976         if (shdr_status || shdr_add_status || rc) {
12977                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12978                                 "2513 POST_SGL_BLOCK mailbox command failed "
12979                                 "status x%x add_status x%x mbx status x%x\n",
12980                                 shdr_status, shdr_add_status, rc);
12981                 rc = -ENXIO;
12982         }
12983
12984         if (rc == 0)
12985                 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
12986                        LPFC_XRI_RSRC_RDY);
12987         return rc;
12988 }
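/*
 * Editor's note: the issue-and-reap idiom used above recurs throughout
 * this file.  Before interrupts are enabled the mailbox is polled;
 * afterwards the driver sleeps on completion, and the command is freed
 * only when it did not time out (on timeout the completion handler still
 * owns the memory):
 *
 *	if (!phba->sli4_hba.intr_enable)
 *		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *	else {
 *		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
 *		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
 *	}
 *	if (rc != MBX_TIMEOUT)
 *		lpfc_sli4_mbox_cmd_free(phba, mbox);
 */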
12989
12990 /**
12991  * lpfc_sli4_post_els_sgl_list_ext - post a block of ELS sgls to the port.
12992  * @phba: pointer to lpfc hba data structure.
12993  *
12994  * This routine is invoked to post a block of driver's sgl pages to the
12995  * HBA using non-embedded mailbox command. No Lock is held. This routine
12996  * is only called when the driver is loading and after all IO has been
12997  * stopped.
12998  **/
12999 int
13000 lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
13001 {
13002         struct lpfc_sglq *sglq_entry;
13003         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
13004         struct sgl_page_pairs *sgl_pg_pairs;
13005         void *viraddr;
13006         LPFC_MBOXQ_t *mbox;
13007         uint32_t reqlen, alloclen, index;
13008         uint32_t mbox_tmo;
13009         uint16_t rsrc_start, rsrc_size, els_xri_cnt;
13010         uint16_t xritag_start = 0, lxri = 0;
13011         struct lpfc_rsrc_blks *rsrc_blk;
13012         int cnt, ttl_cnt, rc = 0;
13013         int loop_cnt;
13014         uint32_t shdr_status, shdr_add_status;
13015         union lpfc_sli4_cfg_shdr *shdr;
13016
13017         /* The number of sgls to be posted */
13018         els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
13019
13020         reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
13021                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
13022         if (reqlen > SLI4_PAGE_SIZE) {
13023                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13024                                 "2989 Block sgl registration required DMA "
13025                                 "size (%d) greater than a page\n", reqlen);
13026                 return -ENOMEM;
13027         }
13028
13029         cnt = 0;
13030         ttl_cnt = 0;
13031         list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
13032                             list) {
13033                 rsrc_start = rsrc_blk->rsrc_start;
13034                 rsrc_size = rsrc_blk->rsrc_size;
13035
13036                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13037                                 "3014 Working ELS Extent start %d, cnt %d\n",
13038                                 rsrc_start, rsrc_size);
13039
13040                 loop_cnt = min(els_xri_cnt, rsrc_size);
13041                 if (ttl_cnt + loop_cnt >= els_xri_cnt) {
13042                         loop_cnt = els_xri_cnt - ttl_cnt;
13043                         ttl_cnt = els_xri_cnt;
13044                 }
13045
13046                 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13047                 if (!mbox)
13048                         return -ENOMEM;
13049                 /*
13050                  * Allocate DMA memory and set up the non-embedded mailbox
13051                  * command.
13052                  */
13053                 alloclen = lpfc_sli4_config(phba, mbox,
13054                                         LPFC_MBOX_SUBSYSTEM_FCOE,
13055                                         LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
13056                                         reqlen, LPFC_SLI4_MBX_NEMBED);
13057                 if (alloclen < reqlen) {
13058                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13059                                         "2987 Allocated DMA memory size (%d) "
13060                                         "is less than the requested DMA memory "
13061                                         "size (%d)\n", alloclen, reqlen);
13062                         lpfc_sli4_mbox_cmd_free(phba, mbox);
13063                         return -ENOMEM;
13064                 }
13065
13066                 /* Set up the SGL pages in the non-embedded DMA pages */
13067                 viraddr = mbox->sge_array->addr[0];
13068                 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
13069                 sgl_pg_pairs = &sgl->sgl_pg_pairs;
13070
13071                 /*
13072                  * The starting resource may not begin at zero. Control
13073                  * the loop variables via the block resource parameters,
13074                  * but handle the sge pointers with a zero-based index
13075                  * that doesn't get reset per loop pass.
13076                  */
13077                 for (index = rsrc_start;
13078                      index < rsrc_start + loop_cnt;
13079                      index++) {
13080                         sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[cnt];
13081
13082                         /*
13083                          * Assign the sglq a physical xri only if the driver
13084                          * has not initialized those resources.  A port reset
13085                          * only needs the sglq's posted.
13086                          */
13087                         if (bf_get(lpfc_xri_rsrc_rdy,
13088                                    &phba->sli4_hba.sli4_flags) !=
13089                                    LPFC_XRI_RSRC_RDY) {
13090                                 lxri = lpfc_sli4_next_xritag(phba);
13091                                 if (lxri == NO_XRI) {
13092                                         lpfc_sli4_mbox_cmd_free(phba, mbox);
13093                                         rc = -ENOMEM;
13094                                         goto err_exit;
13095                                 }
13096                                 sglq_entry->sli4_lxritag = lxri;
13097                                 sglq_entry->sli4_xritag =
13098                                                 phba->sli4_hba.xri_ids[lxri];
13099                         }
13100
13101                         /* Set up the sge entry */
13102                         sgl_pg_pairs->sgl_pg0_addr_lo =
13103                                 cpu_to_le32(putPaddrLow(sglq_entry->phys));
13104                         sgl_pg_pairs->sgl_pg0_addr_hi =
13105                                 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
13106                         sgl_pg_pairs->sgl_pg1_addr_lo =
13107                                 cpu_to_le32(putPaddrLow(0));
13108                         sgl_pg_pairs->sgl_pg1_addr_hi =
13109                                 cpu_to_le32(putPaddrHigh(0));
13110
13111                         /* Track the starting physical XRI for the mailbox. */
13112                         if (index == rsrc_start)
13113                                 xritag_start = sglq_entry->sli4_xritag;
13114                         sgl_pg_pairs++;
13115                         cnt++;
13116                 }
13117
13118                 /* Complete initialization and perform endian conversion. */
13119                 rsrc_blk->rsrc_used += loop_cnt;
13120                 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
13121                 bf_set(lpfc_post_sgl_pages_xricnt, sgl, loop_cnt);
13122                 sgl->word0 = cpu_to_le32(sgl->word0);
13123
13124                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13125                                 "3015 Post ELS Extent SGL, start %d, "
13126                                 "cnt %d, used %d\n",
13127                                 xritag_start, loop_cnt, rsrc_blk->rsrc_used);
13128                 if (!phba->sli4_hba.intr_enable)
13129                         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13130                 else {
13131                         mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
13132                         rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13133                 }
13134                 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
13135                 shdr_status = bf_get(lpfc_mbox_hdr_status,
13136                                      &shdr->response);
13137                 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
13138                                          &shdr->response);
13139                 if (rc != MBX_TIMEOUT)
13140                         lpfc_sli4_mbox_cmd_free(phba, mbox);
13141                 if (shdr_status || shdr_add_status || rc) {
13142                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13143                                         "2988 POST_SGL_BLOCK mailbox "
13144                                         "command failed status x%x "
13145                                         "add_status x%x mbx status x%x\n",
13146                                         shdr_status, shdr_add_status, rc);
13147                         rc = -ENXIO;
13148                         goto err_exit;
13149                 }
13150                 if (ttl_cnt >= els_xri_cnt)
13151                         break;
13152         }
13153
13154  err_exit:
13155         if (rc == 0)
13156                 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
13157                        LPFC_XRI_RSRC_RDY);
13158         return rc;
13159 }
13160
13161 /**
13162  * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
13163  * @phba: pointer to lpfc hba data structure.
13164  * @sblist: pointer to scsi buffer list.
13165  * @cnt: number of scsi buffers on the list.
13166  *
13167  * This routine is invoked to post a block of @count scsi sgl pages from a
13168  * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
13169  * No Lock is held.
13170  *
13171  **/
13172 int
13173 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
13174                               int cnt)
13175 {
13176         struct lpfc_scsi_buf *psb;
13177         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
13178         struct sgl_page_pairs *sgl_pg_pairs;
13179         void *viraddr;
13180         LPFC_MBOXQ_t *mbox;
13181         uint32_t reqlen, alloclen, pg_pairs;
13182         uint32_t mbox_tmo;
13183         uint16_t xritag_start = 0;
13184         int rc = 0;
13185         uint32_t shdr_status, shdr_add_status;
13186         dma_addr_t pdma_phys_bpl1;
13187         union lpfc_sli4_cfg_shdr *shdr;
13188
13189         /* Calculate the requested length of the dma memory */
13190         reqlen = cnt * sizeof(struct sgl_page_pairs) +
13191                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
13192         if (reqlen > SLI4_PAGE_SIZE) {
13193                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13194                                 "0217 Block sgl registration required DMA "
13195                                 "size (%d) greater than a page\n", reqlen);
13196                 return -ENOMEM;
13197         }
13198         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13199         if (!mbox) {
13200                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13201                                 "0283 Failed to allocate mbox cmd memory\n");
13202                 return -ENOMEM;
13203         }
13204
13205         /* Allocate DMA memory and set up the non-embedded mailbox command */
13206         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13207                                 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
13208                                 LPFC_SLI4_MBX_NEMBED);
13209
13210         if (alloclen < reqlen) {
13211                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13212                                 "2561 Allocated DMA memory size (%d) is "
13213                                 "less than the requested DMA memory "
13214                                 "size (%d)\n", alloclen, reqlen);
13215                 lpfc_sli4_mbox_cmd_free(phba, mbox);
13216                 return -ENOMEM;
13217         }
13218
13219         /* Get the first SGE entry from the non-embedded DMA memory */
13220         viraddr = mbox->sge_array->addr[0];
13221
13222         /* Set up the SGL pages in the non-embedded DMA pages */
13223         sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
13224         sgl_pg_pairs = &sgl->sgl_pg_pairs;
13225
13226         pg_pairs = 0;
13227         list_for_each_entry(psb, sblist, list) {
13228                 /* Set up the sge entry */
13229                 sgl_pg_pairs->sgl_pg0_addr_lo =
13230                         cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
13231                 sgl_pg_pairs->sgl_pg0_addr_hi =
13232                         cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
13233                 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
13234                         pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
13235                 else
13236                         pdma_phys_bpl1 = 0;
13237                 sgl_pg_pairs->sgl_pg1_addr_lo =
13238                         cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
13239                 sgl_pg_pairs->sgl_pg1_addr_hi =
13240                         cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
13241                 /* Keep the first xritag on the list */
13242                 if (pg_pairs == 0)
13243                         xritag_start = psb->cur_iocbq.sli4_xritag;
13244                 sgl_pg_pairs++;
13245                 pg_pairs++;
13246         }
13247         bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
13248         bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
13249         /* Perform endian conversion if necessary */
13250         sgl->word0 = cpu_to_le32(sgl->word0);
13251
13252         if (!phba->sli4_hba.intr_enable)
13253                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13254         else {
13255                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
13256                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13257         }
13258         shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
13259         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13260         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13261         if (rc != MBX_TIMEOUT)
13262                 lpfc_sli4_mbox_cmd_free(phba, mbox);
13263         if (shdr_status || shdr_add_status || rc) {
13264                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13265                                 "2564 POST_SGL_BLOCK mailbox command failed "
13266                                 "status x%x add_status x%x mbx status x%x\n",
13267                                 shdr_status, shdr_add_status, rc);
13268                 rc = -ENXIO;
13269         }
13270         return rc;
13271 }
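/*
 * Editor's note: a usage sketch, not part of the driver.  Callers gather
 * DMA-mapped SCSI buffers on a local list and post their sgl pages in a
 * single non-embedded mailbox; 'post_sblist' and 'num_posted' are
 * hypothetical placeholders for caller state:
 *
 *	rc = lpfc_sli4_post_scsi_sgl_block(phba, &post_sblist, num_posted);
 *	if (rc)
 *		... sgl post failed, buffers not usable for IO ...
 */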
13272
13273 /**
13274  * lpfc_sli4_post_scsi_sgl_blk_ext - post a block of scsi sgls to the port.
13275  * @phba: pointer to lpfc hba data structure.
13276  * @sblist: pointer to scsi buffer list.
13277  * @cnt: number of scsi buffers on the list.
13278  *
13279  * This routine is invoked to post a block of @count scsi sgl pages from a
13280  * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
13281  * No Lock is held.
13282  *
13283  **/
13284 int
13285 lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist,
13286                                 int cnt)
13287 {
13288         struct lpfc_scsi_buf *psb = NULL;
13289         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
13290         struct sgl_page_pairs *sgl_pg_pairs;
13291         void *viraddr;
13292         LPFC_MBOXQ_t *mbox;
13293         uint32_t reqlen, alloclen, pg_pairs;
13294         uint32_t mbox_tmo;
13295         uint16_t xri_start = 0, scsi_xri_start;
13296         uint16_t rsrc_range;
13297         int rc = 0, avail_cnt;
13298         uint32_t shdr_status, shdr_add_status;
13299         dma_addr_t pdma_phys_bpl1;
13300         union lpfc_sli4_cfg_shdr *shdr;
13301         struct lpfc_rsrc_blks *rsrc_blk;
13302         uint32_t xri_cnt = 0;
13303
13304         /* Calculate the total requested length of the dma memory */
13305         reqlen = cnt * sizeof(struct sgl_page_pairs) +
13306                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
13307         if (reqlen > SLI4_PAGE_SIZE) {
13308                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13309                                 "2932 Block sgl registration required DMA "
13310                                 "size (%d) greater than a page\n", reqlen);
13311                 return -ENOMEM;
13312         }
13313
13314         /*
13315          * The use of extents requires the driver to post the sgls in
13316          * multiple postings, matching each extent's contiguous resources.
13317          */
13318         psb = list_prepare_entry(psb, sblist, list);
13319         scsi_xri_start = phba->sli4_hba.scsi_xri_start;
13320         list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
13321                             list) {
13322                 rsrc_range = rsrc_blk->rsrc_start + rsrc_blk->rsrc_size;
13323                 if (rsrc_range < scsi_xri_start)
13324                         continue;
13325                 else if (rsrc_blk->rsrc_used >= rsrc_blk->rsrc_size)
13326                         continue;
13327                 else
13328                         avail_cnt = rsrc_blk->rsrc_size - rsrc_blk->rsrc_used;
13329
13330                 reqlen = (avail_cnt * sizeof(struct sgl_page_pairs)) +
13331                         sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
13332                 /*
13333                  * Allocate DMA memory and set up the non-embedded mailbox
13334                  * command. The mbox is used to post an SGL page per loop
13335                  * but the DMA memory has a use-once semantic so the mailbox
13336                  * is used and freed per loop pass.
13337                  */
13338                 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13339                 if (!mbox) {
13340                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13341                                         "2933 Failed to allocate mbox cmd "
13342                                         "memory\n");
13343                         return -ENOMEM;
13344                 }
13345                 alloclen = lpfc_sli4_config(phba, mbox,
13346                                         LPFC_MBOX_SUBSYSTEM_FCOE,
13347                                         LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
13348                                         reqlen,
13349                                         LPFC_SLI4_MBX_NEMBED);
13350                 if (alloclen < reqlen) {
13351                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13352                                         "2934 Allocated DMA memory size (%d) "
13353                                         "is less than the requested DMA memory "
13354                                         "size (%d)\n", alloclen, reqlen);
13355                         lpfc_sli4_mbox_cmd_free(phba, mbox);
13356                         return -ENOMEM;
13357                 }
13358
13359                 /* Get the first SGE entry from the non-embedded DMA memory */
13360                 viraddr = mbox->sge_array->addr[0];
13361
13362                 /* Set up the SGL pages in the non-embedded DMA pages */
13363                 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
13364                 sgl_pg_pairs = &sgl->sgl_pg_pairs;
13365
13366                 /* pg_pairs tracks posted SGEs per loop iteration. */
13367                 pg_pairs = 0;
13368                 list_for_each_entry_continue(psb, sblist, list) {
13369                         /* Set up the sge entry */
13370                         sgl_pg_pairs->sgl_pg0_addr_lo =
13371                                 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
13372                         sgl_pg_pairs->sgl_pg0_addr_hi =
13373                                 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
13374                         if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
13375                                 pdma_phys_bpl1 = psb->dma_phys_bpl +
13376                                         SGL_PAGE_SIZE;
13377                         else
13378                                 pdma_phys_bpl1 = 0;
13379                         sgl_pg_pairs->sgl_pg1_addr_lo =
13380                                 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
13381                         sgl_pg_pairs->sgl_pg1_addr_hi =
13382                                 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
13383                         /* Keep the first xri for this extent. */
13384                         if (pg_pairs == 0)
13385                                 xri_start = psb->cur_iocbq.sli4_xritag;
13386                         sgl_pg_pairs++;
13387                         pg_pairs++;
13388                         xri_cnt++;
13389
13390                         /*
13391                          * Track two exit conditions - the loop has constructed
13392                          * all of the caller's SGE pairs or all available
13393                          * resource IDs in this extent are consumed.
13394                          */
13395                         if ((xri_cnt == cnt) || (pg_pairs >= avail_cnt))
13396                                 break;
13397                 }
13398                 rsrc_blk->rsrc_used += pg_pairs;
13399                 bf_set(lpfc_post_sgl_pages_xri, sgl, xri_start);
13400                 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
13401
13402                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13403                                 "3016 Post SCSI Extent SGL, start %d, cnt %d "
13404                                 "blk use %d\n",
13405                                 xri_start, pg_pairs, rsrc_blk->rsrc_used);
13406                 /* Perform endian conversion if necessary */
13407                 sgl->word0 = cpu_to_le32(sgl->word0);
13408                 if (!phba->sli4_hba.intr_enable)
13409                         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13410                 else {
13411                         mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
13412                         rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13413                 }
13414                 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
13415                 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13416                 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
13417                                          &shdr->response);
13418                 if (rc != MBX_TIMEOUT)
13419                         lpfc_sli4_mbox_cmd_free(phba, mbox);
13420                 if (shdr_status || shdr_add_status || rc) {
13421                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13422                                         "2935 POST_SGL_BLOCK mailbox command "
13423                                         "failed status x%x add_status x%x "
13424                                         "mbx status x%x\n",
13425                                         shdr_status, shdr_add_status, rc);
13426                         return -ENXIO;
13427                 }
13428
13429                 /* Post only what is requested. */
13430                 if (xri_cnt >= cnt)
13431                         break;
13432         }
13433         return rc;
13434 }
13435
13436 /**
13437  * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
13438  * @phba: pointer to lpfc_hba struct that the frame was received on
13439  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
13440  *
13441  * This function checks the fields in the @fc_hdr to see if the FC frame is a
13442  * valid type of frame that the LPFC driver will handle. This function will
13443  * return a zero if the frame is a valid frame or a non zero value when the
13444  * frame does not pass the check.
13445  **/
13446 static int
13447 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
13448 {
13449         /* make the name tables static to save stack space */
13450         static char *rctl_names[] = FC_RCTL_NAMES_INIT;
13451         static char *type_names[] = FC_TYPE_NAMES_INIT;
13452         struct fc_vft_header *fc_vft_hdr;
13453         uint32_t *header = (uint32_t *) fc_hdr;
13454
13455         switch (fc_hdr->fh_r_ctl) {
13456         case FC_RCTL_DD_UNCAT:          /* uncategorized information */
13457         case FC_RCTL_DD_SOL_DATA:       /* solicited data */
13458         case FC_RCTL_DD_UNSOL_CTL:      /* unsolicited control */
13459         case FC_RCTL_DD_SOL_CTL:        /* solicited control or reply */
13460         case FC_RCTL_DD_UNSOL_DATA:     /* unsolicited data */
13461         case FC_RCTL_DD_DATA_DESC:      /* data descriptor */
13462         case FC_RCTL_DD_UNSOL_CMD:      /* unsolicited command */
13463         case FC_RCTL_DD_CMD_STATUS:     /* command status */
13464         case FC_RCTL_ELS_REQ:   /* extended link services request */
13465         case FC_RCTL_ELS_REP:   /* extended link services reply */
13466         case FC_RCTL_ELS4_REQ:  /* FC-4 ELS request */
13467         case FC_RCTL_ELS4_REP:  /* FC-4 ELS reply */
13468         case FC_RCTL_BA_NOP:    /* basic link service NOP */
13469         case FC_RCTL_BA_ABTS:   /* basic link service abort */
13470         case FC_RCTL_BA_RMC:    /* remove connection */
13471         case FC_RCTL_BA_ACC:    /* basic accept */
13472         case FC_RCTL_BA_RJT:    /* basic reject */
13473         case FC_RCTL_BA_PRMT:
13474         case FC_RCTL_ACK_1:     /* acknowledge_1 */
13475         case FC_RCTL_ACK_0:     /* acknowledge_0 */
13476         case FC_RCTL_P_RJT:     /* port reject */
13477         case FC_RCTL_F_RJT:     /* fabric reject */
13478         case FC_RCTL_P_BSY:     /* port busy */
13479         case FC_RCTL_F_BSY:     /* fabric busy to data frame */
13480         case FC_RCTL_F_BSYL:    /* fabric busy to link control frame */
13481         case FC_RCTL_LCR:       /* link credit reset */
13482         case FC_RCTL_END:       /* end */
13483                 break;
13484         case FC_RCTL_VFTH:      /* Virtual Fabric tagging Header */
13485                 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
13486                 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
13487                 return lpfc_fc_frame_check(phba, fc_hdr);
13488         default:
13489                 goto drop;
13490         }
13491         switch (fc_hdr->fh_type) {
13492         case FC_TYPE_BLS:
13493         case FC_TYPE_ELS:
13494         case FC_TYPE_FCP:
13495         case FC_TYPE_CT:
13496                 break;
13497         case FC_TYPE_IP:
13498         case FC_TYPE_ILS:
13499         default:
13500                 goto drop;
13501         }
13502
13503         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
13504                         "2538 Received frame rctl:%s type:%s "
13505                         "Frame Data:%08x %08x %08x %08x %08x %08x\n",
13506                         rctl_names[fc_hdr->fh_r_ctl],
13507                         type_names[fc_hdr->fh_type],
13508                         be32_to_cpu(header[0]), be32_to_cpu(header[1]),
13509                         be32_to_cpu(header[2]), be32_to_cpu(header[3]),
13510                         be32_to_cpu(header[4]), be32_to_cpu(header[5]));
13511         return 0;
13512 drop:
13513         lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
13514                         "2539 Dropped frame rctl:%s type:%s\n",
13515                         rctl_names[fc_hdr->fh_r_ctl],
13516                         type_names[fc_hdr->fh_type]);
13517         return 1;
13518 }
13519
13520 /**
13521  * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
13522  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
13523  *
13524  * This function processes the FC header to retrieve the VFI from the VF
13525  * header, if one exists. This function will return the VFI if one exists
13526  * or 0 if no VF header exists.
13527  **/
13528 static uint32_t
13529 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
13530 {
13531         struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
13532
13533         if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
13534                 return 0;
13535         return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
13536 }
13537
13538 /**
13539  * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
13540  * @phba: Pointer to the HBA structure to search for the vport on
13541  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
13542  * @fcfi: The FC Fabric ID that the frame came from
13543  *
13544  * This function searches the @phba for a vport that matches the content of the
13545  * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
13546  * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
13547  * returns the matching vport pointer or NULL if unable to match frame to a
13548  * vport.
13549  **/
13550 static struct lpfc_vport *
13551 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
13552                        uint16_t fcfi)
13553 {
13554         struct lpfc_vport **vports;
13555         struct lpfc_vport *vport = NULL;
13556         int i;
13557         uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
13558                         fc_hdr->fh_d_id[1] << 8 |
13559                         fc_hdr->fh_d_id[2]);
13560         if (did == Fabric_DID)
13561                 return phba->pport;
13562         vports = lpfc_create_vport_work_array(phba);
13563         if (vports != NULL)
13564                 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
13565                         if (phba->fcf.fcfi == fcfi &&
13566                             vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
13567                             vports[i]->fc_myDID == did) {
13568                                 vport = vports[i];
13569                                 break;
13570                         }
13571                 }
13572         lpfc_destroy_vport_work_array(phba, vports);
13573         return vport;
13574 }
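
/*
 * Illustrative sketch only -- not referenced by the driver, and the
 * lpfc_example_* name is invented here: this is the 24-bit D_ID
 * reassembly that lpfc_fc_frame_to_vport() above performs inline; the
 * matching S_ID form is what sli4_sid_from_fc_hdr() provides.
 */
static inline uint32_t __maybe_unused
lpfc_example_did_from_hdr(struct fc_frame_header *fc_hdr)
{
        /* Three address bytes, most significant byte first */
        return (fc_hdr->fh_d_id[0] << 16 |
                fc_hdr->fh_d_id[1] << 8 |
                fc_hdr->fh_d_id[2]);
}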
13575
13576 /**
13577  * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
13578  * @vport: The vport to work on.
13579  *
13580  * This function updates the receive sequence time stamp for this vport. The
13581  * receive sequence time stamp indicates the time that the last frame of
13582  * the sequence that has been idle for the longest amount of time was received.
13583  * The driver uses this time stamp to indicate if any received sequences have
13584  * timed out.
13585  **/
13586 void
13587 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
13588 {
13589         struct lpfc_dmabuf *h_buf;
13590         struct hbq_dmabuf *dmabuf = NULL;
13591
13592         /* get the oldest sequence on the rcv list */
13593         h_buf = list_get_first(&vport->rcv_buffer_list,
13594                                struct lpfc_dmabuf, list);
13595         if (!h_buf)
13596                 return;
13597         dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
13598         vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
13599 }
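
/*
 * Illustrative sketch only -- not referenced by the driver, and the
 * lpfc_example_* name is invented here: the staleness test that
 * lpfc_rcv_seq_check_edtov() below applies to these time stamps. A
 * sequence is treated as timed out once fc_edtov milliseconds have
 * elapsed since its last frame arrived.
 */
static inline bool __maybe_unused
lpfc_example_seq_timed_out(struct lpfc_vport *vport, unsigned long time_stamp)
{
        unsigned long timeout = time_stamp +
                                msecs_to_jiffies(vport->phba->fc_edtov);

        return !time_before(jiffies, timeout);
}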
13600
13601 /**
13602  * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
13603  * @vport: The vport that the received sequences were sent to.
13604  *
13605  * This function cleans up all outstanding received sequences. This is called
13606  * by the driver when a link event or user action invalidates all the received
13607  * sequences.
13608  **/
13609 void
13610 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
13611 {
13612         struct lpfc_dmabuf *h_buf, *hnext;
13613         struct lpfc_dmabuf *d_buf, *dnext;
13614         struct hbq_dmabuf *dmabuf = NULL;
13615
13616         /* start with the oldest sequence on the rcv list */
13617         list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
13618                 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
13619                 list_del_init(&dmabuf->hbuf.list);
13620                 list_for_each_entry_safe(d_buf, dnext,
13621                                          &dmabuf->dbuf.list, list) {
13622                         list_del_init(&d_buf->list);
13623                         lpfc_in_buf_free(vport->phba, d_buf);
13624                 }
13625                 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
13626         }
13627 }
13628
13629 /**
13630  * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
13631  * @vport: The vport that the received sequences were sent to.
13632  *
13633  * This function determines whether any received sequences have timed out by
13634  * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
13635  * indicates that there is at least one timed out sequence this routine will
13636  * go through the received sequences one at a time from most inactive to most
13637  * active to determine which ones need to be cleaned up. Once it has determined
13638  * that a sequence needs to be cleaned up it will simply free up the resources
13639  * without sending an abort.
13640  **/
13641 void
13642 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
13643 {
13644         struct lpfc_dmabuf *h_buf, *hnext;
13645         struct lpfc_dmabuf *d_buf, *dnext;
13646         struct hbq_dmabuf *dmabuf = NULL;
13647         unsigned long timeout;
13648         int abort_count = 0;
13649
13650         timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
13651                    vport->rcv_buffer_time_stamp);
13652         if (list_empty(&vport->rcv_buffer_list) ||
13653             time_before(jiffies, timeout))
13654                 return;
13655         /* start with the oldest sequence on the rcv list */
13656         list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
13657                 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
13658                 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
13659                            dmabuf->time_stamp);
13660                 if (time_before(jiffies, timeout))
13661                         break;
13662                 abort_count++;
13663                 list_del_init(&dmabuf->hbuf.list);
13664                 list_for_each_entry_safe(d_buf, dnext,
13665                                          &dmabuf->dbuf.list, list) {
13666                         list_del_init(&d_buf->list);
13667                         lpfc_in_buf_free(vport->phba, d_buf);
13668                 }
13669                 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
13670         }
13671         if (abort_count)
13672                 lpfc_update_rcv_time_stamp(vport);
13673 }
13674
13675 /**
13676  * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
13677  * @vport: The vport on which the frame was received
 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
13678  *
13679  * This function searches through the existing incomplete sequences that have
13680  * been sent to this @vport. If the frame matches one of the incomplete
13681  * sequences then the dbuf in the @dmabuf is added to the list of frames that
13682  * make up that sequence. If no sequence is found that matches this frame then
13683  * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
13684  * This function returns a pointer to the first dmabuf in the sequence list that
13685  * the frame was linked to.
13686  **/
13687 static struct hbq_dmabuf *
13688 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
13689 {
13690         struct fc_frame_header *new_hdr;
13691         struct fc_frame_header *temp_hdr;
13692         struct lpfc_dmabuf *d_buf;
13693         struct lpfc_dmabuf *h_buf;
13694         struct hbq_dmabuf *seq_dmabuf = NULL;
13695         struct hbq_dmabuf *temp_dmabuf = NULL;
13696
13697         INIT_LIST_HEAD(&dmabuf->dbuf.list);
13698         dmabuf->time_stamp = jiffies;
13699         new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
13700         /* Use the hdr_buf to find the sequence that this frame belongs to */
13701         list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
13702                 temp_hdr = (struct fc_frame_header *)h_buf->virt;
13703                 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
13704                     (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
13705                     (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
13706                         continue;
13707                 /* found a pending sequence that matches this frame */
13708                 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
13709                 break;
13710         }
13711         if (!seq_dmabuf) {
13712                 /*
13713                  * This indicates the first frame received for this sequence.
13714                  * Queue the buffer on the vport's rcv_buffer_list.
13715                  */
13716                 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
13717                 lpfc_update_rcv_time_stamp(vport);
13718                 return dmabuf;
13719         }
13720         temp_hdr = seq_dmabuf->hbuf.virt;
13721         if (be16_to_cpu(new_hdr->fh_seq_cnt) <
13722                 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
13723                 list_del_init(&seq_dmabuf->hbuf.list);
13724                 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
13725                 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
13726                 lpfc_update_rcv_time_stamp(vport);
13727                 return dmabuf;
13728         }
13729         /* move this sequence to the tail to indicate a young sequence */
13730         list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
13731         seq_dmabuf->time_stamp = jiffies;
13732         lpfc_update_rcv_time_stamp(vport);
13733         if (list_empty(&seq_dmabuf->dbuf.list)) {
13734                 temp_hdr = dmabuf->hbuf.virt;
13735                 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
13736                 return seq_dmabuf;
13737         }
13738         /* find the correct place in the sequence to insert this frame */
13739         list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
13740                 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
13741                 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
13742                 /*
13743                  * If the frame's sequence count is greater than the frame on
13744                  * the list then insert the frame right after this frame
13745                  */
13746                 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
13747                         be16_to_cpu(temp_hdr->fh_seq_cnt)) {
13748                         list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
13749                         return seq_dmabuf;
13750                 }
13751         }
13752         return NULL;
13753 }
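
/*
 * Illustrative sketch only -- not referenced by the driver, and the
 * lpfc_example_* name is invented here: the sequence match key used by
 * lpfc_fc_frame_add() above and lpfc_sli4_abort_partial_seq() below.
 * Two frames belong to the same sequence when their SEQ_ID, OX_ID and
 * 24-bit S_ID all agree.
 */
static inline bool __maybe_unused
lpfc_example_same_sequence(struct fc_frame_header *a,
                           struct fc_frame_header *b)
{
        return (a->fh_seq_id == b->fh_seq_id) &&
               (a->fh_ox_id == b->fh_ox_id) &&
               !memcmp(&a->fh_s_id, &b->fh_s_id, 3);
}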
13754
13755 /**
13756  * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
13757  * @vport: pointer to a virtual port
13758  * @dmabuf: pointer to a dmabuf that describes the FC sequence
13759  *
13760  * This function tries to abort the partially assembled sequence described
13761  * by the information in the basic abort @dmabuf. It checks to see whether
13762  * such a partially assembled sequence is held by the driver. If so, it shall
13763  * free up all the frames from the partially assembled sequence.
13764  *
13765  * Return
13766  * true  -- if there is a matching partially assembled sequence present and
13767  *          all the frames are freed with the sequence;
13768  * false -- if there is no matching partially assembled sequence present so
13769  *          nothing got aborted in the lower layer driver
13770  **/
13771 static bool
13772 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
13773                             struct hbq_dmabuf *dmabuf)
13774 {
13775         struct fc_frame_header *new_hdr;
13776         struct fc_frame_header *temp_hdr;
13777         struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
13778         struct hbq_dmabuf *seq_dmabuf = NULL;
13779
13780         /* Use the hdr_buf to find the sequence that matches this frame */
13781         INIT_LIST_HEAD(&dmabuf->dbuf.list);
13782         INIT_LIST_HEAD(&dmabuf->hbuf.list);
13783         new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
13784         list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
13785                 temp_hdr = (struct fc_frame_header *)h_buf->virt;
13786                 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
13787                     (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
13788                     (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
13789                         continue;
13790                 /* found a pending sequence that matches this frame */
13791                 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
13792                 break;
13793         }
13794
13795         /* Free up all the frames from the partially assembled sequence */
13796         if (seq_dmabuf) {
13797                 list_for_each_entry_safe(d_buf, n_buf,
13798                                          &seq_dmabuf->dbuf.list, list) {
13799                         list_del_init(&d_buf->list);
13800                         lpfc_in_buf_free(vport->phba, d_buf);
13801                 }
13802                 return true;
13803         }
13804         return false;
13805 }
13806
13807 /**
13808  * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
13809  * @phba: Pointer to HBA context object.
13810  * @cmd_iocbq: pointer to the command iocbq structure.
13811  * @rsp_iocbq: pointer to the response iocbq structure.
13812  *
13813  * This function handles the sequence abort response iocb command complete
13814  * event. It properly releases the memory allocated to the sequence abort
13815  * accept iocb.
13816  **/
13817 static void
13818 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
13819                              struct lpfc_iocbq *cmd_iocbq,
13820                              struct lpfc_iocbq *rsp_iocbq)
13821 {
13822         if (cmd_iocbq)
13823                 lpfc_sli_release_iocbq(phba, cmd_iocbq);
13824 }
13825
13826 /**
13827  * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
13828  * @phba: Pointer to HBA context object.
13829  * @xri: xri id in transaction.
13830  *
13831  * This function validates that the xri maps to the known range of XRIs
13832  * allocated and used by the driver.
13833  **/
13834 uint16_t
13835 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
13836                       uint16_t xri)
13837 {
13838         int i;
13839
13840         for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
13841                 if (xri == phba->sli4_hba.xri_ids[i])
13842                         return i;
13843         }
13844         return NO_XRI;
13845 }
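
/*
 * Usage sketch only -- not referenced by the driver, and the
 * lpfc_example_* name is invented here: callers should compare the
 * return of lpfc_sli4_xri_inrange() against NO_XRI rather than testing
 * it for truth, since a valid hit may be index 0 while NO_XRI itself is
 * non-zero.
 */
static inline bool __maybe_unused
lpfc_example_xri_is_ours(struct lpfc_hba *phba, uint16_t xri)
{
        return lpfc_sli4_xri_inrange(phba, xri) != NO_XRI;
}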
13846
13847
13848 /**
13849  * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
13850  * @phba: Pointer to HBA context object.
13851  * @fc_hdr: pointer to a FC frame header.
13852  *
13853  * This function sends a basic response to a previous unsol sequence abort
13854  * event after aborting the sequence handling.
13855  **/
13856 static void
13857 lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
13858                         struct fc_frame_header *fc_hdr)
13859 {
13860         struct lpfc_iocbq *ctiocb = NULL;
13861         struct lpfc_nodelist *ndlp;
13862         uint16_t oxid, rxid;
13863         uint32_t sid, fctl;
13864         IOCB_t *icmd;
13865         int rc;
13866
13867         if (!lpfc_is_link_up(phba))
13868                 return;
13869
13870         sid = sli4_sid_from_fc_hdr(fc_hdr);
13871         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
13872         rxid = be16_to_cpu(fc_hdr->fh_rx_id);
13873
13874         ndlp = lpfc_findnode_did(phba->pport, sid);
13875         if (!ndlp) {
13876                 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
13877                                 "1268 Find ndlp returned NULL for oxid:x%x "
13878                                 "SID:x%x\n", oxid, sid);
13879                 return;
13880         }
13881         if (lpfc_sli4_xri_inrange(phba, rxid) != NO_XRI)
13882                 lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
13883
13884         /* Allocate buffer for rsp iocb */
13885         ctiocb = lpfc_sli_get_iocbq(phba);
13886         if (!ctiocb)
13887                 return;
13888
13889         /* Extract the F_CTL field from FC_HDR */
13890         fctl = sli4_fctl_from_fc_hdr(fc_hdr);
13891
13892         icmd = &ctiocb->iocb;
13893         icmd->un.xseq64.bdl.bdeSize = 0;
13894         icmd->un.xseq64.bdl.ulpIoTag32 = 0;
13895         icmd->un.xseq64.w5.hcsw.Dfctl = 0;
13896         icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
13897         icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
13898
13899         /* Fill in the rest of iocb fields */
13900         icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
13901         icmd->ulpBdeCount = 0;
13902         icmd->ulpLe = 1;
13903         icmd->ulpClass = CLASS3;
13904         icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
13905         ctiocb->context1 = ndlp;
13906
13907         ctiocb->vport = phba->pport;
13908         ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
13910         ctiocb->sli4_lxritag = NO_XRI;
13911         ctiocb->sli4_xritag = NO_XRI;
13912
13913         /* If the oxid maps to the FCP XRI range or if it is out of range,
13914          * send a BLS_RJT.  The driver no longer has that exchange.
13915          * Override the IOCB for a BA_RJT.
13916          */
13917         if (oxid > (phba->sli4_hba.max_cfg_param.max_xri +
13918                     phba->sli4_hba.max_cfg_param.xri_base) ||
13919             oxid > (lpfc_sli4_get_els_iocb_cnt(phba) +
13920                     phba->sli4_hba.max_cfg_param.xri_base)) {
13921                 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
13922                 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
13923                 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
13924                 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
13925         }
13926
13927         if (fctl & FC_FC_EX_CTX) {
13928                 /* ABTS sent by responder to CT exchange, construction
13929                  * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
13930                  * field and RX_ID from ABTS for RX_ID field.
13931                  */
13932                 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
13933                 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
13934         } else {
13935                 /* ABTS sent by initiator to CT exchange, construction
13936                  * of BA_ACC will need to allocate a new XRI as for the
13937                  * XRI_TAG and RX_ID fields.
13938                  */
13939                 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
13940                 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI);
13941         }
13942         bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
13943
13944         /* Xmit CT abts response on exchange <xid> */
13945         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
13946                         "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
13947                         icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
13948
13949         rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
13950         if (rc == IOCB_ERROR) {
13951                 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
13952                                 "2925 Failed to issue CT ABTS RSP x%x on "
13953                                 "xri x%x, Data x%x\n",
13954                                 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
13955                                 phba->link_state);
13956                 lpfc_sli_release_iocbq(phba, ctiocb);
13957         }
13958 }
13959
13960 /**
13961  * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
13962  * @vport: Pointer to the vport on which this sequence was received
13963  * @dmabuf: pointer to a dmabuf that describes the FC sequence
13964  *
13965  * This function handles an SLI-4 unsolicited abort event. If the unsolicited
13966  * receive sequence is only partially assembled by the driver, it shall abort
13967  * the partially assembled frames for the sequence. Otherwise, if the
13968  * unsolicited receive sequence has been completely assembled and passed to
13969  * the Upper Layer Protocol (ULP), it then marks the per-oxid status to
13970  * indicate that the unsolicited sequence has been aborted. After that, it
13971  * will issue a basic accept (BA_ACC) to the abort requester.
13972  **/
13973 void
13974 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
13975                              struct hbq_dmabuf *dmabuf)
13976 {
13977         struct lpfc_hba *phba = vport->phba;
13978         struct fc_frame_header fc_hdr;
13979         uint32_t fctl;
13980         bool abts_par;
13981
13982         /* Make a copy of fc_hdr before the dmabuf being released */
13983         memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
13984         fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
13985
13986         if (fctl & FC_FC_EX_CTX) {
13987                 /*
13988                  * ABTS sent by responder to exchange, just free the buffer
13989                  */
13990                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
13991         } else {
13992                 /*
13993                  * ABTS sent by initiator to exchange, need to do cleanup
13994                  */
13995                 /* Try to abort partially assembled seq */
13996                 abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);
13997
13998                 /* Send the abort frame to the ULP if the partial seq abort failed */
13999                 if (!abts_par)
14000                         lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
14001                 else
14002                         lpfc_in_buf_free(phba, &dmabuf->dbuf);
14003         }
14004         /* Send basic accept (BA_ACC) to the abort requester */
14005         lpfc_sli4_seq_abort_rsp(phba, &fc_hdr);
14006 }
14007
14008 /**
14009  * lpfc_seq_complete - Indicates if a sequence is complete
14010  * @dmabuf: pointer to a dmabuf that describes the FC sequence
14011  *
14012  * This function checks the sequence, starting with the frame described by
14013  * @dmabuf, to see if all the frames associated with this sequence are present.
14014  * The frames associated with this sequence are linked to the @dmabuf using the
14015  * dbuf list. This function looks for three major things: 1) that the first
14016  * frame has a sequence count of zero; 2) that there is a frame with the last
14017  * frame of sequence bit set; and 3) that there are no holes in the sequence
14018  * count. The function returns 1 when the sequence is complete, otherwise 0.
14019  **/
14020 static int
14021 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
14022 {
14023         struct fc_frame_header *hdr;
14024         struct lpfc_dmabuf *d_buf;
14025         struct hbq_dmabuf *seq_dmabuf;
14026         uint32_t fctl;
14027         int seq_count = 0;
14028
14029         hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14030         /* make sure the first frame of the sequence has a sequence count of zero */
14031         if (hdr->fh_seq_cnt != seq_count)
14032                 return 0;
14033         fctl = (hdr->fh_f_ctl[0] << 16 |
14034                 hdr->fh_f_ctl[1] << 8 |
14035                 hdr->fh_f_ctl[2]);
14036         /* If last frame of sequence we can return success. */
14037         if (fctl & FC_FC_END_SEQ)
14038                 return 1;
14039         list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
14040                 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14041                 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14042                 /* If there is a hole in the sequence count then fail. */
14043                 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
14044                         return 0;
14045                 fctl = (hdr->fh_f_ctl[0] << 16 |
14046                         hdr->fh_f_ctl[1] << 8 |
14047                         hdr->fh_f_ctl[2]);
14048                 /* If last frame of sequence we can return success. */
14049                 if (fctl & FC_FC_END_SEQ)
14050                         return 1;
14051         }
14052         return 0;
14053 }
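
/*
 * Illustrative sketch only -- not referenced by the driver, and the
 * lpfc_example_* name is invented here: the 24-bit F_CTL reassembly that
 * lpfc_seq_complete() above performs on each frame; sli4_fctl_from_fc_hdr()
 * wraps the same construction elsewhere in the driver.
 */
static inline uint32_t __maybe_unused
lpfc_example_fctl_from_hdr(struct fc_frame_header *hdr)
{
        return (hdr->fh_f_ctl[0] << 16 |
                hdr->fh_f_ctl[1] << 8 |
                hdr->fh_f_ctl[2]);
}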
14054
14055 /**
14056  * lpfc_prep_seq - Prep sequence for ULP processing
14057  * @vport: Pointer to the vport on which this sequence was received
14058  * @seq_dmabuf: pointer to the hbq_dmabuf that describes the FC sequence
14059  *
14060  * This function takes a sequence, described by a list of frames, and creates
14061  * a list of iocbq structures to describe the sequence. This iocbq list will be
14062  * used to issue to the generic unsolicited sequence handler. This routine
14063  * returns a pointer to the first iocbq in the list. If the function is unable
14064  * to allocate an iocbq then it throw out the received frames that were not
14065  * able to be described and return a pointer to the first iocbq. If unable to
14066  * allocate any iocbqs (including the first) this function will return NULL.
14067  **/
14068 static struct lpfc_iocbq *
14069 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
14070 {
14071         struct hbq_dmabuf *hbq_buf;
14072         struct lpfc_dmabuf *d_buf, *n_buf;
14073         struct lpfc_iocbq *first_iocbq, *iocbq;
14074         struct fc_frame_header *fc_hdr;
14075         uint32_t sid;
14076         uint32_t len, tot_len;
14077         struct ulp_bde64 *pbde;
14078
14079         fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14080         /* remove from receive buffer list */
14081         list_del_init(&seq_dmabuf->hbuf.list);
14082         lpfc_update_rcv_time_stamp(vport);
14083         /* get the Remote Port's SID */
14084         sid = sli4_sid_from_fc_hdr(fc_hdr);
14085         tot_len = 0;
14086         /* Get an iocbq struct to fill in. */
14087         first_iocbq = lpfc_sli_get_iocbq(vport->phba);
14088         if (first_iocbq) {
14089                 /* Initialize the first IOCB. */
14090                 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
14091                 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
14092                 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
14093                 first_iocbq->iocb.ulpContext = NO_XRI;
14094                 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
14095                         be16_to_cpu(fc_hdr->fh_ox_id);
14096                 /* iocbq is prepped for internal consumption.  Physical vpi. */
14097                 first_iocbq->iocb.unsli3.rcvsli3.vpi =
14098                         vport->phba->vpi_ids[vport->vpi];
14099                 /* put the first buffer into the first IOCBq */
14100                 first_iocbq->context2 = &seq_dmabuf->dbuf;
14101                 first_iocbq->context3 = NULL;
14102                 first_iocbq->iocb.ulpBdeCount = 1;
14103                 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
14104                                                         LPFC_DATA_BUF_SIZE;
14105                 first_iocbq->iocb.un.rcvels.remoteID = sid;
14106                 tot_len = bf_get(lpfc_rcqe_length,
14107                                        &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
14108                 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
14109         }
14110         iocbq = first_iocbq;
14111         /*
14112          * Each IOCBq can have two Buffers assigned, so go through the list
14113          * of buffers for this sequence and save two buffers in each IOCBq
14114          */
14115         list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
14116                 if (!iocbq) {
14117                         lpfc_in_buf_free(vport->phba, d_buf);
14118                         continue;
14119                 }
14120                 if (!iocbq->context3) {
14121                         iocbq->context3 = d_buf;
14122                         iocbq->iocb.ulpBdeCount++;
14123                         pbde = (struct ulp_bde64 *)
14124                                         &iocbq->iocb.unsli3.sli3Words[4];
14125                         pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
14126
14127                         /* We need to get the size out of the right CQE */
14128                         hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14129                         len = bf_get(lpfc_rcqe_length,
14130                                        &hbq_buf->cq_event.cqe.rcqe_cmpl);
14131                         iocbq->iocb.unsli3.rcvsli3.acc_len += len;
14132                         tot_len += len;
14133                 } else {
14134                         iocbq = lpfc_sli_get_iocbq(vport->phba);
14135                         if (!iocbq) {
14136                                 if (first_iocbq) {
14137                                         first_iocbq->iocb.ulpStatus =
14138                                                         IOSTAT_FCP_RSP_ERROR;
14139                                         first_iocbq->iocb.un.ulpWord[4] =
14140                                                         IOERR_NO_RESOURCES;
14141                                 }
14142                                 lpfc_in_buf_free(vport->phba, d_buf);
14143                                 continue;
14144                         }
14145                         iocbq->context2 = d_buf;
14146                         iocbq->context3 = NULL;
14147                         iocbq->iocb.ulpBdeCount = 1;
14148                         iocbq->iocb.un.cont64[0].tus.f.bdeSize =
14149                                                         LPFC_DATA_BUF_SIZE;
14150
14151                         /* We need to get the size out of the right CQE */
14152                         hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14153                         len = bf_get(lpfc_rcqe_length,
14154                                        &hbq_buf->cq_event.cqe.rcqe_cmpl);
14155                         tot_len += len;
14156                         iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
14157
14158                         iocbq->iocb.un.rcvels.remoteID = sid;
14159                         list_add_tail(&iocbq->list, &first_iocbq->list);
14160                 }
14161         }
14162         return first_iocbq;
14163 }
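
/*
 * Note on the packing above: each iocbq carries at most two buffers
 * (context2 and context3), so a sequence of N frames is described by a
 * chain of roughly N/2 iocbqs linked off first_iocbq->list, with the
 * running sequence length accumulated in unsli3.rcvsli3.acc_len.
 */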
14164
14165 static void
14166 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
14167                           struct hbq_dmabuf *seq_dmabuf)
14168 {
14169         struct fc_frame_header *fc_hdr;
14170         struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
14171         struct lpfc_hba *phba = vport->phba;
14172
14173         fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14174         iocbq = lpfc_prep_seq(vport, seq_dmabuf);
14175         if (!iocbq) {
14176                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14177                                 "2707 Ring %d handler: Failed to allocate "
14178                                 "iocb Rctl x%x Type x%x received\n",
14179                                 LPFC_ELS_RING,
14180                                 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
14181                 return;
14182         }
14183         if (!lpfc_complete_unsol_iocb(phba,
14184                                       &phba->sli.ring[LPFC_ELS_RING],
14185                                       iocbq, fc_hdr->fh_r_ctl,
14186                                       fc_hdr->fh_type))
14187                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14188                                 "2540 Ring %d handler: unexpected Rctl "
14189                                 "x%x Type x%x received\n",
14190                                 LPFC_ELS_RING,
14191                                 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
14192
14193         /* Free iocb created in lpfc_prep_seq */
14194         list_for_each_entry_safe(curr_iocb, next_iocb,
14195                 &iocbq->list, list) {
14196                 list_del_init(&curr_iocb->list);
14197                 lpfc_sli_release_iocbq(phba, curr_iocb);
14198         }
14199         lpfc_sli_release_iocbq(phba, iocbq);
14200 }
14201
14202 /**
14203  * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
14204  * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to the hbq_dmabuf describing the received frame.
14205  *
14206  * This function is called with no lock held. This function processes all
14207  * the received buffers and gives it to upper layers when a received buffer
14208  * indicates that it is the final frame in the sequence. The interrupt
14209  * service routine processes received buffers in interrupt context and adds
14210  * received dma buffers to the rb_pend_list queue and signals the worker thread.
14211  * The worker thread calls lpfc_sli4_handle_received_buffer, which will call the
14212  * appropriate receive function when the final frame in a sequence is received.
14213  **/
14214 void
14215 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
14216                                  struct hbq_dmabuf *dmabuf)
14217 {
14218         struct hbq_dmabuf *seq_dmabuf;
14219         struct fc_frame_header *fc_hdr;
14220         struct lpfc_vport *vport;
14221         uint32_t fcfi;
14222
14223         /* Process each received buffer */
14224         fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14225         /* check to see if this is a valid type of frame */
14226         if (lpfc_fc_frame_check(phba, fc_hdr)) {
14227                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14228                 return;
14229         }
14230         if (bf_get(lpfc_cqe_code,
14231                    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)
14232                 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
14233                               &dmabuf->cq_event.cqe.rcqe_cmpl);
14234         else
14235                 fcfi = bf_get(lpfc_rcqe_fcf_id,
14236                               &dmabuf->cq_event.cqe.rcqe_cmpl);
14237         vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
14238         if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
14239                 /* throw out the frame */
14240                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14241                 return;
14242         }
14243         /* Handle the basic abort sequence (BA_ABTS) event */
14244         if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
14245                 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
14246                 return;
14247         }
14248
14249         /* Link this frame */
14250         seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
14251         if (!seq_dmabuf) {
14252                 /* unable to add frame to vport - throw it out */
14253                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14254                 return;
14255         }
14256         /* If not last frame in sequence continue processing frames. */
14257         if (!lpfc_seq_complete(seq_dmabuf))
14258                 return;
14259
14260         /* Send the complete sequence to the upper layer protocol */
14261         lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
14262 }
14263
14264 /**
14265  * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
14266  * @phba: pointer to lpfc hba data structure.
14267  *
14268  * This routine is invoked to post rpi header templates to the
14269  * HBA consistent with the SLI-4 interface spec.  This routine
14270  * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
14271  * 64 rpi context headers per region.
14272  *
14273  * This routine does not require any locks.  Its usage is expected
14274  * to be driver load or reset recovery when the driver is
14275  * running sequentially.
14276  *
14277  * Return codes
14278  *      0 - successful
14279  *      -EIO - The mailbox failed to complete successfully.
14280  *      When this error occurs, the driver is not guaranteed
14281  *      to have any rpi regions posted to the device and
14282  *      must either attempt to repost the regions or take a
14283  *      fatal error.
14284  **/
14285 int
14286 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
14287 {
14288         struct lpfc_rpi_hdr *rpi_page;
14289         uint32_t rc = 0;
14290         uint16_t lrpi = 0;
14291
14292         /* SLI4 ports that support extents do not require RPI headers. */
14293         if (!phba->sli4_hba.rpi_hdrs_in_use)
14294                 goto exit;
14295         if (phba->sli4_hba.extents_in_use)
14296                 return -EIO;
14297
14298         list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
14299                 /*
14300                  * Assign the rpi headers a physical rpi only if the driver
14301                  * has not initialized those resources.  A port reset only
14302                  * needs the headers posted.
14303                  */
14304                 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
14305                     LPFC_RPI_RSRC_RDY)
14306                         rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
14307
14308                 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
14309                 if (rc != MBX_SUCCESS) {
14310                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14311                                         "2008 Error %d posting all rpi "
14312                                         "headers\n", rc);
14313                         rc = -EIO;
14314                         break;
14315                 }
14316         }
14317
14318  exit:
14319         bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
14320                LPFC_RPI_RSRC_RDY);
14321         return rc;
14322 }
14323
14324 /**
14325  * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
14326  * @phba: pointer to lpfc hba data structure.
14327  * @rpi_page:  pointer to the rpi memory region.
14328  *
14329  * This routine is invoked to post a single rpi header to the
14330  * HBA consistent with the SLI-4 interface spec.  This memory region
14331  * maps up to 64 rpi context regions.
14332  *
14333  * Return codes
14334  *      0 - successful
14335  *      -ENOMEM - No available memory
14336  *      -EIO - The mailbox failed to complete successfully.
14337  **/
14338 int
14339 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
14340 {
14341         LPFC_MBOXQ_t *mboxq;
14342         struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
14343         uint32_t rc = 0;
14344         uint32_t shdr_status, shdr_add_status;
14345         union lpfc_sli4_cfg_shdr *shdr;
14346
14347         /* SLI4 ports that support extents do not require RPI headers. */
14348         if (!phba->sli4_hba.rpi_hdrs_in_use)
14349                 return rc;
14350         if (phba->sli4_hba.extents_in_use)
14351                 return -EIO;
14352
14353         /* The port is notified of the header region via a mailbox command. */
14354         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14355         if (!mboxq) {
14356                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14357                                 "2001 Unable to allocate memory for issuing "
14358                                 "SLI_CONFIG_SPECIAL mailbox command\n");
14359                 return -ENOMEM;
14360         }
14361
14362         /* Post the rpi header region to the port. */
14363         hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
14364         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
14365                          LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
14366                          sizeof(struct lpfc_mbx_post_hdr_tmpl) -
14367                          sizeof(struct lpfc_sli4_cfg_mhdr),
14368                          LPFC_SLI4_MBX_EMBED);
14369
14370
14371         /* Post the physical rpi to the port for this rpi header. */
14372         bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
14373                rpi_page->start_rpi);
14374         bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
14375                hdr_tmpl, rpi_page->page_count);
14376
14377         hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
14378         hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
14379         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
14380         shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
14381         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14382         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14383         if (rc != MBX_TIMEOUT)
14384                 mempool_free(mboxq, phba->mbox_mem_pool);
14385         if (shdr_status || shdr_add_status || rc) {
14386                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14387                                 "2514 POST_RPI_HDR mailbox failed with "
14388                                 "status x%x add_status x%x, mbx status x%x\n",
14389                                 shdr_status, shdr_add_status, rc);
14390                 rc = -ENXIO;
14391         }
14392         return rc;
14393 }
14394
14395 /**
14396  * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
14397  * @phba: pointer to lpfc hba data structure.
14398  *
14399  * This routine is invoked to allocate an available rpi from the driver's
14400  * rpi bitmask consistent with the SLI-4 interface spec.  If the pool of
14401  * rpis runs low, it also posts another SLI4_PAGE_SIZE rpi header region
14402  * to the port.
14403  *
14404  * Returns
14405  *      A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
14406  *      LPFC_RPI_ALLOC_ERROR if no rpis are available.
14407  **/
14408 int
14409 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
14410 {
14411         unsigned long rpi;
14412         uint16_t max_rpi, rpi_limit;
14413         uint16_t rpi_remaining, lrpi = 0;
14414         struct lpfc_rpi_hdr *rpi_hdr;
14415
14416         max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
14417         rpi_limit = phba->sli4_hba.next_rpi;
14418
14419         /*
14420          * Fetch the next logical rpi.  Because this index is logical,
14421          * the driver starts at 0 each time.
14422          */
14423         spin_lock_irq(&phba->hbalock);
14424         rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
14425         if (rpi >= rpi_limit)
14426                 rpi = LPFC_RPI_ALLOC_ERROR;
14427         else {
14428                 set_bit(rpi, phba->sli4_hba.rpi_bmask);
14429                 phba->sli4_hba.max_cfg_param.rpi_used++;
14430                 phba->sli4_hba.rpi_count++;
14431         }
14432
14433         /*
14434          * Don't try to allocate more rpi header regions if the device limit
14435          * has been exhausted.
14436          */
14437         if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
14438             (phba->sli4_hba.rpi_count >= max_rpi)) {
14439                 spin_unlock_irq(&phba->hbalock);
14440                 return rpi;
14441         }
14442
14443         /*
14444          * RPI header postings are not required for SLI4 ports capable of
14445          * extents.
14446          */
14447         if (!phba->sli4_hba.rpi_hdrs_in_use) {
14448                 spin_unlock_irq(&phba->hbalock);
14449                 return rpi;
14450         }
14451
14452         /*
14453          * If the driver is running low on rpi resources, allocate another
14454          * page now.  Note that the next_rpi value is used because
14455          * it represents how many are actually in use whereas max_rpi notes
14456          * the maximum number supported by the device.
14457          */
14458         rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
14459         spin_unlock_irq(&phba->hbalock);
14460         if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
14461                 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
14462                 if (!rpi_hdr) {
14463                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14464                                         "2002 Error Could not grow rpi "
14465                                         "count\n");
14466                 } else {
14467                         lrpi = rpi_hdr->start_rpi;
14468                         rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
14469                         lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
14470                 }
14471         }
14472
14473         return rpi;
14474 }
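
/*
 * Illustrative pairing only -- not referenced by the driver, and the
 * lpfc_example_* name is invented here: an rpi obtained from
 * lpfc_sli4_alloc_rpi() must eventually be handed back through
 * lpfc_sli4_free_rpi() below so its bit is cleared for reuse.
 */
static void __maybe_unused
lpfc_example_rpi_cycle(struct lpfc_hba *phba)
{
        int rpi = lpfc_sli4_alloc_rpi(phba);

        if (rpi == LPFC_RPI_ALLOC_ERROR)
                return;
        /* ... register and use the rpi here ... */
        lpfc_sli4_free_rpi(phba, rpi);
}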
14475
14476 /**
14477  * __lpfc_sli4_free_rpi - Release an rpi for reuse.
14478  * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free.
14479  *
14480  * This routine is invoked to release an rpi to the pool of available
14481  * rpis maintained by the driver.  The caller must hold the hbalock.
14482  **/
14483 void
14484 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
14485 {
14486         if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
14487                 phba->sli4_hba.rpi_count--;
14488                 phba->sli4_hba.max_cfg_param.rpi_used--;
14489         }
14490 }
14491
14492 /**
14493  * lpfc_sli4_free_rpi - Release an rpi for reuse.
14494  * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free.
14495  *
14496  * This routine is invoked to release an rpi to the pool of
14497  * available rpis maintained by the driver.
14498  **/
14499 void
14500 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
14501 {
14502         spin_lock_irq(&phba->hbalock);
14503         __lpfc_sli4_free_rpi(phba, rpi);
14504         spin_unlock_irq(&phba->hbalock);
14505 }
14506
14507 /**
14508  * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
14509  * @phba: pointer to lpfc hba data structure.
14510  *
14511  * This routine is invoked to remove the memory region that
14512  * provided rpis via a bitmask.
14513  **/
14514 void
14515 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
14516 {
14517         kfree(phba->sli4_hba.rpi_bmask);
14518         kfree(phba->sli4_hba.rpi_ids);
14519         bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
14520 }
14521
14522 /**
14523  * lpfc_sli4_resume_rpi - Resume an rpi with the port
14524  * @ndlp: pointer to the lpfc nodelist whose rpi is to be resumed.
14525  *
14526  * This routine is invoked to issue a RESUME_RPI mailbox command to the
14527  * port for the rpi associated with @ndlp.
14528  **/
14529 int
14530 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
14531 {
14532         LPFC_MBOXQ_t *mboxq;
14533         struct lpfc_hba *phba = ndlp->phba;
14534         int rc;
14535
14536         /* The rpi is resumed via a mailbox command to the port. */
14537         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14538         if (!mboxq)
14539                 return -ENOMEM;
14540
14541         /* Construct and issue the RESUME_RPI mailbox command. */
14542         lpfc_resume_rpi(mboxq, ndlp);
14543         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
14544         if (rc == MBX_NOT_FINISHED) {
14545                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14546                                 "2010 Resume RPI Mailbox failed "
14547                                 "status %d, mbxStatus x%x\n", rc,
14548                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
14549                 mempool_free(mboxq, phba->mbox_mem_pool);
14550                 return -EIO;
14551         }
14552         return 0;
14553 }
14554
14555 /**
14556  * lpfc_sli4_init_vpi - Initialize a vpi with the port
14557  * @vport: Pointer to the vport for which the vpi is being initialized
14558  *
14559  * This routine is invoked to activate a vpi with the port.
14560  *
14561  * Returns:
14562  *    0 success
14563  *    -Evalue otherwise
14564  **/
14565 int
14566 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
14567 {
14568         LPFC_MBOXQ_t *mboxq;
14569         int rc = 0;
14570         int retval = MBX_SUCCESS;
14571         uint32_t mbox_tmo;
14572         struct lpfc_hba *phba = vport->phba;
14573         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14574         if (!mboxq)
14575                 return -ENOMEM;
14576         lpfc_init_vpi(phba, mboxq, vport->vpi);
14577         mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
14578         rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
14579         if (rc != MBX_SUCCESS) {
14580                 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
14581                                 "2022 INIT VPI Mailbox failed "
14582                                 "status %d, mbxStatus x%x\n", rc,
14583                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
14584                 retval = -EIO;
14585         }
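        /*
         * On MBX_TIMEOUT the mailbox may still complete later, so
         * ownership of the mailbox memory stays with the completion path
         * and it must not be freed here.
         */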
14586         if (rc != MBX_TIMEOUT)
14587                 mempool_free(mboxq, vport->phba->mbox_mem_pool);
14588
14589         return retval;
14590 }
14591
14592 /**
14593  * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
14594  * @phba: pointer to lpfc hba data structure.
14595  * @mboxq: Pointer to mailbox object.
14596  *
14597  * This routine is the completion handler for the ADD_FCF_RECORD mailbox
14598  * command issued by lpfc_sli4_add_fcf_record().  It checks the mailbox
14599  * subheader status and frees the nonembedded mailbox resources.
14600  **/
14601 static void
14602 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
14603 {
14604         void *virt_addr;
14605         union lpfc_sli4_cfg_shdr *shdr;
14606         uint32_t shdr_status, shdr_add_status;
14607
14608         virt_addr = mboxq->sge_array->addr[0];
14609         /* The IOCTL status is embedded in the mailbox subheader. */
14610         shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
14611         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14612         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14613
14614         if ((shdr_status || shdr_add_status) &&
14615                 (shdr_status != STATUS_FCF_IN_USE))
14616                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14617                         "2558 ADD_FCF_RECORD mailbox failed with "
14618                         "status x%x add_status x%x\n",
14619                         shdr_status, shdr_add_status);
14620
14621         lpfc_sli4_mbox_cmd_free(phba, mboxq);
14622 }
14623
14624 /**
14625  * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
14626  * @phba: pointer to lpfc hba data structure.
14627  * @fcf_record:  pointer to the initialized fcf record to add.
14628  *
14629  * This routine is invoked to manually add a single FCF record. The caller
14630  * must pass a completely initialized FCF_Record.  This routine takes
14631  * care of the nonembedded mailbox operations.
14632  **/
14633 int
14634 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
14635 {
14636         int rc = 0;
14637         LPFC_MBOXQ_t *mboxq;
14638         uint8_t *bytep;
14639         void *virt_addr;
14640         dma_addr_t phys_addr;
14641         struct lpfc_mbx_sge sge;
14642         uint32_t alloc_len, req_len;
14643         uint32_t fcfindex;
14644
14645         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14646         if (!mboxq) {
14647                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14648                         "2009 Failed to allocate mbox for ADD_FCF cmd\n");
14649                 return -ENOMEM;
14650         }
14651
14652         req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
14653                   sizeof(uint32_t);
14654
14655         /* Allocate DMA memory and set up the non-embedded mailbox command */
14656         alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
14657                                      LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
14658                                      req_len, LPFC_SLI4_MBX_NEMBED);
14659         if (alloc_len < req_len) {
14660                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14661                         "2523 Allocated DMA memory size (x%x) is "
14662                         "less than the requested DMA memory "
14663                         "size (x%x)\n", alloc_len, req_len);
14664                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
14665                 return -ENOMEM;
14666         }
14667
14668         /*
14669          * Get the first SGE entry from the non-embedded DMA memory.  This
14670          * routine only uses a single SGE.
14671          */
14672         lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
14673         phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
14674         virt_addr = mboxq->sge_array->addr[0];
14675         /*
14676          * Configure the FCF record for FCFI 0.  This is the driver's
14677          * hardcoded default and gets used in nonFIP mode.
14678          */
14679         fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
14680         bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
14681         lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
14682
14683         /*
14684          * Copy the fcf_index and the FCF Record Data. The data starts after
14685          * the FCoE header plus word10. The data copy needs to be endian
14686          * correct.
14687          */
14688         bytep += sizeof(uint32_t);
14689         lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
14690         mboxq->vport = phba->pport;
14691         mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
14692         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
14693         if (rc == MBX_NOT_FINISHED) {
14694                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14695                         "2515 ADD_FCF_RECORD mailbox failed with "
14696                         "status 0x%x\n", rc);
14697                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
14698                 rc = -EIO;
14699         } else
14700                 rc = 0;
14701
14702         return rc;
14703 }
14704
14705 /**
14706  * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
14707  * @phba: pointer to lpfc hba data structure.
14708  * @fcf_record:  pointer to the fcf record to write the default data.
14709  * @fcf_index: FCF table entry index.
14710  *
14711  * This routine is invoked to build the driver's default FCF record.  The
14712  * values used are hardcoded.  This routine handles memory initialization.
14713  *
14714  **/
14715 void
14716 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
14717                                 struct fcf_record *fcf_record,
14718                                 uint16_t fcf_index)
14719 {
14720         memset(fcf_record, 0, sizeof(struct fcf_record));
14721         fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
14722         fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
14723         fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
14724         bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
14725         bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
14726         bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
14727         bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
14728         bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
14729         bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
14730         bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
14731         bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
14732         bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
14733         bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
14734         bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
14735         bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
14736         bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
14737                 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
14738         /* Set the VLAN bit map */
14739         if (phba->valid_vlan) {
14740                 fcf_record->vlan_bitmap[phba->vlan_id / 8]
14741                         = 1 << (phba->vlan_id % 8);
14742         }
14743 }
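
/*
 * Illustrative usage sketch (simplified, error handling trimmed): in non-FIP
 * mode the driver builds the default record and registers it with the port,
 * roughly:
 *
 *      struct fcf_record *fcf_record;
 *
 *      fcf_record = kzalloc(sizeof(struct fcf_record), GFP_KERNEL);
 *      if (fcf_record) {
 *              lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
 *                                              LPFC_FCOE_FCF_DEF_INDEX);
 *              rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
 *              kfree(fcf_record);
 *      }
 */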
14744
14745 /**
14746  * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
14747  * @phba: pointer to lpfc hba data structure.
14748  * @fcf_index: FCF table entry offset.
14749  *
14750  * This routine is invoked to scan the entire FCF table by reading each
14751  * FCF record and processing it one at a time, starting from the @fcf_index
14752  * for initial FCF discovery or fast FCF failover rediscovery.
14753  *
14754  * Return 0 if the mailbox command is submitted successfully, non-zero
14755  * otherwise.
14756  **/
14757 int
14758 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
14759 {
14760         int rc = 0, error;
14761         LPFC_MBOXQ_t *mboxq;
14762
14763         phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
14764         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14765         if (!mboxq) {
14766                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14767                                 "2000 Failed to allocate mbox for "
14768                                 "READ_FCF cmd\n");
14769                 error = -ENOMEM;
14770                 goto fail_fcf_scan;
14771         }
14772         /* Construct the read FCF record mailbox command */
14773         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
14774         if (rc) {
14775                 error = -EINVAL;
14776                 goto fail_fcf_scan;
14777         }
14778         /* Issue the mailbox command asynchronously */
14779         mboxq->vport = phba->pport;
14780         mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
14781
14782         spin_lock_irq(&phba->hbalock);
14783         phba->hba_flag |= FCF_TS_INPROG;
14784         spin_unlock_irq(&phba->hbalock);
14785
14786         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
14787         if (rc == MBX_NOT_FINISHED)
14788                 error = -EIO;
14789         else {
14790                 /* Reset eligible FCF count for new scan */
14791                 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
14792                         phba->fcf.eligible_fcf_cnt = 0;
14793                 error = 0;
14794         }
14795 fail_fcf_scan:
14796         if (error) {
14797                 if (mboxq)
14798                         lpfc_sli4_mbox_cmd_free(phba, mboxq);
14799                 /* FCF scan failed, clear FCF_TS_INPROG flag */
14800                 spin_lock_irq(&phba->hbalock);
14801                 phba->hba_flag &= ~FCF_TS_INPROG;
14802                 spin_unlock_irq(&phba->hbalock);
14803         }
14804         return error;
14805 }
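
/*
 * Illustrative usage sketch: an initial discovery scan starts from the
 * LPFC_FCOE_FCF_GET_FIRST pseudo-index and lets the completion handler walk
 * the table record by record:
 *
 *      rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 *      if (rc)
 *              ... scan could not be started; retry or fail discovery ...
 */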
14806
14807 /**
14808  * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
14809  * @phba: pointer to lpfc hba data structure.
14810  * @fcf_index: FCF table entry offset.
14811  *
14812  * This routine is invoked to read an FCF record indicated by @fcf_index
14813  * and to use it for FLOGI roundrobin FCF failover.
14814  *
14815  * Return 0 if the mailbox command is submitted successfully, non-zero
14816  * otherwise.
14817  **/
14818 int
14819 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
14820 {
14821         int rc = 0, error;
14822         LPFC_MBOXQ_t *mboxq;
14823
14824         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14825         if (!mboxq) {
14826                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
14827                                 "2763 Failed to allocate mbox for "
14828                                 "READ_FCF cmd\n");
14829                 error = -ENOMEM;
14830                 goto fail_fcf_read;
14831         }
14832         /* Construct the read FCF record mailbox command */
14833         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
14834         if (rc) {
14835                 error = -EINVAL;
14836                 goto fail_fcf_read;
14837         }
14838         /* Issue the mailbox command asynchronously */
14839         mboxq->vport = phba->pport;
14840         mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
14841         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
14842         if (rc == MBX_NOT_FINISHED)
14843                 error = -EIO;
14844         else
14845                 error = 0;
14846
14847 fail_fcf_read:
14848         if (error && mboxq)
14849                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
14850         return error;
14851 }
14852
14853 /**
14854  * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
14855  * @phba: pointer to lpfc hba data structure.
14856  * @fcf_index: FCF table entry offset.
14857  *
14858  * This routine is invoked to read an FCF record indicated by @fcf_index to
14859  * determine whether it's eligible for FLOGI roundrobin failover list.
14860  *
14861  * Return 0 if the mailbox command is submitted successfully, non-zero
14862  * otherwise.
14863  **/
14864 int
14865 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
14866 {
14867         int rc = 0, error;
14868         LPFC_MBOXQ_t *mboxq;
14869
14870         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14871         if (!mboxq) {
14872                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
14873                                 "2758 Failed to allocate mbox for "
14874                                 "READ_FCF cmd\n");
14875                 error = -ENOMEM;
14876                 goto fail_fcf_read;
14877         }
14878         /* Construct the read FCF record mailbox command */
14879         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
14880         if (rc) {
14881                 error = -EINVAL;
14882                 goto fail_fcf_read;
14883         }
14884         /* Issue the mailbox command asynchronously */
14885         mboxq->vport = phba->pport;
14886         mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
14887         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
14888         if (rc == MBX_NOT_FINISHED)
14889                 error = -EIO;
14890         else
14891                 error = 0;
14892
14893 fail_fcf_read:
14894         if (error && mboxq)
14895                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
14896         return error;
14897 }
14898
14899 /**
14900  * lpfc_check_next_fcf_pri_level - Move rr_bmask to the next priority level
14901  * @phba: pointer to the lpfc_hba struct for this port.
14902  *
14903  * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
14904  * routine when the rr_bmask is empty. The FCF indices are put into the
14905  * rr_bmask based on their priority level, from the highest priority down
14906  * to the lowest. The most likely FCF candidate will be in the highest
14907  * priority group. When this routine is called it searches the fcf_pri
14908  * list for the next lowest priority group and repopulates the rr_bmask
14909  * with only those fcf_indexes.
14910  * Returns: 1=success 0=failure
14911  **/
14912 int
14913 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
14914 {
14915         uint16_t next_fcf_pri;
14916         uint16_t last_index;
14917         struct lpfc_fcf_pri *fcf_pri;
14918         int rc;
14919         int ret = 0;
14920
14921         last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
14922                         LPFC_SLI4_FCF_TBL_INDX_MAX);
14923         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
14924                         "3060 Last IDX %d\n", last_index);
14925         if (list_empty(&phba->fcf.fcf_pri_list)) {
14926                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
14927                         "3061 Last IDX %d\n", last_index);
14928                 return 0; /* Empty rr list */
14929         }
14930         next_fcf_pri = 0;
14931         /*
14932          * Clear the rr_bmask and set all of the bits that are at this
14933          * priority.
14934          */
14935         memset(phba->fcf.fcf_rr_bmask, 0,
14936                         sizeof(*phba->fcf.fcf_rr_bmask));
14937         spin_lock_irq(&phba->hbalock);
14938         list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
14939                 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
14940                         continue;
14941                 /*
14942                  * The first priority whose FLOGI has not failed
14943                  * will be the highest.
14944                  */
14945                 if (!next_fcf_pri)
14946                         next_fcf_pri = fcf_pri->fcf_rec.priority;
14947                 spin_unlock_irq(&phba->hbalock);
14948                 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
14949                         rc = lpfc_sli4_fcf_rr_index_set(phba,
14950                                                 fcf_pri->fcf_rec.fcf_index);
14951                         if (rc)
14952                                 return 0;
14953                 }
14954                 spin_lock_irq(&phba->hbalock);
14955         }
14956         /*
14957          * If next_fcf_pri was not set above and the list is not empty,
14958          * then FLOGI has failed on all of them. So reset the FLOGI-failed
14959          * flags and start at the beginning.
14960          */
14961         if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
14962                 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
14963                         fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
14964                         /*
14965                          * The first priority whose FLOGI has not
14966                          * failed will be the highest.
14967                          */
14968                         if (!next_fcf_pri)
14969                                 next_fcf_pri = fcf_pri->fcf_rec.priority;
14970                         spin_unlock_irq(&phba->hbalock);
14971                         if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
14972                                 rc = lpfc_sli4_fcf_rr_index_set(phba,
14973                                                 fcf_pri->fcf_rec.fcf_index);
14974                                 if (rc)
14975                                         return 0;
14976                         }
14977                         spin_lock_irq(&phba->hbalock);
14978                 }
14979         } else
14980                 ret = 1;
14981         spin_unlock_irq(&phba->hbalock);
14982
14983         return ret;
14984 }
14985 /**
14986  * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
14987  * @phba: pointer to lpfc hba data structure.
14988  *
14989  * This routine is to get the next eligible FCF record index in a round
14990  * robin fashion. If the next eligible FCF record index equals to the
14991  * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
14992  * shall be returned, otherwise, the next eligible FCF record's index
14993  * shall be returned.
14994  **/
14995 uint16_t
14996 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
14997 {
14998         uint16_t next_fcf_index;
14999
15000         /* Search start from next bit of currently registered FCF index */
15001 next_priority:
15002         next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
15003                                         LPFC_SLI4_FCF_TBL_INDX_MAX;
15004         next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
15005                                        LPFC_SLI4_FCF_TBL_INDX_MAX,
15006                                        next_fcf_index);
15007
15008         /* Wrap around condition on phba->fcf.fcf_rr_bmask */
15009         if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
15010                 /*
15011                  * If we have wrapped then we need to clear the bits that
15012                  * have been tested so that we can detect when we should
15013                  * change the priority level.
15014                  */
15015                 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
15016                                                LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
15017         }
15018
15020         /* Check roundrobin failover list empty condition */
15021         if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
15022                 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
15023                 /*
15024                  * If the next fcf index is not found, check if there are
15025                  * lower priority level fcf's in the fcf_priority list.
15026                  * Set up the rr_bmask with all of the available fcf bits
15027                  * at that level and continue the selection process.
15028                  */
15029                 if (lpfc_check_next_fcf_pri_level(phba))
15030                         goto next_priority;
15031                 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
15032                                 "2844 No roundrobin failover FCF available\n");
15033                 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
15034                         return LPFC_FCOE_FCF_NEXT_NONE;
15035                 else {
15036                         lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
15037                                 "3063 Only FCF available idx %d, flag %x\n",
15038                                 next_fcf_index,
15039                                 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
15040                         return next_fcf_index;
15041                 }
15042         }
15043
15044         if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
15045                 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
15046                 LPFC_FCF_FLOGI_FAILED)
15047                 goto next_priority;
15048
15049         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15050                         "2845 Get next roundrobin failover FCF (x%x)\n",
15051                         next_fcf_index);
15052
15053         return next_fcf_index;
15054 }
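
/*
 * Illustrative usage sketch (simplified from the FLOGI failover flow): after
 * a failed FLOGI the caller picks the next eligible index and, if one is
 * found, reads that record to fail over to it:
 *
 *      fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
 *      if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
 *              ... roundrobin list exhausted; give up on failover ...
 *      } else {
 *              rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
 *              ...
 *      }
 */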
15055
15056 /**
15057  * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
15058  * @phba: pointer to lpfc hba data structure.
15059  * @fcf_index: FCF table entry index to set.
15060  *
15060  * This routine sets the FCF record index in to the eligible bmask for
15061  * roundrobin failover search. It checks to make sure that the index
15062  * does not go beyond the range of the driver allocated bmask dimension
15063  * before setting the bit.
15064  *
15065  * Returns 0 if the index bit successfully set, otherwise, it returns
15066  * -EINVAL.
15067  **/
15068 int
15069 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
15070 {
15071         if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
15072                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15073                                 "2610 FCF (x%x) reached driver's book "
15074                                 "keeping dimension:x%x\n",
15075                                 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
15076                 return -EINVAL;
15077         }
15078         /* Set the eligible FCF record index bmask */
15079         set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
15080
15081         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15082                         "2790 Set FCF (x%x) to roundrobin FCF failover "
15083                         "bmask\n", fcf_index);
15084
15085         return 0;
15086 }
15087
15088 /**
15089  * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
15090  * @phba: pointer to lpfc hba data structure.
15091  * @fcf_index: FCF table entry index to clear.
15092  *
15092  * This routine clears the FCF record index from the eligible bmask for
15093  * roundrobin failover search. It checks to make sure that the index
15094  * does not go beyond the range of the driver allocated bmask dimension
15095  * before clearing the bit.
15096  **/
15097 void
15098 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
15099 {
15100         struct lpfc_fcf_pri *fcf_pri;
15101         if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
15102                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15103                                 "2762 FCF (x%x) reached driver's book "
15104                                 "keeping dimension:x%x\n",
15105                                 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
15106                 return;
15107         }
15108         /* Clear the eligible FCF record index bmask */
15109         spin_lock_irq(&phba->hbalock);
15110         list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
15111                 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
15112                         list_del_init(&fcf_pri->list);
15113                         break;
15114                 }
15115         }
15116         spin_unlock_irq(&phba->hbalock);
15117         clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
15118
15119         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15120                         "2791 Clear FCF (x%x) from roundrobin failover "
15121                         "bmask\n", fcf_index);
15122 }
15123
15124 /**
15125  * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
15126  * @phba: pointer to lpfc hba data structure.
15127  * @mbox: pointer to the rediscover FCF table mailbox command.
15127  *
15128  * This routine is the completion routine for the rediscover FCF table mailbox
15129  * command. If the mailbox command returned failure, it will try to stop the
15130  * FCF rediscover wait timer.
15131  **/
15132 void
15133 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
15134 {
15135         struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
15136         uint32_t shdr_status, shdr_add_status;
15137
15138         redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
15139
15140         shdr_status = bf_get(lpfc_mbox_hdr_status,
15141                              &redisc_fcf->header.cfg_shdr.response);
15142         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
15143                              &redisc_fcf->header.cfg_shdr.response);
15144         if (shdr_status || shdr_add_status) {
15145                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15146                                 "2746 Requesting for FCF rediscovery failed "
15147                                 "status x%x add_status x%x\n",
15148                                 shdr_status, shdr_add_status);
15149                 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
15150                         spin_lock_irq(&phba->hbalock);
15151                         phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
15152                         spin_unlock_irq(&phba->hbalock);
15153                         /*
15154                          * CVL event triggered FCF rediscover request failed,
15155                          * last resort to re-try current registered FCF entry.
15156                          */
15157                         lpfc_retry_pport_discovery(phba);
15158                 } else {
15159                         spin_lock_irq(&phba->hbalock);
15160                         phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
15161                         spin_unlock_irq(&phba->hbalock);
15162                         /*
15163                          * DEAD FCF event triggered FCF rediscover request
15164                          * failed, last resort to fail over as a link down
15165                          * to FCF registration.
15166                          */
15167                         lpfc_sli4_fcf_dead_failthrough(phba);
15168                 }
15169         } else {
15170                 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15171                                 "2775 Start FCF rediscover quiescent timer\n");
15172                 /*
15173                  * Start FCF rediscovery wait timer for pending FCF
15174                  * before rescanning the FCF record table.
15175                  */
15176                 lpfc_fcf_redisc_wait_start_timer(phba);
15177         }
15178
15179         mempool_free(mbox, phba->mbox_mem_pool);
15180 }
15181
15182 /**
15183  * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
15184  * @phba: pointer to lpfc hba data structure.
15185  *
15186  * This routine is invoked to request rediscovery of the entire FCF table
15187  * by the port.
15188  **/
15189 int
15190 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
15191 {
15192         LPFC_MBOXQ_t *mbox;
15193         struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
15194         int rc, length;
15195
15196         /* Cancel retry delay timers to all vports before FCF rediscover */
15197         lpfc_cancel_all_vport_retry_delay_timer(phba);
15198
15199         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15200         if (!mbox) {
15201                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15202                                 "2745 Failed to allocate mbox for "
15203                                 "requesting FCF rediscover.\n");
15204                 return -ENOMEM;
15205         }
15206
15207         length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
15208                   sizeof(struct lpfc_sli4_cfg_mhdr));
15209         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15210                          LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
15211                          length, LPFC_SLI4_MBX_EMBED);
15212
15213         redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
15214         /* Set count to 0 for invalidating the entire FCF database */
15215         bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
15216
15217         /* Issue the mailbox command asynchronously */
15218         mbox->vport = phba->pport;
15219         mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
15220         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
15221
15222         if (rc == MBX_NOT_FINISHED) {
15223                 mempool_free(mbox, phba->mbox_mem_pool);
15224                 return -EIO;
15225         }
15226         return 0;
15227 }
15228
15229 /**
15230  * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
15231  * @phba: pointer to lpfc hba data structure.
15232  *
15233  * This function is the failover routine of last resort for the FCF DEAD
15234  * event, used when the driver has failed to perform fast FCF failover.
15235  **/
15236 void
15237 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
15238 {
15239         uint32_t link_state;
15240
15241         /*
15242          * Last resort as FCF DEAD event failover will treat this as
15243          * a link down, but save the link state because we don't want
15244          * it to be changed to Link Down unless it is already down.
15245          */
15246         link_state = phba->link_state;
15247         lpfc_linkdown(phba);
15248         phba->link_state = link_state;
15249
15250         /* Unregister FCF if no devices connected to it */
15251         lpfc_unregister_unused_fcf(phba);
15252 }
15253
15254 /**
15255  * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
15256  * @phba: pointer to lpfc hba data structure.
15257  *
15258  * This function reads region 23 and parses the TLVs for port status to
15259  * decide if the user disabled the port. If the TLV indicates the
15260  * port is disabled, the hba_flag is set accordingly.
15261  **/
15262 void
15263 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
15264 {
15265         LPFC_MBOXQ_t *pmb = NULL;
15266         MAILBOX_t *mb;
15267         uint8_t *rgn23_data = NULL;
15268         uint32_t offset = 0, data_size, sub_tlv_len, tlv_offset;
15269         int rc;
15270
15271         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15272         if (!pmb) {
15273                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15274                         "2600 lpfc_sli_read_serdes_param failed to"
15275                         " allocate mailbox memory\n");
15276                 goto out;
15277         }
15278         mb = &pmb->u.mb;
15279
15280         /* Get adapter Region 23 data */
15281         rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
15282         if (!rgn23_data)
15283                 goto out;
15284
15285         do {
15286                 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
15287                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
15288
15289                 if (rc != MBX_SUCCESS) {
15290                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15291                                 "2601 lpfc_sli_read_link_ste failed to"
15292                                 " read config region 23 rc 0x%x Status 0x%x\n",
15293                                 rc, mb->mbxStatus);
15294                         mb->un.varDmp.word_cnt = 0;
15295                 }
15296                 /*
15297                  * Dump mem may return a zero word count when finished,
15298                  * or when we got a mailbox error; either way we are done.
15299                  */
15300                 if (mb->un.varDmp.word_cnt == 0)
15301                         break;
15302                 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
15303                         mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
15304
15305                 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
15306                         rgn23_data + offset,
15307                         mb->un.varDmp.word_cnt);
15308                 offset += mb->un.varDmp.word_cnt;
15309         } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
15310
15311         data_size = offset;
15312         offset = 0;
15313
15314         if (!data_size)
15315                 goto out;
15316
15317         /* Check the region signature first */
15318         if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
15319                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15320                         "2619 Config region 23 has bad signature\n");
15321                 goto out;
15322         }
15323         offset += 4;
15324
15325         /* Check the data structure version */
15326         if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
15327                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15328                         "2620 Config region 23 has bad version\n");
15329                 goto out;
15330         }
15331         offset += 4;
15332
15333         /* Parse TLV entries in the region */
15334         while (offset < data_size) {
15335                 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
15336                         break;
15337                 /*
15338                  * If the TLV is not driver specific TLV or driver id is
15339                  * not linux driver id, skip the record.
15340                  */
15341                 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
15342                     (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
15343                     (rgn23_data[offset + 3] != 0)) {
15344                         offset += rgn23_data[offset + 1] * 4 + 4;
15345                         continue;
15346                 }
15347
15348                 /* Driver found a driver specific TLV in the config region */
15349                 sub_tlv_len = rgn23_data[offset + 1] * 4;
15350                 offset += 4;
15351                 tlv_offset = 0;
15352
15353                 /*
15354                  * Search for configured port state sub-TLV.
15355                  */
15356                 while ((offset < data_size) &&
15357                         (tlv_offset < sub_tlv_len)) {
15358                         if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
15359                                 offset += 4;
15360                                 tlv_offset += 4;
15361                                 break;
15362                         }
15363                         if (rgn23_data[offset] != PORT_STE_TYPE) {
15364                                 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
15365                                 offset += rgn23_data[offset + 1] * 4 + 4;
15366                                 continue;
15367                         }
15368
15369                         /* This HBA contains PORT_STE configured */
15370                         if (!rgn23_data[offset + 2])
15371                                 phba->hba_flag |= LINK_DISABLED;
15372
15373                         goto out;
15374                 }
15375         }
15376 out:
15377         if (pmb)
15378                 mempool_free(pmb, phba->mbox_mem_pool);
15379         kfree(rgn23_data);
15380         return;
15381 }
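
/*
 * Illustrative layout of the driver specific TLV parsed above (offsets in
 * bytes; lengths are stored as counts of 32-bit words):
 *
 *      rgn23_data[off + 0]    TLV type, must be DRIVER_SPECIFIC_TYPE
 *      rgn23_data[off + 1]    TLV length in words
 *      rgn23_data[off + 2]    driver id, must be LINUX_DRIVER_ID
 *      rgn23_data[off + 3]    must be 0
 *      rgn23_data[off + 4..]  sub-TLVs; for a PORT_STE_TYPE sub-TLV, a zero
 *                             in the third byte means the user disabled the
 *                             port and LINK_DISABLED gets set above
 */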
15382
15383 /**
15384  * lpfc_wr_object - write an object to the firmware
15385  * @phba: HBA structure that indicates port to create a queue on.
15386  * @dmabuf_list: list of dmabufs to write to the port.
15387  * @size: the total byte value of the objects to write to the port.
15388  * @offset: the current offset to be used to start the transfer.
15389  *
15390  * This routine will create a wr_object mailbox command to send to the port.
15391  * The mailbox command will be constructed using the dma buffers described in
15392  * @dmabuf_list to create a list of BDEs. This routine will fill in as many
15393  * BDEs as the embedded mailbox can support. The @offset variable will be
15394  * used to indicate the starting offset of the transfer and will also return
15395  * the offset after the write object mailbox has completed. @size is used to
15396  * determine the end of the object and whether the eof bit should be set.
15397  *
15398  * Return 0 if successful; @offset will contain the new offset to use
15399  * for the next write.
15400  * Return a negative value for error cases.
15401  **/
15402 int
15403 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
15404                uint32_t size, uint32_t *offset)
15405 {
15406         struct lpfc_mbx_wr_object *wr_object;
15407         LPFC_MBOXQ_t *mbox;
15408         int rc = 0, i = 0;
15409         uint32_t shdr_status, shdr_add_status;
15410         uint32_t mbox_tmo;
15411         union lpfc_sli4_cfg_shdr *shdr;
15412         struct lpfc_dmabuf *dmabuf;
15413         uint32_t written = 0;
15414
15415         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15416         if (!mbox)
15417                 return -ENOMEM;
15418
15419         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15420                         LPFC_MBOX_OPCODE_WRITE_OBJECT,
15421                         sizeof(struct lpfc_mbx_wr_object) -
15422                         sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
15423
15424         wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
15425         wr_object->u.request.write_offset = *offset;
15426         sprintf((uint8_t *)wr_object->u.request.object_name, "/");
15427         wr_object->u.request.object_name[0] =
15428                 cpu_to_le32(wr_object->u.request.object_name[0]);
15429         bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
15430         list_for_each_entry(dmabuf, dmabuf_list, list) {
15431                 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
15432                         break;
15433                 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
15434                 wr_object->u.request.bde[i].addrHigh =
15435                         putPaddrHigh(dmabuf->phys);
15436                 if (written + SLI4_PAGE_SIZE >= size) {
15437                         wr_object->u.request.bde[i].tus.f.bdeSize =
15438                                 (size - written);
15439                         written += (size - written);
15440                         bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
15441                 } else {
15442                         wr_object->u.request.bde[i].tus.f.bdeSize =
15443                                 SLI4_PAGE_SIZE;
15444                         written += SLI4_PAGE_SIZE;
15445                 }
15446                 i++;
15447         }
15448         wr_object->u.request.bde_count = i;
15449         bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
15450         if (!phba->sli4_hba.intr_enable)
15451                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15452         else {
15453                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
15454                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
15455         }
15456         /* The IOCTL status is embedded in the mailbox subheader. */
15457         shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
15458         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15459         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15460         if (rc != MBX_TIMEOUT)
15461                 mempool_free(mbox, phba->mbox_mem_pool);
15462         if (shdr_status || shdr_add_status || rc) {
15463                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15464                                 "3025 Write Object mailbox failed with "
15465                                 "status x%x add_status x%x, mbx status x%x\n",
15466                                 shdr_status, shdr_add_status, rc);
15467                 rc = -ENXIO;
15468         } else
15469                 *offset += wr_object->u.response.actual_write_length;
15470         return rc;
15471 }
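
/*
 * Illustrative usage sketch (fw_size and dma_buffer_list are placeholder
 * names; buffer setup elided): an object larger than one mailbox worth of
 * BDEs is written in passes, feeding the returned offset into the next call:
 *
 *      uint32_t offset = 0;
 *      int rc = 0;
 *
 *      while (offset < fw_size && !rc) {
 *              ... fill dma_buffer_list with up to
 *                  LPFC_MBX_WR_CONFIG_MAX_BDE page-sized buffers ...
 *              rc = lpfc_wr_object(phba, &dma_buffer_list,
 *                                  fw_size - offset, &offset);
 *      }
 */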
15472
15473 /**
15474  * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
15475  * @vport: pointer to vport data structure.
15476  *
15477  * This function iterates through the mailboxq and cleans up all REG_LOGIN
15478  * and REG_VPI mailbox commands associated with the vport. This function
15479  * is called when the driver wants to restart discovery of the vport due to
15480  * a Clear Virtual Link event.
15481  **/
15482 void
15483 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
15484 {
15485         struct lpfc_hba *phba = vport->phba;
15486         LPFC_MBOXQ_t *mb, *nextmb;
15487         struct lpfc_dmabuf *mp;
15488         struct lpfc_nodelist *ndlp;
15489         struct lpfc_nodelist *act_mbx_ndlp = NULL;
15490         struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
15491         LIST_HEAD(mbox_cmd_list);
15492         uint8_t restart_loop;
15493
15494         /* Clean up internally queued mailbox commands with the vport */
15495         spin_lock_irq(&phba->hbalock);
15496         list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
15497                 if (mb->vport != vport)
15498                         continue;
15499
15500                 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
15501                         (mb->u.mb.mbxCommand != MBX_REG_VPI))
15502                         continue;
15503
15504                 list_del(&mb->list);
15505                 list_add_tail(&mb->list, &mbox_cmd_list);
15506         }
15507         /* Clean up active mailbox command with the vport */
15508         mb = phba->sli.mbox_active;
15509         if (mb && (mb->vport == vport)) {
15510                 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
15511                         (mb->u.mb.mbxCommand == MBX_REG_VPI))
15512                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15513                 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
15514                         act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
15515                         /* Take a reference count for delayed processing */
15516                         act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
15517                         /* Unregister the RPI when mailbox complete */
15518                         mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
15519                 }
15520         }
15521         /* Cleanup any mailbox completions which are not yet processed */
15522         do {
15523                 restart_loop = 0;
15524                 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
15525                         /*
15526                          * If this mailbox is already processed or it is
15527                          * for another vport, ignore it.
15528                          */
15529                         if ((mb->vport != vport) ||
15530                                 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
15531                                 continue;
15532
15533                         if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
15534                                 (mb->u.mb.mbxCommand != MBX_REG_VPI))
15535                                 continue;
15536
15537                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15538                         if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
15539                                 ndlp = (struct lpfc_nodelist *)mb->context2;
15540                                 /* Unregister the RPI when mailbox complete */
15541                                 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
15542                                 restart_loop = 1;
15543                                 spin_unlock_irq(&phba->hbalock);
15544                                 spin_lock(shost->host_lock);
15545                                 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
15546                                 spin_unlock(shost->host_lock);
15547                                 spin_lock_irq(&phba->hbalock);
15548                                 break;
15549                         }
15550                 }
15551         } while (restart_loop);
15552
15553         spin_unlock_irq(&phba->hbalock);
15554
15555         /* Release the cleaned-up mailbox commands */
15556         while (!list_empty(&mbox_cmd_list)) {
15557                 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
15558                 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
15559                         mp = (struct lpfc_dmabuf *) (mb->context1);
15560                         if (mp) {
15561                                 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
15562                                 kfree(mp);
15563                         }
15564                         ndlp = (struct lpfc_nodelist *) mb->context2;
15565                         mb->context2 = NULL;
15566                         if (ndlp) {
15567                                 spin_lock(shost->host_lock);
15568                                 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
15569                                 spin_unlock(shost->host_lock);
15570                                 lpfc_nlp_put(ndlp);
15571                         }
15572                 }
15573                 mempool_free(mb, phba->mbox_mem_pool);
15574         }
15575
15576         /* Release the ndlp with the cleaned-up active mailbox command */
15577         if (act_mbx_ndlp) {
15578                 spin_lock(shost->host_lock);
15579                 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
15580                 spin_unlock(shost->host_lock);
15581                 lpfc_nlp_put(act_mbx_ndlp);
15582         }
15583 }
15584
15585 /**
15586  * lpfc_drain_txq - Drain the txq
15587  * @phba: Pointer to HBA context object.
15588  *
15589  * This function attempts to submit IOCBs on the txq
15590  * to the adapter.  For SLI4 adapters, the txq contains
15591  * ELS IOCBs that have been deferred because there
15592  * are no SGLs.  This congestion can occur with large
15593  * vport counts during node discovery.
15594  **/
15595
15596 uint32_t
15597 lpfc_drain_txq(struct lpfc_hba *phba)
15598 {
15599         LIST_HEAD(completions);
15600         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
15601         struct lpfc_iocbq *piocbq = NULL;
15602         unsigned long iflags = 0;
15603         char *fail_msg = NULL;
15604         struct lpfc_sglq *sglq;
15605         union lpfc_wqe wqe;
15606
15607         spin_lock_irqsave(&phba->hbalock, iflags);
15608         if (pring->txq_cnt > pring->txq_max)
15609                 pring->txq_max = pring->txq_cnt;
15610
15611         spin_unlock_irqrestore(&phba->hbalock, iflags);
15612
15613         while (pring->txq_cnt) {
15614                 spin_lock_irqsave(&phba->hbalock, iflags);
15615
15616                 piocbq = lpfc_sli_ringtx_get(phba, pring);
15617                 if (!piocbq) {
15618                         spin_unlock_irqrestore(&phba->hbalock, iflags);
15619                         /* The txq_cnt is out of sync with the txq list.
15620                          * This should never happen.
15621                          */
15622                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15623                                 "2823 txq empty and txq_cnt is %d\n",
15624                                 pring->txq_cnt);
15625                         break;
15626                 }
15627                 sglq = __lpfc_sli_get_sglq(phba, piocbq);
15628                 if (!sglq) {
15629                         __lpfc_sli_ringtx_put(phba, pring, piocbq);
15630                         spin_unlock_irqrestore(&phba->hbalock, iflags);
15631                         break;
15632                 }
15636
15637                 /* The xri and iocb resources are secured;
15638                  * attempt to issue the request.
15639                  */
15640                 piocbq->sli4_lxritag = sglq->sli4_lxritag;
15641                 piocbq->sli4_xritag = sglq->sli4_xritag;
15642                 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
15643                         fail_msg = "to convert bpl to sgl";
15644                 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
15645                         fail_msg = "to convert iocb to wqe";
15646                 else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
15647                         fail_msg = " - Wq is full";
15648                 else
15649                         lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
15650
15651                 if (fail_msg) {
15652                         /* Failed means we can't issue and need to cancel */
15653                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15654                                         "2822 IOCB failed %s iotag 0x%x "
15655                                         "xri 0x%x\n",
15656                                         fail_msg,
15657                                         piocbq->iotag, piocbq->sli4_xritag);
15658                         list_add_tail(&piocbq->list, &completions);
15659                 }
15660                 spin_unlock_irqrestore(&phba->hbalock, iflags);
15661         }
15662
15663         /* Cancel all the IOCBs that cannot be issued */
15664         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
15665                                 IOERR_SLI_ABORTED);
15666
15667         return pring->txq_cnt;
15668 }
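
/*
 * Illustrative note: lpfc_drain_txq() is the retry half of the SGL deferral
 * described above. Once SGLs are returned to the pool, a caller can
 * re-attempt the deferred ELS IOCBs, roughly:
 *
 *      if (pring->txq_cnt)
 *              lpfc_drain_txq(phba);
 */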