1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
25 #include <linux/delay.h>
26 #include <linux/slab.h>
27
28 #include <scsi/scsi.h>
29 #include <scsi/scsi_cmnd.h>
30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_transport_fc.h>
33 #include <scsi/fc/fc_fs.h>
34 #include <linux/aer.h>
35
36 #include "lpfc_hw4.h"
37 #include "lpfc_hw.h"
38 #include "lpfc_sli.h"
39 #include "lpfc_sli4.h"
40 #include "lpfc_nl.h"
41 #include "lpfc_disc.h"
42 #include "lpfc_scsi.h"
43 #include "lpfc.h"
44 #include "lpfc_crtn.h"
45 #include "lpfc_logmsg.h"
46 #include "lpfc_compat.h"
47 #include "lpfc_debugfs.h"
48 #include "lpfc_vport.h"
49
50 /* There are only four IOCB completion types. */
51 typedef enum _lpfc_iocb_type {
52         LPFC_UNKNOWN_IOCB,
53         LPFC_UNSOL_IOCB,
54         LPFC_SOL_IOCB,
55         LPFC_ABORT_IOCB
56 } lpfc_iocb_type;
57
58
59 /* Provide function prototypes local to this module. */
60 static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
61                                   uint32_t);
62 static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
63                               uint8_t *, uint32_t *);
64 static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
65                                                          struct lpfc_iocbq *);
66 static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
67                                       struct hbq_dmabuf *);
68 static IOCB_t *
69 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
70 {
71         return &iocbq->iocb;
72 }
73
74 /**
75  * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
76  * @q: The Work Queue to operate on.
77  * @wqe: The Work Queue Entry to put on the Work Queue.
78  *
79  * This routine will copy the contents of @wqe to the next available entry on
80  * the @q. This function will then ring the Work Queue Doorbell to signal the
81  * HBA to start processing the Work Queue Entry. This function returns 0 if
82  * successful. If no entries are available on @q then this function will return
83  * -ENOMEM.
84  * The caller is expected to hold the hbalock when calling this routine.
85  **/
86 static uint32_t
87 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
88 {
89         union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
90         struct lpfc_register doorbell;
91         uint32_t host_index;
92
93         /* If the host has not yet processed the next entry then we are done */
94         if (((q->host_index + 1) % q->entry_count) == q->hba_index)
95                 return -ENOMEM;
96         /* set consumption flag every once in a while */
97         if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
98                 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
99
100         lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
101
102         /* Update the host index before invoking device */
103         host_index = q->host_index;
104         q->host_index = ((q->host_index + 1) % q->entry_count);
105
106         /* Ring Doorbell */
107         doorbell.word0 = 0;
108         bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
109         bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
110         bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
111         writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
112         readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */
113
114         return 0;
115 }
116
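/*
 * Editorial example (sketch only, not driver code): a hypothetical caller
 * posting a pre-built WQE while holding the hbalock, as required above.
 * "wq" and "wqe" are placeholder names supplied by the caller.
 *
 *	unsigned long iflags;
 *	uint32_t rc;
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = lpfc_sli4_wq_put(wq, &wqe);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (rc)
 *		return -ENOMEM;		(queue was full)
 */
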
117 /**
118  * lpfc_sli4_wq_release - Updates internal hba index for WQ
119  * @q: The Work Queue to operate on.
120  * @index: The index to advance the hba index to.
121  *
122  * This routine will update the HBA index of a queue to reflect consumption of
123  * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
124  * an entry the host calls this function to update the queue's internal
125  * pointers. This routine returns the number of entries that were consumed by
126  * the HBA.
127  **/
128 static uint32_t
129 lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
130 {
131         uint32_t released = 0;
132
133         if (q->hba_index == index)
134                 return 0;
135         do {
136                 q->hba_index = ((q->hba_index + 1) % q->entry_count);
137                 released++;
138         } while (q->hba_index != index);
139         return released;
140 }
141
142 /**
143  * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
144  * @q: The Mailbox Queue to operate on.
145  * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
146  *
147  * This routine will copy the contents of @mqe to the next available entry on
148  * the @q. This function will then ring the Mailbox Queue Doorbell to signal the
149  * HBA to start processing the Mailbox Queue Entry. This function returns 0 if
150  * successful. If no entries are available on @q then this function will return
151  * -ENOMEM.
152  * The caller is expected to hold the hbalock when calling this routine.
153  **/
154 static uint32_t
155 lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
156 {
157         struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
158         struct lpfc_register doorbell;
159         uint32_t host_index;
160
161         /* If the host has not yet processed the next entry then we are done */
162         if (((q->host_index + 1) % q->entry_count) == q->hba_index)
163                 return -ENOMEM;
164         lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
165         /* Save off the mailbox pointer for completion */
166         q->phba->mbox = (MAILBOX_t *)temp_mqe;
167
168         /* Update the host index before invoking device */
169         host_index = q->host_index;
170         q->host_index = ((q->host_index + 1) % q->entry_count);
171
172         /* Ring Doorbell */
173         doorbell.word0 = 0;
174         bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
175         bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
176         writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
177         readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
178         return 0;
179 }
180
181 /**
182  * lpfc_sli4_mq_release - Updates internal hba index for MQ
183  * @q: The Mailbox Queue to operate on.
184  *
185  * This routine will update the HBA index of a queue to reflect consumption of
186  * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
187  * an entry the host calls this function to update the queue's internal
188  * pointers. This routine returns the number of entries that were consumed by
189  * the HBA.
190  **/
191 static uint32_t
192 lpfc_sli4_mq_release(struct lpfc_queue *q)
193 {
194         /* Clear the mailbox pointer for completion */
195         q->phba->mbox = NULL;
196         q->hba_index = ((q->hba_index + 1) % q->entry_count);
197         return 1;
198 }
199
200 /**
201  * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
202  * @q: The Event Queue to get the first valid EQE from
203  *
204  * This routine will get the first valid Event Queue Entry from @q, update
205  * the queue's internal hba index, and return the EQE. If no valid EQEs are in
206  * the Queue (no more work to do), or the Queue is full of EQEs that have been
207  * processed, but not popped back to the HBA then this routine will return NULL.
208  **/
209 static struct lpfc_eqe *
210 lpfc_sli4_eq_get(struct lpfc_queue *q)
211 {
212         struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
213
214         /* If the next EQE is not valid then we are done */
215         if (!bf_get_le32(lpfc_eqe_valid, eqe))
216                 return NULL;
217         /* If the host has not yet processed the next entry then we are done */
218         if (((q->hba_index + 1) % q->entry_count) == q->host_index)
219                 return NULL;
220
221         q->hba_index = ((q->hba_index + 1) % q->entry_count);
222         return eqe;
223 }
224
225 /**
226  * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
227  * @q: The Event Queue that the host has completed processing for.
228  * @arm: Indicates whether the host wants to arm this EQ.
229  *
230  * This routine will mark all Event Queue Entries on @q, from the last
231  * known completed entry to the last entry that was processed, as completed
232  * by clearing the valid bit for each completion queue entry. Then it will
233  * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
234  * The internal host index in the @q will be updated by this routine to indicate
235  * that the host has finished processing the entries. The @arm parameter
236  * indicates that the queue should be rearmed when ringing the doorbell.
237  *
238  * This function will return the number of EQEs that were popped.
239  **/
240 uint32_t
241 lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
242 {
243         uint32_t released = 0;
244         struct lpfc_eqe *temp_eqe;
245         struct lpfc_register doorbell;
246
247         /* while there are valid entries */
248         while (q->hba_index != q->host_index) {
249                 temp_eqe = q->qe[q->host_index].eqe;
250                 bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
251                 released++;
252                 q->host_index = ((q->host_index + 1) % q->entry_count);
253         }
254         if (unlikely(released == 0 && !arm))
255                 return 0;
256
257         /* ring doorbell for number popped */
258         doorbell.word0 = 0;
259         if (arm) {
260                 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
261                 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
262         }
263         bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
264         bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
265         bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
266         writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
267         /* PCI read to flush PCI pipeline on re-arming for INTx mode */
268         if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
269                 readl(q->phba->sli4_hba.EQCQDBregaddr);
270         return released;
271 }
272
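/*
 * Editorial example (sketch only, not driver code): the polling pattern the
 * two routines above are meant for. "eq" is the event queue being serviced
 * and handle_eqe() is a hypothetical dispatch helper.
 *
 *	struct lpfc_eqe *eqe;
 *
 *	while ((eqe = lpfc_sli4_eq_get(eq)) != NULL)
 *		handle_eqe(eqe);
 *	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
 */
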
273 /**
274  * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
275  * @q: The Completion Queue to get the first valid CQE from
276  *
277  * This routine will get the first valid Completion Queue Entry from @q, update
278  * the queue's internal hba index, and return the CQE. If no valid CQEs are in
279  * the Queue (no more work to do), or the Queue is full of CQEs that have been
280  * processed, but not popped back to the HBA then this routine will return NULL.
281  **/
282 static struct lpfc_cqe *
283 lpfc_sli4_cq_get(struct lpfc_queue *q)
284 {
285         struct lpfc_cqe *cqe;
286
287         /* If the next CQE is not valid then we are done */
288         if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
289                 return NULL;
290         /* If the host has not yet processed the next entry then we are done */
291         if (((q->hba_index + 1) % q->entry_count) == q->host_index)
292                 return NULL;
293
294         cqe = q->qe[q->hba_index].cqe;
295         q->hba_index = ((q->hba_index + 1) % q->entry_count);
296         return cqe;
297 }
298
299 /**
300  * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
301  * @q: The Completion Queue that the host has completed processing for.
302  * @arm: Indicates whether the host wants to arm this CQ.
303  *
304  * This routine will mark all Completion queue entries on @q, from the last
305  * known completed entry to the last entry that was processed, as completed
306  * by clearing the valid bit for each completion queue entry. Then it will
307  * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
308  * The internal host index in the @q will be updated by this routine to indicate
309  * that the host has finished processing the entries. The @arm parameter
310  * indicates that the queue should be rearmed when ringing the doorbell.
311  *
312  * This function will return the number of CQEs that were released.
313  **/
314 uint32_t
315 lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
316 {
317         uint32_t released = 0;
318         struct lpfc_cqe *temp_qe;
319         struct lpfc_register doorbell;
320
321         /* while there are valid entries */
322         while (q->hba_index != q->host_index) {
323                 temp_qe = q->qe[q->host_index].cqe;
324                 bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
325                 released++;
326                 q->host_index = ((q->host_index + 1) % q->entry_count);
327         }
328         if (unlikely(released == 0 && !arm))
329                 return 0;
330
331         /* ring doorbell for number popped */
332         doorbell.word0 = 0;
333         if (arm)
334                 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
335         bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
336         bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
337         bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
338         writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
339         return released;
340 }
341
342 /**
343  * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
344  * @hq: The Header Receive Queue to operate on.
345  * @dq: The Data Receive Queue to operate on.
346  * @hrqe: The Header Receive Queue Entry to put on the header queue.
347  * @drqe: The Data Receive Queue Entry to put on the data queue.
348  *
349  * This routine will copy the contents of @hrqe and @drqe to the next available
350  * entries on @hq and @dq, then ring the Receive Queue Doorbell to signal the
351  * HBA. On success it returns the put index, else -EBUSY or -EINVAL.
352  * The caller is expected to hold the hbalock when calling this routine.
353  **/
354 static int
355 lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
356                  struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
357 {
358         struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
359         struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
360         struct lpfc_register doorbell;
361         int put_index = hq->host_index;
362
363         if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
364                 return -EINVAL;
365         if (hq->host_index != dq->host_index)
366                 return -EINVAL;
367         /* If the host has not yet processed the next entry then we are done */
368         if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
369                 return -EBUSY;
370         lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
371         lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
372
373         /* Update the host index to point to the next slot */
374         hq->host_index = ((hq->host_index + 1) % hq->entry_count);
375         dq->host_index = ((dq->host_index + 1) % dq->entry_count);
376
377         /* Ring The Header Receive Queue Doorbell */
378         if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
379                 doorbell.word0 = 0;
380                 bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
381                        LPFC_RQ_POST_BATCH);
382                 bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
383                 writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
384         }
385         return put_index;
386 }
387
388 /**
389  * lpfc_sli4_rq_release - Updates internal hba index for RQ
390  * @hq: The Header Receive Queue to operate on.
391  * @dq: The Data Receive Queue to operate on.
392  * This routine will update the HBA index of a queue to reflect consumption of
393  * one Receive Queue Entry by the HBA. When the HBA indicates that it has
394  * consumed an entry the host calls this function to update the queue's
395  * internal pointers. This routine returns the number of entries that were
396  * consumed by the HBA.
397  **/
398 static uint32_t
399 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
400 {
401         if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
402                 return 0;
403         hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
404         dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
405         return 1;
406 }
407
408 /**
409  * lpfc_cmd_iocb - Get next command iocb entry in the ring
410  * @phba: Pointer to HBA context object.
411  * @pring: Pointer to driver SLI ring object.
412  *
413  * This function returns a pointer to the next command iocb entry
414  * in the command ring. The caller must hold hbalock to prevent
415  * other threads from consuming the next command iocb.
416  * SLI-2/SLI-3 provide different sized iocbs.
417  **/
418 static inline IOCB_t *
419 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
420 {
421         return (IOCB_t *) (((char *) pring->cmdringaddr) +
422                            pring->cmdidx * phba->iocb_cmd_size);
423 }
424
425 /**
426  * lpfc_resp_iocb - Get next response iocb entry in the ring
427  * @phba: Pointer to HBA context object.
428  * @pring: Pointer to driver SLI ring object.
429  *
430  * This function returns a pointer to the next response iocb entry
431  * in the response ring. The caller must hold hbalock to make sure
432  * that no other thread consumes the next response iocb.
433  * SLI-2/SLI-3 provide different sized iocbs.
434  **/
435 static inline IOCB_t *
436 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
437 {
438         return (IOCB_t *) (((char *) pring->rspringaddr) +
439                            pring->rspidx * phba->iocb_rsp_size);
440 }
441
442 /**
443  * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
444  * @phba: Pointer to HBA context object.
445  *
446  * This function is called with hbalock held. This function
447  * allocates a new driver iocb object from the iocb pool. If the
448  * allocation is successful, it returns a pointer to the newly
449  * allocated iocb object; otherwise it returns NULL.
450  **/
451 static struct lpfc_iocbq *
452 __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
453 {
454         struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
455         struct lpfc_iocbq * iocbq = NULL;
456
457         list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
458
459         if (iocbq)
460                 phba->iocb_cnt++;
461         if (phba->iocb_cnt > phba->iocb_max)
462                 phba->iocb_max = phba->iocb_cnt;
463         return iocbq;
464 }
465
466 /**
467  * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
468  * @phba: Pointer to HBA context object.
469  * @xritag: XRI value.
470  *
471  * This function clears the sglq pointer from the array of active
472  * sglq's. The xritag that is passed in is used to index into the
473  * array. Before the xritag can be used it needs to be adjusted
474  * by subtracting the xribase.
475  *
476  * Returns sglq pointer = success, NULL = Failure.
477  **/
478 static struct lpfc_sglq *
479 __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
480 {
481         uint16_t adj_xri;
482         struct lpfc_sglq *sglq;
483         adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
484         if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
485                 return NULL;
486         sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
487         phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
488         return sglq;
489 }
490
491 /**
492  * __lpfc_get_active_sglq - Get the active sglq for this XRI.
493  * @phba: Pointer to HBA context object.
494  * @xritag: XRI value.
495  *
496  * This function returns the sglq pointer from the array of active
497  * sglq's. The xritag that is passed in is used to index into the
498  * array. Before the xritag can be used it needs to be adjusted
499  * by subtracting the xribase.
500  *
501  * Returns sglq pointer = success, NULL = Failure.
502  **/
503 struct lpfc_sglq *
504 __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
505 {
506         uint16_t adj_xri;
507         struct lpfc_sglq *sglq;
508         adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
509         if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
510                 return NULL;
511         sglq =  phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
512         return sglq;
513 }
514
515 /**
516  * __lpfc_set_rrq_active - set RRQ active bit in the ndlp's xri_bitmap.
517  * @phba: Pointer to HBA context object.
518  * @ndlp: nodelist pointer for this target.
519  * @xritag: xri used in this exchange.
520  * @rxid: Remote Exchange ID.
521  * @send_rrq: Flag used to determine if we should send rrq els cmd.
522  *
523  * This function is called with hbalock held.
524  * The active bit is set in the ndlp's active rrq xri_bitmap. Allocates an
525  * rrq struct and adds it to the active_rrq_list.
526  *
527  * returns 0 if an rrq slot was reserved for this xri
528  *         < 0 if no rrq memory was available or a parameter was invalid
529  **/
530 static int
531 __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
532                 uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
533 {
534         uint16_t adj_xri;
535         struct lpfc_node_rrq *rrq;
536         int empty;
537
538         /*
539          * set the active bit even if there is no mem available.
540          */
541         adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
542         if (!ndlp)
543                 return -EINVAL;
544         if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
545                 return -EINVAL;
546         rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
547         if (rrq) {
548                 rrq->send_rrq = send_rrq;
549                 rrq->xritag = xritag;
550                 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
551                 rrq->ndlp = ndlp;
552                 rrq->nlp_DID = ndlp->nlp_DID;
553                 rrq->vport = ndlp->vport;
554                 rrq->rxid = rxid;
555                 empty = list_empty(&phba->active_rrq_list);
556                 if (phba->cfg_enable_rrq && send_rrq)
557                         /*
558                          * We need the xri before we can add this to the
559                          * phba active rrq list.
560                          */
561                         rrq->send_rrq = send_rrq;
562                 else
563                         rrq->send_rrq = 0;
564                 list_add_tail(&rrq->list, &phba->active_rrq_list);
565                 if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
566                         phba->hba_flag |= HBA_RRQ_ACTIVE;
567                         if (empty)
568                                 lpfc_worker_wake_up(phba);
569                 }
570                 return 0;
571         }
572         return -ENOMEM;
573 }
574
575 /**
576  * __lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
577  * @phba: Pointer to HBA context object.
578  * @xritag: xri used in this exchange.
579  * @rrq: The RRQ to be cleared.
580  *
581  * This function is called with hbalock held. It clears the rrq's active xri bit and frees the rrq.
582  **/
583 static void
584 __lpfc_clr_rrq_active(struct lpfc_hba *phba,
585                         uint16_t xritag,
586                         struct lpfc_node_rrq *rrq)
587 {
588         uint16_t adj_xri;
589         struct lpfc_nodelist *ndlp;
590
591         ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
592
593         /* The target DID could have been swapped (cable swap),
594          * so we should use the ndlp from the findnode if it is
595          * available.
596          */
597         if (!ndlp)
598                 ndlp = rrq->ndlp;
599
600         adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
601         if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
602                 rrq->send_rrq = 0;
603                 rrq->xritag = 0;
604                 rrq->rrq_stop_time = 0;
605         }
606         mempool_free(rrq, phba->rrq_pool);
607 }
608
609 /**
610  * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
611  * @phba: Pointer to HBA context object.
612  *
613  * This function takes the hbalock. It checks whether stop_time
614  * (ratov from when the rrq was set active) has been reached; if it
615  * has and the send_rrq flag is set then it will call lpfc_send_rrq.
616  * If the send_rrq flag is not set then it will just call the
617  * routine to clear the rrq and free the rrq resource.
618  *
619  * The timer is set to the next rrq that is going to expire before
620  * leaving the routine.
621  *
622  **/
623 void
624 lpfc_handle_rrq_active(struct lpfc_hba *phba)
625 {
626         struct lpfc_node_rrq *rrq;
627         struct lpfc_node_rrq *nextrrq;
628         unsigned long next_time;
629         unsigned long iflags;
630
631         spin_lock_irqsave(&phba->hbalock, iflags);
632         phba->hba_flag &= ~HBA_RRQ_ACTIVE;
633         next_time = jiffies + HZ * (phba->fc_ratov + 1);
634         list_for_each_entry_safe(rrq, nextrrq,
635                         &phba->active_rrq_list, list) {
636                 if (time_after(jiffies, rrq->rrq_stop_time)) {
637                         list_del(&rrq->list);
638                         if (!rrq->send_rrq)
639                                 /* this call will free the rrq */
640                                 __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
641                         else {
642                         /* if we send the rrq then the completion handler
643                          *  will clear the bit in the xribitmap.
644                          */
645                                 spin_unlock_irqrestore(&phba->hbalock, iflags);
646                                 if (lpfc_send_rrq(phba, rrq)) {
647                                         lpfc_clr_rrq_active(phba, rrq->xritag,
648                                                                  rrq);
649                                 }
650                                 spin_lock_irqsave(&phba->hbalock, iflags);
651                         }
652                 } else if  (time_before(rrq->rrq_stop_time, next_time))
653                         next_time = rrq->rrq_stop_time;
654         }
655         spin_unlock_irqrestore(&phba->hbalock, iflags);
656         if (!list_empty(&phba->active_rrq_list))
657                 mod_timer(&phba->rrq_tmr, next_time);
658 }
659
660 /**
661  * lpfc_get_active_rrq - Get the active RRQ for this exchange.
662  * @vport: Pointer to vport context object.
663  * @xri: The xri used in the exchange.
664  * @did: The target's DID for this exchange.
665  *
666  * returns NULL = rrq not found in the phba->active_rrq_list.
667  *         rrq = rrq for this xri and target.
668  **/
669 struct lpfc_node_rrq *
670 lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
671 {
672         struct lpfc_hba *phba = vport->phba;
673         struct lpfc_node_rrq *rrq;
674         struct lpfc_node_rrq *nextrrq;
675         unsigned long iflags;
676
677         if (phba->sli_rev != LPFC_SLI_REV4)
678                 return NULL;
679         spin_lock_irqsave(&phba->hbalock, iflags);
680         list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
681                 if (rrq->vport == vport && rrq->xritag == xri &&
682                                 rrq->nlp_DID == did){
683                         list_del(&rrq->list);
684                         spin_unlock_irqrestore(&phba->hbalock, iflags);
685                         return rrq;
686                 }
687         }
688         spin_unlock_irqrestore(&phba->hbalock, iflags);
689         return NULL;
690 }
691
692 /**
693  * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
694  * @vport: Pointer to vport context object.
695  *
696  * Remove all active RRQs for this vport from the phba->active_rrq_list and
697  * clear the rrq.
698  **/
699 void
700 lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport)
701
702 {
703         struct lpfc_hba *phba = vport->phba;
704         struct lpfc_node_rrq *rrq;
705         struct lpfc_node_rrq *nextrrq;
706         unsigned long iflags;
707
708         if (phba->sli_rev != LPFC_SLI_REV4)
709                 return;
710         spin_lock_irqsave(&phba->hbalock, iflags);
711         list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
712                 if (rrq->vport == vport) {
713                         list_del(&rrq->list);
714                         __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
715                 }
716         }
717         spin_unlock_irqrestore(&phba->hbalock, iflags);
718 }
719
720 /**
721  * lpfc_cleanup_wt_rrqs - Remove all rrq's from the active list.
722  * @phba: Pointer to HBA context object.
723  *
724  * Remove all rrqs from the phba->active_rrq_list and free them by
725  * calling __lpfc_clr_rrq_active().
726  *
727  **/
728 void
729 lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
730 {
731         struct lpfc_node_rrq *rrq;
732         struct lpfc_node_rrq *nextrrq;
733         unsigned long next_time;
734         unsigned long iflags;
735
736         if (phba->sli_rev != LPFC_SLI_REV4)
737                 return;
738         spin_lock_irqsave(&phba->hbalock, iflags);
739         phba->hba_flag &= ~HBA_RRQ_ACTIVE;
740         next_time = jiffies + HZ * (phba->fc_ratov * 2);
741         list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
742                 list_del(&rrq->list);
743                 __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
744         }
745         spin_unlock_irqrestore(&phba->hbalock, iflags);
746         if (!list_empty(&phba->active_rrq_list))
747                 mod_timer(&phba->rrq_tmr, next_time);
748 }
749
750
751 /**
752  * __lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
753  * @phba: Pointer to HBA context object.
754  * @ndlp: Target's nodelist pointer for this exchange.
755  * @xritag: the xri in the bitmap to test.
756  *
757  * This function is called with hbalock held. This function
758  * returns 0 = rrq not active for this xri
759  *         1 = rrq is valid for this xri.
760  **/
761 static int
762 __lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
763                         uint16_t  xritag)
764 {
765         uint16_t adj_xri;
766
767         adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
768         if (!ndlp)
769                 return 0;
770         if (test_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
771                         return 1;
772         else
773                 return 0;
774 }
775
776 /**
777  * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
778  * @phba: Pointer to HBA context object.
779  * @ndlp: nodelist pointer for this target.
780  * @xritag: xri used in this exchange.
781  * @rxid: Remote Exchange ID.
782  * @send_rrq: Flag used to determine if we should send rrq els cmd.
783  *
784  * This function takes the hbalock.
785  * The active bit is always set in the active rrq xri_bitmap even
786  * if there is no slot available for the other rrq information.
787  *
788  * returns 0 if the rrq was activated for this xri
789  *         < 0 No memory or invalid ndlp.
790  **/
791 int
792 lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
793                         uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
794 {
795         int ret;
796         unsigned long iflags;
797
798         spin_lock_irqsave(&phba->hbalock, iflags);
799         ret = __lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq);
800         spin_unlock_irqrestore(&phba->hbalock, iflags);
801         return ret;
802 }
803
804 /**
805  * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
806  * @phba: Pointer to HBA context object.
807  * @xritag: xri used in this exchange.
808  * @rrq: The RRQ to be cleared.
809  *
810  * This function takes the hbalock.
811  **/
812 void
813 lpfc_clr_rrq_active(struct lpfc_hba *phba,
814                         uint16_t xritag,
815                         struct lpfc_node_rrq *rrq)
816 {
817         unsigned long iflags;
818
819         spin_lock_irqsave(&phba->hbalock, iflags);
820         __lpfc_clr_rrq_active(phba, xritag, rrq);
821         spin_unlock_irqrestore(&phba->hbalock, iflags);
822         return;
823 }
824
825
826
827 /**
828  * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
829  * @phba: Pointer to HBA context object.
830  * @ndlp: Target's nodelist pointer for this exchange.
831  * @xritag: the xri in the bitmap to test.
832  *
833  * This function takes the hbalock.
834  * returns 0 = rrq not active for this xri
835  *         1 = rrq is valid for this xri.
836  **/
837 int
838 lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
839                         uint16_t  xritag)
840 {
841         int ret;
842         unsigned long iflags;
843
844         spin_lock_irqsave(&phba->hbalock, iflags);
845         ret = __lpfc_test_rrq_active(phba, ndlp, xritag);
846         spin_unlock_irqrestore(&phba->hbalock, iflags);
847         return ret;
848 }
849
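/*
 * Editorial example (sketch only, not driver code): before reusing an xri
 * for a new command to this node, a caller can check whether an RRQ is
 * still outstanding on it. "ndlp" and "xritag" come from the caller.
 *
 *	if (lpfc_test_rrq_active(phba, ndlp, xritag))
 *		return NULL;		(xri still quarantined, pick another)
 */
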
850 /**
851  * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
852  * @phba: Pointer to HBA context object.
853  * @piocb: Pointer to the iocbq.
854  *
855  * This function is called with hbalock held. It gets a new driver
856  * sglq object from the sglq list. If the list is not empty then
857  * the allocation is successful and it returns a pointer to the newly
858  * allocated sglq object; otherwise it returns NULL.
859  **/
860 static struct lpfc_sglq *
861 __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
862 {
863         struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
864         struct lpfc_sglq *sglq = NULL;
865         struct lpfc_sglq *start_sglq = NULL;
866         uint16_t adj_xri;
867         struct lpfc_scsi_buf *lpfc_cmd;
868         struct lpfc_nodelist *ndlp;
869         int found = 0;
870
871         if (piocbq->iocb_flag &  LPFC_IO_FCP) {
872                 lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
873                 ndlp = lpfc_cmd->rdata->pnode;
874         } else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
875                         !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
876                 ndlp = piocbq->context_un.ndlp;
877         else
878                 ndlp = piocbq->context1;
879
880         list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
881         start_sglq = sglq;
882         while (!found) {
883                 if (!sglq)
884                         return NULL;
885                 adj_xri = sglq->sli4_xritag -
886                                 phba->sli4_hba.max_cfg_param.xri_base;
887                 if (__lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
888                         /* This xri has an rrq outstanding for this DID.
889                          * put it back in the list and get another xri.
890                          */
891                         list_add_tail(&sglq->list, lpfc_sgl_list);
892                         sglq = NULL;
893                         list_remove_head(lpfc_sgl_list, sglq,
894                                                 struct lpfc_sglq, list);
895                         if (sglq == start_sglq) {
896                                 sglq = NULL;
897                                 break;
898                         } else
899                                 continue;
900                 }
901                 sglq->ndlp = ndlp;
902                 found = 1;
903                 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
904                 sglq->state = SGL_ALLOCATED;
905         }
906         return sglq;
907 }
908
909 /**
910  * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
911  * @phba: Pointer to HBA context object.
912  *
913  * This function is called with no lock held. This function
914  * allocates a new driver iocb object from the iocb pool. If the
915  * allocation is successful, it returns a pointer to the newly
916  * allocated iocb object; otherwise it returns NULL.
917  **/
918 struct lpfc_iocbq *
919 lpfc_sli_get_iocbq(struct lpfc_hba *phba)
920 {
921         struct lpfc_iocbq * iocbq = NULL;
922         unsigned long iflags;
923
924         spin_lock_irqsave(&phba->hbalock, iflags);
925         iocbq = __lpfc_sli_get_iocbq(phba);
926         spin_unlock_irqrestore(&phba->hbalock, iflags);
927         return iocbq;
928 }
929
930 /**
931  * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
932  * @phba: Pointer to HBA context object.
933  * @iocbq: Pointer to driver iocb object.
934  *
935  * This function is called with hbalock held to release driver
936  * iocb object to the iocb pool. The iotag in the iocb object
937  * does not change for each use of the iocb object. This function
938  * clears all other fields of the iocb object when it is freed.
939  * The sglq structure that holds the xritag and phys and virtual
940  * mappings for the scatter gather list is retrieved from the
941  * active array of sglq. The get of the sglq pointer also clears
942  * the entry in the array. If the status of the IO indicates that
943  * this IO was aborted then the sglq entry is put on the
944  * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
945  * IO has good status or fails for any other reason then the sglq
946  * entry is added to the free list (lpfc_sgl_list).
947  **/
948 static void
949 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
950 {
951         struct lpfc_sglq *sglq;
952         size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
953         unsigned long iflag = 0;
954         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
955
956         if (iocbq->sli4_xritag == NO_XRI)
957                 sglq = NULL;
958         else
959                 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
960         if (sglq)  {
961                 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
962                         (sglq->state != SGL_XRI_ABORTED)) {
963                         spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
964                                         iflag);
965                         list_add(&sglq->list,
966                                 &phba->sli4_hba.lpfc_abts_els_sgl_list);
967                         spin_unlock_irqrestore(
968                                 &phba->sli4_hba.abts_sgl_list_lock, iflag);
969                 } else {
970                         sglq->state = SGL_FREED;
971                         sglq->ndlp = NULL;
972                         list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
973
974                         /* Check if TXQ queue needs to be serviced */
975                         if (pring->txq_cnt)
976                                 lpfc_worker_wake_up(phba);
977                 }
978         }
979
980
981         /*
982          * Clean all volatile data fields, preserve iotag and node struct.
983          */
984         memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
985         iocbq->sli4_xritag = NO_XRI;
986         list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
987 }
988
989
990 /**
991  * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
992  * @phba: Pointer to HBA context object.
993  * @iocbq: Pointer to driver iocb object.
994  *
995  * This function is called with hbalock held to release driver
996  * iocb object to the iocb pool. The iotag in the iocb object
997  * does not change for each use of the iocb object. This function
998  * clears all other fields of the iocb object when it is freed.
999  **/
1000 static void
1001 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1002 {
1003         size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1004
1005         /*
1006          * Clean all volatile data fields, preserve iotag and node struct.
1007          */
1008         memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1009         iocbq->sli4_xritag = NO_XRI;
1010         list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1011 }
1012
1013 /**
1014  * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
1015  * @phba: Pointer to HBA context object.
1016  * @iocbq: Pointer to driver iocb object.
1017  *
1018  * This function is called with hbalock held to release driver
1019  * iocb object to the iocb pool. The iotag in the iocb object
1020  * does not change for each use of the iocb object. This function
1021  * clears all other fields of the iocb object when it is freed.
1022  **/
1023 static void
1024 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1025 {
1026         phba->__lpfc_sli_release_iocbq(phba, iocbq);
1027         phba->iocb_cnt--;
1028 }
1029
1030 /**
1031  * lpfc_sli_release_iocbq - Release iocb to the iocb pool
1032  * @phba: Pointer to HBA context object.
1033  * @iocbq: Pointer to driver iocb object.
1034  *
1035  * This function is called with no lock held to release the iocb to
1036  * iocb pool.
1037  **/
1038 void
1039 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1040 {
1041         unsigned long iflags;
1042
1043         /*
1044          * Clean all volatile data fields, preserve iotag and node struct.
1045          */
1046         spin_lock_irqsave(&phba->hbalock, iflags);
1047         __lpfc_sli_release_iocbq(phba, iocbq);
1048         spin_unlock_irqrestore(&phba->hbalock, iflags);
1049 }
1050
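/*
 * Editorial example (sketch only, not driver code): the basic lifecycle of
 * an iocbq for a caller that holds no locks; building and issuing the
 * command is elided.
 *
 *	struct lpfc_iocbq *iocbq;
 *
 *	iocbq = lpfc_sli_get_iocbq(phba);
 *	if (!iocbq)
 *		return -ENOMEM;
 *	...
 *	lpfc_sli_release_iocbq(phba, iocbq);
 */
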
1051 /**
1052  * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1053  * @phba: Pointer to HBA context object.
1054  * @iocblist: List of IOCBs.
1055  * @ulpstatus: ULP status in IOCB command field.
1056  * @ulpWord4: ULP word-4 in IOCB command field.
1057  *
1058  * This function is called with a list of IOCBs to cancel. It cancels the IOCB
1059  * on the list by invoking the complete callback function associated with the
1060  * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
1061  * fields.
1062  **/
1063 void
1064 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1065                       uint32_t ulpstatus, uint32_t ulpWord4)
1066 {
1067         struct lpfc_iocbq *piocb;
1068
1069         while (!list_empty(iocblist)) {
1070                 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1071
1072                 if (!piocb->iocb_cmpl)
1073                         lpfc_sli_release_iocbq(phba, piocb);
1074                 else {
1075                         piocb->iocb.ulpStatus = ulpstatus;
1076                         piocb->iocb.un.ulpWord[4] = ulpWord4;
1077                         (piocb->iocb_cmpl) (phba, piocb, piocb);
1078                 }
1079         }
1080         return;
1081 }
1082
1083 /**
1084  * lpfc_sli_iocb_cmd_type - Get the iocb type
1085  * @iocb_cmnd: iocb command code.
1086  *
1087  * This function is called by ring event handler function to get the iocb type.
1088  * This function translates the iocb command to an iocb command type used to
1089  * decide the final disposition of each completed IOCB.
1090  * The function returns
1091  * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1092  * LPFC_SOL_IOCB     if it is a solicited iocb completion
1093  * LPFC_ABORT_IOCB   if it is an abort iocb
1094  * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
1095  *
1096  * The caller is not required to hold any lock.
1097  **/
1098 static lpfc_iocb_type
1099 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1100 {
1101         lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1102
1103         if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1104                 return LPFC_UNKNOWN_IOCB;
1105
1106         switch (iocb_cmnd) {
1107         case CMD_XMIT_SEQUENCE_CR:
1108         case CMD_XMIT_SEQUENCE_CX:
1109         case CMD_XMIT_BCAST_CN:
1110         case CMD_XMIT_BCAST_CX:
1111         case CMD_ELS_REQUEST_CR:
1112         case CMD_ELS_REQUEST_CX:
1113         case CMD_CREATE_XRI_CR:
1114         case CMD_CREATE_XRI_CX:
1115         case CMD_GET_RPI_CN:
1116         case CMD_XMIT_ELS_RSP_CX:
1117         case CMD_GET_RPI_CR:
1118         case CMD_FCP_IWRITE_CR:
1119         case CMD_FCP_IWRITE_CX:
1120         case CMD_FCP_IREAD_CR:
1121         case CMD_FCP_IREAD_CX:
1122         case CMD_FCP_ICMND_CR:
1123         case CMD_FCP_ICMND_CX:
1124         case CMD_FCP_TSEND_CX:
1125         case CMD_FCP_TRSP_CX:
1126         case CMD_FCP_TRECEIVE_CX:
1127         case CMD_FCP_AUTO_TRSP_CX:
1128         case CMD_ADAPTER_MSG:
1129         case CMD_ADAPTER_DUMP:
1130         case CMD_XMIT_SEQUENCE64_CR:
1131         case CMD_XMIT_SEQUENCE64_CX:
1132         case CMD_XMIT_BCAST64_CN:
1133         case CMD_XMIT_BCAST64_CX:
1134         case CMD_ELS_REQUEST64_CR:
1135         case CMD_ELS_REQUEST64_CX:
1136         case CMD_FCP_IWRITE64_CR:
1137         case CMD_FCP_IWRITE64_CX:
1138         case CMD_FCP_IREAD64_CR:
1139         case CMD_FCP_IREAD64_CX:
1140         case CMD_FCP_ICMND64_CR:
1141         case CMD_FCP_ICMND64_CX:
1142         case CMD_FCP_TSEND64_CX:
1143         case CMD_FCP_TRSP64_CX:
1144         case CMD_FCP_TRECEIVE64_CX:
1145         case CMD_GEN_REQUEST64_CR:
1146         case CMD_GEN_REQUEST64_CX:
1147         case CMD_XMIT_ELS_RSP64_CX:
1148         case DSSCMD_IWRITE64_CR:
1149         case DSSCMD_IWRITE64_CX:
1150         case DSSCMD_IREAD64_CR:
1151         case DSSCMD_IREAD64_CX:
1152                 type = LPFC_SOL_IOCB;
1153                 break;
1154         case CMD_ABORT_XRI_CN:
1155         case CMD_ABORT_XRI_CX:
1156         case CMD_CLOSE_XRI_CN:
1157         case CMD_CLOSE_XRI_CX:
1158         case CMD_XRI_ABORTED_CX:
1159         case CMD_ABORT_MXRI64_CN:
1160         case CMD_XMIT_BLS_RSP64_CX:
1161                 type = LPFC_ABORT_IOCB;
1162                 break;
1163         case CMD_RCV_SEQUENCE_CX:
1164         case CMD_RCV_ELS_REQ_CX:
1165         case CMD_RCV_SEQUENCE64_CX:
1166         case CMD_RCV_ELS_REQ64_CX:
1167         case CMD_ASYNC_STATUS:
1168         case CMD_IOCB_RCV_SEQ64_CX:
1169         case CMD_IOCB_RCV_ELS64_CX:
1170         case CMD_IOCB_RCV_CONT64_CX:
1171         case CMD_IOCB_RET_XRI64_CX:
1172                 type = LPFC_UNSOL_IOCB;
1173                 break;
1174         case CMD_IOCB_XMIT_MSEQ64_CR:
1175         case CMD_IOCB_XMIT_MSEQ64_CX:
1176         case CMD_IOCB_RCV_SEQ_LIST64_CX:
1177         case CMD_IOCB_RCV_ELS_LIST64_CX:
1178         case CMD_IOCB_CLOSE_EXTENDED_CN:
1179         case CMD_IOCB_ABORT_EXTENDED_CN:
1180         case CMD_IOCB_RET_HBQE64_CN:
1181         case CMD_IOCB_FCP_IBIDIR64_CR:
1182         case CMD_IOCB_FCP_IBIDIR64_CX:
1183         case CMD_IOCB_FCP_ITASKMGT64_CX:
1184         case CMD_IOCB_LOGENTRY_CN:
1185         case CMD_IOCB_LOGENTRY_ASYNC_CN:
1186                 printk("%s - Unhandled SLI-3 Command x%x\n",
1187                                 __func__, iocb_cmnd);
1188                 type = LPFC_UNKNOWN_IOCB;
1189                 break;
1190         default:
1191                 type = LPFC_UNKNOWN_IOCB;
1192                 break;
1193         }
1194
1195         return type;
1196 }
1197
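/*
 * Editorial example (sketch only, not driver code): how a ring event handler
 * can branch on the translated type. "irsp" stands for the response IOCB_t
 * taken from the ring.
 *
 *	switch (lpfc_sli_iocb_cmd_type(irsp->ulpCommand)) {
 *	case LPFC_SOL_IOCB:
 *		(complete the originating command)
 *		break;
 *	case LPFC_UNSOL_IOCB:
 *		(hand the received sequence to the ULP)
 *		break;
 *	case LPFC_ABORT_IOCB:
 *	case LPFC_UNKNOWN_IOCB:
 *	default:
 *		break;
 *	}
 */
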
1198 /**
1199  * lpfc_sli_ring_map - Issue config_ring mbox for all rings
1200  * @phba: Pointer to HBA context object.
1201  *
1202  * This function is called from SLI initialization code
1203  * to configure every ring of the HBA's SLI interface. The
1204  * caller is not required to hold any lock. This function issues
1205  * a config_ring mailbox command for each ring.
1206  * This function returns zero if successful else returns a negative
1207  * error code.
1208  **/
1209 static int
1210 lpfc_sli_ring_map(struct lpfc_hba *phba)
1211 {
1212         struct lpfc_sli *psli = &phba->sli;
1213         LPFC_MBOXQ_t *pmb;
1214         MAILBOX_t *pmbox;
1215         int i, rc, ret = 0;
1216
1217         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1218         if (!pmb)
1219                 return -ENOMEM;
1220         pmbox = &pmb->u.mb;
1221         phba->link_state = LPFC_INIT_MBX_CMDS;
1222         for (i = 0; i < psli->num_rings; i++) {
1223                 lpfc_config_ring(phba, i, pmb);
1224                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1225                 if (rc != MBX_SUCCESS) {
1226                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1227                                         "0446 Adapter failed to init (%d), "
1228                                         "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1229                                         "ring %d\n",
1230                                         rc, pmbox->mbxCommand,
1231                                         pmbox->mbxStatus, i);
1232                         phba->link_state = LPFC_HBA_ERROR;
1233                         ret = -ENXIO;
1234                         break;
1235                 }
1236         }
1237         mempool_free(pmb, phba->mbox_mem_pool);
1238         return ret;
1239 }
1240
1241 /**
1242  * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1243  * @phba: Pointer to HBA context object.
1244  * @pring: Pointer to driver SLI ring object.
1245  * @piocb: Pointer to the driver iocb object.
1246  *
1247  * This function is called with hbalock held. The function adds the
1248  * new iocb to txcmplq of the given ring. This function always returns
1249  * 0. If this function is called for ELS ring, this function checks if
1250  * there is a vport associated with the ELS command. This function also
1251  * starts els_tmofunc timer if this is an ELS command.
1252  **/
1253 static int
1254 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1255                         struct lpfc_iocbq *piocb)
1256 {
1257         list_add_tail(&piocb->list, &pring->txcmplq);
1258         piocb->iocb_flag |= LPFC_IO_ON_Q;
1259         pring->txcmplq_cnt++;
1260         if (pring->txcmplq_cnt > pring->txcmplq_max)
1261                 pring->txcmplq_max = pring->txcmplq_cnt;
1262
1263         if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1264            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
1265            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1266                 if (!piocb->vport)
1267                         BUG();
1268                 else
1269                         mod_timer(&piocb->vport->els_tmofunc,
1270                                   jiffies + HZ * (phba->fc_ratov << 1));
1271         }
1272
1273
1274         return 0;
1275 }
1276
1277 /**
1278  * lpfc_sli_ringtx_get - Get first element of the txq
1279  * @phba: Pointer to HBA context object.
1280  * @pring: Pointer to driver SLI ring object.
1281  *
1282  * This function is called with hbalock held to get next
1283  * iocb in txq of the given ring. If there is any iocb in
1284  * the txq, the function returns first iocb in the list after
1285  * removing the iocb from the list, else it returns NULL.
1286  **/
1287 struct lpfc_iocbq *
1288 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1289 {
1290         struct lpfc_iocbq *cmd_iocb;
1291
1292         list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1293         if (cmd_iocb != NULL)
1294                 pring->txq_cnt--;
1295         return cmd_iocb;
1296 }
1297
1298 /**
1299  * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
1300  * @phba: Pointer to HBA context object.
1301  * @pring: Pointer to driver SLI ring object.
1302  *
1303  * This function is called with hbalock held and the caller must post the
1304  * iocb without releasing the lock. If the caller releases the lock,
1305  * the iocb slot returned by the function is not guaranteed to be available.
1306  * The function returns pointer to the next available iocb slot if there
1307  * is available slot in the ring, else it returns NULL.
1308  * If the get index of the ring is ahead of the put index, the function
1309  * will post an error attention event to the worker thread to take the
1310  * HBA to offline state.
1311  **/
1312 static IOCB_t *
1313 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1314 {
1315         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
1316         uint32_t  max_cmd_idx = pring->numCiocb;
1317         if ((pring->next_cmdidx == pring->cmdidx) &&
1318            (++pring->next_cmdidx >= max_cmd_idx))
1319                 pring->next_cmdidx = 0;
1320
1321         if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
1322
1323                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1324
1325                 if (unlikely(pring->local_getidx >= max_cmd_idx)) {
1326                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1327                                         "0315 Ring %d issue: portCmdGet %d "
1328                                         "is bigger than cmd ring %d\n",
1329                                         pring->ringno,
1330                                         pring->local_getidx, max_cmd_idx);
1331
1332                         phba->link_state = LPFC_HBA_ERROR;
1333                         /*
1334                          * All error attention handlers are posted to
1335                          * worker thread
1336                          */
1337                         phba->work_ha |= HA_ERATT;
1338                         phba->work_hs = HS_FFER3;
1339
1340                         lpfc_worker_wake_up(phba);
1341
1342                         return NULL;
1343                 }
1344
1345                 if (pring->local_getidx == pring->next_cmdidx)
1346                         return NULL;
1347         }
1348
1349         return lpfc_cmd_iocb(phba, pring);
1350 }
1351
1352 /**
1353  * lpfc_sli_next_iotag - Get an iotag for the iocb
1354  * @phba: Pointer to HBA context object.
1355  * @iocbq: Pointer to driver iocb object.
1356  *
1357  * This function gets an iotag for the iocb. If there is no unused iotag and
1358  * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
1359  * array and assigns a new iotag.
1360  * The function returns the allocated iotag if successful, else returns zero.
1361  * Zero is not a valid iotag.
1362  * The caller is not required to hold any lock.
1363  **/
1364 uint16_t
1365 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1366 {
1367         struct lpfc_iocbq **new_arr;
1368         struct lpfc_iocbq **old_arr;
1369         size_t new_len;
1370         struct lpfc_sli *psli = &phba->sli;
1371         uint16_t iotag;
1372
1373         spin_lock_irq(&phba->hbalock);
1374         iotag = psli->last_iotag;
1375         if(++iotag < psli->iocbq_lookup_len) {
1376                 psli->last_iotag = iotag;
1377                 psli->iocbq_lookup[iotag] = iocbq;
1378                 spin_unlock_irq(&phba->hbalock);
1379                 iocbq->iotag = iotag;
1380                 return iotag;
1381         } else if (psli->iocbq_lookup_len < (0xffff
1382                                            - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1383                 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
1384                 spin_unlock_irq(&phba->hbalock);
1385                 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
1386                                   GFP_KERNEL);
1387                 if (new_arr) {
1388                         spin_lock_irq(&phba->hbalock);
1389                         old_arr = psli->iocbq_lookup;
1390                         if (new_len <= psli->iocbq_lookup_len) {
1391                                 /* highly improbable case */
1392                                 kfree(new_arr);
1393                                 iotag = psli->last_iotag;
1394                                 if (++iotag < psli->iocbq_lookup_len) {
1395                                         psli->last_iotag = iotag;
1396                                         psli->iocbq_lookup[iotag] = iocbq;
1397                                         spin_unlock_irq(&phba->hbalock);
1398                                         iocbq->iotag = iotag;
1399                                         return iotag;
1400                                 }
1401                                 spin_unlock_irq(&phba->hbalock);
1402                                 return 0;
1403                         }
1404                         if (psli->iocbq_lookup)
1405                                 memcpy(new_arr, old_arr,
1406                                        ((psli->last_iotag  + 1) *
1407                                         sizeof (struct lpfc_iocbq *)));
1408                         psli->iocbq_lookup = new_arr;
1409                         psli->iocbq_lookup_len = new_len;
1410                         psli->last_iotag = iotag;
1411                         psli->iocbq_lookup[iotag] = iocbq;
1412                         spin_unlock_irq(&phba->hbalock);
1413                         iocbq->iotag = iotag;
1414                         kfree(old_arr);
1415                         return iotag;
1416                 }
1417         } else
1418                 spin_unlock_irq(&phba->hbalock);
1419
1420         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1421                         "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1422                         psli->last_iotag);
1423
1424         return 0;
1425 }
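
/*
 * Illustrative sketch (compiled out, not part of the driver): a caller tags
 * an iocb before building it and treats a zero return as failure, exactly as
 * the kernel-doc above describes.  The helper name is hypothetical.
 */
#if 0
static int example_tag_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        uint16_t iotag = lpfc_sli_next_iotag(phba, iocbq);

        if (!iotag)                     /* zero is never a valid iotag */
                return -ENOMEM;
        /* iocbq->iotag is now set; the IOCB can be built and issued */
        return 0;
}
#endif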
1426
1427 /**
1428  * lpfc_sli_submit_iocb - Submit an iocb to the firmware
1429  * @phba: Pointer to HBA context object.
1430  * @pring: Pointer to driver SLI ring object.
1431  * @iocb: Pointer to iocb slot in the ring.
1432  * @nextiocb: Pointer to driver iocb object which needs to be
1433  *            posted to firmware.
1434  *
1435  * This function is called with the hbalock held to post a new iocb to
1436  * the firmware. This function copies the new iocb to the ring iocb slot and
1437  * updates the ring pointers. It adds the new iocb to the txcmplq if there is
1438  * a completion callback for this iocb, else the function will free the
1439  * iocb object.
1440  **/
1441 static void
1442 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1443                 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1444 {
1445         /*
1446          * Set up an iotag
1447          */
1448         nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
1449
1450
1451         if (pring->ringno == LPFC_ELS_RING) {
1452                 lpfc_debugfs_slow_ring_trc(phba,
1453                         "IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
1454                         *(((uint32_t *) &nextiocb->iocb) + 4),
1455                         *(((uint32_t *) &nextiocb->iocb) + 6),
1456                         *(((uint32_t *) &nextiocb->iocb) + 7));
1457         }
1458
1459         /*
1460          * Issue iocb command to adapter
1461          */
1462         lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
1463         wmb();
1464         pring->stats.iocb_cmd++;
1465
1466         /*
1467          * If there is no completion routine to call, we can release the
1468          * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1469          * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1470          */
1471         if (nextiocb->iocb_cmpl)
1472                 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
1473         else
1474                 __lpfc_sli_release_iocbq(phba, nextiocb);
1475
1476         /*
1477          * Let the HBA know what IOCB slot will be the next one the
1478          * driver will put a command into.
1479          */
1480         pring->cmdidx = pring->next_cmdidx;
1481         writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
1482 }
1483
1484 /**
1485  * lpfc_sli_update_full_ring - Update the chip attention register
1486  * @phba: Pointer to HBA context object.
1487  * @pring: Pointer to driver SLI ring object.
1488  *
1489  * The caller is not required to hold any lock for calling this function.
1490  * This function updates the chip attention bits for the ring to inform the
1491  * firmware that there is pending work to be done for this ring and requests an
1492  * interrupt when there is space available in the ring. This function is
1493  * called when the driver is unable to post more iocbs to the ring due
1494  * to unavailability of space in the ring.
1495  **/
1496 static void
1497 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1498 {
1499         int ringno = pring->ringno;
1500
1501         pring->flag |= LPFC_CALL_RING_AVAILABLE;
1502
1503         wmb();
1504
1505         /*
1506          * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1507          * The HBA will tell us when an IOCB entry is available.
1508          */
1509         writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1510         readl(phba->CAregaddr); /* flush */
1511
1512         pring->stats.iocb_cmd_full++;
1513 }
1514
1515 /**
1516  * lpfc_sli_update_ring - Update chip attention register
1517  * @phba: Pointer to HBA context object.
1518  * @pring: Pointer to driver SLI ring object.
1519  *
1520  * This function updates the chip attention register bit for the
1521  * given ring to inform HBA that there is more work to be done
1522  * in this ring. The caller is not required to hold any lock.
1523  **/
1524 static void
1525 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1526 {
1527         int ringno = pring->ringno;
1528
1529         /*
1530          * Tell the HBA that there is work to do in this ring.
1531          */
1532         if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1533                 wmb();
1534                 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1535                 readl(phba->CAregaddr); /* flush */
1536         }
1537 }
1538
1539 /**
1540  * lpfc_sli_resume_iocb - Process iocbs in the txq
1541  * @phba: Pointer to HBA context object.
1542  * @pring: Pointer to driver SLI ring object.
1543  *
1544  * This function is called with hbalock held to post pending iocbs
1545  * in the txq to the firmware. This function is called when the driver
1546  * detects space available in the ring.
1547  **/
1548 static void
1549 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1550 {
1551         IOCB_t *iocb;
1552         struct lpfc_iocbq *nextiocb;
1553
1554         /*
1555          * Check to see if:
1556          *  (a) there is anything on the txq to send
1557          *  (b) link is up
1558          *  (c) link attention events can be processed (fcp ring only)
1559          *  (d) IOCB processing is not blocked by the outstanding mbox command.
1560          */
1561         if (pring->txq_cnt &&
1562             lpfc_is_link_up(phba) &&
1563             (pring->ringno != phba->sli.fcp_ring ||
1564              phba->sli.sli_flag & LPFC_PROCESS_LA)) {
1565
1566                 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1567                        (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1568                         lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1569
1570                 if (iocb)
1571                         lpfc_sli_update_ring(phba, pring);
1572                 else
1573                         lpfc_sli_update_full_ring(phba, pring);
1574         }
1575
1576         return;
1577 }
1578
1579 /**
1580  * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1581  * @phba: Pointer to HBA context object.
1582  * @hbqno: HBQ number.
1583  *
1584  * This function is called with the hbalock held to get the next
1585  * available slot for the given HBQ. If there is a free slot
1586  * available for the HBQ, it will return a pointer to the next available
1587  * HBQ entry, else it will return NULL.
1588  **/
1589 static struct lpfc_hbq_entry *
1590 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1591 {
1592         struct hbq_s *hbqp = &phba->hbqs[hbqno];
1593
1594         if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1595             ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1596                 hbqp->next_hbqPutIdx = 0;
1597
1598         if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1599                 uint32_t raw_index = phba->hbq_get[hbqno];
1600                 uint32_t getidx = le32_to_cpu(raw_index);
1601
1602                 hbqp->local_hbqGetIdx = getidx;
1603
1604                 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1605                         lpfc_printf_log(phba, KERN_ERR,
1606                                         LOG_SLI | LOG_VPORT,
1607                                         "1802 HBQ %d: local_hbqGetIdx "
1608                                         "%u is > than hbqp->entry_count %u\n",
1609                                         hbqno, hbqp->local_hbqGetIdx,
1610                                         hbqp->entry_count);
1611
1612                         phba->link_state = LPFC_HBA_ERROR;
1613                         return NULL;
1614                 }
1615
1616                 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1617                         return NULL;
1618         }
1619
1620         return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1621                         hbqp->hbqPutIdx;
1622 }
1623
1624 /**
1625  * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
1626  * @phba: Pointer to HBA context object.
1627  *
1628  * This function is called with no lock held to free all the
1629  * hbq buffers while uninitializing the SLI interface. It also
1630  * frees the HBQ buffers returned by the firmware but not yet
1631  * processed by the upper layers.
1632  **/
1633 void
1634 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1635 {
1636         struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1637         struct hbq_dmabuf *hbq_buf;
1638         unsigned long flags;
1639         int i, hbq_count;
1640         uint32_t hbqno;
1641
1642         hbq_count = lpfc_sli_hbq_count();
1643         /* Return all memory used by all HBQs */
1644         spin_lock_irqsave(&phba->hbalock, flags);
1645         for (i = 0; i < hbq_count; ++i) {
1646                 list_for_each_entry_safe(dmabuf, next_dmabuf,
1647                                 &phba->hbqs[i].hbq_buffer_list, list) {
1648                         hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1649                         list_del(&hbq_buf->dbuf.list);
1650                         (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1651                 }
1652                 phba->hbqs[i].buffer_count = 0;
1653         }
1654         /* Return all HBQ buffers that are in flight */
1655         list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
1656                                  list) {
1657                 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1658                 list_del(&hbq_buf->dbuf.list);
1659                 if (hbq_buf->tag == -1) {
1660                         (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1661                                 (phba, hbq_buf);
1662                 } else {
1663                         hbqno = hbq_buf->tag >> 16;
1664                         if (hbqno >= LPFC_MAX_HBQS)
1665                                 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1666                                         (phba, hbq_buf);
1667                         else
1668                                 (phba->hbqs[hbqno].hbq_free_buffer)(phba,
1669                                         hbq_buf);
1670                 }
1671         }
1672
1673         /* Mark the HBQs not in use */
1674         phba->hbq_in_use = 0;
1675         spin_unlock_irqrestore(&phba->hbalock, flags);
1676 }
1677
1678 /**
1679  * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
1680  * @phba: Pointer to HBA context object.
1681  * @hbqno: HBQ number.
1682  * @hbq_buf: Pointer to HBQ buffer.
1683  *
1684  * This function is called with the hbalock held to post a
1685  * hbq buffer to the firmware through the SLI revision specific
1686  * posting routine. The function returns zero if it successfully
1687  * posts the buffer, else it returns an error code (for example
1688  * -ENOMEM when no HBQ slot is available).
1689  **/
1690 static int
1691 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
1692                          struct hbq_dmabuf *hbq_buf)
1693 {
1694         return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1695 }
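
/*
 * Illustrative note (compiled out): the wrapper above dispatches through a
 * per-adapter function pointer so SLI-3 and SLI-4 HBAs share one call site.
 * During adapter setup the pointer is presumably wired to the matching
 * variant, roughly as sketched below (the setup location itself is outside
 * this section and is an assumption).
 */
#if 0
        if (phba->sli_rev == LPFC_SLI_REV4)
                phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
        else
                phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
#endif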
1696
1697 /**
1698  * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1699  * @phba: Pointer to HBA context object.
1700  * @hbqno: HBQ number.
1701  * @hbq_buf: Pointer to HBQ buffer.
1702  *
1703  * This function is called with the hbalock held to post a hbq buffer to the
1704  * firmware. If the function finds an empty slot in the HBQ, it will post the
1705  * buffer and place it on the hbq_buffer_list. The function will return zero if
1706  * it successfully posts the buffer, else it will return an error.
1707  **/
1708 static int
1709 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1710                             struct hbq_dmabuf *hbq_buf)
1711 {
1712         struct lpfc_hbq_entry *hbqe;
1713         dma_addr_t physaddr = hbq_buf->dbuf.phys;
1714
1715         /* Get next HBQ entry slot to use */
1716         hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
1717         if (hbqe) {
1718                 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1719
1720                 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1721                 hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
1722                 hbqe->bde.tus.f.bdeSize = hbq_buf->size;
1723                 hbqe->bde.tus.f.bdeFlags = 0;
1724                 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
1725                 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
1726                                 /* Sync SLIM */
1727                 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
1728                 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
1729                                 /* flush */
1730                 readl(phba->hbq_put + hbqno);
1731                 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
1732                 return 0;
1733         } else
1734                 return -ENOMEM;
1735 }
1736
1737 /**
1738  * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1739  * @phba: Pointer to HBA context object.
1740  * @hbqno: HBQ number.
1741  * @hbq_buf: Pointer to HBQ buffer.
1742  *
1743  * This function is called with the hbalock held to post an RQE to the SLI4
1744  * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1745  * the hbq_buffer_list and return zero, otherwise it will return an error.
1746  **/
1747 static int
1748 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1749                             struct hbq_dmabuf *hbq_buf)
1750 {
1751         int rc;
1752         struct lpfc_rqe hrqe;
1753         struct lpfc_rqe drqe;
1754
1755         hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1756         hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1757         drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1758         drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
1759         rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
1760                               &hrqe, &drqe);
1761         if (rc < 0)
1762                 return rc;
1763         hbq_buf->tag = rc;
1764         list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1765         return 0;
1766 }
1767
1768 /* HBQ for ELS and CT traffic. */
1769 static struct lpfc_hbq_init lpfc_els_hbq = {
1770         .rn = 1,
1771         .entry_count = 256,
1772         .mask_count = 0,
1773         .profile = 0,
1774         .ring_mask = (1 << LPFC_ELS_RING),
1775         .buffer_count = 0,
1776         .init_count = 40,
1777         .add_count = 40,
1778 };
1779
1780 /* HBQ for the extra ring if needed */
1781 static struct lpfc_hbq_init lpfc_extra_hbq = {
1782         .rn = 1,
1783         .entry_count = 200,
1784         .mask_count = 0,
1785         .profile = 0,
1786         .ring_mask = (1 << LPFC_EXTRA_RING),
1787         .buffer_count = 0,
1788         .init_count = 0,
1789         .add_count = 5,
1790 };
1791
1792 /* Array of HBQs */
1793 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
1794         &lpfc_els_hbq,
1795         &lpfc_extra_hbq,
1796 };
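
/*
 * Illustrative note (compiled out): lpfc_hbq_defs[] is indexed by HBQ number,
 * as the fill routines below show, so the per-HBQ tuning values are reached
 * like this (LPFC_ELS_HBQ indexing the ELS entry is an assumption based on
 * how the HBQ number is used elsewhere in this file).
 */
#if 0
        uint32_t els_initial_post = lpfc_hbq_defs[LPFC_ELS_HBQ]->init_count;
        uint32_t els_replenish    = lpfc_hbq_defs[LPFC_ELS_HBQ]->add_count;
#endif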
1797
1798 /**
1799  * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
1800  * @phba: Pointer to HBA context object.
1801  * @hbqno: HBQ number.
1802  * @count: Number of HBQ buffers to be posted.
1803  *
1804  * This function is called with no lock held to post more hbq buffers to the
1805  * given HBQ. The function returns the number of HBQ buffers successfully
1806  * posted.
1807  **/
1808 static int
1809 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
1810 {
1811         uint32_t i, posted = 0;
1812         unsigned long flags;
1813         struct hbq_dmabuf *hbq_buffer;
1814         LIST_HEAD(hbq_buf_list);
1815         if (!phba->hbqs[hbqno].hbq_alloc_buffer)
1816                 return 0;
1817
1818         if ((phba->hbqs[hbqno].buffer_count + count) >
1819             lpfc_hbq_defs[hbqno]->entry_count)
1820                 count = lpfc_hbq_defs[hbqno]->entry_count -
1821                                         phba->hbqs[hbqno].buffer_count;
1822         if (!count)
1823                 return 0;
1824         /* Allocate HBQ entries */
1825         for (i = 0; i < count; i++) {
1826                 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
1827                 if (!hbq_buffer)
1828                         break;
1829                 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
1830         }
1831         /* Check whether HBQ is still in use */
1832         spin_lock_irqsave(&phba->hbalock, flags);
1833         if (!phba->hbq_in_use)
1834                 goto err;
1835         while (!list_empty(&hbq_buf_list)) {
1836                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1837                                  dbuf.list);
1838                 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
1839                                       (hbqno << 16));
1840                 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
1841                         phba->hbqs[hbqno].buffer_count++;
1842                         posted++;
1843                 } else
1844                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1845         }
1846         spin_unlock_irqrestore(&phba->hbalock, flags);
1847         return posted;
1848 err:
1849         spin_unlock_irqrestore(&phba->hbalock, flags);
1850         while (!list_empty(&hbq_buf_list)) {
1851                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1852                                  dbuf.list);
1853                 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1854         }
1855         return 0;
1856 }
1857
1858 /**
1859  * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
1860  * @phba: Pointer to HBA context object.
1861  * @qno: HBQ number.
1862  *
1863  * This function posts more buffers to the HBQ. This function
1864  * is called with no lock held. The function returns the number of HBQ entries
1865  * successfully allocated.
1866  **/
1867 int
1868 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1869 {
1870         if (phba->sli_rev == LPFC_SLI_REV4)
1871                 return 0;
1872         else
1873                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1874                                          lpfc_hbq_defs[qno]->add_count);
1875 }
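
/*
 * Illustrative sketch (compiled out): replenishing the ELS HBQ from a context
 * that holds no locks, as the kernel-doc above allows.  On SLI-4 the call is
 * a no-op returning 0, since receive buffers are posted through the RQs
 * instead (see lpfc_sli_hbq_to_firmware_s4 above).
 */
#if 0
        /* posts up to add_count more buffers; returns how many were posted */
        int posted = lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
#endif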
1876
1877 /**
1878  * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
1879  * @phba: Pointer to HBA context object.
1880  * @qno:  HBQ queue number.
1881  *
1882  * This function is called from SLI initialization code path with
1883  * no lock held to post initial HBQ buffers to firmware. The
1884  * function returns the number of HBQ entries successfully allocated.
1885  **/
1886 static int
1887 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
1888 {
1889         if (phba->sli_rev == LPFC_SLI_REV4)
1890                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1891                                          lpfc_hbq_defs[qno]->entry_count);
1892         else
1893                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1894                                          lpfc_hbq_defs[qno]->init_count);
1895 }
1896
1897 /**
1898  * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
1899  * @rb_list: Pointer to the hbq buffer list from which the first
1900  *           buffer is removed.
1901  *
1902  * This function removes the first hbq buffer on an hbq list and returns a
1903  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
1904  **/
1905 static struct hbq_dmabuf *
1906 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1907 {
1908         struct lpfc_dmabuf *d_buf;
1909
1910         list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1911         if (!d_buf)
1912                 return NULL;
1913         return container_of(d_buf, struct hbq_dmabuf, dbuf);
1914 }
1915
1916 /**
1917  * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
1918  * @phba: Pointer to HBA context object.
1919  * @tag: Tag of the hbq buffer.
1920  *
1921  * This function is called with hbalock held. This function searches
1922  * for the hbq buffer associated with the given tag in the hbq buffer
1923  * list. If it finds the hbq buffer, it returns the hbq_buffer, otherwise
1924  * it returns NULL.
1925  **/
1926 static struct hbq_dmabuf *
1927 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
1928 {
1929         struct lpfc_dmabuf *d_buf;
1930         struct hbq_dmabuf *hbq_buf;
1931         uint32_t hbqno;
1932
1933         hbqno = tag >> 16;
1934         if (hbqno >= LPFC_MAX_HBQS)
1935                 return NULL;
1936
1937         spin_lock_irq(&phba->hbalock);
1938         list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
1939                 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
1940                 if (hbq_buf->tag == tag) {
1941                         spin_unlock_irq(&phba->hbalock);
1942                         return hbq_buf;
1943                 }
1944         }
1945         spin_unlock_irq(&phba->hbalock);
1946         lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
1947                         "1803 Bad hbq tag. Data: x%x x%x\n",
1948                         tag, phba->hbqs[tag >> 16].buffer_count);
1949         return NULL;
1950 }
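
/*
 * Illustrative sketch (compiled out): the tag searched for above is built in
 * lpfc_sli_hbqbuf_fill_hbqs() as (buffer_count | (hbqno << 16)), so it
 * decodes as below.
 */
#if 0
        uint32_t hbqno = tag >> 16;     /* HBQ the buffer belongs to */
        uint32_t index = tag & 0xffff;  /* buffer_count value at post time */
#endif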
1951
1952 /**
1953  * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
1954  * @phba: Pointer to HBA context object.
1955  * @hbq_buffer: Pointer to HBQ buffer.
1956  *
1957  * This function is called with the hbalock held. This function gives back
1958  * the hbq buffer to firmware. If the HBQ does not have space to
1959  * post the buffer, it will free the buffer.
1960  **/
1961 void
1962 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
1963 {
1964         uint32_t hbqno;
1965
1966         if (hbq_buffer) {
1967                 hbqno = hbq_buffer->tag >> 16;
1968                 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
1969                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1970         }
1971 }
1972
1973 /**
1974  * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
1975  * @mbxCommand: mailbox command code.
1976  *
1977  * This function is called by the mailbox event handler function to verify
1978  * that the completed mailbox command is a legitimate mailbox command. If the
1979  * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
1980  * and the mailbox event handler will take the HBA offline.
1981  **/
1982 static int
1983 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1984 {
1985         uint8_t ret;
1986
1987         switch (mbxCommand) {
1988         case MBX_LOAD_SM:
1989         case MBX_READ_NV:
1990         case MBX_WRITE_NV:
1991         case MBX_WRITE_VPARMS:
1992         case MBX_RUN_BIU_DIAG:
1993         case MBX_INIT_LINK:
1994         case MBX_DOWN_LINK:
1995         case MBX_CONFIG_LINK:
1996         case MBX_CONFIG_RING:
1997         case MBX_RESET_RING:
1998         case MBX_READ_CONFIG:
1999         case MBX_READ_RCONFIG:
2000         case MBX_READ_SPARM:
2001         case MBX_READ_STATUS:
2002         case MBX_READ_RPI:
2003         case MBX_READ_XRI:
2004         case MBX_READ_REV:
2005         case MBX_READ_LNK_STAT:
2006         case MBX_REG_LOGIN:
2007         case MBX_UNREG_LOGIN:
2008         case MBX_CLEAR_LA:
2009         case MBX_DUMP_MEMORY:
2010         case MBX_DUMP_CONTEXT:
2011         case MBX_RUN_DIAGS:
2012         case MBX_RESTART:
2013         case MBX_UPDATE_CFG:
2014         case MBX_DOWN_LOAD:
2015         case MBX_DEL_LD_ENTRY:
2016         case MBX_RUN_PROGRAM:
2017         case MBX_SET_MASK:
2018         case MBX_SET_VARIABLE:
2019         case MBX_UNREG_D_ID:
2020         case MBX_KILL_BOARD:
2021         case MBX_CONFIG_FARP:
2022         case MBX_BEACON:
2023         case MBX_LOAD_AREA:
2024         case MBX_RUN_BIU_DIAG64:
2025         case MBX_CONFIG_PORT:
2026         case MBX_READ_SPARM64:
2027         case MBX_READ_RPI64:
2028         case MBX_REG_LOGIN64:
2029         case MBX_READ_TOPOLOGY:
2030         case MBX_WRITE_WWN:
2031         case MBX_SET_DEBUG:
2032         case MBX_LOAD_EXP_ROM:
2033         case MBX_ASYNCEVT_ENABLE:
2034         case MBX_REG_VPI:
2035         case MBX_UNREG_VPI:
2036         case MBX_HEARTBEAT:
2037         case MBX_PORT_CAPABILITIES:
2038         case MBX_PORT_IOV_CONTROL:
2039         case MBX_SLI4_CONFIG:
2040         case MBX_SLI4_REQ_FTRS:
2041         case MBX_REG_FCFI:
2042         case MBX_UNREG_FCFI:
2043         case MBX_REG_VFI:
2044         case MBX_UNREG_VFI:
2045         case MBX_INIT_VPI:
2046         case MBX_INIT_VFI:
2047         case MBX_RESUME_RPI:
2048         case MBX_READ_EVENT_LOG_STATUS:
2049         case MBX_READ_EVENT_LOG:
2050         case MBX_SECURITY_MGMT:
2051         case MBX_AUTH_PORT:
2052                 ret = mbxCommand;
2053                 break;
2054         default:
2055                 ret = MBX_SHUTDOWN;
2056                 break;
2057         }
2058         return ret;
2059 }
2060
2061 /**
2062  * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2063  * @phba: Pointer to HBA context object.
2064  * @pmboxq: Pointer to mailbox command.
2065  *
2066  * This is the completion handler function for mailbox commands issued from
2067  * the lpfc_sli_issue_mbox_wait function. This function is called by the
2068  * mailbox event handler function with no lock held. This function
2069  * will wake up the thread waiting on the wait queue pointed to by context1
2070  * of the mailbox.
2071  **/
2072 void
2073 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2074 {
2075         wait_queue_head_t *pdone_q;
2076         unsigned long drvr_flag;
2077
2078         /*
2079          * If pdone_q is empty, the driver thread gave up waiting and
2080          * continued running.
2081          */
2082         pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2083         spin_lock_irqsave(&phba->hbalock, drvr_flag);
2084         pdone_q = (wait_queue_head_t *) pmboxq->context1;
2085         if (pdone_q)
2086                 wake_up_interruptible(pdone_q);
2087         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2088         return;
2089 }
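
/*
 * Illustrative sketch (compiled out, not part of the driver): the waiting
 * side that pairs with the completion handler above.  It assumes, as the
 * kernel-doc states, that context1 points at a wait_queue_head_t; the helper
 * name and the 5 second timeout are hypothetical.
 */
#if 0
static int example_wait_for_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);

        pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
        pmboxq->context1 = &done_q;
        pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;

        if (lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT) == MBX_NOT_FINISHED)
                return -EIO;

        /* wake_up_interruptible() in the handler ends this wait */
        wait_event_interruptible_timeout(done_q,
                                         pmboxq->mbox_flag & LPFC_MBX_WAKE,
                                         msecs_to_jiffies(5000));

        return (pmboxq->mbox_flag & LPFC_MBX_WAKE) ? 0 : -ETIMEDOUT;
}
#endif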
2090
2091
2092 /**
2093  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2094  * @phba: Pointer to HBA context object.
2095  * @pmb: Pointer to mailbox object.
2096  *
2097  * This function is the default mailbox completion handler. It
2098  * frees the memory resources associated with the completed mailbox
2099  * command. If the completed command is a REG_LOGIN mailbox command,
2100  * this function will issue an UNREG_LOGIN to reclaim the RPI.
2101  **/
2102 void
2103 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2104 {
2105         struct lpfc_vport  *vport = pmb->vport;
2106         struct lpfc_dmabuf *mp;
2107         struct lpfc_nodelist *ndlp;
2108         struct Scsi_Host *shost;
2109         uint16_t rpi, vpi;
2110         int rc;
2111
2112         mp = (struct lpfc_dmabuf *) (pmb->context1);
2113
2114         if (mp) {
2115                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2116                 kfree(mp);
2117         }
2118
2119         /*
2120          * If a REG_LOGIN succeeded after the node is destroyed or the node
2121          * is in re-discovery, the driver needs to clean up the RPI.
2122          */
2123         if (!(phba->pport->load_flag & FC_UNLOADING) &&
2124             pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2125             !pmb->u.mb.mbxStatus) {
2126                 rpi = pmb->u.mb.un.varWords[0];
2127                 vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
2128                 lpfc_unreg_login(phba, vpi, rpi, pmb);
2129                 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2130                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2131                 if (rc != MBX_NOT_FINISHED)
2132                         return;
2133         }
2134
2135         if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2136                 !(phba->pport->load_flag & FC_UNLOADING) &&
2137                 !pmb->u.mb.mbxStatus) {
2138                 shost = lpfc_shost_from_vport(vport);
2139                 spin_lock_irq(shost->host_lock);
2140                 vport->vpi_state |= LPFC_VPI_REGISTERED;
2141                 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2142                 spin_unlock_irq(shost->host_lock);
2143         }
2144
2145         if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2146                 ndlp = (struct lpfc_nodelist *)pmb->context2;
2147                 lpfc_nlp_put(ndlp);
2148                 pmb->context2 = NULL;
2149         }
2150
2151         /* Check security permission status on INIT_LINK mailbox command */
2152         if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2153             (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2154                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2155                                 "2860 SLI authentication is required "
2156                                 "for INIT_LINK but has not done yet\n");
2157
2158         if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2159                 lpfc_sli4_mbox_cmd_free(phba, pmb);
2160         else
2161                 mempool_free(pmb, phba->mbox_mem_pool);
2162 }
2163
2164 /**
2165  * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2166  * @phba: Pointer to HBA context object.
2167  *
2168  * This function is called with no lock held. This function processes all
2169  * the completed mailbox commands and gives them to the upper layers. The
2170  * interrupt service routine processes the mailbox completion interrupt, adds
2171  * the completed mailbox commands to the mboxq_cmpl queue and signals the
2172  * worker thread. The worker thread calls lpfc_sli_handle_mb_event, which
2173  * returns the completed mailbox commands in the mboxq_cmpl queue to the
2174  * upper layers. This function returns the mailbox commands to the upper
2175  * layer by calling the completion handler function of each mailbox.
2176  **/
2177 int
2178 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2179 {
2180         MAILBOX_t *pmbox;
2181         LPFC_MBOXQ_t *pmb;
2182         int rc;
2183         LIST_HEAD(cmplq);
2184
2185         phba->sli.slistat.mbox_event++;
2186
2187         /* Get all completed mailbox buffers into the cmplq */
2188         spin_lock_irq(&phba->hbalock);
2189         list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2190         spin_unlock_irq(&phba->hbalock);
2191
2192         /* Get a mailbox buffer to set up mailbox commands for callback */
2193         do {
2194                 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2195                 if (pmb == NULL)
2196                         break;
2197
2198                 pmbox = &pmb->u.mb;
2199
2200                 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2201                         if (pmb->vport) {
2202                                 lpfc_debugfs_disc_trc(pmb->vport,
2203                                         LPFC_DISC_TRC_MBOX_VPORT,
2204                                         "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2205                                         (uint32_t)pmbox->mbxCommand,
2206                                         pmbox->un.varWords[0],
2207                                         pmbox->un.varWords[1]);
2208                         }
2209                         else {
2210                                 lpfc_debugfs_disc_trc(phba->pport,
2211                                         LPFC_DISC_TRC_MBOX,
2212                                         "MBOX cmpl:       cmd:x%x mb:x%x x%x",
2213                                         (uint32_t)pmbox->mbxCommand,
2214                                         pmbox->un.varWords[0],
2215                                         pmbox->un.varWords[1]);
2216                         }
2217                 }
2218
2219                 /*
2220                  * It is a fatal error if an unknown mbox command completes.
2221                  */
2222                 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2223                     MBX_SHUTDOWN) {
2224                         /* Unknown mailbox command compl */
2225                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2226                                         "(%d):0323 Unknown Mailbox command "
2227                                         "x%x (x%x) Cmpl\n",
2228                                         pmb->vport ? pmb->vport->vpi : 0,
2229                                         pmbox->mbxCommand,
2230                                         lpfc_sli4_mbox_opcode_get(phba, pmb));
2231                         phba->link_state = LPFC_HBA_ERROR;
2232                         phba->work_hs = HS_FFER3;
2233                         lpfc_handle_eratt(phba);
2234                         continue;
2235                 }
2236
2237                 if (pmbox->mbxStatus) {
2238                         phba->sli.slistat.mbox_stat_err++;
2239                         if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2240                                 /* Mbox cmd cmpl error - RETRYing */
2241                                 lpfc_printf_log(phba, KERN_INFO,
2242                                                 LOG_MBOX | LOG_SLI,
2243                                                 "(%d):0305 Mbox cmd cmpl "
2244                                                 "error - RETRYing Data: x%x "
2245                                                 "(x%x) x%x x%x x%x\n",
2246                                                 pmb->vport ? pmb->vport->vpi :0,
2247                                                 pmbox->mbxCommand,
2248                                                 lpfc_sli4_mbox_opcode_get(phba,
2249                                                                           pmb),
2250                                                 pmbox->mbxStatus,
2251                                                 pmbox->un.varWords[0],
2252                                                 pmb->vport->port_state);
2253                                 pmbox->mbxStatus = 0;
2254                                 pmbox->mbxOwner = OWN_HOST;
2255                                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2256                                 if (rc != MBX_NOT_FINISHED)
2257                                         continue;
2258                         }
2259                 }
2260
2261                 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2262                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2263                                 "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
2264                                 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
2265                                 pmb->vport ? pmb->vport->vpi : 0,
2266                                 pmbox->mbxCommand,
2267                                 lpfc_sli4_mbox_opcode_get(phba, pmb),
2268                                 pmb->mbox_cmpl,
2269                                 *((uint32_t *) pmbox),
2270                                 pmbox->un.varWords[0],
2271                                 pmbox->un.varWords[1],
2272                                 pmbox->un.varWords[2],
2273                                 pmbox->un.varWords[3],
2274                                 pmbox->un.varWords[4],
2275                                 pmbox->un.varWords[5],
2276                                 pmbox->un.varWords[6],
2277                                 pmbox->un.varWords[7]);
2278
2279                 if (pmb->mbox_cmpl)
2280                         pmb->mbox_cmpl(phba, pmb);
2281         } while (1);
2282         return 0;
2283 }
2284
2285 /**
2286  * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2287  * @phba: Pointer to HBA context object.
2288  * @pring: Pointer to driver SLI ring object.
2289  * @tag: buffer tag.
2290  *
2291  * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
2292  * is set in the tag, the buffer was posted for a particular exchange and
2293  * the function will return the buffer without replacing it.
2294  * If the buffer is for unsolicited ELS or CT traffic, this function
2295  * returns the buffer and also posts another buffer to the firmware.
2296  **/
2297 static struct lpfc_dmabuf *
2298 lpfc_sli_get_buff(struct lpfc_hba *phba,
2299                   struct lpfc_sli_ring *pring,
2300                   uint32_t tag)
2301 {
2302         struct hbq_dmabuf *hbq_entry;
2303
2304         if (tag & QUE_BUFTAG_BIT)
2305                 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2306         hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2307         if (!hbq_entry)
2308                 return NULL;
2309         return &hbq_entry->dbuf;
2310 }
2311
2312 /**
2313  * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2314  * @phba: Pointer to HBA context object.
2315  * @pring: Pointer to driver SLI ring object.
2316  * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2317  * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2318  * @fch_type: the type for the first frame of the sequence.
2319  *
2320  * This function is called with no lock held. This function uses the r_ctl and
2321  * type of the received sequence to find the correct callback function to call
2322  * to process the sequence.
2323  **/
2324 static int
2325 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2326                          struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2327                          uint32_t fch_type)
2328 {
2329         int i;
2330
2331         /* Unsolicited Responses */
2332         if (pring->prt[0].profile) {
2333                 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2334                         (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2335                                                                         saveq);
2336                 return 1;
2337         }
2338         /* We must search, based on rctl / type
2339            for the right routine */
2340         for (i = 0; i < pring->num_mask; i++) {
2341                 if ((pring->prt[i].rctl == fch_r_ctl) &&
2342                     (pring->prt[i].type == fch_type)) {
2343                         if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2344                                 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2345                                                 (phba, pring, saveq);
2346                         return 1;
2347                 }
2348         }
2349         return 0;
2350 }
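
/*
 * Illustrative sketch (compiled out): the routine above matches unsolicited
 * sequences against the ring's prt[] table by r_ctl/type.  An entry for
 * unsolicited ELS traffic would conceptually be filled in as below; the
 * handler name is hypothetical and stands in for the driver's real ELS
 * unsolicited-event routine, and profile == 0 meaning "match on rctl/type"
 * is inferred from the check at the top of the routine.
 */
#if 0
        pring->prt[0].profile = 0;
        pring->prt[0].rctl = FC_RCTL_ELS_REQ;
        pring->prt[0].type = FC_TYPE_ELS;
        pring->prt[0].lpfc_sli_rcv_unsol_event = example_els_unsol_handler;
#endif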
2351
2352 /**
2353  * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
2354  * @phba: Pointer to HBA context object.
2355  * @pring: Pointer to driver SLI ring object.
2356  * @saveq: Pointer to the unsolicited iocb.
2357  *
2358  * This function is called with no lock held by the ring event handler
2359  * when there is an unsolicited iocb posted to the response ring by the
2360  * firmware. This function gets the buffer associated with the iocbs
2361  * and calls the event handler for the ring. This function handles both
2362  * qring buffers and hbq buffers.
2363  * When the function returns 1, the caller can free the iocb object; otherwise
2364  * the upper layer functions will free the iocb objects.
2365  **/
2366 static int
2367 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2368                             struct lpfc_iocbq *saveq)
2369 {
2370         IOCB_t           * irsp;
2371         WORD5            * w5p;
2372         uint32_t           Rctl, Type;
2373         uint32_t           match;
2374         struct lpfc_iocbq *iocbq;
2375         struct lpfc_dmabuf *dmzbuf;
2376
2377         match = 0;
2378         irsp = &(saveq->iocb);
2379
2380         if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2381                 if (pring->lpfc_sli_rcv_async_status)
2382                         pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2383                 else
2384                         lpfc_printf_log(phba,
2385                                         KERN_WARNING,
2386                                         LOG_SLI,
2387                                         "0316 Ring %d handler: unexpected "
2388                                         "ASYNC_STATUS iocb received evt_code "
2389                                         "0x%x\n",
2390                                         pring->ringno,
2391                                         irsp->un.asyncstat.evt_code);
2392                 return 1;
2393         }
2394
2395         if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2396                 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2397                 if (irsp->ulpBdeCount > 0) {
2398                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2399                                         irsp->un.ulpWord[3]);
2400                         lpfc_in_buf_free(phba, dmzbuf);
2401                 }
2402
2403                 if (irsp->ulpBdeCount > 1) {
2404                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2405                                         irsp->unsli3.sli3Words[3]);
2406                         lpfc_in_buf_free(phba, dmzbuf);
2407                 }
2408
2409                 if (irsp->ulpBdeCount > 2) {
2410                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2411                                 irsp->unsli3.sli3Words[7]);
2412                         lpfc_in_buf_free(phba, dmzbuf);
2413                 }
2414
2415                 return 1;
2416         }
2417
2418         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2419                 if (irsp->ulpBdeCount != 0) {
2420                         saveq->context2 = lpfc_sli_get_buff(phba, pring,
2421                                                 irsp->un.ulpWord[3]);
2422                         if (!saveq->context2)
2423                                 lpfc_printf_log(phba,
2424                                         KERN_ERR,
2425                                         LOG_SLI,
2426                                         "0341 Ring %d Cannot find buffer for "
2427                                         "an unsolicited iocb. tag 0x%x\n",
2428                                         pring->ringno,
2429                                         irsp->un.ulpWord[3]);
2430                 }
2431                 if (irsp->ulpBdeCount == 2) {
2432                         saveq->context3 = lpfc_sli_get_buff(phba, pring,
2433                                                 irsp->unsli3.sli3Words[7]);
2434                         if (!saveq->context3)
2435                                 lpfc_printf_log(phba,
2436                                         KERN_ERR,
2437                                         LOG_SLI,
2438                                         "0342 Ring %d Cannot find buffer for an"
2439                                         " unsolicited iocb. tag 0x%x\n",
2440                                         pring->ringno,
2441                                         irsp->unsli3.sli3Words[7]);
2442                 }
2443                 list_for_each_entry(iocbq, &saveq->list, list) {
2444                         irsp = &(iocbq->iocb);
2445                         if (irsp->ulpBdeCount != 0) {
2446                                 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2447                                                         irsp->un.ulpWord[3]);
2448                                 if (!iocbq->context2)
2449                                         lpfc_printf_log(phba,
2450                                                 KERN_ERR,
2451                                                 LOG_SLI,
2452                                                 "0343 Ring %d Cannot find "
2453                                                 "buffer for an unsolicited iocb"
2454                                                 ". tag 0x%x\n", pring->ringno,
2455                                                 irsp->un.ulpWord[3]);
2456                         }
2457                         if (irsp->ulpBdeCount == 2) {
2458                                 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2459                                                 irsp->unsli3.sli3Words[7]);
2460                                 if (!iocbq->context3)
2461                                         lpfc_printf_log(phba,
2462                                                 KERN_ERR,
2463                                                 LOG_SLI,
2464                                                 "0344 Ring %d Cannot find "
2465                                                 "buffer for an unsolicited "
2466                                                 "iocb. tag 0x%x\n",
2467                                                 pring->ringno,
2468                                                 irsp->unsli3.sli3Words[7]);
2469                         }
2470                 }
2471         }
2472         if (irsp->ulpBdeCount != 0 &&
2473             (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2474              irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2475                 int found = 0;
2476
2477                 /* search continue save q for same XRI */
2478                 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2479                         if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) {
2480                                 list_add_tail(&saveq->list, &iocbq->list);
2481                                 found = 1;
2482                                 break;
2483                         }
2484                 }
2485                 if (!found)
2486                         list_add_tail(&saveq->clist,
2487                                       &pring->iocb_continue_saveq);
2488                 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2489                         list_del_init(&iocbq->clist);
2490                         saveq = iocbq;
2491                         irsp = &(saveq->iocb);
2492                 } else
2493                         return 0;
2494         }
2495         if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2496             (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2497             (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2498                 Rctl = FC_RCTL_ELS_REQ;
2499                 Type = FC_TYPE_ELS;
2500         } else {
2501                 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2502                 Rctl = w5p->hcsw.Rctl;
2503                 Type = w5p->hcsw.Type;
2504
2505                 /* Firmware Workaround */
2506                 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2507                         (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2508                          irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2509                         Rctl = FC_RCTL_ELS_REQ;
2510                         Type = FC_TYPE_ELS;
2511                         w5p->hcsw.Rctl = Rctl;
2512                         w5p->hcsw.Type = Type;
2513                 }
2514         }
2515
2516         if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
2517                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2518                                 "0313 Ring %d handler: unexpected Rctl x%x "
2519                                 "Type x%x received\n",
2520                                 pring->ringno, Rctl, Type);
2521
2522         return 1;
2523 }
2524
2525 /**
2526  * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
2527  * @phba: Pointer to HBA context object.
2528  * @pring: Pointer to driver SLI ring object.
2529  * @prspiocb: Pointer to response iocb object.
2530  *
2531  * This function looks up the iocb_lookup table to get the command iocb
2532  * corresponding to the given response iocb using the iotag of the
2533  * response iocb. This function is called with the hbalock held.
2534  * This function returns the command iocb object if it finds the command
2535  * iocb else returns NULL.
2536  **/
2537 static struct lpfc_iocbq *
2538 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2539                       struct lpfc_sli_ring *pring,
2540                       struct lpfc_iocbq *prspiocb)
2541 {
2542         struct lpfc_iocbq *cmd_iocb = NULL;
2543         uint16_t iotag;
2544
2545         iotag = prspiocb->iocb.ulpIoTag;
2546
2547         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2548                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2549                 list_del_init(&cmd_iocb->list);
2550                 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
2551                         pring->txcmplq_cnt--;
2552                         cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
2553                 }
2554                 return cmd_iocb;
2555         }
2556
2557         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2558                         "0317 iotag x%x is out off "
2559                         "range: max iotag x%x wd0 x%x\n",
2560                         iotag, phba->sli.last_iotag,
2561                         *(((uint32_t *) &prspiocb->iocb) + 7));
2562         return NULL;
2563 }
2564
2565 /**
2566  * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2567  * @phba: Pointer to HBA context object.
2568  * @pring: Pointer to driver SLI ring object.
2569  * @iotag: IOCB tag.
2570  *
2571  * This function looks up the iocb_lookup table to get the command iocb
2572  * corresponding to the given iotag. This function is called with the
2573  * hbalock held.
2574  * This function returns the command iocb object if it finds the command
2575  * iocb else returns NULL.
2576  **/
2577 static struct lpfc_iocbq *
2578 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2579                              struct lpfc_sli_ring *pring, uint16_t iotag)
2580 {
2581         struct lpfc_iocbq *cmd_iocb;
2582
2583         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2584                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2585                 list_del_init(&cmd_iocb->list);
2586                 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
2587                         cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
2588                         pring->txcmplq_cnt--;
2589                 }
2590                 return cmd_iocb;
2591         }
2592
2593         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2594                         "0372 iotag x%x is out off range: max iotag (x%x)\n",
2595                         iotag, phba->sli.last_iotag);
2596         return NULL;
2597 }
2598
2599 /**
2600  * lpfc_sli_process_sol_iocb - process solicited iocb completion
2601  * @phba: Pointer to HBA context object.
2602  * @pring: Pointer to driver SLI ring object.
2603  * @saveq: Pointer to the response iocb to be processed.
2604  *
2605  * This function is called by the ring event handler for non-fcp
2606  * rings when there is a new response iocb in the response ring.
2607  * The caller is not required to hold any locks. This function
2608  * gets the command iocb associated with the response iocb and
2609  * calls the completion handler for the command iocb. If there
2610  * is no completion handler, the function will free the resources
2611  * associated with the command iocb. If the response iocb is for
2612  * an already aborted command iocb, the status of the completion
2613  * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
2614  * This function always returns 1.
2615  **/
2616 static int
2617 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2618                           struct lpfc_iocbq *saveq)
2619 {
2620         struct lpfc_iocbq *cmdiocbp;
2621         int rc = 1;
2622         unsigned long iflag;
2623
2624         /* Based on the iotag field, get the cmd IOCB from the txcmplq */
2625         spin_lock_irqsave(&phba->hbalock, iflag);
2626         cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
2627         spin_unlock_irqrestore(&phba->hbalock, iflag);
2628
2629         if (cmdiocbp) {
2630                 if (cmdiocbp->iocb_cmpl) {
2631                         /*
2632                          * If an ELS command failed, send an event to the
2633                          * mgmt application.
2634                          */
2635                         if (saveq->iocb.ulpStatus &&
2636                              (pring->ringno == LPFC_ELS_RING) &&
2637                              (cmdiocbp->iocb.ulpCommand ==
2638                                 CMD_ELS_REQUEST64_CR))
2639                                 lpfc_send_els_failure_event(phba,
2640                                         cmdiocbp, saveq);
2641
2642                         /*
2643                          * Post all ELS completions to the worker thread.
2644                          * All others are passed to the completion callback.
2645                          */
2646                         if (pring->ringno == LPFC_ELS_RING) {
2647                                 if ((phba->sli_rev < LPFC_SLI_REV4) &&
2648                                     (cmdiocbp->iocb_flag &
2649                                                         LPFC_DRIVER_ABORTED)) {
2650                                         spin_lock_irqsave(&phba->hbalock,
2651                                                           iflag);
2652                                         cmdiocbp->iocb_flag &=
2653                                                 ~LPFC_DRIVER_ABORTED;
2654                                         spin_unlock_irqrestore(&phba->hbalock,
2655                                                                iflag);
2656                                         saveq->iocb.ulpStatus =
2657                                                 IOSTAT_LOCAL_REJECT;
2658                                         saveq->iocb.un.ulpWord[4] =
2659                                                 IOERR_SLI_ABORTED;
2660
2661                                         /* Firmware could still be in the process
2662                                          * of DMAing the payload, so don't free the
2663                                          * data buffer until after a heartbeat.
2664                                          */
2665                                         spin_lock_irqsave(&phba->hbalock,
2666                                                           iflag);
2667                                         saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
2668                                         spin_unlock_irqrestore(&phba->hbalock,
2669                                                                iflag);
2670                                 }
2671                                 if (phba->sli_rev == LPFC_SLI_REV4) {
2672                                         if (saveq->iocb_flag &
2673                                             LPFC_EXCHANGE_BUSY) {
2674                                                 /* Set the exchange busy flag
2675                                                  * on the cmdiocb so the sgl
2676                                                  * (xri) will not be released
2677                                                  * until the abort xri is
2678                                                  * received from the hba.
2679                                                  */
2680                                                 spin_lock_irqsave(
2681                                                         &phba->hbalock, iflag);
2682                                                 cmdiocbp->iocb_flag |=
2683                                                         LPFC_EXCHANGE_BUSY;
2684                                                 spin_unlock_irqrestore(
2685                                                         &phba->hbalock, iflag);
2686                                         }
2687                                         if (cmdiocbp->iocb_flag &
2688                                             LPFC_DRIVER_ABORTED) {
2689                                                 /*
2690                                                  * Clear the LPFC_DRIVER_ABORTED
2691                                                  * bit in case this was a
2692                                                  * driver-initiated abort.
2693                                                  */
2694                                                 spin_lock_irqsave(
2695                                                         &phba->hbalock, iflag);
2696                                                 cmdiocbp->iocb_flag &=
2697                                                         ~LPFC_DRIVER_ABORTED;
2698                                                 spin_unlock_irqrestore(
2699                                                         &phba->hbalock, iflag);
2700                                                 cmdiocbp->iocb.ulpStatus =
2701                                                         IOSTAT_LOCAL_REJECT;
2702                                                 cmdiocbp->iocb.un.ulpWord[4] =
2703                                                         IOERR_ABORT_REQUESTED;
2704                                                 /*
2705                                                  * For SLI4, the irsiocb contains
2706                                                  * NO_XRI in sli_xritag, so it
2707                                                  * does not affect the sgl (xri)
2708                                                  * release process.
2709                                                  */
2710                                                 saveq->iocb.ulpStatus =
2711                                                         IOSTAT_LOCAL_REJECT;
2712                                                 saveq->iocb.un.ulpWord[4] =
2713                                                         IOERR_SLI_ABORTED;
2714                                                 spin_lock_irqsave(
2715                                                         &phba->hbalock, iflag);
2716                                                 saveq->iocb_flag |=
2717                                                         LPFC_DELAY_MEM_FREE;
2718                                                 spin_unlock_irqrestore(
2719                                                         &phba->hbalock, iflag);
2720                                         }
2721                                 }
2722                         }
2723                         (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
2724                 } else
2725                         lpfc_sli_release_iocbq(phba, cmdiocbp);
2726         } else {
2727                 /*
2728                  * Unknown initiating command based on the response iotag.
2729                  * This could be the case on the ELS ring because of
2730                  * lpfc_els_abort().
2731                  */
2732                 if (pring->ringno != LPFC_ELS_RING) {
2733                         /*
2734                          * Ring <ringno> handler: unexpected completion IoTag
2735                          * <IoTag>
2736                          */
2737                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2738                                          "0322 Ring %d handler: "
2739                                          "unexpected completion IoTag x%x "
2740                                          "Data: x%x x%x x%x x%x\n",
2741                                          pring->ringno,
2742                                          saveq->iocb.ulpIoTag,
2743                                          saveq->iocb.ulpStatus,
2744                                          saveq->iocb.un.ulpWord[4],
2745                                          saveq->iocb.ulpCommand,
2746                                          saveq->iocb.ulpContext);
2747                 }
2748         }
2749
2750         return rc;
2751 }
2752
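/*
 * Illustrative sketch (not part of the driver): what a command iocb
 * completion handler would observe after the driver-abort rewrite done in
 * lpfc_sli_process_sol_iocb() above.  The handler name below is
 * hypothetical; the status values are the ones set above:
 *
 *	static void example_els_cmpl(struct lpfc_hba *phba,
 *				     struct lpfc_iocbq *cmdiocb,
 *				     struct lpfc_iocbq *rspiocb)
 *	{
 *		if (rspiocb->iocb.ulpStatus == IOSTAT_LOCAL_REJECT &&
 *		    rspiocb->iocb.un.ulpWord[4] == IOERR_SLI_ABORTED)
 *			lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
 *					"example: ELS command was aborted\n");
 *	}
 */
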
2753 /**
2754  * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
2755  * @phba: Pointer to HBA context object.
2756  * @pring: Pointer to driver SLI ring object.
2757  *
2758  * This function is called from the iocb ring event handlers when the
2759  * put pointer is ahead of the get pointer for a ring. This function signals
2760  * an error attention condition to the worker thread, and the worker
2761  * thread will transition the HBA to the offline state.
2762  **/
2763 static void
2764 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2765 {
2766         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2767         /*
2768          * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
2769          * rsp ring <portRspMax>
2770          */
2771         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2772                         "0312 Ring %d handler: portRspPut %d "
2773                         "is bigger than rsp ring %d\n",
2774                         pring->ringno, le32_to_cpu(pgp->rspPutInx),
2775                         pring->numRiocb);
2776
2777         phba->link_state = LPFC_HBA_ERROR;
2778
2779         /*
2780          * All error attention handlers are posted to
2781          * worker thread
2782          */
2783         phba->work_ha |= HA_ERATT;
2784         phba->work_hs = HS_FFER3;
2785
2786         lpfc_worker_wake_up(phba);
2787
2788         return;
2789 }
2790
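/*
 * Illustrative sketch: the ring event handlers validate the port's response
 * put index against the ring size before walking the ring, and invoke this
 * handler when the check fails (lpfc_sli_handle_fast_ring_event() below does
 * exactly this):
 *
 *	portRspMax = pring->numRiocb;
 *	portRspPut = le32_to_cpu(pgp->rspPutInx);
 *	if (unlikely(portRspPut >= portRspMax))
 *		lpfc_sli_rsp_pointers_error(phba, pring);
 */
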
2791 /**
2792  * lpfc_poll_eratt - Error attention polling timer timeout handler
2793  * @ptr: Pointer to address of HBA context object.
2794  *
2795  * This function is invoked by the Error Attention polling timer when the
2796  * timer times out. It will check the SLI Error Attention register for
2797  * possible attention events. If any are found, it will post an Error
2798  * Attention event and wake up the worker thread to process it. Otherwise,
2799  * it will restart the Error Attention polling timer for the next poll.
2800  **/
2801 void lpfc_poll_eratt(unsigned long ptr)
2802 {
2803         struct lpfc_hba *phba;
2804         uint32_t eratt = 0;
2805
2806         phba = (struct lpfc_hba *)ptr;
2807
2808         /* Check chip HA register for error event */
2809         eratt = lpfc_sli_check_eratt(phba);
2810
2811         if (eratt)
2812                 /* Tell the worker thread there is work to do */
2813                 lpfc_worker_wake_up(phba);
2814         else
2815                 /* Restart the timer for next eratt poll */
2816                 mod_timer(&phba->eratt_poll, jiffies +
2817                                         HZ * LPFC_ERATT_POLL_INTERVAL);
2818         return;
2819 }
2820
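/*
 * Illustrative sketch, assuming the usual timer setup done elsewhere in the
 * driver (not in this file): lpfc_poll_eratt() is the callback of the
 * eratt_poll timer, which is typically initialized with the hba as its
 * argument and armed for LPFC_ERATT_POLL_INTERVAL seconds, e.g.:
 *
 *	init_timer(&phba->eratt_poll);
 *	phba->eratt_poll.function = lpfc_poll_eratt;
 *	phba->eratt_poll.data = (unsigned long)phba;
 *	mod_timer(&phba->eratt_poll,
 *		  jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
 */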
2821
2822 /**
2823  * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
2824  * @phba: Pointer to HBA context object.
2825  * @pring: Pointer to driver SLI ring object.
2826  * @mask: Host attention register mask for this ring.
2827  *
2828  * This function is called from the interrupt context when there is a ring
2829  * event for the fcp ring. The caller does not hold any lock.
2830  * The function processes each response iocb in the response ring until it
2831  * finds an iocb with the LE bit set, chaining together all iocbs up to and
2832  * including that entry. The function calls the completion handler of the
2833  * command iocb if the response iocb indicates a completion for a command
2834  * iocb or an abort completion. The function calls lpfc_sli_process_unsol_iocb
2835  * if this is an unsolicited iocb.
2836  * This routine presumes LPFC_FCP_RING handling and doesn't bother
2837  * to check it explicitly.
2838  **/
2839 int
2840 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2841                                 struct lpfc_sli_ring *pring, uint32_t mask)
2842 {
2843         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2844         IOCB_t *irsp = NULL;
2845         IOCB_t *entry = NULL;
2846         struct lpfc_iocbq *cmdiocbq = NULL;
2847         struct lpfc_iocbq rspiocbq;
2848         uint32_t status;
2849         uint32_t portRspPut, portRspMax;
2850         int rc = 1;
2851         lpfc_iocb_type type;
2852         unsigned long iflag;
2853         uint32_t rsp_cmpl = 0;
2854
2855         spin_lock_irqsave(&phba->hbalock, iflag);
2856         pring->stats.iocb_event++;
2857
2858         /*
2859          * The next available response entry should never exceed the maximum
2860          * entries.  If it does, treat it as an adapter hardware error.
2861          */
2862         portRspMax = pring->numRiocb;
2863         portRspPut = le32_to_cpu(pgp->rspPutInx);
2864         if (unlikely(portRspPut >= portRspMax)) {
2865                 lpfc_sli_rsp_pointers_error(phba, pring);
2866                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2867                 return 1;
2868         }
2869         if (phba->fcp_ring_in_use) {
2870                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2871                 return 1;
2872         } else
2873                 phba->fcp_ring_in_use = 1;
2874
2875         rmb();
2876         while (pring->rspidx != portRspPut) {
2877                 /*
2878                  * Fetch an entry off the ring and copy it into a local data
2879                  * structure.  The copy involves a byte-swap since the
2880                  * network byte order and the PCI byte order differ.
2881                  */
2882                 entry = lpfc_resp_iocb(phba, pring);
2883                 phba->last_completion_time = jiffies;
2884
2885                 if (++pring->rspidx >= portRspMax)
2886                         pring->rspidx = 0;
2887
2888                 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
2889                                       (uint32_t *) &rspiocbq.iocb,
2890                                       phba->iocb_rsp_size);
2891                 INIT_LIST_HEAD(&(rspiocbq.list));
2892                 irsp = &rspiocbq.iocb;
2893
2894                 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
2895                 pring->stats.iocb_rsp++;
2896                 rsp_cmpl++;
2897
2898                 if (unlikely(irsp->ulpStatus)) {
2899                         /*
2900                          * If resource errors are reported by the HBA,
2901                          * reduce the queue depths of the SCSI devices.
2902                          */
2903                         if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2904                                 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2905                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2906                                 phba->lpfc_rampdown_queue_depth(phba);
2907                                 spin_lock_irqsave(&phba->hbalock, iflag);
2908                         }
2909
2910                         /* Rsp ring <ringno> error: IOCB */
2911                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2912                                         "0336 Rsp Ring %d error: IOCB Data: "
2913                                         "x%x x%x x%x x%x x%x x%x x%x x%x\n",
2914                                         pring->ringno,
2915                                         irsp->un.ulpWord[0],
2916                                         irsp->un.ulpWord[1],
2917                                         irsp->un.ulpWord[2],
2918                                         irsp->un.ulpWord[3],
2919                                         irsp->un.ulpWord[4],
2920                                         irsp->un.ulpWord[5],
2921                                         *(uint32_t *)&irsp->un1,
2922                                         *((uint32_t *)&irsp->un1 + 1));
2923                 }
2924
2925                 switch (type) {
2926                 case LPFC_ABORT_IOCB:
2927                 case LPFC_SOL_IOCB:
2928                         /*
2929                          * Idle exchange closed via ABTS from port.  No iocb
2930                          * resources need to be recovered.
2931                          */
2932                         if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
2933                                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2934                                                 "0333 IOCB cmd 0x%x"
2935                                                 " processed. Skipping"
2936                                                 " completion\n",
2937                                                 irsp->ulpCommand);
2938                                 break;
2939                         }
2940
2941                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
2942                                                          &rspiocbq);
2943                         if (unlikely(!cmdiocbq))
2944                                 break;
2945                         if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
2946                                 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
2947                         if (cmdiocbq->iocb_cmpl) {
2948                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2949                                 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
2950                                                       &rspiocbq);
2951                                 spin_lock_irqsave(&phba->hbalock, iflag);
2952                         }
2953                         break;
2954                 case LPFC_UNSOL_IOCB:
2955                         spin_unlock_irqrestore(&phba->hbalock, iflag);
2956                         lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
2957                         spin_lock_irqsave(&phba->hbalock, iflag);
2958                         break;
2959                 default:
2960                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2961                                 char adaptermsg[LPFC_MAX_ADPTMSG];
2962                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
2963                                 memcpy(&adaptermsg[0], (uint8_t *) irsp,
2964                                        MAX_MSG_DATA);
2965                                 dev_warn(&((phba->pcidev)->dev),
2966                                          "lpfc%d: %s\n",
2967                                          phba->brd_no, adaptermsg);
2968                         } else {
2969                                 /* Unknown IOCB command */
2970                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2971                                                 "0334 Unknown IOCB command "
2972                                                 "Data: x%x, x%x x%x x%x x%x\n",
2973                                                 type, irsp->ulpCommand,
2974                                                 irsp->ulpStatus,
2975                                                 irsp->ulpIoTag,
2976                                                 irsp->ulpContext);
2977                         }
2978                         break;
2979                 }
2980
2981                 /*
2982                  * The response IOCB has been processed.  Update the ring
2983                  * pointer in SLIM.  If the port response put pointer has not
2984                  * been updated, sync the pgp->rspPutInx and fetch the new port
2985                  * response put pointer.
2986                  */
2987                 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
2988
2989                 if (pring->rspidx == portRspPut)
2990                         portRspPut = le32_to_cpu(pgp->rspPutInx);
2991         }
2992
2993         if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
2994                 pring->stats.iocb_rsp_full++;
2995                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
2996                 writel(status, phba->CAregaddr);
2997                 readl(phba->CAregaddr);
2998         }
2999         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3000                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3001                 pring->stats.iocb_cmd_empty++;
3002