/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
        LPFC_UNKNOWN_IOCB,
        LPFC_UNSOL_IOCB,
        LPFC_SOL_IOCB,
        LPFC_ABORT_IOCB
} lpfc_iocb_type;

/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
                                  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
                              uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
                                                         struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct hbq_dmabuf *);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
        return &iocbq->iocb;
}

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
        union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
        struct lpfc_register doorbell;
        uint32_t host_index;

        /* If the host has not yet processed the next entry then we are done */
        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
                return -ENOMEM;
        /* set consumption flag every once in a while */
        if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);

        lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);

        /* Update the host index before invoking device */
        host_index = q->host_index;
        q->host_index = ((q->host_index + 1) % q->entry_count);

        /* Ring Doorbell */
        doorbell.word0 = 0;
        bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
        bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
        bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
        readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */

        return 0;
}

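/*
 * Illustrative sketch only (not part of the driver): a caller is expected
 * to serialize WQE posting with the hbalock and handle a full queue. The
 * choice of els_wq and the function name below are examples, not a fixed
 * API of this file.
 */
#if 0
static int lpfc_example_post_els_wqe(struct lpfc_hba *phba,
                                     union lpfc_wqe *wqe)
{
        unsigned long iflags;
        int rc;

        spin_lock_irqsave(&phba->hbalock, iflags);
        rc = lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return rc;      /* -ENOMEM means the work queue was full */
}
#endif
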
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
        uint32_t released = 0;

        if (q->hba_index == index)
                return 0;
        do {
                q->hba_index = ((q->hba_index + 1) % q->entry_count);
                released++;
        } while (q->hba_index != index);
        return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
        struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
        struct lpfc_register doorbell;
        uint32_t host_index;

        /* If the host has not yet processed the next entry then we are done */
        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
                return -ENOMEM;
        lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
        /* Save off the mailbox pointer for completion */
        q->phba->mbox = (MAILBOX_t *)temp_mqe;

        /* Update the host index before invoking device */
        host_index = q->host_index;
        q->host_index = ((q->host_index + 1) % q->entry_count);

        /* Ring Doorbell */
        doorbell.word0 = 0;
        bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
        bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
        readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
        return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
        /* Clear the mailbox pointer for completion */
        q->phba->mbox = NULL;
        q->hba_index = ((q->hba_index + 1) % q->entry_count);
        return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
        struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;

        /* If the next EQE is not valid then we are done */
        if (!bf_get_le32(lpfc_eqe_valid, eqe))
                return NULL;
        /* If the host has not yet processed the next entry then we are done */
        if (((q->hba_index + 1) % q->entry_count) == q->host_index)
                return NULL;

        q->hba_index = ((q->hba_index + 1) % q->entry_count);
        return eqe;
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each event queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
        uint32_t released = 0;
        struct lpfc_eqe *temp_eqe;
        struct lpfc_register doorbell;

        /* while there are valid entries */
        while (q->hba_index != q->host_index) {
                temp_eqe = q->qe[q->host_index].eqe;
                bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
                released++;
                q->host_index = ((q->host_index + 1) % q->entry_count);
        }
        if (unlikely(released == 0 && !arm))
                return 0;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm) {
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
                bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        }
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQCQDBregaddr);
        return released;
}

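/*
 * Illustrative sketch only (not part of the driver): the interrupt-time
 * pattern pairs lpfc_sli4_eq_get() with lpfc_sli4_eq_release(), consuming
 * valid EQEs and then popping them back to the HBA with re-arm. The
 * handler name below is hypothetical.
 */
#if 0
        struct lpfc_eqe *eqe;

        while ((eqe = lpfc_sli4_eq_get(eq)))
                example_handle_eqe(phba, eqe);          /* hypothetical */
        lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
#endif
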
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
        struct lpfc_cqe *cqe;

        /* If the next CQE is not valid then we are done */
        if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
                return NULL;
        /* If the host has not yet processed the next entry then we are done */
        if (((q->hba_index + 1) % q->entry_count) == q->host_index)
                return NULL;

        cqe = q->qe[q->hba_index].cqe;
        q->hba_index = ((q->hba_index + 1) % q->entry_count);
        return cqe;
}

/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
        uint32_t released = 0;
        struct lpfc_cqe *temp_qe;
        struct lpfc_register doorbell;

        /* while there are valid entries */
        while (q->hba_index != q->host_index) {
                temp_qe = q->qe[q->host_index].cqe;
                bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
                released++;
                q->host_index = ((q->host_index + 1) % q->entry_count);
        }
        if (unlikely(released == 0 && !arm))
                return 0;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
        bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
        return released;
}

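/*
 * Illustrative sketch only (not part of the driver): CQ processing follows
 * the same get/release pattern as the EQ above; the handler name below is
 * hypothetical.
 */
#if 0
        struct lpfc_cqe *cqe;

        while ((cqe = lpfc_sli4_cq_get(cq)))
                example_handle_cqe(phba, cq, cqe);      /* hypothetical */
        lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
#endif
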
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the header queue.
 * @drqe: The Data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next available
 * entry on @hq and @dq. This function will then ring the Receive Queue
 * Doorbell to signal the HBA to start processing the Receive Queue Entry.
 * This function returns the index that the rqe was copied to if successful.
 * If no entries are available on @hq then this function will return -EBUSY;
 * if the queues are invalid or out of sync it will return -EINVAL.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
        struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
        struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
        struct lpfc_register doorbell;
        int put_index = hq->host_index;

        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
        if (hq->host_index != dq->host_index)
                return -EINVAL;
        /* If the host has not yet processed the next entry then we are done */
        if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
                return -EBUSY;
        lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
        lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

        /* Update the host index to point to the next slot */
        hq->host_index = ((hq->host_index + 1) % hq->entry_count);
        dq->host_index = ((dq->host_index + 1) % dq->entry_count);

        /* Ring The Header Receive Queue Doorbell */
        if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
                doorbell.word0 = 0;
                bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
                       LPFC_RQ_POST_BATCH);
                bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
                writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
        }
        return put_index;
}

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
        if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
                return 0;
        hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
        dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
        return 1;
}

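/*
 * Illustrative sketch only (not part of the driver): receive buffers are
 * always posted as a header/data RQE pair so the two queues stay in
 * lockstep; lpfc_sli_hbq_to_firmware_s4() later in this file is the
 * in-tree user of this pattern.
 */
#if 0
        struct lpfc_rqe hrqe, drqe;     /* filled with buffer addresses */
        int rc;

        rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
                              &hrqe, &drqe);
        if (rc < 0)
                return rc;      /* -EBUSY: full, -EINVAL: bad/unsynced queues */
        /* rc >= 0 is the slot index the pair was copied to */
#endif
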
/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->cmdringaddr) +
                           pring->cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->rspringaddr) +
                           pring->rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        struct lpfc_iocbq *iocbq = NULL;

        list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
        if (iocbq)
                phba->iocb_cnt++;
        if (phba->iocb_cnt > phba->iocb_max)
                phba->iocb_max = phba->iocb_cnt;
        return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        uint16_t adj_xri;
        struct lpfc_sglq *sglq;
        adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
        if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
                return NULL;
        sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
        phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
        return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        uint16_t adj_xri;
        struct lpfc_sglq *sglq;
        adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
        if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
                return NULL;
        sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
        return sglq;
}

/**
 * __lpfc_set_rrq_active - set RRQ active bit in the ndlp's xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function is called with hbalock held.
 * The active bit is set in the ndlp's active rrq xri_bitmap. Allocates an
 * rrq struct and adds it to the active_rrq_list.
 *
 * returns 0 if an rrq slot was set active for this xri
 *         < 0 if we were not able to get rrq memory or a parameter was
 *         invalid.
 **/
static int
__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
        uint16_t adj_xri;
        struct lpfc_node_rrq *rrq;
        int empty;

        /*
         * set the active bit even if there is no mem available.
         */
        adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
        if (!ndlp)
                return -EINVAL;
        if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
                return -EINVAL;
        rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
        if (rrq) {
                rrq->send_rrq = send_rrq;
                rrq->xritag = xritag;
                rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
                rrq->ndlp = ndlp;
                rrq->nlp_DID = ndlp->nlp_DID;
                rrq->vport = ndlp->vport;
                rrq->rxid = rxid;
                empty = list_empty(&phba->active_rrq_list);
                if (phba->cfg_enable_rrq && send_rrq)
                        /*
                         * We need the xri before we can add this to the
                         * phba active rrq list.
                         */
                        rrq->send_rrq = send_rrq;
                else
                        rrq->send_rrq = 0;
                list_add_tail(&rrq->list, &phba->active_rrq_list);
                if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
                        phba->hba_flag |= HBA_RRQ_ACTIVE;
                        if (empty)
                                lpfc_worker_wake_up(phba);
                }
                return 0;
        }
        return -ENOMEM;
}

/**
 * __lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 * This function is called with hbalock held. It clears the RRQ active bit
 * for @xritag in the ndlp's xri_bitmap and frees @rrq back to the rrq
 * mempool.
 **/
static void
__lpfc_clr_rrq_active(struct lpfc_hba *phba,
                        uint16_t xritag,
                        struct lpfc_node_rrq *rrq)
{
        uint16_t adj_xri;
        struct lpfc_nodelist *ndlp;

        ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

        /* The target DID could have been swapped (cable swap)
         * we should use the ndlp from the findnode if it is
         * available.
         */
        if (!ndlp)
                ndlp = rrq->ndlp;

        adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
        if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
                rrq->send_rrq = 0;
                rrq->xritag = 0;
                rrq->rrq_stop_time = 0;
        }
        mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function takes the hbalock. It checks whether each rrq's
 * stop_time (ratov from setting the rrq active) has
 * been reached; if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long next_time;
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
        next_time = jiffies + HZ * (phba->fc_ratov + 1);
        list_for_each_entry_safe(rrq, nextrrq,
                        &phba->active_rrq_list, list) {
                if (time_after(jiffies, rrq->rrq_stop_time)) {
                        list_del(&rrq->list);
                        if (!rrq->send_rrq)
                                /* this call will free the rrq */
                                __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
                        else {
                                /* if we send the rrq then the completion handler
                                 * will clear the bit in the xribitmap.
                                 */
                                spin_unlock_irqrestore(&phba->hbalock, iflags);
                                if (lpfc_send_rrq(phba, rrq)) {
                                        lpfc_clr_rrq_active(phba, rrq->xritag,
                                                            rrq);
                                }
                                spin_lock_irqsave(&phba->hbalock, iflags);
                        }
                } else if (time_before(rrq->rrq_stop_time, next_time))
                        next_time = rrq->rrq_stop_time;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        if (!list_empty(&phba->active_rrq_list))
                mod_timer(&phba->rrq_tmr, next_time);
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;

        if (phba->sli_rev != LPFC_SLI_REV4)
                return NULL;
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport == vport && rrq->xritag == xri &&
                    rrq->nlp_DID == did) {
                        list_del(&rrq->list);
                        spin_unlock_irqrestore(&phba->hbalock, iflags);
                        return rrq;
                }
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 *
 * Remove all active RRQs for this vport from the phba->active_rrq_list and
 * clear the rrq.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;

        if (phba->sli_rev != LPFC_SLI_REV4)
                return;
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport == vport) {
                        list_del(&rrq->list);
                        __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
                }
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_cleanup_wt_rrqs - Remove all rrq's from the active list.
 * @phba: Pointer to HBA context object.
 *
 * Remove all rrqs from the phba->active_rrq_list and free them by
 * calling __lpfc_clr_rrq_active.
 **/
void
lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
{
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long next_time;
        unsigned long iflags;

        if (phba->sli_rev != LPFC_SLI_REV4)
                return;
        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
        next_time = jiffies + HZ * (phba->fc_ratov * 2);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                list_del(&rrq->list);
                __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        if (!list_empty(&phba->active_rrq_list))
                mod_timer(&phba->rrq_tmr, next_time);
}

/**
 * __lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
static int
__lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                        uint16_t xritag)
{
        uint16_t adj_xri;

        adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
        if (!ndlp)
                return 0;
        if (test_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
                return 1;
        else
                return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 if an rrq is activated for this xri
 *         < 0 if no memory is available or the ndlp is invalid.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                        uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
        int ret;
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        ret = __lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return ret;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 * This function takes the hbalock.
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
                    uint16_t xritag,
                    struct lpfc_node_rrq *rrq)
{
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        __lpfc_clr_rrq_active(phba, xritag, rrq);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function takes the hbalock.
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                        uint16_t xritag)
{
        int ret;
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        ret = __lpfc_test_rrq_active(phba, ndlp, xritag);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return ret;
}

/**
 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with hbalock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty it returns a pointer to the newly
 * allocated sglq object, else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
        struct lpfc_sglq *sglq = NULL;
        struct lpfc_sglq *start_sglq = NULL;
        uint16_t adj_xri;
        struct lpfc_scsi_buf *lpfc_cmd;
        struct lpfc_nodelist *ndlp;
        int found = 0;

        if (piocbq->iocb_flag & LPFC_IO_FCP) {
                lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
                ndlp = lpfc_cmd->rdata->pnode;
        } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
                        !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
                ndlp = piocbq->context_un.ndlp;
        else
                ndlp = piocbq->context1;

        list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
        start_sglq = sglq;
        while (!found) {
                if (!sglq)
                        return NULL;
                adj_xri = sglq->sli4_xritag -
                                phba->sli4_hba.max_cfg_param.xri_base;
                if (__lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
                        /* This xri has an rrq outstanding for this DID.
                         * put it back in the list and get another xri.
                         */
                        list_add_tail(&sglq->list, lpfc_sgl_list);
                        sglq = NULL;
                        list_remove_head(lpfc_sgl_list, sglq,
                                                struct lpfc_sglq, list);
                        if (sglq == start_sglq) {
                                sglq = NULL;
                                break;
                        } else
                                continue;
                }
                sglq->ndlp = ndlp;
                found = 1;
                phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
                sglq->state = SGL_ALLOCATED;
        }
        return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *iocbq = NULL;
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        iocbq = __lpfc_sli_get_iocbq(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        struct lpfc_sglq *sglq;
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
        unsigned long iflag = 0;
        struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

        if (iocbq->sli4_xritag == NO_XRI)
                sglq = NULL;
        else
                sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
        if (sglq) {
                if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
                        (sglq->state != SGL_XRI_ABORTED)) {
                        spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
                                        iflag);
                        list_add(&sglq->list,
                                &phba->sli4_hba.lpfc_abts_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.abts_sgl_list_lock, iflag);
                } else {
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);

                        /* Check if TXQ queue needs to be serviced */
                        if (pring->txq_cnt)
                                lpfc_worker_wake_up(phba);
                }
        }

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_xritag = NO_XRI;
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_xritag = NO_XRI;
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        phba->__lpfc_sli_release_iocbq(phba, iocbq);
        phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        unsigned long iflags;

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        spin_lock_irqsave(&phba->hbalock, iflags);
        __lpfc_sli_release_iocbq(phba, iocbq);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
                      uint32_t ulpstatus, uint32_t ulpWord4)
{
        struct lpfc_iocbq *piocb;

        while (!list_empty(iocblist)) {
                list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);

                if (!piocb->iocb_cmpl)
                        lpfc_sli_release_iocbq(phba, piocb);
                else {
                        piocb->iocb.ulpStatus = ulpstatus;
                        piocb->iocb.un.ulpWord[4] = ulpWord4;
                        (piocb->iocb_cmpl) (phba, piocb, piocb);
                }
        }
        return;
}

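/*
 * Illustrative sketch only (not part of the driver): a typical caller
 * splices a ring's txq onto a local list under the hbalock and then
 * completes everything as locally rejected/aborted, as the abort paths in
 * this file do. The function name below is hypothetical.
 */
#if 0
static void lpfc_example_flush_txq(struct lpfc_hba *phba,
                                   struct lpfc_sli_ring *pring)
{
        LIST_HEAD(completions);
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        list_splice_init(&pring->txq, &completions);
        pring->txq_cnt = 0;
        spin_unlock_irqrestore(&phba->hbalock, iflags);

        /* cancel must be called without the hbalock held */
        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
                              IOERR_SLI_ABORTED);
}
#endif
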
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
        lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

        if (iocb_cmnd > CMD_MAX_IOCB_CMD)
                return 0;

        switch (iocb_cmnd) {
        case CMD_XMIT_SEQUENCE_CR:
        case CMD_XMIT_SEQUENCE_CX:
        case CMD_XMIT_BCAST_CN:
        case CMD_XMIT_BCAST_CX:
        case CMD_ELS_REQUEST_CR:
        case CMD_ELS_REQUEST_CX:
        case CMD_CREATE_XRI_CR:
        case CMD_CREATE_XRI_CX:
        case CMD_GET_RPI_CN:
        case CMD_XMIT_ELS_RSP_CX:
        case CMD_GET_RPI_CR:
        case CMD_FCP_IWRITE_CR:
        case CMD_FCP_IWRITE_CX:
        case CMD_FCP_IREAD_CR:
        case CMD_FCP_IREAD_CX:
        case CMD_FCP_ICMND_CR:
        case CMD_FCP_ICMND_CX:
        case CMD_FCP_TSEND_CX:
        case CMD_FCP_TRSP_CX:
        case CMD_FCP_TRECEIVE_CX:
        case CMD_FCP_AUTO_TRSP_CX:
        case CMD_ADAPTER_MSG:
        case CMD_ADAPTER_DUMP:
        case CMD_XMIT_SEQUENCE64_CR:
        case CMD_XMIT_SEQUENCE64_CX:
        case CMD_XMIT_BCAST64_CN:
        case CMD_XMIT_BCAST64_CX:
        case CMD_ELS_REQUEST64_CR:
        case CMD_ELS_REQUEST64_CX:
        case CMD_FCP_IWRITE64_CR:
        case CMD_FCP_IWRITE64_CX:
        case CMD_FCP_IREAD64_CR:
        case CMD_FCP_IREAD64_CX:
        case CMD_FCP_ICMND64_CR:
        case CMD_FCP_ICMND64_CX:
        case CMD_FCP_TSEND64_CX:
        case CMD_FCP_TRSP64_CX:
        case CMD_FCP_TRECEIVE64_CX:
        case CMD_GEN_REQUEST64_CR:
        case CMD_GEN_REQUEST64_CX:
        case CMD_XMIT_ELS_RSP64_CX:
        case DSSCMD_IWRITE64_CR:
        case DSSCMD_IWRITE64_CX:
        case DSSCMD_IREAD64_CR:
        case DSSCMD_IREAD64_CX:
                type = LPFC_SOL_IOCB;
                break;
        case CMD_ABORT_XRI_CN:
        case CMD_ABORT_XRI_CX:
        case CMD_CLOSE_XRI_CN:
        case CMD_CLOSE_XRI_CX:
        case CMD_XRI_ABORTED_CX:
        case CMD_ABORT_MXRI64_CN:
        case CMD_XMIT_BLS_RSP64_CX:
                type = LPFC_ABORT_IOCB;
                break;
        case CMD_RCV_SEQUENCE_CX:
        case CMD_RCV_ELS_REQ_CX:
        case CMD_RCV_SEQUENCE64_CX:
        case CMD_RCV_ELS_REQ64_CX:
        case CMD_ASYNC_STATUS:
        case CMD_IOCB_RCV_SEQ64_CX:
        case CMD_IOCB_RCV_ELS64_CX:
        case CMD_IOCB_RCV_CONT64_CX:
        case CMD_IOCB_RET_XRI64_CX:
                type = LPFC_UNSOL_IOCB;
                break;
        case CMD_IOCB_XMIT_MSEQ64_CR:
        case CMD_IOCB_XMIT_MSEQ64_CX:
        case CMD_IOCB_RCV_SEQ_LIST64_CX:
        case CMD_IOCB_RCV_ELS_LIST64_CX:
        case CMD_IOCB_CLOSE_EXTENDED_CN:
        case CMD_IOCB_ABORT_EXTENDED_CN:
        case CMD_IOCB_RET_HBQE64_CN:
        case CMD_IOCB_FCP_IBIDIR64_CR:
        case CMD_IOCB_FCP_IBIDIR64_CX:
        case CMD_IOCB_FCP_ITASKMGT64_CX:
        case CMD_IOCB_LOGENTRY_CN:
        case CMD_IOCB_LOGENTRY_ASYNC_CN:
                printk("%s - Unhandled SLI-3 Command x%x\n",
                                __func__, iocb_cmnd);
                type = LPFC_UNKNOWN_IOCB;
                break;
        default:
                type = LPFC_UNKNOWN_IOCB;
                break;
        }

        return type;
}

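/*
 * Illustrative sketch only (not part of the driver): the ring event
 * handlers in this file classify each completed entry by masking the
 * command word before dispatching, along these lines:
 */
#if 0
        lpfc_iocb_type type;

        type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
        switch (type) {
        case LPFC_SOL_IOCB:
                /* look up the originating iocbq and invoke iocb_cmpl */
                break;
        case LPFC_UNSOL_IOCB:
                /* hand the sequence to the registered unsolicited handler */
                break;
        case LPFC_ABORT_IOCB:
        case LPFC_UNKNOWN_IOCB:
        default:
                break;
        }
#endif
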
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *pmbox;
        int i, rc, ret = 0;

        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb)
                return -ENOMEM;
        pmbox = &pmb->u.mb;
        phba->link_state = LPFC_INIT_MBX_CMDS;
        for (i = 0; i < psli->num_rings; i++) {
                lpfc_config_ring(phba, i, pmb);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0446 Adapter failed to init (%d), "
                                        "mbxCmd x%x CFG_RING, mbxStatus x%x, "
                                        "ring %d\n",
                                        rc, pmbox->mbxCommand,
                                        pmbox->mbxStatus, i);
                        phba->link_state = LPFC_HBA_ERROR;
                        ret = -ENXIO;
                        break;
                }
        }
        mempool_free(pmb, phba->mbox_mem_pool);
        return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        struct lpfc_iocbq *piocb)
{
        list_add_tail(&piocb->list, &pring->txcmplq);
        piocb->iocb_flag |= LPFC_IO_ON_Q;
        pring->txcmplq_cnt++;
        if (pring->txcmplq_cnt > pring->txcmplq_max)
                pring->txcmplq_max = pring->txcmplq_cnt;

        if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
                if (!piocb->vport)
                        BUG();
                else
                        mod_timer(&piocb->vport->els_tmofunc,
                                  jiffies + HZ * (phba->fc_ratov << 1));
        }

        return 0;
}

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        struct lpfc_iocbq *cmd_iocb;

        list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
        if (cmd_iocb != NULL)
                pring->txq_cnt--;
        return cmd_iocb;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
        uint32_t max_cmd_idx = pring->numCiocb;
        if ((pring->next_cmdidx == pring->cmdidx) &&
            (++pring->next_cmdidx >= max_cmd_idx))
                pring->next_cmdidx = 0;

        if (unlikely(pring->local_getidx == pring->next_cmdidx)) {

                pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);

                if (unlikely(pring->local_getidx >= max_cmd_idx)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "0315 Ring %d issue: portCmdGet %d "
                                        "is bigger than cmd ring %d\n",
                                        pring->ringno,
                                        pring->local_getidx, max_cmd_idx);

                        phba->link_state = LPFC_HBA_ERROR;
                        /*
                         * All error attention handlers are posted to
                         * worker thread
                         */
                        phba->work_ha |= HA_ERATT;
                        phba->work_hs = HS_FFER3;

                        lpfc_worker_wake_up(phba);

                        return NULL;
                }

                if (pring->local_getidx == pring->next_cmdidx)
                        return NULL;
        }

        return lpfc_cmd_iocb(phba, pring);
}

/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
int
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        struct lpfc_iocbq **new_arr;
        struct lpfc_iocbq **old_arr;
        size_t new_len;
        struct lpfc_sli *psli = &phba->sli;
        uint16_t iotag;

        spin_lock_irq(&phba->hbalock);
        iotag = psli->last_iotag;
        if (++iotag < psli->iocbq_lookup_len) {
                psli->last_iotag = iotag;
                psli->iocbq_lookup[iotag] = iocbq;
                spin_unlock_irq(&phba->hbalock);
                iocbq->iotag = iotag;
                return iotag;
        } else if (psli->iocbq_lookup_len < (0xffff
                                           - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
                new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
                spin_unlock_irq(&phba->hbalock);
                new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
                                  GFP_KERNEL);
                if (new_arr) {
                        spin_lock_irq(&phba->hbalock);
                        old_arr = psli->iocbq_lookup;
                        if (new_len <= psli->iocbq_lookup_len) {
                                /* highly improbable case */
                                kfree(new_arr);
                                iotag = psli->last_iotag;
                                if (++iotag < psli->iocbq_lookup_len) {
                                        psli->last_iotag = iotag;
                                        psli->iocbq_lookup[iotag] = iocbq;
                                        spin_unlock_irq(&phba->hbalock);
                                        iocbq->iotag = iotag;
                                        return iotag;
                                }
                                spin_unlock_irq(&phba->hbalock);
                                return 0;
                        }
                        if (psli->iocbq_lookup)
                                memcpy(new_arr, old_arr,
                                       ((psli->last_iotag + 1) *
                                        sizeof (struct lpfc_iocbq *)));
                        psli->iocbq_lookup = new_arr;
                        psli->iocbq_lookup_len = new_len;
                        psli->last_iotag = iotag;
                        psli->iocbq_lookup[iotag] = iocbq;
                        spin_unlock_irq(&phba->hbalock);
                        iocbq->iotag = iotag;
                        kfree(old_arr);
                        return iotag;
                }
        } else
                spin_unlock_irq(&phba->hbalock);

        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                        "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
                        psli->last_iotag);

        return 0;
}

/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
        /*
         * Set up an iotag
         */
        nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;


        if (pring->ringno == LPFC_ELS_RING) {
                lpfc_debugfs_slow_ring_trc(phba,
                        "IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
                        *(((uint32_t *) &nextiocb->iocb) + 4),
                        *(((uint32_t *) &nextiocb->iocb) + 6),
                        *(((uint32_t *) &nextiocb->iocb) + 7));
        }

        /*
         * Issue iocb command to adapter
         */
        lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
        wmb();
        pring->stats.iocb_cmd++;

        /*
         * If there is no completion routine to call, we can release the
         * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
         * that have no rsp ring completion, iocb_cmpl MUST be NULL.
         */
        if (nextiocb->iocb_cmpl)
                lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
        else
                __lpfc_sli_release_iocbq(phba, nextiocb);

        /*
         * Let the HBA know what IOCB slot will be the next one the
         * driver will put a command into.
         */
        pring->cmdidx = pring->next_cmdidx;
        writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}

/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform firmware
 * that there is pending work to be done for this ring and requests an
 * interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        int ringno = pring->ringno;

        pring->flag |= LPFC_CALL_RING_AVAILABLE;

        wmb();

        /*
         * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
         * The HBA will tell us when an IOCB entry is available.
         */
        writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
        readl(phba->CAregaddr); /* flush */

        pring->stats.iocb_cmd_full++;
}

/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        int ringno = pring->ringno;

        /*
         * Tell the HBA that there is work to do in this ring.
         */
        if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
                wmb();
                writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
                readl(phba->CAregaddr); /* flush */
        }
}

/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when driver
 * detects space available in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        IOCB_t *iocb;
        struct lpfc_iocbq *nextiocb;

        /*
         * Check to see if:
         *  (a) there is anything on the txq to send
         *  (b) link is up
         *  (c) link attention events can be processed (fcp ring only)
         *  (d) IOCB processing is not blocked by the outstanding mbox command.
         */
        if (pring->txq_cnt &&
            lpfc_is_link_up(phba) &&
            (pring->ringno != phba->sli.fcp_ring ||
             phba->sli.sli_flag & LPFC_PROCESS_LA)) {

                while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
                       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
                        lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

                if (iocb)
                        lpfc_sli_update_ring(phba, pring);
                else
                        lpfc_sli_update_full_ring(phba, pring);
        }

        return;
}

/**
 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * This function is called with hbalock held to get the next
 * available slot for the given HBQ. If there is free slot
 * available for the HBQ it will return pointer to the next available
 * HBQ entry else it will return NULL.
 **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
        struct hbq_s *hbqp = &phba->hbqs[hbqno];

        if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
            ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
                hbqp->next_hbqPutIdx = 0;

        if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
                uint32_t raw_index = phba->hbq_get[hbqno];
                uint32_t getidx = le32_to_cpu(raw_index);

                hbqp->local_hbqGetIdx = getidx;

                if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
                        lpfc_printf_log(phba, KERN_ERR,
                                        LOG_SLI | LOG_VPORT,
                                        "1802 HBQ %d: local_hbqGetIdx "
                                        "%u is > than hbqp->entry_count %u\n",
                                        hbqno, hbqp->local_hbqGetIdx,
                                        hbqp->entry_count);

                        phba->link_state = LPFC_HBA_ERROR;
                        return NULL;
                }

                if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
                        return NULL;
        }

        return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
                        hbqp->hbqPutIdx;
}

/**
 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held to free all the
 * hbq buffers while uninitializing the SLI interface. It also
 * frees the HBQ buffers returned by the firmware but not yet
 * processed by the upper layers.
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
        struct lpfc_dmabuf *dmabuf, *next_dmabuf;
        struct hbq_dmabuf *hbq_buf;
        unsigned long flags;
        int i, hbq_count;
        uint32_t hbqno;

        hbq_count = lpfc_sli_hbq_count();
        /* Return all memory used by all HBQs */
        spin_lock_irqsave(&phba->hbalock, flags);
        for (i = 0; i < hbq_count; ++i) {
                list_for_each_entry_safe(dmabuf, next_dmabuf,
                                &phba->hbqs[i].hbq_buffer_list, list) {
                        hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
                        list_del(&hbq_buf->dbuf.list);
                        (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
                }
                phba->hbqs[i].buffer_count = 0;
        }
        /* Return all HBQ buffers that are in flight */
        list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
                                 list) {
                hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
                list_del(&hbq_buf->dbuf.list);
                if (hbq_buf->tag == -1) {
                        (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
                                (phba, hbq_buf);
                } else {
                        hbqno = hbq_buf->tag >> 16;
                        if (hbqno >= LPFC_MAX_HBQS)
                                (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
                                        (phba, hbq_buf);
                        else
                                (phba->hbqs[hbqno].hbq_free_buffer)(phba,
                                        hbq_buf);
                }
        }

        /* Mark the HBQs not in use */
        phba->hbq_in_use = 0;
        spin_unlock_irqrestore(&phba->hbalock, flags);
}

/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a
 * hbq buffer to the firmware. If the function finds an empty
 * slot in the HBQ, it will post the buffer. The function will return
 * zero if it successfully posts the buffer, else it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
                         struct hbq_dmabuf *hbq_buf)
{
        return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}

/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function will return zero if
 * it successfully posts the buffer, else it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
                            struct hbq_dmabuf *hbq_buf)
{
        struct lpfc_hbq_entry *hbqe;
        dma_addr_t physaddr = hbq_buf->dbuf.phys;

        /* Get next HBQ entry slot to use */
        hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
        if (hbqe) {
                struct hbq_s *hbqp = &phba->hbqs[hbqno];

                hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
                hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
                hbqe->bde.tus.f.bdeSize = hbq_buf->size;
                hbqe->bde.tus.f.bdeFlags = 0;
                hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
                hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
                /* Sync SLIM */
                hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
                writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
                /* flush */
                readl(phba->hbq_put + hbqno);
                list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
                return 0;
        } else
                return -ENOMEM;
}

/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an RQE to the SLI4
 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
 * the hbq_buffer_list and return zero, otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
                            struct hbq_dmabuf *hbq_buf)
{
        int rc;
        struct lpfc_rqe hrqe;
        struct lpfc_rqe drqe;

        hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
        hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
        drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
        drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
        rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
                              &hrqe, &drqe);
        if (rc < 0)
                return rc;
        hbq_buf->tag = rc;
        list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
        return 0;
}

/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
        .rn = 1,
        .entry_count = 200,
        .mask_count = 0,
        .profile = 0,
        .ring_mask = (1 << LPFC_ELS_RING),
        .buffer_count = 0,
        .init_count = 40,
        .add_count = 40,
};

/* HBQ for the extra ring if needed */
static struct lpfc_hbq_init lpfc_extra_hbq = {
        .rn = 1,
        .entry_count = 200,
        .mask_count = 0,
        .profile = 0,
        .ring_mask = (1 << LPFC_EXTRA_RING),
        .buffer_count = 0,
        .init_count = 0,
        .add_count = 5,
};

/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
        &lpfc_els_hbq,
        &lpfc_extra_hbq,
};

/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of HBQ buffers to be posted.
 *
 * This function is called with no lock held to post more hbq buffers to the
 * given HBQ. The function returns the number of HBQ buffers successfully
 * posted.
 **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
        uint32_t i, posted = 0;
        unsigned long flags;
        struct hbq_dmabuf *hbq_buffer;
        LIST_HEAD(hbq_buf_list);
        if (!phba->hbqs[hbqno].hbq_alloc_buffer)
                return 0;

        if ((phba->hbqs[hbqno].buffer_count + count) >
            lpfc_hbq_defs[hbqno]->entry_count)
                count = lpfc_hbq_defs[hbqno]->entry_count -
                                        phba->hbqs[hbqno].buffer_count;
        if (!count)
                return 0;
        /* Allocate HBQ entries */
        for (i = 0; i < count; i++) {
                hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
                if (!hbq_buffer)
                        break;
                list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
        }
        /* Check whether HBQ is still in use */
        spin_lock_irqsave(&phba->hbalock, flags);
        if (!phba->hbq_in_use)
                goto err;
        while (!list_empty(&hbq_buf_list)) {
                list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
                                 dbuf.list);
                hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
                                      (hbqno << 16));
                if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
                        phba->hbqs[hbqno].buffer_count++;
                        posted++;
                } else
                        (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
        }
        spin_unlock_irqrestore(&phba->hbalock, flags);
        return posted;
err:
        spin_unlock_irqrestore(&phba->hbalock, flags);
        while (!list_empty(&hbq_buf_list)) {
                list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
                                 dbuf.list);
                (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
        }
        return 0;
}

/**
 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
 * @phba: Pointer to HBA context object.
 * @qno: HBQ number.
 *
 * This function posts more buffers to the HBQ. This function
 * is called with no lock held. The function returns the number of HBQ entries
 * successfully allocated.
 **/
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
        if (phba->sli_rev == LPFC_SLI_REV4)
                return 0;
        else
                return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
                                         lpfc_hbq_defs[qno]->add_count);
}

/**
 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
 * @phba: Pointer to HBA context object.
 * @qno: HBQ queue number.
 *
 * This function is called from SLI initialization code path with
 * no lock held to post initial HBQ buffers to firmware. The
 * function returns the number of HBQ entries successfully allocated.
 **/
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
        if (phba->sli_rev == LPFC_SLI_REV4)
                return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
                                        lpfc_hbq_defs[qno]->entry_count);
        else
                return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
                                         lpfc_hbq_defs[qno]->init_count);
}

1898 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
1899 * @phba: Pointer to HBA context object.
1900 * @hbqno: HBQ number.
1902 * This function removes the first hbq buffer on an hbq list and returns a
1903 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
1905 static struct hbq_dmabuf *
1906 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1908 struct lpfc_dmabuf *d_buf;
1910 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1913 return container_of(d_buf, struct hbq_dmabuf, dbuf);
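/*
 * Consumer sketch (illustrative only): callers drain a buffer list one
 * entry at a time and must handle the empty case:
 *
 *	hbq_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[hbqno].hbq_buffer_list);
 *	if (!hbq_buf)
 *		return;	// list exhausted
 */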
1917 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
1918 * @phba: Pointer to HBA context object.
1919 * @tag: Tag of the hbq buffer.
1921 * This function searches the hbq buffer list for the hbq buffer
1922 * associated with the given tag, taking the hbalock while it walks the
1923 * list. If it finds the hbq buffer it returns it; otherwise it returns NULL.
1926 static struct hbq_dmabuf *
1927 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
1929 struct lpfc_dmabuf *d_buf;
1930 struct hbq_dmabuf *hbq_buf;
1934 if (hbqno >= LPFC_MAX_HBQS)
1937 spin_lock_irq(&phba->hbalock);
1938 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
1939 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
1940 if (hbq_buf->tag == tag) {
1941 spin_unlock_irq(&phba->hbalock);
1945 spin_unlock_irq(&phba->hbalock);
1946 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
1947 "1803 Bad hbq tag. Data: x%x x%x\n",
1948 tag, phba->hbqs[tag >> 16].buffer_count);
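/*
 * Lookup sketch (illustrative only): a receive path holding a completion
 * tag can recover the originating buffer and hand it back when done:
 *
 *	hbq_buf = lpfc_sli_hbqbuf_find(phba, tag);
 *	if (hbq_buf)
 *		lpfc_sli_free_hbq(phba, hbq_buf);	// repost or free
 */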
1953 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
1954 * @phba: Pointer to HBA context object.
1955 * @hbq_buffer: Pointer to HBQ buffer.
1957 * This function is called with the hbalock held. It gives the hbq
1958 * buffer back to the firmware. If the HBQ does not have space to
1959 * post the buffer, it will free the buffer instead.
1962 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
1967 hbqno = hbq_buffer->tag >> 16;
1968 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
1969 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1974 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
1975 * @mbxCommand: mailbox command code.
1977 * This function is called by the mailbox event handler function to verify
1978 * that the completed mailbox command is a legitimate mailbox command. If the
1979 * completed mailbox command is not known to the function, it will return MBX_SHUTDOWN
1980 * and the mailbox event handler will take the HBA offline.
1983 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1987 switch (mbxCommand) {
1991 case MBX_WRITE_VPARMS:
1992 case MBX_RUN_BIU_DIAG:
1995 case MBX_CONFIG_LINK:
1996 case MBX_CONFIG_RING:
1997 case MBX_RESET_RING:
1998 case MBX_READ_CONFIG:
1999 case MBX_READ_RCONFIG:
2000 case MBX_READ_SPARM:
2001 case MBX_READ_STATUS:
2005 case MBX_READ_LNK_STAT:
2007 case MBX_UNREG_LOGIN:
2009 case MBX_DUMP_MEMORY:
2010 case MBX_DUMP_CONTEXT:
2013 case MBX_UPDATE_CFG:
2015 case MBX_DEL_LD_ENTRY:
2016 case MBX_RUN_PROGRAM:
2018 case MBX_SET_VARIABLE:
2019 case MBX_UNREG_D_ID:
2020 case MBX_KILL_BOARD:
2021 case MBX_CONFIG_FARP:
2024 case MBX_RUN_BIU_DIAG64:
2025 case MBX_CONFIG_PORT:
2026 case MBX_READ_SPARM64:
2027 case MBX_READ_RPI64:
2028 case MBX_REG_LOGIN64:
2029 case MBX_READ_TOPOLOGY:
2032 case MBX_LOAD_EXP_ROM:
2033 case MBX_ASYNCEVT_ENABLE:
2037 case MBX_PORT_CAPABILITIES:
2038 case MBX_PORT_IOV_CONTROL:
2039 case MBX_SLI4_CONFIG:
2040 case MBX_SLI4_REQ_FTRS:
2042 case MBX_UNREG_FCFI:
2047 case MBX_RESUME_RPI:
2048 case MBX_READ_EVENT_LOG_STATUS:
2049 case MBX_READ_EVENT_LOG:
2050 case MBX_SECURITY_MGMT:
2062 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2063 * @phba: Pointer to HBA context object.
2064 * @pmboxq: Pointer to mailbox command.
2066 * This is the completion handler for mailbox commands issued from the
2067 * lpfc_sli_issue_mbox_wait function. It is called by the mailbox event
2068 * handler function with no lock held. This function will wake up the
2069 * thread waiting on the wait queue pointed to by context1 of the mailbox.
2073 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2075 wait_queue_head_t *pdone_q;
2076 unsigned long drvr_flag;
2079 * If pdone_q is empty, the driver thread gave up waiting and
2080 * continued running.
2082 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2083 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2084 pdone_q = (wait_queue_head_t *) pmboxq->context1;
2086 wake_up_interruptible(pdone_q);
2087 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
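/*
 * Waiter-side sketch (a minimal outline, not the driver's actual
 * lpfc_sli_issue_mbox_wait implementation; "timeout" is an assumed
 * caller-supplied second count): the issuing thread parks a wait queue
 * head in context1, installs this handler, and sleeps until the handler
 * sets LPFC_MBX_WAKE:
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
 *	pmboxq->context1 = &done_q;
 *	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 *	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *	if (retval != MBX_NOT_FINISHED)
 *		wait_event_interruptible_timeout(done_q,
 *				pmboxq->mbox_flag & LPFC_MBX_WAKE,
 *				timeout * HZ);
 */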
2093 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2094 * @phba: Pointer to HBA context object.
2095 * @pmb: Pointer to mailbox object.
2097 * This function is the default mailbox completion handler. It
2098 * frees the memory resources associated with the completed mailbox
2099 * command. If the completed command is a REG_LOGIN mailbox command,
2100 * this function will issue an UNREG_LOGIN to reclaim the RPI.
2103 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2105 struct lpfc_vport *vport = pmb->vport;
2106 struct lpfc_dmabuf *mp;
2107 struct lpfc_nodelist *ndlp;
2108 struct Scsi_Host *shost;
2112 mp = (struct lpfc_dmabuf *) (pmb->context1);
2115 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2120 * If a REG_LOGIN succeeded after the node was destroyed or the node
2121 * is in re-discovery, the driver needs to clean up the RPI.
2123 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2124 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2125 !pmb->u.mb.mbxStatus) {
2126 rpi = pmb->u.mb.un.varWords[0];
2127 vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
2128 lpfc_unreg_login(phba, vpi, rpi, pmb);
2129 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2130 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2131 if (rc != MBX_NOT_FINISHED)
2135 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2136 !(phba->pport->load_flag & FC_UNLOADING) &&
2137 !pmb->u.mb.mbxStatus) {
2138 shost = lpfc_shost_from_vport(vport);
2139 spin_lock_irq(shost->host_lock);
2140 vport->vpi_state |= LPFC_VPI_REGISTERED;
2141 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2142 spin_unlock_irq(shost->host_lock);
2145 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2146 ndlp = (struct lpfc_nodelist *)pmb->context2;
2148 pmb->context2 = NULL;
2151 /* Check security permission status on INIT_LINK mailbox command */
2152 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2153 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2154 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2155 "2860 SLI authentication is required "
2156 "for INIT_LINK but has not done yet\n");
2158 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2159 lpfc_sli4_mbox_cmd_free(phba, pmb);
2161 mempool_free(pmb, phba->mbox_mem_pool);
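/*
 * Fire-and-forget sketch (illustrative only): a caller that does not need
 * the mailbox result installs this default handler so completion reclaims
 * the mailbox memory, and frees it itself only when the issue fails:
 *
 *	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 *	if (rc == MBX_NOT_FINISHED)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 */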
2165 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2166 * @phba: Pointer to HBA context object.
2168 * This function is called with no lock held. It processes all the
2169 * completed mailbox commands and gives them to the upper layers. The
2170 * interrupt service routine processes the mailbox completion interrupt,
2171 * adds the completed mailbox commands to the mboxq_cmpl queue and signals
2172 * the worker thread. The worker thread calls lpfc_sli_handle_mb_event,
2173 * which hands the completed mailbox commands on the mboxq_cmpl queue back
2174 * to the upper layers by calling the completion handler function of each
2175 * mailbox.
2178 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2185 phba->sli.slistat.mbox_event++;
2187 /* Get all completed mailbox buffers into the cmplq */
2188 spin_lock_irq(&phba->hbalock);
2189 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2190 spin_unlock_irq(&phba->hbalock);
2192 /* Get a Mailbox buffer to setup mailbox commands for callback */
2194 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2200 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2202 lpfc_debugfs_disc_trc(pmb->vport,
2203 LPFC_DISC_TRC_MBOX_VPORT,
2204 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2205 (uint32_t)pmbox->mbxCommand,
2206 pmbox->un.varWords[0],
2207 pmbox->un.varWords[1]);
2210 lpfc_debugfs_disc_trc(phba->pport,
2212 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2213 (uint32_t)pmbox->mbxCommand,
2214 pmbox->un.varWords[0],
2215 pmbox->un.varWords[1]);
2220 * It is a fatal error if an unknown mbox command completes.
2222 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2224 /* Unknown mailbox command compl */
2225 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2226 "(%d):0323 Unknown Mailbox command "
2228 pmb->vport ? pmb->vport->vpi : 0,
2230 lpfc_sli4_mbox_opcode_get(phba, pmb));
2231 phba->link_state = LPFC_HBA_ERROR;
2232 phba->work_hs = HS_FFER3;
2233 lpfc_handle_eratt(phba);
2237 if (pmbox->mbxStatus) {
2238 phba->sli.slistat.mbox_stat_err++;
2239 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2240 /* Mbox cmd cmpl error - RETRYing */
2241 lpfc_printf_log(phba, KERN_INFO,
2243 "(%d):0305 Mbox cmd cmpl "
2244 "error - RETRYing Data: x%x "
2245 "(x%x) x%x x%x x%x\n",
2246 pmb->vport ? pmb->vport->vpi : 0,
2248 lpfc_sli4_mbox_opcode_get(phba,
2251 pmbox->un.varWords[0],
2252 pmb->vport->port_state);
2253 pmbox->mbxStatus = 0;
2254 pmbox->mbxOwner = OWN_HOST;
2255 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2256 if (rc != MBX_NOT_FINISHED)
2261 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2262 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2263 "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
2264 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
2265 pmb->vport ? pmb->vport->vpi : 0,
2267 lpfc_sli4_mbox_opcode_get(phba, pmb),
2269 *((uint32_t *) pmbox),
2270 pmbox->un.varWords[0],
2271 pmbox->un.varWords[1],
2272 pmbox->un.varWords[2],
2273 pmbox->un.varWords[3],
2274 pmbox->un.varWords[4],
2275 pmbox->un.varWords[5],
2276 pmbox->un.varWords[6],
2277 pmbox->un.varWords[7]);
2280 pmb->mbox_cmpl(phba, pmb);
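/*
 * Dispatch sketch (a rough outline; treating HA_MBATT as the mailbox
 * attention bit in work_ha is an assumption, not this file's code): the
 * worker thread funnels queued completions through this handler:
 *
 *	if (phba->work_ha & HA_MBATT)
 *		lpfc_sli_handle_mb_event(phba);
 */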
2286 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2287 * @phba: Pointer to HBA context object.
2288 * @pring: Pointer to driver SLI ring object.
2291 * This function is called with no lock held. When the QUE_BUFTAG_BIT is
2292 * set in the tag, the buffer was posted for a particular exchange and
2293 * the function returns the buffer without replacing it.
2294 * If the buffer is for unsolicited ELS or CT traffic, this function
2295 * returns the buffer and also posts another buffer to the firmware.
2297 static struct lpfc_dmabuf *
2298 lpfc_sli_get_buff(struct lpfc_hba *phba,
2299 struct lpfc_sli_ring *pring,
2302 struct hbq_dmabuf *hbq_entry;
2304 if (tag & QUE_BUFTAG_BIT)
2305 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2306 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2309 return &hbq_entry->dbuf;
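/*
 * Tag-dispatch sketch (illustrative only): receive handlers pass the tag
 * from the response iocb straight through, and the QUE_BUFTAG_BIT test
 * above decides which buffer pool the tag belongs to:
 *
 *	buf = lpfc_sli_get_buff(phba, pring, irsp->un.ulpWord[3]);
 *	if (!buf)
 *		return;	// tag matched no posted buffer
 */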
2313 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2314 * @phba: Pointer to HBA context object.
2315 * @pring: Pointer to driver SLI ring object.
2316 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2317 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2318 * @fch_type: the type for the first frame of the sequence.
2320 * This function is called with no lock held. This function uses the r_ctl and
2321 * type of the received sequence to find the correct callback function to call
2322 * to process the sequence.
2325 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2326 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2331 /* Unsolicited responses */
2332 if (pring->prt[0].profile) {
2333 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2334 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2338 /* We must search, based on rctl / type, for the right routine */
2340 for (i = 0; i < pring->num_mask; i++) {
2341 if ((pring->prt[i].rctl == fch_r_ctl) &&
2342 (pring->prt[i].type == fch_type)) {
2343 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2344 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2345 (phba, pring, saveq);
2353 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
2354 * @phba: Pointer to HBA context object.
2355 * @pring: Pointer to driver SLI ring object.
2356 * @saveq: Pointer to the unsolicited iocb.
2358 * This function is called with no lock held by the ring event handler
2359 * when there is an unsolicited iocb posted to the response ring by the
2360 * firmware. This function gets the buffer associated with the iocbs
2361 * and calls the event handler for the ring. This function handles both
2362 * qring buffers and hbq buffers.
2363 * When the function returns 1, the caller can free the iocb object;
2364 * otherwise upper layer functions will free the iocb objects.
2367 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2368 struct lpfc_iocbq *saveq)
2372 uint32_t Rctl, Type;
2374 struct lpfc_iocbq *iocbq;
2375 struct lpfc_dmabuf *dmzbuf;
2378 irsp = &(saveq->iocb);
2380 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2381 if (pring->lpfc_sli_rcv_async_status)
2382 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2384 lpfc_printf_log(phba,
2387 "0316 Ring %d handler: unexpected "
2388 "ASYNC_STATUS iocb received evt_code "
2391 irsp->un.asyncstat.evt_code);
2395 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2396 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2397 if (irsp->ulpBdeCount > 0) {
2398 dmzbuf = lpfc_sli_get_buff(phba, pring,
2399 irsp->un.ulpWord[3]);
2400 lpfc_in_buf_free(phba, dmzbuf);
2403 if (irsp->ulpBdeCount > 1) {
2404 dmzbuf = lpfc_sli_get_buff(phba, pring,
2405 irsp->unsli3.sli3Words[3]);
2406 lpfc_in_buf_free(phba, dmzbuf);
2409 if (irsp->ulpBdeCount > 2) {
2410 dmzbuf = lpfc_sli_get_buff(phba, pring,
2411 irsp->unsli3.sli3Words[7]);
2412 lpfc_in_buf_free(phba, dmzbuf);
2418 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2419 if (irsp->ulpBdeCount != 0) {
2420 saveq->context2 = lpfc_sli_get_buff(phba, pring,
2421 irsp->un.ulpWord[3]);
2422 if (!saveq->context2)
2423 lpfc_printf_log(phba,
2426 "0341 Ring %d Cannot find buffer for "
2427 "an unsolicited iocb. tag 0x%x\n",
2429 irsp->un.ulpWord[3]);
2431 if (irsp->ulpBdeCount == 2) {
2432 saveq->context3 = lpfc_sli_get_buff(phba, pring,
2433 irsp->unsli3.sli3Words[7]);
2434 if (!saveq->context3)
2435 lpfc_printf_log(phba,
2438 "0342 Ring %d Cannot find buffer for an"
2439 " unsolicited iocb. tag 0x%x\n",
2441 irsp->unsli3.sli3Words[7]);
2443 list_for_each_entry(iocbq, &saveq->list, list) {
2444 irsp = &(iocbq->iocb);
2445 if (irsp->ulpBdeCount != 0) {
2446 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2447 irsp->un.ulpWord[3]);
2448 if (!iocbq->context2)
2449 lpfc_printf_log(phba,
2452 "0343 Ring %d Cannot find "
2453 "buffer for an unsolicited iocb"
2454 ". tag 0x%x\n", pring->ringno,
2455 irsp->un.ulpWord[3]);
2457 if (irsp->ulpBdeCount == 2) {
2458 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2459 irsp->unsli3.sli3Words[7]);
2460 if (!iocbq->context3)
2461 lpfc_printf_log(phba,
2464 "0344 Ring %d Cannot find "
2465 "buffer for an unsolicited "
2468 irsp->unsli3.sli3Words[7]);
2472 if (irsp->ulpBdeCount != 0 &&
2473 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2474 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2477 /* search the iocb_continue_saveq for the same XRI */
2478 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2479 if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) {
2480 list_add_tail(&saveq->list, &iocbq->list);
2486 list_add_tail(&saveq->clist,
2487 &pring->iocb_continue_saveq);
2488 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2489 list_del_init(&iocbq->clist);
2491 irsp = &(saveq->iocb);
2495 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2496 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2497 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2498 Rctl = FC_RCTL_ELS_REQ;
2501 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2502 Rctl = w5p->hcsw.Rctl;
2503 Type = w5p->hcsw.Type;
2505 /* Firmware Workaround */
2506 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2507 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2508 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2509 Rctl = FC_RCTL_ELS_REQ;
2511 w5p->hcsw.Rctl = Rctl;
2512 w5p->hcsw.Type = Type;
2516 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
2517 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2518 "0313 Ring %d handler: unexpected Rctl x%x "
2519 "Type x%x received\n",
2520 pring->ringno, Rctl, Type);
2526 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
2527 * @phba: Pointer to HBA context object.
2528 * @pring: Pointer to driver SLI ring object.
2529 * @prspiocb: Pointer to response iocb object.
2531 * This function looks up the iocb_lookup table to get the command iocb
2532 * corresponding to the given response iocb using the iotag of the
2533 * response iocb. This function is called with the hbalock held.
2534 * This function returns the command iocb object if it finds the command
2535 * iocb else returns NULL.
2537 static struct lpfc_iocbq *
2538 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2539 struct lpfc_sli_ring *pring,
2540 struct lpfc_iocbq *prspiocb)
2542 struct lpfc_iocbq *cmd_iocb = NULL;
2545 iotag = prspiocb->iocb.ulpIoTag;
2547 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2548 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2549 list_del_init(&cmd_iocb->list);
2550 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
2551 pring->txcmplq_cnt--;
2552 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
2557 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2558 "0317 iotag x%x is out off "
2559 "range: max iotag x%x wd0 x%x\n",
2560 iotag, phba->sli.last_iotag,
2561 *(((uint32_t *) &prspiocb->iocb) + 7));
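/*
 * Caller sketch (illustrative only): the lookup must run under the
 * hbalock, exactly as lpfc_sli_process_sol_iocb does below:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflag);
 *	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
 *	spin_unlock_irqrestore(&phba->hbalock, iflag);
 */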
2566 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2567 * @phba: Pointer to HBA context object.
2568 * @pring: Pointer to driver SLI ring object.
2571 * This function looks up the iocb_lookup table to get the command iocb
2572 * corresponding to the given iotag. This function is called with the
2574 * hbalock held. It returns the command iocb object if it finds the
2575 * command iocb; otherwise it returns NULL.
2577 static struct lpfc_iocbq *
2578 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2579 struct lpfc_sli_ring *pring, uint16_t iotag)
2581 struct lpfc_iocbq *cmd_iocb;
2583 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2584 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2585 list_del_init(&cmd_iocb->list);
2586 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
2587 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
2588 pring->txcmplq_cnt--;
2593 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2594 "0372 iotag x%x is out off range: max iotag (x%x)\n",
2595 iotag, phba->sli.last_iotag);
2600 * lpfc_sli_process_sol_iocb - process solicited iocb completion
2601 * @phba: Pointer to HBA context object.
2602 * @pring: Pointer to driver SLI ring object.
2603 * @saveq: Pointer to the response iocb to be processed.
2605 * This function is called by the ring event handler for non-fcp
2606 * rings when there is a new response iocb in the response ring.
2607 * The caller is not required to hold any locks. This function
2608 * gets the command iocb associated with the response iocb and
2609 * calls the completion handler for the command iocb. If there
2610 * is no completion handler, the function will free the resources
2611 * associated with command iocb. If the response iocb is for
2612 * an already aborted command iocb, the status of the completion
2613 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
2614 * This function always returns 1.
2617 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2618 struct lpfc_iocbq *saveq)
2620 struct lpfc_iocbq *cmdiocbp;
2622 unsigned long iflag;
2624 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
2625 spin_lock_irqsave(&phba->hbalock, iflag);
2626 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
2627 spin_unlock_irqrestore(&phba->hbalock, iflag);
2630 if (cmdiocbp->iocb_cmpl) {
2632 * If an ELS command failed, send an event to mgmt
2635 if (saveq->iocb.ulpStatus &&
2636 (pring->ringno == LPFC_ELS_RING) &&
2637 (cmdiocbp->iocb.ulpCommand ==
2638 CMD_ELS_REQUEST64_CR))
2639 lpfc_send_els_failure_event(phba,
2643 * Post all ELS completions to the worker thread.
2644 * All others are passed to the completion callback.
2646 if (pring->ringno == LPFC_ELS_RING) {
2647 if ((phba->sli_rev < LPFC_SLI_REV4) &&
2648 (cmdiocbp->iocb_flag &
2649 LPFC_DRIVER_ABORTED)) {
2650 spin_lock_irqsave(&phba->hbalock,
2652 cmdiocbp->iocb_flag &=
2653 ~LPFC_DRIVER_ABORTED;
2654 spin_unlock_irqrestore(&phba->hbalock,
2656 saveq->iocb.ulpStatus =
2657 IOSTAT_LOCAL_REJECT;
2658 saveq->iocb.un.ulpWord[4] =
2661 /* Firmware could still be in the middle
2662 * of DMAing the payload, so don't free the
2663 * data buffer until after a heartbeat.
2665 spin_lock_irqsave(&phba->hbalock,
2667 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
2668 spin_unlock_irqrestore(&phba->hbalock,
2671 if (phba->sli_rev == LPFC_SLI_REV4) {
2672 if (saveq->iocb_flag &
2673 LPFC_EXCHANGE_BUSY) {
2674 /* Set cmdiocb flag for the
2675 * exchange busy so sgl (xri)
2676 * will not be released until
2677 * the abort xri is received
2681 &phba->hbalock, iflag);
2682 cmdiocbp->iocb_flag |=
2684 spin_unlock_irqrestore(
2685 &phba->hbalock, iflag);
2687 if (cmdiocbp->iocb_flag &
2688 LPFC_DRIVER_ABORTED) {
2690 * Clear LPFC_DRIVER_ABORTED
2691 * bit in case it was driver
2695 &phba->hbalock, iflag);
2696 cmdiocbp->iocb_flag &=
2697 ~LPFC_DRIVER_ABORTED;
2698 spin_unlock_irqrestore(
2699 &phba->hbalock, iflag);
2700 cmdiocbp->iocb.ulpStatus =
2701 IOSTAT_LOCAL_REJECT;
2702 cmdiocbp->iocb.un.ulpWord[4] =
2703 IOERR_ABORT_REQUESTED;
2705 * For SLI4, the irsiocb contains
2706 * NO_XRI in sli_xritag, so it
2707 * does not affect the sgl (xri)
2708 * release process.
2710 saveq->iocb.ulpStatus =
2711 IOSTAT_LOCAL_REJECT;
2712 saveq->iocb.un.ulpWord[4] =
2715 &phba->hbalock, iflag);
2717 LPFC_DELAY_MEM_FREE;
2718 spin_unlock_irqrestore(
2719 &phba->hbalock, iflag);
2723 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
2725 lpfc_sli_release_iocbq(phba, cmdiocbp);
2728 * Unknown initiating command based on the response iotag.
2729 * This could be the case on the ELS ring because of
2732 if (pring->ringno != LPFC_ELS_RING) {
2734 * Ring <ringno> handler: unexpected completion IoTag
2737 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2738 "0322 Ring %d handler: "
2739 "unexpected completion IoTag x%x "
2740 "Data: x%x x%x x%x x%x\n",
2742 saveq->iocb.ulpIoTag,
2743 saveq->iocb.ulpStatus,
2744 saveq->iocb.un.ulpWord[4],
2745 saveq->iocb.ulpCommand,
2746 saveq->iocb.ulpContext);
2754 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
2755 * @phba: Pointer to HBA context object.
2756 * @pring: Pointer to driver SLI ring object.
2758 * This function is called from the iocb ring event handlers when the
2759 * put pointer is ahead of the get pointer for a ring. It signals an
2760 * error attention condition to the worker thread, and the worker
2761 * thread will transition the HBA to the offline state.
2764 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2766 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2768 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
2769 * rsp ring <portRspMax>
2771 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2772 "0312 Ring %d handler: portRspPut %d "
2773 "is bigger than rsp ring %d\n",
2774 pring->ringno, le32_to_cpu(pgp->rspPutInx),
2777 phba->link_state = LPFC_HBA_ERROR;
2780 * All error attention handlers are posted to
2783 phba->work_ha |= HA_ERATT;
2784 phba->work_hs = HS_FFER3;
2786 lpfc_worker_wake_up(phba);
2792 * lpfc_poll_eratt - Error attention polling timer timeout handler
2793 * @ptr: Pointer to address of HBA context object.
2795 * This function is invoked by the Error Attention polling timer when the
2796 * timer times out. It will check the SLI Error Attention register for
2797 * possible attention events. If so, it will post an Error Attention event
2798 * and wake up the worker thread to process it. Otherwise, it will set up the
2799 * Error Attention polling timer for the next poll.
2801 void lpfc_poll_eratt(unsigned long ptr)
2803 struct lpfc_hba *phba;
2806 phba = (struct lpfc_hba *)ptr;
2808 /* Check chip HA register for error event */
2809 eratt = lpfc_sli_check_eratt(phba);
2812 /* Tell the worker thread there is work to do */
2813 lpfc_worker_wake_up(phba);
2815 /* Restart the timer for next eratt poll */
2816 mod_timer(&phba->eratt_poll, jiffies +
2817 HZ * LPFC_ERATT_POLL_INTERVAL);
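/*
 * Arming sketch (a minimal outline; the attach-time specifics are an
 * assumption, not this file's code): the poll is driven by a classic
 * kernel timer that carries the phba pointer as its data word:
 *
 *	setup_timer(&phba->eratt_poll, lpfc_poll_eratt,
 *		    (unsigned long)phba);
 *	mod_timer(&phba->eratt_poll,
 *		  jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
 */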
2823 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
2824 * @phba: Pointer to HBA context object.
2825 * @pring: Pointer to driver SLI ring object.
2826 * @mask: Host attention register mask for this ring.
2828 * This function is called from the interrupt context when there is a ring
2829 * event for the fcp ring. The caller does not hold any lock.
2830 * The function processes each response iocb in the response ring until it
2831 * finds an iocb with the LE bit set, chaining all the iocbs up to the
2832 * iocb with the LE bit set. The function calls the completion handler of
2833 * the command iocb if the response iocb indicates a completion for a
2834 * command iocb or an abort completion. It calls the lpfc_sli_process_unsol_iocb
2835 * function if this is an unsolicited iocb.
2836 * This routine presumes LPFC_FCP_RING handling and doesn't bother
2837 * to check it explicitly.
2840 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2841 struct lpfc_sli_ring *pring, uint32_t mask)
2843 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2844 IOCB_t *irsp = NULL;
2845 IOCB_t *entry = NULL;
2846 struct lpfc_iocbq *cmdiocbq = NULL;
2847 struct lpfc_iocbq rspiocbq;
2849 uint32_t portRspPut, portRspMax;
2851 lpfc_iocb_type type;
2852 unsigned long iflag;
2853 uint32_t rsp_cmpl = 0;
2855 spin_lock_irqsave(&phba->hbalock, iflag);
2856 pring->stats.iocb_event++;
2859 * The next available response entry should never exceed the maximum
2860 * entries. If it does, treat it as an adapter hardware error.
2862 portRspMax = pring->numRiocb;
2863 portRspPut = le32_to_cpu(pgp->rspPutInx);
2864 if (unlikely(portRspPut >= portRspMax)) {
2865 lpfc_sli_rsp_pointers_error(phba, pring);
2866 spin_unlock_irqrestore(&phba->hbalock, iflag);
2869 if (phba->fcp_ring_in_use) {
2870 spin_unlock_irqrestore(&phba->hbalock, iflag);
2873 phba->fcp_ring_in_use = 1;
2876 while (pring->rspidx != portRspPut) {
2878 * Fetch an entry off the ring and copy it into a local data
2879 * structure. The copy involves a byte-swap, since the
2880 * network byte order and the PCI byte order differ.
2882 entry = lpfc_resp_iocb(phba, pring);
2883 phba->last_completion_time = jiffies;
2885 if (++pring->rspidx >= portRspMax)
2888 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
2889 (uint32_t *) &rspiocbq.iocb,
2890 phba->iocb_rsp_size);
2891 INIT_LIST_HEAD(&(rspiocbq.list));
2892 irsp = &rspiocbq.iocb;
2894 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
2895 pring->stats.iocb_rsp++;
2898 if (unlikely(irsp->ulpStatus)) {
2900 * If resource errors are reported from the HBA, reduce
2901 * the queue depths of the SCSI devices.
2903 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2904 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2905 spin_unlock_irqrestore(&phba->hbalock, iflag);
2906 phba->lpfc_rampdown_queue_depth(phba);
2907 spin_lock_irqsave(&phba->hbalock, iflag);
2910 /* Rsp ring <ringno> error: IOCB */
2911 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2912 "0336 Rsp Ring %d error: IOCB Data: "
2913 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
2915 irsp->un.ulpWord[0],
2916 irsp->un.ulpWord[1],
2917 irsp->un.ulpWord[2],
2918 irsp->un.ulpWord[3],
2919 irsp->un.ulpWord[4],
2920 irsp->un.ulpWord[5],
2921 *(uint32_t *)&irsp->un1,
2922 *((uint32_t *)&irsp->un1 + 1));
2926 case LPFC_ABORT_IOCB:
2929 * Idle exchange closed via ABTS from port. No iocb
2930 * resources need to be recovered.
2932 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
2933 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2934 "0333 IOCB cmd 0x%x"
2935 " processed. Skipping"
2941 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
2943 if (unlikely(!cmdiocbq))
2945 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
2946 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
2947 if (cmdiocbq->iocb_cmpl) {
2948 spin_unlock_irqrestore(&phba->hbalock, iflag);
2949 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
2951 spin_lock_irqsave(&phba->hbalock, iflag);
2954 case LPFC_UNSOL_IOCB:
2955 spin_unlock_irqrestore(&phba->hbalock, iflag);
2956 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
2957 spin_lock_irqsave(&phba->hbalock, iflag);
2960 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2961 char adaptermsg[LPFC_MAX_ADPTMSG];
2962 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
2963 memcpy(&adaptermsg[0], (uint8_t *) irsp,
2965 dev_warn(&((phba->pcidev)->dev),
2967 phba->brd_no, adaptermsg);
2969 /* Unknown IOCB command */
2970 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2971 "0334 Unknown IOCB command "
2972 "Data: x%x, x%x x%x x%x x%x\n",
2973 type, irsp->ulpCommand,
2982 * The response IOCB has been processed. Update the ring
2983 * pointer in SLIM. If the port response put pointer has not
2984 * been updated, sync the pgp->rspPutInx and fetch the new port
2985 * response put pointer.
2987 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
2989 if (pring->rspidx == portRspPut)
2990 portRspPut = le32_to_cpu(pgp->rspPutInx);
2993 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
2994 pring->stats.iocb_rsp_full++;
2995 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
2996 writel(status, phba->CAregaddr);
2997 readl(phba->CAregaddr);
2999 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3000 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3001 pring->stats.iocb_cmd_empty++;