2 * Copyright (C) 2005 - 2009 ServerEngines
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@serverengines.com
14 * 209 N. Fair Oaks Ave
/*
 * Ring the MCC queue doorbell: encode the MCC queue id plus a posted
 * count of 1 and write it to the MCCQ doorbell register.
 * NOTE(review): this excerpt is missing lines (the 'val' declaration
 * and the closing brace are not visible) -- confirm against full source.
 */
22 static void be_mcc_notify(struct beiscsi_hba *phba)
24 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
27 val |= mccq->id & DB_MCCQ_RING_ID_MASK; /* ring id in the low bits */
28 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; /* one new WRB posted */
29 iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
/*
 * Test whether an MCC completion entry is new: a non-zero 'flags'
 * word marks a fresh entry, which is byte-swapped to CPU order here
 * (valid bit is sanity-checked with WARN_ON).
 * NOTE(review): the return statements are not visible in this excerpt.
 */
32 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
34 if (compl->flags != 0) {
35 compl->flags = le32_to_cpu(compl->flags);
36 WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0); /* must carry the valid bit */
/* Mark a consumed MCC completion entry as used so it is not seen as
 * new again (body not visible in this excerpt -- presumably clears
 * compl->flags; confirm against full source). */
42 static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
/*
 * Decode one MCC completion: convert the status dwords to CPU
 * endianness, extract the completion status, and log both the base and
 * extended status when the command did not succeed.
 * NOTE(review): the return paths are not visible in this excerpt.
 */
47 static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
48 struct be_mcc_compl *compl)
50 u16 compl_status, extd_status;
52 be_dws_le_to_cpu(compl, 4); /* only the first dword needs swapping here */
54 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
55 CQE_STATUS_COMPL_MASK;
56 if (compl_status != MCC_STATUS_SUCCESS) {
57 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
59 dev_err(&ctrl->pdev->dev,
60 "error in cmd completion: status(compl/extd)=%d/%d\n",
61 compl_status, extd_status);
/* True when the async-event trailer's event code field identifies a
 * link-state event. */
68 static inline bool is_link_state_evt(u32 trailer)
70 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
71 ASYNC_TRAILER_EVENT_CODE_MASK) ==
72 ASYNC_EVENT_CODE_LINK_STATE);
/*
 * Fetch the next new completion from the MCC CQ tail, advancing the
 * tail when one is found.  NOTE(review): the no-new-entry (presumably
 * NULL-returning) path is not visible in this excerpt.
 */
75 static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
77 struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq;
78 struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
80 if (be_mcc_compl_is_new(compl)) {
81 queue_tail_inc(mcc_cq);
/* iscsi_host_for_each_session() callback: report a connection failure
 * so the iSCSI midlayer tears down and recovers the session. */
87 static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
89 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
/*
 * Handle an async link-state event from the adapter: on link down,
 * record BE_ADAPTER_LINK_DOWN in phba->state; on link up, mark the
 * adapter up and fail every session so the midlayer re-logs-in;
 * anything else is logged as unexpected.
 * NOTE(review): break statements and the port argument to SE_DEBUG are
 * not visible in this excerpt.
 */
92 static void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
93 struct be_async_event_link_state *evt)
95 switch (evt->port_link_status) {
96 case ASYNC_EVENT_LINK_DOWN:
97 SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d \n",
99 phba->state |= BE_ADAPTER_LINK_DOWN;
101 case ASYNC_EVENT_LINK_UP:
102 phba->state = BE_ADAPTER_UP;
103 SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d \n",
105 iscsi_host_for_each_session(phba->shost,
106 be2iscsi_fail_session); /* force session recovery on the fresh link */
109 SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on"
110 "Physical Port %d \n",
111 evt->port_link_status,
/*
 * Ring a completion-queue doorbell: acknowledge 'num_popped' processed
 * entries for CQ 'qid' and, when 'arm' is set, re-arm the CQ to raise
 * further events.  NOTE(review): the 'val' declaration and the arm
 * conditional are not fully visible in this excerpt.
 */
116 static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm,
120 val |= qid & DB_CQ_RING_ID_MASK; /* CQ id in the low bits */
122 val |= 1 << DB_CQ_REARM_SHIFT; /* re-arm so the CQ keeps generating events */
123 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
124 iowrite32(val, phba->db_va + DB_CQ_OFFSET);
/*
 * Drain the MCC completion queue under mcc_cq_lock (BH-safe):
 * async entries must be link-state events and are dispatched to
 * beiscsi_async_link_state_process(); command completions go through
 * be_mcc_compl_process() and release one outstanding MCCQ slot.
 * Afterwards the CQ doorbell is rung with the consumed count and
 * re-armed.  Returns the status of the last decoded completion
 * (0 when none failed).
 */
128 int beiscsi_process_mcc(struct beiscsi_hba *phba)
130 struct be_mcc_compl *compl;
131 int num = 0, status = 0;
132 struct be_ctrl_info *ctrl = &phba->ctrl;
134 spin_lock_bh(&phba->ctrl.mcc_cq_lock);
135 while ((compl = be_mcc_compl_get(phba))) {
136 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
137 /* Interpret flags as an async trailer */
138 BUG_ON(!is_link_state_evt(compl->flags));
140 /* Interpret compl as a async link evt */
141 beiscsi_async_link_state_process(phba,
142 (struct be_async_event_link_state *) compl);
143 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
144 status = be_mcc_compl_process(ctrl, compl);
145 atomic_dec(&phba->ctrl.mcc_obj.q.used); /* one MCCQ WRB retired */
147 be_mcc_compl_use(compl); /* hand the CQE back to hardware */
152 beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num);
154 spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
158 /* Wait till no more pending mcc requests are present */
/*
 * Polls beiscsi_process_mcc() until the outstanding-WRB count on the
 * MCC queue drops to zero or the poll budget (mcc_timeout iterations,
 * ~5s) is exhausted; logs and errors out on timeout.
 * NOTE(review): loop-body delay and return statements are not visible
 * in this excerpt.
 */
159 static int be_mcc_wait_compl(struct beiscsi_hba *phba)
161 #define mcc_timeout 120000 /* 5s timeout */
163 for (i = 0; i < mcc_timeout; i++) {
164 status = beiscsi_process_mcc(phba);
168 if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0)
172 if (i == mcc_timeout) {
173 dev_err(&phba->pcidev->dev, "mccq poll timed out\n");
179 /* Notify MCC requests and wait for completion */
/* Synchronous submit: ring the MCCQ doorbell (be_mcc_notify(), not
 * visible in this excerpt) and poll until all posted WRBs complete. */
180 int be_mcc_notify_wait(struct beiscsi_hba *phba)
183 return be_mcc_wait_compl(phba);
/*
 * Poll the bootstrap-mailbox doorbell until hardware raises the ready
 * bit.  Starts with short usec waits and backs off to mdelay(); logs
 * and fails if the doorbell never becomes ready.
 * NOTE(review): the loop structure and return paths are not fully
 * visible in this excerpt.
 */
186 static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
188 #define long_delay 2000
189 void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
190 int cnt = 0, wait = 5; /* in usecs */
194 ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
199 dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n");
205 mdelay(long_delay / 1000); /* back off once the fast polls are spent */
/*
 * Issue the command already staged in the bootstrap mailbox: the
 * mailbox DMA address is written to the doorbell in two steps (upper
 * address bits with the HI flag, then lower bits), waiting for the
 * ready bit between and after the writes, then the completion placed
 * in mbox->compl is validated and decoded.
 * NOTE(review): the iowrite32() calls between the steps are not
 * visible in this excerpt.
 */
213 int be_mbox_notify(struct be_ctrl_info *ctrl)
217 void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
218 struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
219 struct be_mcc_mailbox *mbox = mbox_mem->va;
220 struct be_mcc_compl *compl = &mbox->compl;
222 val &= ~MPU_MAILBOX_DB_RDY_MASK;
223 val |= MPU_MAILBOX_DB_HI_MASK; /* step 1: signal high-address write */
224 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
227 status = be_mbox_db_ready_wait(ctrl);
229 SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed 1\n");
233 val &= ~MPU_MAILBOX_DB_RDY_MASK;
234 val &= ~MPU_MAILBOX_DB_HI_MASK; /* step 2: low-address write */
235 val |= (u32) (mbox_mem->dma >> 4) << 2;
238 status = be_mbox_db_ready_wait(ctrl);
240 SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed 2\n");
243 if (be_mcc_compl_is_new(compl)) {
244 status = be_mcc_compl_process(ctrl, &mbox->compl);
245 be_mcc_compl_use(compl);
247 SE_DEBUG(DBG_LVL_1, "After be_mcc_compl_process \n");
251 dev_err(&ctrl->pdev->dev, "invalid mailbox completion\n");
258 * Insert the mailbox address into the doorbell in two steps
259 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
/* Same two-step doorbell protocol as be_mbox_notify(), but taking the
 * hba pointer; used for commands issued via the bootstrap mailbox
 * before the MCC queue exists. */
261 static int be_mbox_notify_wait(struct beiscsi_hba *phba)
265 void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET;
266 struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem;
267 struct be_mcc_mailbox *mbox = mbox_mem->va;
268 struct be_mcc_compl *compl = &mbox->compl;
269 struct be_ctrl_info *ctrl = &phba->ctrl;
271 val |= MPU_MAILBOX_DB_HI_MASK;
272 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
273 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
276 /* wait for ready to be set */
277 status = be_mbox_db_ready_wait(ctrl);
282 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
283 val |= (u32)(mbox_mem->dma >> 4) << 2;
286 status = be_mbox_db_ready_wait(ctrl);
290 /* A cq entry has been made now */
291 if (be_mcc_compl_is_new(compl)) {
292 status = be_mcc_compl_process(ctrl, &mbox->compl);
293 be_mcc_compl_use(compl);
297 dev_err(&phba->pcidev->dev, "invalid mailbox completion\n");
/*
 * Initialize a WRB header: flag it as embedded (payload inline in the
 * WRB) or record the scatter-gather entry count, set the payload
 * length, and convert the header dwords to little-endian for hardware.
 */
303 void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
304 bool embedded, u8 sge_cnt)
307 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
309 wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
310 MCC_WRB_SGE_CNT_SHIFT; /* non-embedded: number of SGEs */
311 wrb->payload_length = payload_len;
312 be_dws_cpu_to_le(wrb, 8);
/*
 * Fill a FW request header with its subsystem/opcode pair and the
 * request length (total command size minus the header itself).
 */
315 void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
316 u8 subsystem, u8 opcode, int cmd_len)
318 req_hdr->opcode = opcode;
319 req_hdr->subsystem = subsystem;
320 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
/*
 * Describe a DMA region to the FW as a list of 4K pages, splitting its
 * physical address into lo/hi little-endian halves per entry (at most
 * max_pages entries).  NOTE(review): the per-iteration dma advance is
 * not visible in this excerpt.
 */
323 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
324 struct be_dma_mem *mem)
327 u64 dma = (u64) mem->dma;
329 buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
330 for (i = 0; i < buf_pages; i++) {
331 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
332 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
/*
 * Convert an EQ interrupt-delay in microseconds into the hardware
 * delay-multiplier encoding: derive the interrupt rate, scale it
 * against MAX_INTR_RATE with rounding, and clamp to the 10-bit field
 * maximum (1023).
 */
337 static u32 eq_delay_to_mult(u32 usec_delay)
339 #define MAX_INTR_RATE 651042
340 const u32 round = 10; /* fixed-point scale for rounding */
346 u32 interrupt_rate = 1000000 / usec_delay;
347 if (interrupt_rate == 0)
350 multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
351 multiplier /= interrupt_rate;
352 multiplier = (multiplier + round / 2) / round; /* round to nearest */
353 multiplier = min(multiplier, (u32) 1023);
/* Return the WRB embedded in the bootstrap mailbox DMA buffer. */
359 struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
361 return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
/*
 * Take the next WRB slot from the MCC queue head, bump the
 * outstanding-WRB count, and hand it back zeroed.  BUGs if the queue
 * is already full; caller is responsible for serializing access.
 */
364 struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
366 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
367 struct be_mcc_wrb *wrb;
369 BUG_ON(atomic_read(&mccq->used) >= mccq->len); /* queue must not be full */
370 wrb = queue_head_node(mccq);
371 queue_head_inc(mccq);
372 atomic_inc(&mccq->used);
373 memset(wrb, 0, sizeof(*wrb));
/*
 * Create an event queue via the COMMON_EQ_CREATE mailbox command:
 * build the embedded request (page list, EQ context with function id,
 * size encoding and delay multiplier), issue it under mbox_lock, and
 * on success record the FW-assigned EQ id.
 */
378 int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
379 struct be_queue_info *eq, int eq_delay)
381 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
382 struct be_cmd_req_eq_create *req = embedded_payload(wrb);
383 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb); /* response overlays the request */
384 struct be_dma_mem *q_mem = &eq->dma_mem;
387 SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_eq_create\n");
388 spin_lock(&ctrl->mbox_lock);
389 memset(wrb, 0, sizeof(*wrb));
391 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
393 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
394 OPCODE_COMMON_EQ_CREATE, sizeof(*req));
396 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
398 AMAP_SET_BITS(struct amap_eq_context, func, req->context,
399 PCI_FUNC(ctrl->pdev->devfn));
400 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
401 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
402 AMAP_SET_BITS(struct amap_eq_context, count, req->context,
403 __ilog2_u32(eq->len / 256)); /* ring size encoded in units of 256 */
404 AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
405 eq_delay_to_mult(eq_delay));
406 be_dws_cpu_to_le(req->context, sizeof(req->context));
408 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
410 status = be_mbox_notify(ctrl);
412 eq->id = le16_to_cpu(resp->eq_id); /* on success only (check not visible) */
415 spin_unlock(&ctrl->mbox_lock);
/*
 * Send the special FW-initialize mailbox command: its payload is the
 * fixed endianness-signature byte pattern FF 12 34 FF / FF 56 78 FF
 * written directly over the WRB, which the FW uses to detect host
 * byte order.
 */
419 int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
421 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
425 SE_DEBUG(DBG_LVL_8, "In be_cmd_fw_initialize\n");
426 spin_lock(&ctrl->mbox_lock);
427 memset(wrb, 0, sizeof(*wrb));
429 endian_check = (u8 *) wrb;
430 *endian_check++ = 0xFF;
431 *endian_check++ = 0x12;
432 *endian_check++ = 0x34;
433 *endian_check++ = 0xFF;
434 *endian_check++ = 0xFF;
435 *endian_check++ = 0x56;
436 *endian_check++ = 0x78;
437 *endian_check++ = 0xFF;
438 be_dws_cpu_to_le(wrb, sizeof(*wrb));
440 status = be_mbox_notify(ctrl);
442 SE_DEBUG(DBG_LVL_1, "be_cmd_fw_initialize Failed \n");
444 spin_unlock(&ctrl->mbox_lock);
/*
 * Create a completion queue bound to 'eq' via COMMON_CQ_CREATE:
 * program the CQ context (watermark, delay, size, solicited-event and
 * eventable flags, owning EQ and PCI function), attach the page list,
 * and issue the mailbox command under mbox_lock; on success record the
 * FW-assigned CQ id.
 */
448 int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
449 struct be_queue_info *cq, struct be_queue_info *eq,
450 bool sol_evts, bool no_delay, int coalesce_wm)
452 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
453 struct be_cmd_req_cq_create *req = embedded_payload(wrb);
454 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb); /* response overlays the request */
455 struct be_dma_mem *q_mem = &cq->dma_mem;
456 void *ctxt = &req->context;
459 SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_cq_create \n");
460 spin_lock(&ctrl->mbox_lock);
461 memset(wrb, 0, sizeof(*wrb));
463 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
465 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
466 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
468 SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n"); /* guarded debug (condition not visible in excerpt) */
470 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
472 AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
473 AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
474 AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
475 __ilog2_u32(cq->len / 256)); /* ring size in units of 256 */
476 AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
477 AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
478 AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
479 AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
480 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
481 AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
482 PCI_FUNC(ctrl->pdev->devfn));
483 be_dws_cpu_to_le(ctxt, sizeof(req->context));
485 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
487 status = be_mbox_notify(ctrl);
489 cq->id = le16_to_cpu(resp->cq_id); /* on success only (check not visible) */
492 SE_DEBUG(DBG_LVL_1, "In be_cmd_cq_create, status=ox%08x \n",
494 spin_unlock(&ctrl->mbox_lock);
/* Encode a queue length for hardware ring-size fields as
 * log2(len) + 1 via fls(); the maximum encoding (16) is special-cased
 * (handling not visible in this excerpt -- presumably mapped to 0). */
499 static u32 be_encoded_q_len(int q_len)
501 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
502 if (len_encoded == 16)
/*
 * Create the MCC queue bound to completion queue 'cq' via
 * COMMON_MCC_CREATE, using the synchronous mailbox path
 * (be_mbox_notify_wait).  On success records the FW-assigned MCCQ id
 * and marks the queue created.
 * NOTE(review): the assignment of 'ctrl' (presumably &phba->ctrl) is
 * not visible in this excerpt.
 */
507 int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
508 struct be_queue_info *mccq,
509 struct be_queue_info *cq)
511 struct be_mcc_wrb *wrb;
512 struct be_cmd_req_mcc_create *req;
513 struct be_dma_mem *q_mem = &mccq->dma_mem;
514 struct be_ctrl_info *ctrl;
518 spin_lock(&phba->ctrl.mbox_lock);
520 wrb = wrb_from_mbox(&ctrl->mbox_mem);
521 req = embedded_payload(wrb);
522 ctxt = &req->context;
524 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
526 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
527 OPCODE_COMMON_MCC_CREATE, sizeof(*req));
529 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
531 AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
532 PCI_FUNC(phba->pcidev->devfn));
533 AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
534 AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
535 be_encoded_q_len(mccq->len));
536 AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id); /* completions land on this CQ */
538 be_dws_cpu_to_le(ctxt, sizeof(req->context));
540 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
542 status = be_mbox_notify_wait(phba);
544 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
545 mccq->id = le16_to_cpu(resp->id);
546 mccq->created = true;
548 spin_unlock(&phba->ctrl.mbox_lock);
/*
 * Destroy a queue of the given type: map the queue type to its
 * subsystem/opcode destroy command, fill in the queue id (except for
 * SGL removal, which has no id), and issue it over the mailbox under
 * mbox_lock.  Unknown types unlock and bail out.
 * NOTE(review): case labels and break statements are not visible in
 * this excerpt.
 */
553 int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
556 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
557 struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
558 u8 subsys = 0, opcode = 0;
561 SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_q_destroy \n");
562 spin_lock(&ctrl->mbox_lock);
563 memset(wrb, 0, sizeof(*wrb));
564 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
566 switch (queue_type) {
568 subsys = CMD_SUBSYSTEM_COMMON; /* EQ */
569 opcode = OPCODE_COMMON_EQ_DESTROY;
572 subsys = CMD_SUBSYSTEM_COMMON; /* CQ */
573 opcode = OPCODE_COMMON_CQ_DESTROY;
576 subsys = CMD_SUBSYSTEM_COMMON; /* MCCQ */
577 opcode = OPCODE_COMMON_MCC_DESTROY;
580 subsys = CMD_SUBSYSTEM_ISCSI; /* WRBQ */
581 opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
584 subsys = CMD_SUBSYSTEM_ISCSI; /* default PDU queue */
585 opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
588 subsys = CMD_SUBSYSTEM_ISCSI; /* SGL pages */
589 opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
592 spin_unlock(&ctrl->mbox_lock); /* unknown queue type: release lock and fail */
596 be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
597 if (queue_type != QTYPE_SGL)
598 req->id = cpu_to_le16(q->id);
600 status = be_mbox_notify(ctrl);
602 spin_unlock(&ctrl->mbox_lock);
/*
 * Create a default PDU (unsolicited data) queue bound to 'cq' via
 * ISCSI_DEFQ_CREATE: program the default-PDU context (PCI function,
 * ring size, default buffer size, receiving CQ id), attach the page
 * list, and issue the mailbox command; on success record the
 * FW-assigned queue id.
 */
606 int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
607 struct be_queue_info *cq,
608 struct be_queue_info *dq, int length,
611 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
612 struct be_defq_create_req *req = embedded_payload(wrb);
613 struct be_dma_mem *q_mem = &dq->dma_mem;
614 void *ctxt = &req->context;
617 SE_DEBUG(DBG_LVL_8, "In be_cmd_create_default_pdu_queue\n");
618 spin_lock(&ctrl->mbox_lock);
619 memset(wrb, 0, sizeof(*wrb));
621 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
623 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
624 OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
626 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
627 AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0);
628 AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt,
630 AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt,
631 PCI_FUNC(ctrl->pdev->devfn));
632 AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt,
633 be_encoded_q_len(length / sizeof(struct phys_addr)));
634 AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size,
636 AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt,
639 be_dws_cpu_to_le(ctxt, sizeof(req->context));
641 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
643 status = be_mbox_notify(ctrl);
645 struct be_defq_create_resp *resp = embedded_payload(wrb);
647 dq->id = le16_to_cpu(resp->id); /* on success only (check not visible) */
650 spin_unlock(&ctrl->mbox_lock);
/*
 * Create an iSCSI work-request queue (WRBQ) via ISCSI_WRBQ_CREATE:
 * attach the page list for the queue memory and issue the mailbox
 * command; on success record the FW-assigned connection id as the
 * queue id and mark the queue created.
 */
655 int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
656 struct be_queue_info *wrbq)
658 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
659 struct be_wrbq_create_req *req = embedded_payload(wrb);
660 struct be_wrbq_create_resp *resp = embedded_payload(wrb); /* response overlays the request */
663 spin_lock(&ctrl->mbox_lock);
664 memset(wrb, 0, sizeof(*wrb));
666 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
668 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
669 OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
670 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
671 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
673 status = be_mbox_notify(ctrl);
675 wrbq->id = le16_to_cpu(resp->cid); /* on success only (check not visible) */
676 wrbq->created = true;
678 spin_unlock(&ctrl->mbox_lock);
/*
 * Post the iSCSI SGL pages to the FW in chunks: each loop iteration
 * describes as many 4K pages as fit in one be_post_sgl_pages_req,
 * advances the DMA cursor and page offsets, and issues a mailbox
 * command, repeating until all num_pages are posted.  A num_pages
 * value of 0xff appears to be a special marker passed through to the
 * FW (preserved via temp_num_pages).  On a FW failure the loop aborts
 * and previously posted pages are removed via beiscsi_cmd_q_destroy().
 * NOTE(review): this definition continues past the visible end of the
 * excerpt; the do/while opening and final return are not shown.
 */
682 int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
683 struct be_dma_mem *q_mem,
684 u32 page_offset, u32 num_pages)
686 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
687 struct be_post_sgl_pages_req *req = embedded_payload(wrb);
689 unsigned int curr_pages;
690 u32 internal_page_offset = 0;
691 u32 temp_num_pages = num_pages;
693 if (num_pages == 0xff)
696 spin_lock(&ctrl->mbox_lock);
698 memset(wrb, 0, sizeof(*wrb));
699 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
700 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
701 OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
703 curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
705 req->num_pages = min(num_pages, curr_pages); /* cap to one request's page-list capacity */
706 req->page_offset = page_offset;
707 be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
708 q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE); /* advance DMA cursor for next chunk */
709 internal_page_offset += req->num_pages;
710 page_offset += req->num_pages;
711 num_pages -= req->num_pages;
713 if (temp_num_pages == 0xff)
714 req->num_pages = temp_num_pages; /* restore the 0xff "special" marker for the FW */
716 status = be_mbox_notify(ctrl);
719 "FW CMD to map iscsi frags failed.\n");
722 } while (num_pages > 0);
724 spin_unlock(&ctrl->mbox_lock);
726 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); /* roll back partially posted pages */