/*
 * Copyright (C) 2005 - 2011 Emulex
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
/* Must be a power of 2 or else MODULO will BUG_ON */
static int be_get_temp_freq = 64;

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
    return wrb->payload.embedded_payload;
}
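/* Ring the MCC doorbell: the queue id and the number of newly posted
 * WRBs are packed into a single doorbell write.
 */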
static void be_mcc_notify(struct be_adapter *adapter)
{
    struct be_queue_info *mccq = &adapter->mcc_obj.q;
    u32 val = 0;

    if (adapter->eeh_err) {
        dev_info(&adapter->pdev->dev,
            "Error in card detected! Cannot issue commands\n");
        return;
    }

    val |= mccq->id & DB_MCCQ_RING_ID_MASK;
    val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

    wmb();
    iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}
/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian)
 */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
    if (compl->flags != 0) {
        compl->flags = le32_to_cpu(compl->flags);
        BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
        return true;
    } else {
        return false;
    }
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
    compl->flags = 0;
}
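/* Process a single MCC completion: swap the status words to host endian,
 * signal flash command completions, parse stats responses and log any
 * command failure. Returns the base completion status.
 */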
static int be_mcc_compl_process(struct be_adapter *adapter,
    struct be_mcc_compl *compl)
{
    u16 compl_status, extd_status;

    /* Just swap the status to host endian; mcc tag is opaquely copied
     * from mcc_wrb */
    be_dws_le_to_cpu(compl, 4);

    compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
                CQE_STATUS_COMPL_MASK;

    if (((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) ||
        (compl->tag0 == OPCODE_COMMON_WRITE_OBJECT)) &&
        (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
        adapter->flash_status = compl_status;
        complete(&adapter->flash_compl);
    }

    if (compl_status == MCC_STATUS_SUCCESS) {
        if (((compl->tag0 == OPCODE_ETH_GET_STATISTICS) ||
            (compl->tag0 == OPCODE_ETH_GET_PPORT_STATS)) &&
            (compl->tag1 == CMD_SUBSYSTEM_ETH)) {
            be_parse_stats(adapter);
            adapter->stats_cmd_sent = false;
        }
        if (compl->tag0 ==
            OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) {
            struct be_mcc_wrb *mcc_wrb =
                queue_index_node(&adapter->mcc_obj.q,
                        compl->tag1);
            struct be_cmd_resp_get_cntl_addnl_attribs *resp =
                embedded_payload(mcc_wrb);
            adapter->drv_stats.be_on_die_temperature =
                resp->on_die_temperature;
        }
    } else {
        if (compl->tag0 == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
            be_get_temp_freq = 0;

        if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
            compl_status == MCC_STATUS_ILLEGAL_REQUEST)
            goto done;

        if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
            dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
                "permitted to execute this cmd (opcode %d)\n",
                compl->tag0);
        } else {
            extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                    CQE_STATUS_EXTD_MASK;
            dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed: "
                "status %d, extd-status %d\n",
                compl->tag0, compl_status, extd_status);
        }
    }
done:
    return compl_status;
}
/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
        struct be_async_event_link_state *evt)
{
    be_link_status_update(adapter, evt->port_link_status);
}
/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
        struct be_async_event_grp5_cos_priority *evt)
{
    if (evt->valid) {
        adapter->vlan_prio_bmap = evt->available_priority_bmap;
        adapter->recommended_prio &= ~VLAN_PRIO_MASK;
        adapter->recommended_prio |=
            evt->reco_default_priority << VLAN_PRIO_SHIFT;
    }
}
/* Grp5 QOS Speed evt */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
        struct be_async_event_grp5_qos_link_speed *evt)
{
    if (evt->physical_port == adapter->port_num) {
        /* qos_link_speed is in units of 10 Mbps */
        adapter->link_speed = evt->qos_link_speed * 10;
    }
}
/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
        struct be_async_event_grp5_pvid_state *evt)
{
    if (evt->enabled)
        adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
    else
        adapter->pvid = 0;
}
static void be_async_grp5_evt_process(struct be_adapter *adapter,
        u32 trailer, struct be_mcc_compl *evt)
{
    u8 event_type = 0;

    event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
        ASYNC_TRAILER_EVENT_TYPE_MASK;

    switch (event_type) {
    case ASYNC_EVENT_COS_PRIORITY:
        be_async_grp5_cos_priority_process(adapter,
            (struct be_async_event_grp5_cos_priority *)evt);
        break;
    case ASYNC_EVENT_QOS_SPEED:
        be_async_grp5_qos_speed_process(adapter,
            (struct be_async_event_grp5_qos_link_speed *)evt);
        break;
    case ASYNC_EVENT_PVID_STATE:
        be_async_grp5_pvid_state_process(adapter,
            (struct be_async_event_grp5_pvid_state *)evt);
        break;
    default:
        dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
        break;
    }
}
static inline bool is_link_state_evt(u32 trailer)
{
    return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
        ASYNC_TRAILER_EVENT_CODE_MASK) ==
                ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 trailer)
{
    return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
        ASYNC_TRAILER_EVENT_CODE_MASK) ==
                ASYNC_EVENT_CODE_GRP_5);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
    struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
    struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

    if (be_mcc_compl_is_new(compl)) {
        queue_tail_inc(mcc_cq);
        return compl;
    }
    return NULL;
}
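/* Re-arm the MCC CQ so that async events and command completions keep
 * raising interrupts.
 */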
void be_async_mcc_enable(struct be_adapter *adapter)
{
    spin_lock_bh(&adapter->mcc_cq_lock);

    be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
    adapter->mcc_obj.rearm_cq = true;

    spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
    adapter->mcc_obj.rearm_cq = false;
}
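/* Drain the MCC CQ: async entries go to the event handlers above,
 * synchronous entries to be_mcc_compl_process(). Returns the number of
 * completions consumed; *status carries the last command's status.
 */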
int be_process_mcc(struct be_adapter *adapter, int *status)
{
    struct be_mcc_compl *compl;
    int num = 0;
    struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

    spin_lock_bh(&adapter->mcc_cq_lock);
    while ((compl = be_mcc_compl_get(adapter))) {
        if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
            /* Interpret flags as an async trailer */
            if (is_link_state_evt(compl->flags))
                be_async_link_state_process(adapter,
                    (struct be_async_event_link_state *) compl);
            else if (is_grp5_evt(compl->flags))
                be_async_grp5_evt_process(adapter,
                    compl->flags, compl);
        } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
            *status = be_mcc_compl_process(adapter, compl);
            atomic_dec(&mcc_obj->q.used);
        }
        be_mcc_compl_use(compl);
        num++;
    }

    spin_unlock_bh(&adapter->mcc_cq_lock);
    return num;
}
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout 120000 /* 12s timeout */
    int i, num, status = 0;
    struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

    if (adapter->eeh_err)
        return -EIO;

    for (i = 0; i < mcc_timeout; i++) {
        num = be_process_mcc(adapter, &status);
        if (num)
            be_cq_notify(adapter, mcc_obj->cq.id,
                mcc_obj->rearm_cq, num);

        if (atomic_read(&mcc_obj->q.used) == 0)
            break;
        udelay(100);
    }
    if (i == mcc_timeout) {
        dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
        return -1;
    }
    return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
    be_mcc_notify(adapter);
    return be_mcc_wait_compl(adapter);
}
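/* Poll the mailbox doorbell until firmware sets the ready bit, bailing out
 * early if the PCI slot has been disconnected or an EEH error was seen.
 */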
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
    int msecs = 0;
    u32 ready;

    if (adapter->eeh_err) {
        dev_err(&adapter->pdev->dev,
            "Error detected in card. Cannot issue commands\n");
        return -EIO;
    }

    do {
        ready = ioread32(db);
        if (ready == 0xffffffff) {
            dev_err(&adapter->pdev->dev,
                "pci slot disconnected\n");
            return -1;
        }
        ready &= MPU_MAILBOX_DB_RDY_MASK;
        if (ready)
            break;
        if (msecs > 4000) {
            dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
            if (!lancer_chip(adapter))
                be_detect_dump_ue(adapter);
            return -1;
        }
        msleep(1);
        msecs++;
    } while (true);

    return 0;
}
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
    int status;
    u32 val = 0;
    void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
    struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
    struct be_mcc_mailbox *mbox = mbox_mem->va;
    struct be_mcc_compl *compl = &mbox->compl;

    /* wait for ready to be set */
    status = be_mbox_db_ready_wait(adapter, db);
    if (status != 0)
        return status;

    val |= MPU_MAILBOX_DB_HI_MASK;
    /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
    val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
    iowrite32(val, db);

    /* wait for ready to be set */
    status = be_mbox_db_ready_wait(adapter, db);
    if (status != 0)
        return status;

    val = 0;
    /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
    val |= (u32)(mbox_mem->dma >> 4) << 2;
    iowrite32(val, db);

    status = be_mbox_db_ready_wait(adapter, db);
    if (status != 0)
        return status;

    /* A cq entry has been made now */
    if (be_mcc_compl_is_new(compl)) {
        status = be_mcc_compl_process(adapter, &mbox->compl);
        be_mcc_compl_use(compl);
        if (status)
            return status;
    } else {
        dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
        return -1;
    }
    return 0;
}
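/* Read the current POST stage from the EP semaphore register; returns -1
 * if the firmware flagged a POST error.
 */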
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
    u32 sem;

    if (lancer_chip(adapter))
        sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
    else
        sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

    *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
    if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
        return -1;
    else
        return 0;
}

int be_cmd_POST(struct be_adapter *adapter)
{
    u16 stage;
    int status, timeout = 0;
    struct device *dev = &adapter->pdev->dev;

    do {
        status = be_POST_stage_get(adapter, &stage);
        if (status) {
            dev_err(dev, "POST error; stage=0x%x\n", stage);
            return -1;
        } else if (stage != POST_STAGE_ARMFW_RDY) {
            if (msleep_interruptible(2000)) {
                dev_err(dev, "Waiting for POST aborted\n");
                return -EINTR;
            }
            timeout += 2;
        } else {
            return 0;
        }
    } while (timeout < 60);

    dev_err(dev, "POST timeout; stage=0x%x\n", stage);
    return -1;
}
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
    return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
                u8 subsystem, u8 opcode, int cmd_len,
                struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
{
    struct be_sge *sge;

    req_hdr->opcode = opcode;
    req_hdr->subsystem = subsystem;
    req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
    req_hdr->version = 0;

    wrb->tag0 = opcode;
    wrb->tag1 = subsystem;
    wrb->payload_length = cmd_len;
    if (mem) {
        wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
            MCC_WRB_SGE_CNT_SHIFT;
        sge = nonembedded_sgl(wrb);
        sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
        sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(mem->size);
    } else
        wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
    be_dws_cpu_to_le(wrb, 8);
}
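/* Fill a command's physical-address list with the 4K pages spanned by a
 * DMA buffer. Illustrative use, as in the queue-create commands below:
 *
 *	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
 */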
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
            struct be_dma_mem *mem)
{
    int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
    u64 dma = (u64)mem->dma;

    for (i = 0; i < buf_pages; i++) {
        pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
        pages[i].hi = cpu_to_le32(upper_32_bits(dma));
        dma += PAGE_SIZE_4K;
    }
}
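/* Worked example of the integer arithmetic below: usec_delay = 8 gives
 * interrupt_rate = 1000000 / 8 = 125000, so multiplier =
 * (651042 - 125000) * 10 / 125000 = 42, rounded to (42 + 5) / 10 = 4.
 */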
/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE 651042
    const u32 round = 10;
    u32 multiplier;

    if (usec_delay == 0)
        multiplier = 0;
    else {
        u32 interrupt_rate = 1000000 / usec_delay;
        /* Max delay, corresponding to the lowest interrupt rate */
        if (interrupt_rate == 0)
            multiplier = 1023;
        else {
            multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
            multiplier /= interrupt_rate;
            /* Round the multiplier to the closest value.*/
            multiplier = (multiplier + round/2) / round;
            multiplier = min(multiplier, (u32)1023);
        }
    }
    return multiplier;
}
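/* wrb_from_mbox() vs wrb_from_mccq(): commands issued before the MCC queue
 * exists use the bootstrap mailbox under mbox_lock; everything else takes a
 * WRB from the MCC queue under mcc_lock. The typical MCC pattern used
 * throughout this file (a sketch, not a new API):
 *
 *	spin_lock_bh(&adapter->mcc_lock);
 *	wrb = wrb_from_mccq(adapter);
 *	if (!wrb)
 *		goto err;
 *	req = embedded_payload(wrb);
 *	be_wrb_cmd_hdr_prepare(&req->hdr, subsystem, opcode,
 *			sizeof(*req), wrb, NULL);
 *	(fill request fields)
 *	status = be_mcc_notify_wait(adapter);
 * err:
 *	spin_unlock_bh(&adapter->mcc_lock);
 */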
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
    struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
    struct be_mcc_wrb *wrb
        = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
    memset(wrb, 0, sizeof(*wrb));
    return wrb;
}

static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
    struct be_queue_info *mccq = &adapter->mcc_obj.q;
    struct be_mcc_wrb *wrb;

    if (atomic_read(&mccq->used) >= mccq->len) {
        dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
        return NULL;
    }

    wrb = queue_head_node(mccq);
    queue_head_inc(mccq);
    atomic_inc(&mccq->used);
    memset(wrb, 0, sizeof(*wrb));
    return wrb;
}
/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
    u8 *wrb;
    int status;

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = (u8 *)wrb_from_mbox(adapter);
    *wrb++ = 0xFF;
    *wrb++ = 0x12;
    *wrb++ = 0x34;
    *wrb++ = 0xFF;
    *wrb++ = 0xFF;
    *wrb++ = 0x56;
    *wrb++ = 0x78;
    *wrb = 0xFF;

    status = be_mbox_notify_wait(adapter);

    mutex_unlock(&adapter->mbox_lock);
    return status;
}
/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
    u8 *wrb;
    int status;

    if (adapter->eeh_err)
        return -EIO;

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = (u8 *)wrb_from_mbox(adapter);
    *wrb++ = 0xFF;
    *wrb++ = 0xAA;
    *wrb++ = 0xBB;
    *wrb++ = 0xFF;
    *wrb++ = 0xFF;
    *wrb++ = 0xCC;
    *wrb++ = 0xDD;
    *wrb = 0xFF;

    status = be_mbox_notify_wait(adapter);

    mutex_unlock(&adapter->mbox_lock);
    return status;
}
int be_cmd_eq_create(struct be_adapter *adapter,
        struct be_queue_info *eq, int eq_delay)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_eq_create *req;
    struct be_dma_mem *q_mem = &eq->dma_mem;
    int status;

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = wrb_from_mbox(adapter);
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);

    req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

    AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
    /* 4byte eqe*/
    AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
    AMAP_SET_BITS(struct amap_eq_context, count, req->context,
            __ilog2_u32(eq->len/256));
    AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
            eq_delay_to_mult(eq_delay));
    be_dws_cpu_to_le(req->context, sizeof(req->context));

    be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

    status = be_mbox_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
        eq->id = le16_to_cpu(resp->eq_id);
        eq->created = true;
    }

    mutex_unlock(&adapter->mbox_lock);
    return status;
}
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
            u8 type, bool permanent, u32 if_handle)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_mac_query *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);

    req->type = type;
    if (permanent) {
        req->permanent = 1;
    } else {
        req->if_id = cpu_to_le16((u16) if_handle);
        req->permanent = 0;
    }

    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
        memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
    }

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
        u32 if_id, u32 *pmac_id, u32 domain)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_pmac_add *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);

    req->hdr.domain = domain;
    req->if_id = cpu_to_le32(if_id);
    memcpy(req->mac_address, mac_addr, ETH_ALEN);

    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
        *pmac_id = le32_to_cpu(resp->pmac_id);
    }

err:
    spin_unlock_bh(&adapter->mcc_lock);

    if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
        status = -EPERM;

    return status;
}
/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_pmac_del *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);

    req->hdr.domain = dom;
    req->if_id = cpu_to_le32(if_id);
    req->pmac_id = cpu_to_le32(pmac_id);

    status = be_mcc_notify_wait(adapter);

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_cmd_cq_create(struct be_adapter *adapter,
        struct be_queue_info *cq, struct be_queue_info *eq,
        bool sol_evts, bool no_delay, int coalesce_wm)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_cq_create *req;
    struct be_dma_mem *q_mem = &cq->dma_mem;
    void *ctxt;
    int status;

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = wrb_from_mbox(adapter);
    req = embedded_payload(wrb);
    ctxt = &req->context;

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);

    req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
    if (lancer_chip(adapter)) {
        req->hdr.version = 2;
        req->page_size = 1; /* 1 for 4K */
        AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
                            no_delay);
        AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
                    __ilog2_u32(cq->len/256));
        AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
                            ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
                            ctxt, eq->id);
        AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
    } else {
        AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
                            coalesce_wm);
        AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
                            ctxt, no_delay);
        AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
                    __ilog2_u32(cq->len/256));
        AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context_be, solevent,
                            ctxt, sol_evts);
        AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
        AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
    }

    be_dws_cpu_to_le(ctxt, sizeof(req->context));

    be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

    status = be_mbox_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
        cq->id = le16_to_cpu(resp->cq_id);
        cq->created = true;
    }

    mutex_unlock(&adapter->mbox_lock);
    return status;
}
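/* Ring lengths are encoded as log2(len) + 1 via fls(): e.g. a 256-entry
 * ring encodes to 9 and a 1024-entry ring to 11. An encoded value of 16
 * wraps to 0, which the hardware takes to mean the maximum ring size.
 */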
static u32 be_encoded_q_len(int q_len)
{
    u32 len_encoded = fls(q_len); /* log2(len) + 1 */
    if (len_encoded == 16)
        len_encoded = 0;
    return len_encoded;
}
int be_cmd_mccq_ext_create(struct be_adapter *adapter,
            struct be_queue_info *mccq,
            struct be_queue_info *cq)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_mcc_ext_create *req;
    struct be_dma_mem *q_mem = &mccq->dma_mem;
    void *ctxt;
    int status;

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = wrb_from_mbox(adapter);
    req = embedded_payload(wrb);
    ctxt = &req->context;

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
            OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);

    req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
    if (lancer_chip(adapter)) {
        req->hdr.version = 1;
        req->cq_id = cpu_to_le16(cq->id);

        AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
                        be_encoded_q_len(mccq->len));
        AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
                            ctxt, cq->id);
        AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
                            ctxt, 1);
    } else {
        AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
                        be_encoded_q_len(mccq->len));
        AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
    }

    /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
    req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
    be_dws_cpu_to_le(ctxt, sizeof(req->context));

    be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

    status = be_mbox_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
        mccq->id = le16_to_cpu(resp->id);
        mccq->created = true;
    }
    mutex_unlock(&adapter->mbox_lock);

    return status;
}
int be_cmd_mccq_org_create(struct be_adapter *adapter,
            struct be_queue_info *mccq,
            struct be_queue_info *cq)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_mcc_create *req;
    struct be_dma_mem *q_mem = &mccq->dma_mem;
    void *ctxt;
    int status;

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = wrb_from_mbox(adapter);
    req = embedded_payload(wrb);
    ctxt = &req->context;

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
            OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);

    req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

    AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
    AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
            be_encoded_q_len(mccq->len));
    AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

    be_dws_cpu_to_le(ctxt, sizeof(req->context));

    be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

    status = be_mbox_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
        mccq->id = le16_to_cpu(resp->id);
        mccq->created = true;
    }

    mutex_unlock(&adapter->mbox_lock);
    return status;
}
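/* Prefer the extended MCC_CREATE_EXT variant, which also subscribes to
 * async events; fall back to the original MCC_CREATE on older BE firmware
 * that does not support it.
 */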
int be_cmd_mccq_create(struct be_adapter *adapter,
            struct be_queue_info *mccq,
            struct be_queue_info *cq)
{
    int status;

    status = be_cmd_mccq_ext_create(adapter, mccq, cq);
    if (status && !lancer_chip(adapter)) {
        dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
            "or newer to avoid conflicting priorities between NIC "
            "and FCoE traffic");
        status = be_cmd_mccq_org_create(adapter, mccq, cq);
    }
    return status;
}
int be_cmd_txq_create(struct be_adapter *adapter,
            struct be_queue_info *txq,
            struct be_queue_info *cq)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_eth_tx_create *req;
    struct be_dma_mem *q_mem = &txq->dma_mem;
    void *ctxt;
    int status;

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = wrb_from_mbox(adapter);
    req = embedded_payload(wrb);
    ctxt = &req->context;

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
        OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);

    if (lancer_chip(adapter)) {
        req->hdr.version = 1;
        AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
                    adapter->if_handle);
    }

    req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
    req->ulp_num = BE_ULP1_NUM;
    req->type = BE_ETH_TX_RING_TYPE_STANDARD;

    AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
        be_encoded_q_len(txq->len));
    AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
    AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

    be_dws_cpu_to_le(ctxt, sizeof(req->context));

    be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

    status = be_mbox_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
        txq->id = le16_to_cpu(resp->cid);
        txq->created = true;
    }

    mutex_unlock(&adapter->mbox_lock);
    return status;
}
/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
        struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
        u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_eth_rx_create *req;
    struct be_dma_mem *q_mem = &rxq->dma_mem;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
        OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);

    req->cq_id = cpu_to_le16(cq_id);
    req->frag_size = fls(frag_size) - 1;
    req->num_pages = 2;
    be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
    req->interface_id = cpu_to_le32(if_id);
    req->max_frame_size = cpu_to_le16(max_frame_size);
    req->rss_queue = cpu_to_le32(rss);

    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
        rxq->id = le16_to_cpu(resp->id);
        rxq->created = true;
        *rss_id = resp->rss_id;
    }

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
        int queue_type)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_q_destroy *req;
    u8 subsys = 0, opcode = 0;
    int status;

    if (adapter->eeh_err)
        return -EIO;

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = wrb_from_mbox(adapter);
    req = embedded_payload(wrb);

    switch (queue_type) {
    case QTYPE_EQ:
        subsys = CMD_SUBSYSTEM_COMMON;
        opcode = OPCODE_COMMON_EQ_DESTROY;
        break;
    case QTYPE_CQ:
        subsys = CMD_SUBSYSTEM_COMMON;
        opcode = OPCODE_COMMON_CQ_DESTROY;
        break;
    case QTYPE_TXQ:
        subsys = CMD_SUBSYSTEM_ETH;
        opcode = OPCODE_ETH_TX_DESTROY;
        break;
    case QTYPE_RXQ:
        subsys = CMD_SUBSYSTEM_ETH;
        opcode = OPCODE_ETH_RX_DESTROY;
        break;
    case QTYPE_MCCQ:
        subsys = CMD_SUBSYSTEM_COMMON;
        opcode = OPCODE_COMMON_MCC_DESTROY;
        break;
    default:
        BUG();
    }

    be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
                NULL);
    req->id = cpu_to_le16(q->id);

    status = be_mbox_notify_wait(adapter);
    if (!status)
        q->created = false;

    mutex_unlock(&adapter->mbox_lock);
    return status;
}
/* Uses MCC */
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_q_destroy *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
        OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
    req->id = cpu_to_le16(q->id);

    status = be_mcc_notify_wait(adapter);
    if (!status)
        q->created = false;

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Create an rx filtering policy configuration on an i/f
 * Uses MCCQ
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
        u8 *mac, u32 *if_handle, u32 *pmac_id, u32 domain)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_if_create *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
    req->hdr.domain = domain;
    req->capability_flags = cpu_to_le32(cap_flags);
    req->enable_flags = cpu_to_le32(en_flags);
    if (mac)
        memcpy(req->mac_addr, mac, ETH_ALEN);
    else
        req->pmac_invalid = true;

    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
        *if_handle = le32_to_cpu(resp->interface_id);
        if (mac)
            *pmac_id = le32_to_cpu(resp->pmac_id);
    }

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Uses MCCQ */
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_if_destroy *req;
    int status;

    if (adapter->eeh_err)
        return -EIO;

    if (!interface_id)
        return 0;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
    req->hdr.domain = domain;
    req->interface_id = cpu_to_le32(interface_id);

    status = be_mcc_notify_wait(adapter);
err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_hdr *hdr;
    int status = 0;

    if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
        be_cmd_get_die_temperature(adapter);

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    hdr = nonemb_cmd->va;

    be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
        OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);

    if (adapter->generation == BE_GEN3)
        hdr->version = 1;

    be_mcc_notify(adapter);
    adapter->stats_cmd_sent = true;

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
                struct be_dma_mem *nonemb_cmd)
{
    struct be_mcc_wrb *wrb;
    struct lancer_cmd_req_pport_stats *req;
    int status = 0;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = nonemb_cmd->va;

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
            OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
            nonemb_cmd);

    req->cmd_params.params.pport_num = cpu_to_le16(adapter->port_num);
    req->cmd_params.params.reset_stats = 0;

    be_mcc_notify(adapter);
    adapter->stats_cmd_sent = true;

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
            u16 *link_speed, u32 dom)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_link_status *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);

    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
        if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
            *link_speed = le16_to_cpu(resp->link_speed);
            *mac_speed = resp->mac_speed;
        }
    }

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Uses asynchronous mcc */
int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_get_cntl_addnl_attribs *req;
    u16 mccq_index;
    int status = 0;

    spin_lock_bh(&adapter->mcc_lock);

    mccq_index = adapter->mcc_obj.q.head;

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
        wrb, NULL);

    wrb->tag1 = mccq_index;

    be_mcc_notify(adapter);

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Uses synchronous mcc */
int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_get_fat *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
    req->fat_operation = cpu_to_le32(QUERY_FAT);
    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
        if (log_size && resp->log_size)
            *log_size = le32_to_cpu(resp->log_size) -
                    sizeof(u32);
    }
err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
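/* Retrieve the FAT log for a register dump: the log is fetched with
 * repeated MANAGE_FAT/RETRIEVE_FAT commands in chunks of at most 60KB,
 * each DMAed into one non-embedded buffer and copied out at its offset.
 */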
void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
{
    struct be_dma_mem get_fat_cmd;
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_get_fat *req;
    u32 offset = 0, total_size, buf_size,
        log_offset = sizeof(u32), payload_len;
    int status;

    if (buf_len == 0)
        return;

    total_size = buf_len;

    get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
    get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
            get_fat_cmd.size,
            &get_fat_cmd.dma);
    if (!get_fat_cmd.va) {
        status = -ENOMEM;
        dev_err(&adapter->pdev->dev,
            "Memory allocation failure while retrieving FAT data\n");
        return;
    }

    spin_lock_bh(&adapter->mcc_lock);

    while (total_size) {
        buf_size = min(total_size, (u32)60*1024);
        total_size -= buf_size;

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
            status = -EBUSY;
            goto err;
        }
        req = get_fat_cmd.va;

        payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
                &get_fat_cmd);

        req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
        req->read_log_offset = cpu_to_le32(log_offset);
        req->read_log_length = cpu_to_le32(buf_size);
        req->data_buffer_size = cpu_to_le32(buf_size);

        status = be_mcc_notify_wait(adapter);
        if (!status) {
            struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
            memcpy(buf + offset,
                resp->data_buffer,
                le32_to_cpu(resp->read_log_length));
        } else {
            dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
            goto err;
        }
        offset += buf_size;
        log_offset += buf_size;
    }
err:
    pci_free_consistent(adapter->pdev, get_fat_cmd.size,
            get_fat_cmd.va,
            get_fat_cmd.dma);
    spin_unlock_bh(&adapter->mcc_lock);
}
/* Uses synchronous mcc */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
            char *fw_on_flash)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_get_fw_version *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }

    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
        strcpy(fw_ver, resp->firmware_version_string);
        if (fw_on_flash)
            strcpy(fw_on_flash, resp->fw_on_flash_version_string);
    }
err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* set the EQ delay interval of an EQ to specified value
 * Uses asynchronous mcc
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_modify_eq_delay *req;
    int status = 0;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);

    req->num_eq = cpu_to_le32(1);
    req->delay[0].eq_id = cpu_to_le32(eq_id);
    req->delay[0].phase = 0;
    req->delay[0].delay_multiplier = cpu_to_le32(eqd);

    be_mcc_notify(adapter);

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
            u32 num, bool untagged, bool promiscuous)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_vlan_config *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);

    req->interface_id = if_id;
    req->promiscuous = promiscuous;
    req->untagged = untagged;
    req->num_vlan = num;
    if (!promiscuous) {
        memcpy(req->normal_vlan, vtag_array,
            req->num_vlan * sizeof(vtag_array[0]));
    }

    status = be_mcc_notify_wait(adapter);

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
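/* Program the RX filter for the interface: IFF_PROMISC and IFF_ALLMULTI
 * map onto the promiscuous flag bits; otherwise the netdev's multicast
 * list is copied into the command.
 */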
int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
    struct be_mcc_wrb *wrb;
    struct be_dma_mem *mem = &adapter->rx_filter;
    struct be_cmd_req_rx_filter *req = mem->va;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    memset(req, 0, sizeof(*req));
    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
                wrb, mem);

    req->if_id = cpu_to_le32(adapter->if_handle);
    if (flags & IFF_PROMISC) {
        req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
                    BE_IF_FLAGS_VLAN_PROMISCUOUS);
        if (value == ON)
            req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
                        BE_IF_FLAGS_VLAN_PROMISCUOUS);
    } else if (flags & IFF_ALLMULTI) {
        req->if_flags_mask = req->if_flags =
                cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
    } else {
        struct netdev_hw_addr *ha;
        int i = 0;

        req->if_flags_mask = req->if_flags =
                cpu_to_le32(BE_IF_FLAGS_MULTICAST);

        /* Reset mcast promisc mode if already set by setting mask
         * and not setting flags field
         */
        req->if_flags_mask |=
                cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);

        req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
        netdev_for_each_mc_addr(ha, adapter->netdev)
            memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
    }

    status = be_mcc_notify_wait(adapter);
err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_set_flow_control *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);

    req->tx_flow_control = cpu_to_le16((u16)tx_fc);
    req->rx_flow_control = cpu_to_le16((u16)rx_fc);

    status = be_mcc_notify_wait(adapter);

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Uses synchronous mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_get_flow_control *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);

    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_get_flow_control *resp =
                embedded_payload(wrb);
        *tx_fc = le16_to_cpu(resp->tx_flow_control);
        *rx_fc = le16_to_cpu(resp->rx_flow_control);
    }

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Uses mbox */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
        u32 *mode, u32 *caps)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_query_fw_cfg *req;
    int status;

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = wrb_from_mbox(adapter);
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);

    status = be_mbox_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
        *port_num = le32_to_cpu(resp->phys_port);
        *mode = le32_to_cpu(resp->function_mode);
        *caps = le32_to_cpu(resp->function_caps);
    }

    mutex_unlock(&adapter->mbox_lock);
    return status;
}
/* Uses mbox */
int be_cmd_reset_function(struct be_adapter *adapter)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_hdr *req;
    int status;

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = wrb_from_mbox(adapter);
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);

    status = be_mbox_notify_wait(adapter);

    mutex_unlock(&adapter->mbox_lock);
    return status;
}
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_rss_config *req;
    u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
            0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF};
    int status;

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = wrb_from_mbox(adapter);
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
        OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);

    req->if_id = cpu_to_le32(adapter->if_handle);
    req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
    req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
    memcpy(req->cpu_table, rsstable, table_size);
    memcpy(req->hash, myhash, sizeof(myhash));
    be_dws_cpu_to_le(req->hash, sizeof(req->hash));

    status = be_mbox_notify_wait(adapter);

    mutex_unlock(&adapter->mbox_lock);
    return status;
}
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
            u8 bcn, u8 sts, u8 state)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_enable_disable_beacon *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);

    req->port_num = port_num;
    req->beacon_state = state;
    req->beacon_duration = bcn;
    req->status_duration = sts;

    status = be_mcc_notify_wait(adapter);

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_get_beacon_state *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);

    req->port_num = port_num;

    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_get_beacon_state *resp =
                embedded_payload(wrb);
        *state = resp->beacon_state;
    }

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
            u32 data_size, u32 data_offset, const char *obj_name,
            u32 *data_written, u8 *addn_status)
{
    struct be_mcc_wrb *wrb;
    struct lancer_cmd_req_write_object *req;
    struct lancer_cmd_resp_write_object *resp;
    void *ctxt = NULL;
    int status;

    spin_lock_bh(&adapter->mcc_lock);
    adapter->flash_status = 0;

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err_unlock;
    }

    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
            OPCODE_COMMON_WRITE_OBJECT,
            sizeof(struct lancer_cmd_req_write_object), wrb,
            NULL);

    ctxt = &req->context;
    AMAP_SET_BITS(struct amap_lancer_write_obj_context,
            write_length, ctxt, data_size);

    if (data_size == 0)
        AMAP_SET_BITS(struct amap_lancer_write_obj_context,
                eof, ctxt, 1);
    else
        AMAP_SET_BITS(struct amap_lancer_write_obj_context,
                eof, ctxt, 0);

    be_dws_cpu_to_le(ctxt, sizeof(req->context));
    req->write_offset = cpu_to_le32(data_offset);
    strcpy(req->object_name, obj_name);
    req->descriptor_count = cpu_to_le32(1);
    req->buf_len = cpu_to_le32(data_size);
    req->addr_low = cpu_to_le32((cmd->dma +
                sizeof(struct lancer_cmd_req_write_object))
                & 0xFFFFFFFF);
    req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
                sizeof(struct lancer_cmd_req_write_object)));

    be_mcc_notify(adapter);
    spin_unlock_bh(&adapter->mcc_lock);

    if (!wait_for_completion_timeout(&adapter->flash_compl,
            msecs_to_jiffies(12000)))
        status = -1;
    else
        status = adapter->flash_status;

    resp = embedded_payload(wrb);
    if (!status) {
        *data_written = le32_to_cpu(resp->actual_write_len);
    } else {
        *addn_status = resp->additional_status;
        status = resp->status;
    }

    return status;

err_unlock:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
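/* Firmware flashing is asynchronous: the command is posted with
 * be_mcc_notify() and completion is signalled through
 * adapter->flash_compl from be_mcc_compl_process().
 */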
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
            u32 flash_type, u32 flash_opcode, u32 buf_size)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_write_flashrom *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);
    adapter->flash_status = 0;

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err_unlock;
    }
    req = cmd->va;

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);

    req->params.op_type = cpu_to_le32(flash_type);
    req->params.op_code = cpu_to_le32(flash_opcode);
    req->params.data_buf_size = cpu_to_le32(buf_size);

    be_mcc_notify(adapter);
    spin_unlock_bh(&adapter->mcc_lock);

    if (!wait_for_completion_timeout(&adapter->flash_compl,
            msecs_to_jiffies(40000)))
        status = -1;
    else
        status = adapter->flash_status;

    return status;

err_unlock:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
             int offset)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_write_flashrom *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL);

    req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
    req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
    req->params.offset = cpu_to_le32(offset);
    req->params.data_buf_size = cpu_to_le32(0x4);

    status = be_mcc_notify_wait(adapter);
    if (!status)
        memcpy(flashed_crc, req->params.data_buf, 4);

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
            struct be_dma_mem *nonemb_cmd)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_acpi_wol_magic_config *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = nonemb_cmd->va;

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
        OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
        nonemb_cmd);
    memcpy(req->magic_mac, mac, ETH_ALEN);

    status = be_mcc_notify_wait(adapter);

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
            u8 loopback_type, u8 enable)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_set_lmode *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }

    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
        OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
        NULL);

    req->src_port = port_num;
    req->dest_port = port_num;
    req->loopback_type = loopback_type;
    req->loopback_state = enable;

    status = be_mcc_notify_wait(adapter);
err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
        u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_loopback_test *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }

    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
        OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
    req->hdr.timeout = cpu_to_le32(4);

    req->pattern = cpu_to_le64(pattern);
    req->src_port = cpu_to_le32(port_num);
    req->dest_port = cpu_to_le32(port_num);
    req->pkt_size = cpu_to_le32(pkt_size);
    req->num_pkts = cpu_to_le32(num_pkts);
    req->loopback_type = cpu_to_le32(loopback_type);

    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
        status = le32_to_cpu(resp->status);
    }

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
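/* DDR DMA self-test: DMA a pattern to the card and compare what comes
 * back. The pattern is replicated byte-wise across the send buffer.
 */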
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
                u32 byte_cnt, struct be_dma_mem *cmd)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_ddrdma_test *req;
    int status;
    int i, j = 0;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = cmd->va;
    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
        OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);

    req->pattern = cpu_to_le64(pattern);
    req->byte_count = cpu_to_le32(byte_cnt);
    for (i = 0; i < byte_cnt; i++) {
        req->snd_buff[i] = (u8)(pattern >> (j*8));
        j++;
        if (j > 7)
            j = 0;
    }

    status = be_mcc_notify_wait(adapter);

    if (!status) {
        struct be_cmd_resp_ddrdma_test *resp;
        resp = cmd->va;
        if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
                resp->snd_err) {
            status = -1;
        }
    }

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_cmd_get_seeprom_data(struct be_adapter *adapter,
                struct be_dma_mem *nonemb_cmd)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_seeprom_read *req;
    struct be_sge *sge;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = nonemb_cmd->va;
    sge = nonembedded_sgl(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
        nonemb_cmd);

    status = be_mcc_notify_wait(adapter);

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_cmd_get_phy_info(struct be_adapter *adapter,
                struct be_phy_info *phy_info)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_get_phy_info *req;
    struct be_dma_mem cmd;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    cmd.size = sizeof(struct be_cmd_req_get_phy_info);
    cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
                    &cmd.dma);
    if (!cmd.va) {
        dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
        status = -ENOMEM;
        goto err;
    }

    req = cmd.va;

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
            OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
            wrb, &cmd);

    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_phy_info *resp_phy_info =
                cmd.va + sizeof(struct be_cmd_req_hdr);
        phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type);
        phy_info->interface_type =
                le16_to_cpu(resp_phy_info->interface_type);
    }
    pci_free_consistent(adapter->pdev, cmd.size,
                cmd.va, cmd.dma);
err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_set_qos *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }

    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);

    req->hdr.domain = domain;
    req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
    req->max_bps_nic = cpu_to_le32(bps);

    status = be_mcc_notify_wait(adapter);

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_cntl_attribs *req;
    struct be_cmd_resp_cntl_attribs *resp;
    int status;
    int payload_len = max(sizeof(*req), sizeof(*resp));
    struct mgmt_controller_attrib *attribs;
    struct be_dma_mem attribs_cmd;

    memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
    attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
    attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
                        &attribs_cmd.dma);
    if (!attribs_cmd.va) {
        dev_err(&adapter->pdev->dev,
                "Memory allocation failure\n");
        return -ENOMEM;
    }

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = wrb_from_mbox(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = attribs_cmd.va;

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
            OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
            &attribs_cmd);

    status = be_mbox_notify_wait(adapter);
    if (!status) {
        attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
        adapter->hba_port_num = attribs->hba_attribs.phy_port;
    }

err:
    mutex_unlock(&adapter->mbox_lock);
    pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
                    attribs_cmd.dma);
    return status;
}
/* Uses mbox */
int be_cmd_req_native_mode(struct be_adapter *adapter)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_set_func_cap *req;
    int status;

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = wrb_from_mbox(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }

    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);

    req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
                CAPABILITY_BE3_NATIVE_ERX_API);
    req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);

    status = be_mbox_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
        adapter->be3_native = le32_to_cpu(resp->cap_flags) &
                    CAPABILITY_BE3_NATIVE_ERX_API;
    }
err:
    mutex_unlock(&adapter->mbox_lock);
    return status;
}