Merge branch 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt...
[pandora-kernel.git] / drivers / net / benet / be_cmds.c
1 /*
2  * Copyright (C) 2005 - 2010 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@serverengines.com
12  *
13  * ServerEngines
14  * 209 N. Fair Oaks Ave
15  * Sunnyvale, CA 94085
16  */
17
18 #include "be.h"
19 #include "be_cmds.h"
20
21 static void be_mcc_notify(struct be_adapter *adapter)
22 {
23         struct be_queue_info *mccq = &adapter->mcc_obj.q;
24         u32 val = 0;
25
26         val |= mccq->id & DB_MCCQ_RING_ID_MASK;
27         val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
28         iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
29 }
30
/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		/* Side effect: flags is converted to host endian here, so
		 * callers (e.g. be_process_mcc()) can test the CQE_FLAGS_*
		 * masks on it directly */
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}
44
45 /* Need to reset the entire word that houses the valid bit */
46 static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
47 {
48         compl->flags = 0;
49 }
50
/* Process one MCC completion posted by the fw.
 * Returns the completion status (host endian) extracted from the CQE. */
static int be_mcc_compl_process(struct be_adapter *adapter,
	struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	/* A flashrom write is waited on via flash_compl; wake the waiter
	 * whatever the cmd status was */
	if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
		(compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
			/* Stats response lives in its own dma buffer
			 * (see be_cmd_get_stats()); swap it in place and
			 * refresh the netdev counters */
			struct be_cmd_resp_get_stats *resp =
						adapter->stats.cmd.va;
			be_dws_le_to_cpu(&resp->hw_stats,
						sizeof(resp->hw_stats));
			netdev_stats_update(adapter);
		}
	} else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
		dev_warn(&adapter->pdev->dev,
		"Error in cmd completion - opcode %d, compl %d, extd %d\n",
			compl->tag0, compl_status, extd_status);
	}
	return compl_status;
}
86
87 /* Link state evt is a string of bytes; no need for endian swapping */
88 static void be_async_link_state_process(struct be_adapter *adapter,
89                 struct be_async_event_link_state *evt)
90 {
91         be_link_status_update(adapter,
92                 evt->port_link_status == ASYNC_EVENT_LINK_UP);
93 }
94
95 static inline bool is_link_state_evt(u32 trailer)
96 {
97         return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
98                 ASYNC_TRAILER_EVENT_CODE_MASK) ==
99                                 ASYNC_EVENT_CODE_LINK_STATE);
100 }
101
102 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
103 {
104         struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
105         struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
106
107         if (be_mcc_compl_is_new(compl)) {
108                 queue_tail_inc(mcc_cq);
109                 return compl;
110         }
111         return NULL;
112 }
113
114 void be_async_mcc_enable(struct be_adapter *adapter)
115 {
116         spin_lock_bh(&adapter->mcc_cq_lock);
117
118         be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
119         adapter->mcc_obj.rearm_cq = true;
120
121         spin_unlock_bh(&adapter->mcc_cq_lock);
122 }
123
124 void be_async_mcc_disable(struct be_adapter *adapter)
125 {
126         adapter->mcc_obj.rearm_cq = false;
127 }
128
129 int be_process_mcc(struct be_adapter *adapter, int *status)
130 {
131         struct be_mcc_compl *compl;
132         int num = 0;
133         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
134
135         spin_lock_bh(&adapter->mcc_cq_lock);
136         while ((compl = be_mcc_compl_get(adapter))) {
137                 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
138                         /* Interpret flags as an async trailer */
139                         BUG_ON(!is_link_state_evt(compl->flags));
140
141                         /* Interpret compl as a async link evt */
142                         be_async_link_state_process(adapter,
143                                 (struct be_async_event_link_state *) compl);
144                 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
145                                 *status = be_mcc_compl_process(adapter, compl);
146                                 atomic_dec(&mcc_obj->q.used);
147                 }
148                 be_mcc_compl_use(compl);
149                 num++;
150         }
151
152         spin_unlock_bh(&adapter->mcc_cq_lock);
153         return num;
154 }
155
156 /* Wait till no more pending mcc requests are present */
157 static int be_mcc_wait_compl(struct be_adapter *adapter)
158 {
159 #define mcc_timeout             120000 /* 12s timeout */
160         int i, num, status = 0;
161         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
162
163         for (i = 0; i < mcc_timeout; i++) {
164                 num = be_process_mcc(adapter, &status);
165                 if (num)
166                         be_cq_notify(adapter, mcc_obj->cq.id,
167                                 mcc_obj->rearm_cq, num);
168
169                 if (atomic_read(&mcc_obj->q.used) == 0)
170                         break;
171                 udelay(100);
172         }
173         if (i == mcc_timeout) {
174                 dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
175                 return -1;
176         }
177         return status;
178 }
179
/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	/* ring the doorbell, then poll the CQ until the queue drains */
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}
186
187 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
188 {
189         int cnt = 0, wait = 5;
190         u32 ready;
191
192         do {
193                 ready = ioread32(db);
194                 if (ready == 0xffffffff) {
195                         dev_err(&adapter->pdev->dev,
196                                 "pci slot disconnected\n");
197                         return -1;
198                 }
199
200                 ready &= MPU_MAILBOX_DB_RDY_MASK;
201                 if (ready)
202                         break;
203
204                 if (cnt > 4000000) {
205                         dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
206                         return -1;
207                 }
208
209                 if (cnt > 50)
210                         wait = 200;
211                 cnt += wait;
212                 udelay(wait);
213         } while (true);
214
215         return 0;
216 }
217
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* step 1: write the high half of the mbox dma address */
	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* step 2: write the low half; this fires the cmd */
	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		/* hand the completion entry back to hw */
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
267
268 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
269 {
270         u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
271
272         *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
273         if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
274                 return -1;
275         else
276                 return 0;
277 }
278
279 int be_cmd_POST(struct be_adapter *adapter)
280 {
281         u16 stage;
282         int status, timeout = 0;
283
284         do {
285                 status = be_POST_stage_get(adapter, &stage);
286                 if (status) {
287                         dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
288                                 stage);
289                         return -1;
290                 } else if (stage != POST_STAGE_ARMFW_RDY) {
291                         set_current_state(TASK_INTERRUPTIBLE);
292                         schedule_timeout(2 * HZ);
293                         timeout += 2;
294                 } else {
295                         return 0;
296                 }
297         } while (timeout < 40);
298
299         dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
300         return -1;
301 }
302
303 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
304 {
305         return wrb->payload.embedded_payload;
306 }
307
308 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
309 {
310         return &wrb->payload.sgl[0];
311 }
312
/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt, u32 opcode)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	/* tag0 is echoed back in the completion and identifies the cmd
	 * (see be_mcc_compl_process()) */
	wrb->tag0 = opcode;
	/* swap the first 8 dwords (the hdr) to little endian; the hdr must
	 * not be modified after this point */
	be_dws_cpu_to_le(wrb, 8);
}
326
327 /* Don't touch the hdr after it's prepared */
328 static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
329                                 u8 subsystem, u8 opcode, int cmd_len)
330 {
331         req_hdr->opcode = opcode;
332         req_hdr->subsystem = subsystem;
333         req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
334         req_hdr->version = 0;
335 }
336
337 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
338                         struct be_dma_mem *mem)
339 {
340         int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
341         u64 dma = (u64)mem->dma;
342
343         for (i = 0; i < buf_pages; i++) {
344                 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
345                 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
346                 dma += PAGE_SIZE_4K;
347         }
348 }
349
350 /* Converts interrupt delay in microseconds to multiplier value */
351 static u32 eq_delay_to_mult(u32 usec_delay)
352 {
353 #define MAX_INTR_RATE                   651042
354         const u32 round = 10;
355         u32 multiplier;
356
357         if (usec_delay == 0)
358                 multiplier = 0;
359         else {
360                 u32 interrupt_rate = 1000000 / usec_delay;
361                 /* Max delay, corresponding to the lowest interrupt rate */
362                 if (interrupt_rate == 0)
363                         multiplier = 1023;
364                 else {
365                         multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
366                         multiplier /= interrupt_rate;
367                         /* Round the multiplier to the closest value.*/
368                         multiplier = (multiplier + round/2) / round;
369                         multiplier = min(multiplier, (u32)1023);
370                 }
371         }
372         return multiplier;
373 }
374
375 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
376 {
377         struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
378         struct be_mcc_wrb *wrb
379                 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
380         memset(wrb, 0, sizeof(*wrb));
381         return wrb;
382 }
383
/* Allocate (and zero) the next free wrb on the MCC queue; returns NULL and
 * logs when the queue is full. Callers in this file hold mcc_lock around
 * this and the subsequent notify. */
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}
400
401 /* Tell fw we're about to start firing cmds by writing a
402  * special pattern across the wrb hdr; uses mbox
403  */
404 int be_cmd_fw_init(struct be_adapter *adapter)
405 {
406         u8 *wrb;
407         int status;
408
409         spin_lock(&adapter->mbox_lock);
410
411         wrb = (u8 *)wrb_from_mbox(adapter);
412         *wrb++ = 0xFF;
413         *wrb++ = 0x12;
414         *wrb++ = 0x34;
415         *wrb++ = 0xFF;
416         *wrb++ = 0xFF;
417         *wrb++ = 0x56;
418         *wrb++ = 0x78;
419         *wrb = 0xFF;
420
421         status = be_mbox_notify_wait(adapter);
422
423         spin_unlock(&adapter->mbox_lock);
424         return status;
425 }
426
427 /* Tell fw we're done with firing cmds by writing a
428  * special pattern across the wrb hdr; uses mbox
429  */
430 int be_cmd_fw_clean(struct be_adapter *adapter)
431 {
432         u8 *wrb;
433         int status;
434
435         if (adapter->eeh_err)
436                 return -EIO;
437
438         spin_lock(&adapter->mbox_lock);
439
440         wrb = (u8 *)wrb_from_mbox(adapter);
441         *wrb++ = 0xFF;
442         *wrb++ = 0xAA;
443         *wrb++ = 0xBB;
444         *wrb++ = 0xFF;
445         *wrb++ = 0xFF;
446         *wrb++ = 0xCC;
447         *wrb++ = 0xDD;
448         *wrb = 0xFF;
449
450         status = be_mbox_notify_wait(adapter);
451
452         spin_unlock(&adapter->mbox_lock);
453         return status;
454 }
/* Create an event queue; on success fills in eq->id and marks it created.
 * Uses mbox (init-time path). */
int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	/* fw takes the ring size encoded as log2(len/256) */
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	/* context must be little endian before the cmd is fired */
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}
496
497 /* Uses mbox */
498 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
499                         u8 type, bool permanent, u32 if_handle)
500 {
501         struct be_mcc_wrb *wrb;
502         struct be_cmd_req_mac_query *req;
503         int status;
504
505         spin_lock(&adapter->mbox_lock);
506
507         wrb = wrb_from_mbox(adapter);
508         req = embedded_payload(wrb);
509
510         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
511                         OPCODE_COMMON_NTWK_MAC_QUERY);
512
513         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
514                 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));
515
516         req->type = type;
517         if (permanent) {
518                 req->permanent = 1;
519         } else {
520                 req->if_id = cpu_to_le16((u16) if_handle);
521                 req->permanent = 0;
522         }
523
524         status = be_mbox_notify_wait(adapter);
525         if (!status) {
526                 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
527                 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
528         }
529
530         spin_unlock(&adapter->mbox_lock);
531         return status;
532 }
533
534 /* Uses synchronous MCCQ */
535 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
536                 u32 if_id, u32 *pmac_id)
537 {
538         struct be_mcc_wrb *wrb;
539         struct be_cmd_req_pmac_add *req;
540         int status;
541
542         spin_lock_bh(&adapter->mcc_lock);
543
544         wrb = wrb_from_mccq(adapter);
545         if (!wrb) {
546                 status = -EBUSY;
547                 goto err;
548         }
549         req = embedded_payload(wrb);
550
551         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
552                         OPCODE_COMMON_NTWK_PMAC_ADD);
553
554         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
555                 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
556
557         req->if_id = cpu_to_le32(if_id);
558         memcpy(req->mac_address, mac_addr, ETH_ALEN);
559
560         status = be_mcc_notify_wait(adapter);
561         if (!status) {
562                 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
563                 *pmac_id = le32_to_cpu(resp->pmac_id);
564         }
565
566 err:
567         spin_unlock_bh(&adapter->mcc_lock);
568         return status;
569 }
570
571 /* Uses synchronous MCCQ */
572 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
573 {
574         struct be_mcc_wrb *wrb;
575         struct be_cmd_req_pmac_del *req;
576         int status;
577
578         spin_lock_bh(&adapter->mcc_lock);
579
580         wrb = wrb_from_mccq(adapter);
581         if (!wrb) {
582                 status = -EBUSY;
583                 goto err;
584         }
585         req = embedded_payload(wrb);
586
587         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
588                         OPCODE_COMMON_NTWK_PMAC_DEL);
589
590         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
591                 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
592
593         req->if_id = cpu_to_le32(if_id);
594         req->pmac_id = cpu_to_le32(pmac_id);
595
596         status = be_mcc_notify_wait(adapter);
597
598 err:
599         spin_unlock_bh(&adapter->mcc_lock);
600         return status;
601 }
602
/* Create a completion queue bound to the given event queue; on success
 * fills in cq->id and marks it created. Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_CQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	/* fw takes the ring size encoded as log2(len/256) */
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			__ilog2_u32(cq->len/256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
	/* context must be little endian before the cmd is fired */
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);

	return status;
}
652
653 static u32 be_encoded_q_len(int q_len)
654 {
655         u32 len_encoded = fls(q_len); /* log2(len) + 1 */
656         if (len_encoded == 16)
657                 len_encoded = 0;
658         return len_encoded;
659 }
660
/* Create the MCC queue bound to the given completion queue; on success
 * fills in mccq->id and marks it created. Uses mbox. */
int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MCC_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	/* context must be little endian before the cmd is fired */
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	spin_unlock(&adapter->mbox_lock);

	return status;
}
704
/* Create an ethernet tx queue bound to the given completion queue; on
 * success fills in txq->id and marks it created. Uses mbox. */
int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_TX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	/* context must be little endian before the cmd is fired */
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);

	return status;
}
751
/* Create an ethernet rx queue; on success fills in rxq->id and marks it
 * created. Uses mbox */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_RX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	/* frag_size is passed to fw as its log2 */
	req->frag_size = fls(frag_size) - 1;
	/* NOTE(review): num_pages is hard-coded to 2 rather than computed
	 * from q_mem - presumably rx rings always span 2 pages; confirm */
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);

	return status;
}
792
/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	/* don't touch the hw after an EEH error is detected */
	if (adapter->eeh_err)
		return -EIO;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	/* map the queue type to its destroy opcode/subsystem */
	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		/* unknown queue type is a driver bug */
		BUG();
	}

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);

	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);

	return status;
}
848
/* Create an rx filtering policy configuration on an i/f
 * Uses mbox
 * On success *if_handle is filled in; *pmac_id too unless pmac_invalid.
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
		u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = pmac_invalid;
	/* mac is only programmed when a valid pmac is requested */
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (!pmac_invalid)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}
889
890 /* Uses mbox */
891 int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
892 {
893         struct be_mcc_wrb *wrb;
894         struct be_cmd_req_if_destroy *req;
895         int status;
896
897         if (adapter->eeh_err)
898                 return -EIO;
899
900         spin_lock(&adapter->mbox_lock);
901
902         wrb = wrb_from_mbox(adapter);
903         req = embedded_payload(wrb);
904
905         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
906                         OPCODE_COMMON_NTWK_INTERFACE_DESTROY);
907
908         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
909                 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
910
911         req->interface_id = cpu_to_le32(interface_id);
912
913         status = be_mbox_notify_wait(adapter);
914
915         spin_unlock(&adapter->mbox_lock);
916
917         return status;
918 }
919
/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_stats *req;
        struct be_sge *sge;
        int status = 0;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                /* MCC queue exhausted; caller may retry later */
                status = -EBUSY;
                goto err;
        }
        /* Request/response live in the caller's DMA buffer, not the wrb */
        req = nonemb_cmd->va;
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
                        OPCODE_ETH_GET_STATISTICS);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_GET_STATISTICS, sizeof(*req));
        /* Point the wrb's scatter-gather entry at the DMA buffer */
        sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
        sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(nonemb_cmd->size);

        /* Fire and forget: the completion is processed asynchronously */
        be_mcc_notify(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
956
/* Uses synchronous mcc
 *
 * Query link state and speed.  On success *link_up and *mac_speed are
 * filled in; *link_speed is only written when the link is up.
 */
int be_cmd_link_status_query(struct be_adapter *adapter,
                        bool *link_up, u8 *mac_speed, u16 *link_speed)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_link_status *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        /* Assume link down until the response proves otherwise */
        *link_up = false;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
                /* A mac_speed of PHY_LINK_SPEED_ZERO means no link */
                if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
                        *link_up = true;
                        *link_speed = le16_to_cpu(resp->link_speed);
                        *mac_speed = resp->mac_speed;
                }
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
996
997 /* Uses Mbox */
998 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
999 {
1000         struct be_mcc_wrb *wrb;
1001         struct be_cmd_req_get_fw_version *req;
1002         int status;
1003
1004         spin_lock(&adapter->mbox_lock);
1005
1006         wrb = wrb_from_mbox(adapter);
1007         req = embedded_payload(wrb);
1008
1009         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1010                         OPCODE_COMMON_GET_FW_VERSION);
1011
1012         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1013                 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
1014
1015         status = be_mbox_notify_wait(adapter);
1016         if (!status) {
1017                 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1018                 strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
1019         }
1020
1021         spin_unlock(&adapter->mbox_lock);
1022         return status;
1023 }
1024
1025 /* set the EQ delay interval of an EQ to specified value
1026  * Uses async mcc
1027  */
1028 int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
1029 {
1030         struct be_mcc_wrb *wrb;
1031         struct be_cmd_req_modify_eq_delay *req;
1032         int status = 0;
1033
1034         spin_lock_bh(&adapter->mcc_lock);
1035
1036         wrb = wrb_from_mccq(adapter);
1037         if (!wrb) {
1038                 status = -EBUSY;
1039                 goto err;
1040         }
1041         req = embedded_payload(wrb);
1042
1043         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1044                         OPCODE_COMMON_MODIFY_EQ_DELAY);
1045
1046         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1047                 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
1048
1049         req->num_eq = cpu_to_le32(1);
1050         req->delay[0].eq_id = cpu_to_le32(eq_id);
1051         req->delay[0].phase = 0;
1052         req->delay[0].delay_multiplier = cpu_to_le32(eqd);
1053
1054         be_mcc_notify(adapter);
1055
1056 err:
1057         spin_unlock_bh(&adapter->mcc_lock);
1058         return status;
1059 }
1060
/* Uses synchronous mcc
 *
 * Program the vlan filter of an interface.  When promiscuous is true the
 * vtag_array contents are ignored; otherwise num entries are copied into
 * the request.
 */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
                        u32 num, bool untagged, bool promiscuous)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_vlan_config *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_VLAN_CONFIG);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

        req->interface_id = if_id;
        req->promiscuous = promiscuous;
        req->untagged = untagged;
        req->num_vlan = num;
        /* The vlan tag table is only meaningful when not vlan-promiscuous */
        if (!promiscuous) {
                memcpy(req->normal_vlan, vtag_array,
                        req->num_vlan * sizeof(vtag_array[0]));
        }

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1099
1100 /* Uses MCC for this command as it may be called in BH context
1101  * Uses synchronous mcc
1102  */
1103 int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
1104 {
1105         struct be_mcc_wrb *wrb;
1106         struct be_cmd_req_promiscuous_config *req;
1107         int status;
1108
1109         spin_lock_bh(&adapter->mcc_lock);
1110
1111         wrb = wrb_from_mccq(adapter);
1112         if (!wrb) {
1113                 status = -EBUSY;
1114                 goto err;
1115         }
1116         req = embedded_payload(wrb);
1117
1118         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_PROMISCUOUS);
1119
1120         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1121                 OPCODE_ETH_PROMISCUOUS, sizeof(*req));
1122
1123         /* In FW versions X.102.149/X.101.487 and later,
1124          * the port setting associated only with the
1125          * issuing pci function will take effect
1126          */
1127         if (port_num)
1128                 req->port1_promiscuous = en;
1129         else
1130                 req->port0_promiscuous = en;
1131
1132         status = be_mcc_notify_wait(adapter);
1133
1134 err:
1135         spin_unlock_bh(&adapter->mcc_lock);
1136         return status;
1137 }
1138
1139 /*
1140  * Uses MCC for this command as it may be called in BH context
1141  * (mc == NULL) => multicast promiscous
1142  */
1143 int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
1144                 struct net_device *netdev, struct be_dma_mem *mem)
1145 {
1146         struct be_mcc_wrb *wrb;
1147         struct be_cmd_req_mcast_mac_config *req = mem->va;
1148         struct be_sge *sge;
1149         int status;
1150
1151         spin_lock_bh(&adapter->mcc_lock);
1152
1153         wrb = wrb_from_mccq(adapter);
1154         if (!wrb) {
1155                 status = -EBUSY;
1156                 goto err;
1157         }
1158         sge = nonembedded_sgl(wrb);
1159         memset(req, 0, sizeof(*req));
1160
1161         be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1162                         OPCODE_COMMON_NTWK_MULTICAST_SET);
1163         sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
1164         sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
1165         sge->len = cpu_to_le32(mem->size);
1166
1167         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1168                 OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
1169
1170         req->interface_id = if_id;
1171         if (netdev) {
1172                 int i;
1173                 struct netdev_hw_addr *ha;
1174
1175                 req->num_mac = cpu_to_le16(netdev_mc_count(netdev));
1176
1177                 i = 0;
1178                 netdev_for_each_mc_addr(ha, netdev)
1179                         memcpy(req->mac[i].byte, ha->addr, ETH_ALEN);
1180         } else {
1181                 req->promiscuous = 1;
1182         }
1183
1184         status = be_mcc_notify_wait(adapter);
1185
1186 err:
1187         spin_unlock_bh(&adapter->mcc_lock);
1188         return status;
1189 }
1190
1191 /* Uses synchrounous mcc */
1192 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1193 {
1194         struct be_mcc_wrb *wrb;
1195         struct be_cmd_req_set_flow_control *req;
1196         int status;
1197
1198         spin_lock_bh(&adapter->mcc_lock);
1199
1200         wrb = wrb_from_mccq(adapter);
1201         if (!wrb) {
1202                 status = -EBUSY;
1203                 goto err;
1204         }
1205         req = embedded_payload(wrb);
1206
1207         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1208                         OPCODE_COMMON_SET_FLOW_CONTROL);
1209
1210         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1211                 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
1212
1213         req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1214         req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1215
1216         status = be_mcc_notify_wait(adapter);
1217
1218 err:
1219         spin_unlock_bh(&adapter->mcc_lock);
1220         return status;
1221 }
1222
/* Uses sync mcc
 *
 * Query the current tx/rx flow control settings; *tx_fc and *rx_fc are
 * written only on success.
 */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_flow_control *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_GET_FLOW_CONTROL);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_flow_control *resp =
                                                embedded_payload(wrb);
                *tx_fc = le16_to_cpu(resp->tx_flow_control);
                *rx_fc = le16_to_cpu(resp->rx_flow_control);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1257
/* Uses mbox
 *
 * Query firmware configuration: on success *port_num receives the
 * physical port number and *cap the function capability flags.
 */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_query_fw_cfg *req;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
                *port_num = le32_to_cpu(resp->phys_port);
                *cap = le32_to_cpu(resp->function_cap);
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}
1286
1287 /* Uses mbox */
1288 int be_cmd_reset_function(struct be_adapter *adapter)
1289 {
1290         struct be_mcc_wrb *wrb;
1291         struct be_cmd_req_hdr *req;
1292         int status;
1293
1294         spin_lock(&adapter->mbox_lock);
1295
1296         wrb = wrb_from_mbox(adapter);
1297         req = embedded_payload(wrb);
1298
1299         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1300                         OPCODE_COMMON_FUNCTION_RESET);
1301
1302         be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1303                 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
1304
1305         status = be_mbox_notify_wait(adapter);
1306
1307         spin_unlock(&adapter->mbox_lock);
1308         return status;
1309 }
1310
/* Uses sync mcc
 *
 * Program the beacon LED of a port.
 * @bcn:   beacon duration value
 * @sts:   status duration value
 * @state: beacon state to set
 */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
                        u8 bcn, u8 sts, u8 state)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_enable_disable_beacon *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_ENABLE_DISABLE_BEACON);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));

        req->port_num = port_num;
        req->beacon_state = state;
        req->beacon_duration = bcn;
        req->status_duration = sts;

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1345
/* Uses sync mcc
 *
 * Query the beacon LED state of a port; *state is written only on
 * success.
 */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_beacon_state *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_GET_BEACON_STATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));

        req->port_num = port_num;

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_beacon_state *resp =
                                                embedded_payload(wrb);
                *state = resp->beacon_state;
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1381
/* Uses sync mcc
 *
 * Read the transceiver (page A0) data of a port and extract the
 * connector type into *connector (written only on success).
 */
int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
                                u8 *connector)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_port_type *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        /* Note: the wrb payload is sized for the response struct rather
         * than the request, so the fw has room to write the data back.
         */
        be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
                        OPCODE_COMMON_READ_TRANSRECV_DATA);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));

        req->port = cpu_to_le32(port);
        req->page_num = cpu_to_le32(TR_PAGE_A0);
        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
                        *connector = resp->data.connector;
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1417
/* Write a flash region from the caller's DMA buffer (cmd).
 * The request is non-embedded.  After posting it on the MCC queue the
 * mcc_lock is dropped and the caller sleeps on adapter->flash_compl
 * (up to 12s) for the operation to finish; the result is read from
 * adapter->flash_status.
 */
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
                        u32 flash_type, u32 flash_opcode, u32 buf_size)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_write_flashrom *req;
        struct be_sge *sge;
        int status;

        spin_lock_bh(&adapter->mcc_lock);
        adapter->flash_status = 0;

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err_unlock;
        }
        req = cmd->va;
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
                        OPCODE_COMMON_WRITE_FLASHROM);
        /* NOTE(review): tag1 appears to mark this wrb for special
         * handling in the completion path - confirm against the mcc
         * completion processing code.
         */
        wrb->tag1 = CMD_SUBSYSTEM_COMMON;

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
        sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
        sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(cmd->size);

        req->params.op_type = cpu_to_le32(flash_type);
        req->params.op_code = cpu_to_le32(flash_opcode);
        req->params.data_buf_size = cpu_to_le32(buf_size);

        be_mcc_notify(adapter);
        spin_unlock_bh(&adapter->mcc_lock);

        /* Flash operations are slow; allow up to 12 seconds */
        if (!wait_for_completion_timeout(&adapter->flash_compl,
                        msecs_to_jiffies(12000)))
                status = -1;
        else
                status = adapter->flash_status;

        return status;

err_unlock:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1466
/* Read 4 bytes (the stored CRC) from the redboot image at 'offset'.
 * Uses sync mcc; on success the bytes are copied into flashed_crc.
 */
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
                         int offset)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_write_flashrom *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        /* +4 reserves room for the 4 data bytes the fw writes back
         * into req->params.data_buf
         */
        be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
                        OPCODE_COMMON_READ_FLASHROM);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);

        req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
        req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
        req->params.offset = cpu_to_le32(offset);
        req->params.data_buf_size = cpu_to_le32(0x4);

        status = be_mcc_notify_wait(adapter);
        if (!status)
                memcpy(flashed_crc, req->params.data_buf, 4);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1502
/* Program the MAC address on which magic-packet wake-on-lan is armed.
 * Non-embedded command: the request lives in nonemb_cmd's DMA buffer.
 * Uses synchronous mcc.
 */
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
                                struct be_dma_mem *nonemb_cmd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_acpi_wol_magic_config *req;
        struct be_sge *sge;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = nonemb_cmd->va;
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
                        OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
        memcpy(req->magic_mac, mac, ETH_ALEN);

        /* Point the wrb's scatter-gather entry at the DMA buffer */
        sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
        sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(nonemb_cmd->size);

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1538
1539 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
1540                         u8 loopback_type, u8 enable)
1541 {
1542         struct be_mcc_wrb *wrb;
1543         struct be_cmd_req_set_lmode *req;
1544         int status;
1545
1546         spin_lock_bh(&adapter->mcc_lock);
1547
1548         wrb = wrb_from_mccq(adapter);
1549         if (!wrb) {
1550                 status = -EBUSY;
1551                 goto err;
1552         }
1553
1554         req = embedded_payload(wrb);
1555
1556         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1557                                 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
1558
1559         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1560                         OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
1561                         sizeof(*req));
1562
1563         req->src_port = port_num;
1564         req->dest_port = port_num;
1565         req->loopback_type = loopback_type;
1566         req->loopback_state = enable;
1567
1568         status = be_mcc_notify_wait(adapter);
1569 err:
1570         spin_unlock_bh(&adapter->mcc_lock);
1571         return status;
1572 }
1573
/* Run a firmware loopback test on a port. Uses sync mcc.
 *
 * Returns a negative errno on submission failure; otherwise the
 * fw-reported test status from the response.
 */
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
                u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_loopback_test *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                                OPCODE_LOWLEVEL_LOOPBACK_TEST);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
                        OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
        /* Give the fw extra time to run the test; presumably seconds -
         * confirm the hdr.timeout unit against the fw spec.
         */
        req->hdr.timeout = cpu_to_le32(4);

        req->pattern = cpu_to_le64(pattern);
        /* Test packets are looped from the port back to itself */
        req->src_port = cpu_to_le32(port_num);
        req->dest_port = cpu_to_le32(port_num);
        req->pkt_size = cpu_to_le32(pkt_size);
        req->num_pkts = cpu_to_le32(num_pkts);
        req->loopback_type = cpu_to_le32(loopback_type);

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
                status = le32_to_cpu(resp->status);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1615
/* DMA a test pattern to adapter DDR and verify what comes back.
 * Request and response share the caller's DMA buffer (cmd).
 * Uses sync mcc; returns -1 when the echoed data does not match or
 * the fw flags a send error.
 */
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
                                u32 byte_cnt, struct be_dma_mem *cmd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_ddrdma_test *req;
        struct be_sge *sge;
        int status;
        int i, j = 0;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = cmd->va;
        sge = nonembedded_sgl(wrb);
        be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
                                OPCODE_LOWLEVEL_HOST_DDR_DMA);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
                        OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);

        sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
        sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(cmd->size);

        req->pattern = cpu_to_le64(pattern);
        req->byte_count = cpu_to_le32(byte_cnt);
        /* Replicate the 64-bit pattern byte-by-byte into snd_buff,
         * cycling through the 8 bytes of the pattern (j wraps at 8).
         */
        for (i = 0; i < byte_cnt; i++) {
                req->snd_buff[i] = (u8)(pattern >> (j*8));
                j++;
                if (j > 7)
                        j = 0;
        }

        status = be_mcc_notify_wait(adapter);

        if (!status) {
                struct be_cmd_resp_ddrdma_test *resp;
                /* Response overlays the same DMA buffer as the request */
                resp = cmd->va;
                if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
                                resp->snd_err) {
                        status = -1;
                }
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1667
1668 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
1669                                 struct be_dma_mem *nonemb_cmd)
1670 {
1671         struct be_mcc_wrb *wrb;
1672         struct be_cmd_req_seeprom_read *req;
1673         struct be_sge *sge;
1674         int status;
1675
1676         spin_lock_bh(&adapter->mcc_lock);
1677
1678         wrb = wrb_from_mccq(adapter);
1679         req = nonemb_cmd->va;
1680         sge = nonembedded_sgl(wrb);
1681
1682         be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1683                         OPCODE_COMMON_SEEPROM_READ);
1684
1685         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1686                         OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
1687
1688         sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1689         sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
1690         sge->len = cpu_to_le32(nonemb_cmd->size);
1691
1692         status = be_mcc_notify_wait(adapter);
1693
1694         spin_unlock_bh(&adapter->mcc_lock);
1695         return status;
1696 }