be2net: Use NTWK_RX_FILTER command for promiscuous mode
[pandora-kernel.git] / drivers / net / benet / be_cmds.c
1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include "be.h"
19 #include "be_cmds.h"
20
21 /* Must be a power of 2 or else MODULO will BUG_ON */
22 static int be_get_temp_freq = 32;
23
/* Ring the MCC queue doorbell to tell the controller that one new
 * work-request entry has been posted to the MCC queue.
 * Silently skipped (with a log message) after a PCI/EEH error. */
static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	/* Don't touch the hardware once an error has been detected on the card */
	if (adapter->eeh_err) {
		dev_info(&adapter->pdev->dev,
			"Error in Card Detected! Cannot issue commands\n");
		return;
	}

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	/* Ensure the wrb memory writes are visible before ringing the doorbell */
	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}
41
42 /* To check if valid bit is set, check the entire word as we don't know
43  * the endianness of the data (old entry is host endian while a new entry is
44  * little endian) */
45 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
46 {
47         if (compl->flags != 0) {
48                 compl->flags = le32_to_cpu(compl->flags);
49                 BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
50                 return true;
51         } else {
52                 return false;
53         }
54 }
55
/* Need to reset the entire word that houses the valid bit.
 * Marks the entry consumed so be_mcc_compl_is_new() won't see it again. */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}
61
/* Process one MCC completion entry: swap it to host endian, extract the
 * completion status, and handle the commands the driver tracks specially
 * (flashrom writes, ethernet statistics).
 * Returns the host-endian completion status code. */
static int be_mcc_compl_process(struct be_adapter *adapter,
	struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	/* A flashrom-write completion wakes the waiter in the flash path */
	if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
		(compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if ((compl->tag0 == OPCODE_ETH_GET_STATISTICS) &&
			(compl->tag1 == CMD_SUBSYSTEM_ETH)) {
			/* Stats response lands in the pre-allocated DMA buffer */
			struct be_cmd_resp_get_stats *resp =
						adapter->stats_cmd.va;
			be_dws_le_to_cpu(&resp->hw_stats,
						sizeof(resp->hw_stats));
			netdev_stats_update(adapter);
			adapter->stats_cmd_sent = false;
		}
	} else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
		   (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
		/* NOT_SUPPORTED and mac-query failures are deliberately
		 * not logged; warn about everything else */
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
		dev_warn(&adapter->pdev->dev,
		"Error in cmd completion - opcode %d, compl %d, extd %d\n",
			compl->tag0, compl_status, extd_status);
	}
	return compl_status;
}
100
101 /* Link state evt is a string of bytes; no need for endian swapping */
102 static void be_async_link_state_process(struct be_adapter *adapter,
103                 struct be_async_event_link_state *evt)
104 {
105         be_link_status_update(adapter,
106                 evt->port_link_status == ASYNC_EVENT_LINK_UP);
107 }
108
109 /* Grp5 CoS Priority evt */
110 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
111                 struct be_async_event_grp5_cos_priority *evt)
112 {
113         if (evt->valid) {
114                 adapter->vlan_prio_bmap = evt->available_priority_bmap;
115                 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
116                 adapter->recommended_prio =
117                         evt->reco_default_priority << VLAN_PRIO_SHIFT;
118         }
119 }
120
121 /* Grp5 QOS Speed evt */
122 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
123                 struct be_async_event_grp5_qos_link_speed *evt)
124 {
125         if (evt->physical_port == adapter->port_num) {
126                 /* qos_link_speed is in units of 10 Mbps */
127                 adapter->link_speed = evt->qos_link_speed * 10;
128         }
129 }
130
131 /*Grp5 PVID evt*/
132 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
133                 struct be_async_event_grp5_pvid_state *evt)
134 {
135         if (evt->enabled)
136                 adapter->pvid = le16_to_cpu(evt->tag);
137         else
138                 adapter->pvid = 0;
139 }
140
141 static void be_async_grp5_evt_process(struct be_adapter *adapter,
142                 u32 trailer, struct be_mcc_compl *evt)
143 {
144         u8 event_type = 0;
145
146         event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
147                 ASYNC_TRAILER_EVENT_TYPE_MASK;
148
149         switch (event_type) {
150         case ASYNC_EVENT_COS_PRIORITY:
151                 be_async_grp5_cos_priority_process(adapter,
152                 (struct be_async_event_grp5_cos_priority *)evt);
153         break;
154         case ASYNC_EVENT_QOS_SPEED:
155                 be_async_grp5_qos_speed_process(adapter,
156                 (struct be_async_event_grp5_qos_link_speed *)evt);
157         break;
158         case ASYNC_EVENT_PVID_STATE:
159                 be_async_grp5_pvid_state_process(adapter,
160                 (struct be_async_event_grp5_pvid_state *)evt);
161         break;
162         default:
163                 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
164                 break;
165         }
166 }
167
168 static inline bool is_link_state_evt(u32 trailer)
169 {
170         return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
171                 ASYNC_TRAILER_EVENT_CODE_MASK) ==
172                                 ASYNC_EVENT_CODE_LINK_STATE;
173 }
174
175 static inline bool is_grp5_evt(u32 trailer)
176 {
177         return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
178                 ASYNC_TRAILER_EVENT_CODE_MASK) ==
179                                 ASYNC_EVENT_CODE_GRP_5);
180 }
181
/* Return the next new (valid) completion from the MCC CQ and advance the
 * tail, or NULL if no new entry is present.
 * NOTE(review): callers take mcc_cq_lock first (see be_process_mcc). */
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}
193
/* Arm the MCC CQ and mark it for re-arming on future notifies */
void be_async_mcc_enable(struct be_adapter *adapter)
{
	/* Serialize against be_process_mcc() which holds the same lock */
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}
203
/* Stop re-arming the MCC CQ; async notifications cease after the next drain */
void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}
208
/* Drain the MCC completion queue: dispatch async events (link state, grp5)
 * and process command completions. Returns the number of entries consumed;
 * the status of the last processed command is returned through *status. */
int be_process_mcc(struct be_adapter *adapter, int *status)
{
	struct be_mcc_compl *compl;
	int num = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
			else if (is_grp5_evt(compl->flags))
				be_async_grp5_evt_process(adapter,
				compl->flags, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
				*status = be_mcc_compl_process(adapter, compl);
				/* One outstanding wrb has now completed */
				atomic_dec(&mcc_obj->q.used);
		}
		/* Clear the valid word so this entry isn't seen again */
		be_mcc_compl_use(compl);
		num++;
	}

	spin_unlock_bh(&adapter->mcc_cq_lock);
	return num;
}
236
/* Wait till no more pending mcc requests are present.
 * Polls every 100us for up to 12s; returns the status of the last
 * completed command, -EIO after an EEH error, or -1 on timeout. */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout             120000 /* 12s timeout */
	int i, num, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	if (adapter->eeh_err)
		return -EIO;

	for (i = 0; i < mcc_timeout; i++) {
		num = be_process_mcc(adapter, &status);
		/* Re-arm the CQ for however many entries we just drained */
		if (num)
			be_cq_notify(adapter, mcc_obj->cq.id,
				mcc_obj->rearm_cq, num);

		/* Done once no wrbs remain outstanding */
		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
		return -1;
	}
	return status;
}
263
/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;

	be_mcc_notify(adapter);
	status = be_mcc_wait_compl(adapter);

	return status;
}
270
/* Poll the mailbox doorbell until the hardware sets the ready bit.
 * Returns 0 when ready, -EIO after an EEH error, -1 on PCI disconnect
 * or after ~4s of polling without the ready bit appearing. */
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	if (adapter->eeh_err) {
		dev_err(&adapter->pdev->dev,
			"Error detected in card.Cannot issue commands\n");
		return -EIO;
	}

	do {
		ready = ioread32(db);
		/* All-ones readback indicates the PCI device is gone */
		if (ready == 0xffffffff) {
			dev_err(&adapter->pdev->dev,
				"pci slot disconnected\n");
			return -1;
		}

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
			/* Dump unrecoverable-error registers (BE chips only) */
			if (!lancer_chip(adapter))
				be_detect_dump_ue(adapter);
			return -1;
		}

		/* Sleep ~1ms between polls; this path may sleep */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(1));
		msecs++;
	} while (true);

	return 0;
}
308
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* Step 1: write the high half of the mailbox dma address */
	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* Step 2: write the low half of the mailbox dma address */
	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
358
359 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
360 {
361         u32 sem;
362
363         if (lancer_chip(adapter))
364                 sem  = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
365         else
366                 sem  = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
367
368         *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
369         if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
370                 return -1;
371         else
372                 return 0;
373 }
374
/* Wait for the firmware's power-on self test to finish.
 * Polls every 2s for up to ~40s; returns 0 when ARMFW is ready,
 * -1 on a POST error or timeout. */
int be_cmd_POST(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;

	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
				stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			/* Not ready yet: sleep 2 seconds before re-polling */
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(2 * HZ);
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 40);

	dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}
398
/* Payload area of a wrb whose command is embedded in the wrb itself */
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}
403
/* First scatter-gather entry of a wrb whose command is in external memory */
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}
408
409 /* Don't touch the hdr after it's prepared */
410 static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
411                                 bool embedded, u8 sge_cnt, u32 opcode)
412 {
413         if (embedded)
414                 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
415         else
416                 wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
417                                 MCC_WRB_SGE_CNT_SHIFT;
418         wrb->payload_length = payload_len;
419         wrb->tag0 = opcode;
420         be_dws_cpu_to_le(wrb, 8);
421 }
422
423 /* Don't touch the hdr after it's prepared */
424 static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
425                                 u8 subsystem, u8 opcode, int cmd_len)
426 {
427         req_hdr->opcode = opcode;
428         req_hdr->subsystem = subsystem;
429         req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
430         req_hdr->version = 0;
431 }
432
433 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
434                         struct be_dma_mem *mem)
435 {
436         int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
437         u64 dma = (u64)mem->dma;
438
439         for (i = 0; i < buf_pages; i++) {
440                 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
441                 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
442                 dma += PAGE_SIZE_4K;
443         }
444 }
445
446 /* Converts interrupt delay in microseconds to multiplier value */
447 static u32 eq_delay_to_mult(u32 usec_delay)
448 {
449 #define MAX_INTR_RATE                   651042
450         const u32 round = 10;
451         u32 multiplier;
452
453         if (usec_delay == 0)
454                 multiplier = 0;
455         else {
456                 u32 interrupt_rate = 1000000 / usec_delay;
457                 /* Max delay, corresponding to the lowest interrupt rate */
458                 if (interrupt_rate == 0)
459                         multiplier = 1023;
460                 else {
461                         multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
462                         multiplier /= interrupt_rate;
463                         /* Round the multiplier to the closest value.*/
464                         multiplier = (multiplier + round/2) / round;
465                         multiplier = min(multiplier, (u32)1023);
466                 }
467         }
468         return multiplier;
469 }
470
/* Return the (single) wrb inside the mailbox DMA area, zeroed for reuse */
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}
479
/* Reserve and zero the next free wrb on the MCC queue, advancing the head
 * and the used count. Returns NULL if the queue is full. */
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}
496
497 /* Tell fw we're about to start firing cmds by writing a
498  * special pattern across the wrb hdr; uses mbox
499  */
500 int be_cmd_fw_init(struct be_adapter *adapter)
501 {
502         u8 *wrb;
503         int status;
504
505         if (mutex_lock_interruptible(&adapter->mbox_lock))
506                 return -1;
507
508         wrb = (u8 *)wrb_from_mbox(adapter);
509         *wrb++ = 0xFF;
510         *wrb++ = 0x12;
511         *wrb++ = 0x34;
512         *wrb++ = 0xFF;
513         *wrb++ = 0xFF;
514         *wrb++ = 0x56;
515         *wrb++ = 0x78;
516         *wrb = 0xFF;
517
518         status = be_mbox_notify_wait(adapter);
519
520         mutex_unlock(&adapter->mbox_lock);
521         return status;
522 }
523
524 /* Tell fw we're done with firing cmds by writing a
525  * special pattern across the wrb hdr; uses mbox
526  */
527 int be_cmd_fw_clean(struct be_adapter *adapter)
528 {
529         u8 *wrb;
530         int status;
531
532         if (adapter->eeh_err)
533                 return -EIO;
534
535         if (mutex_lock_interruptible(&adapter->mbox_lock))
536                 return -1;
537
538         wrb = (u8 *)wrb_from_mbox(adapter);
539         *wrb++ = 0xFF;
540         *wrb++ = 0xAA;
541         *wrb++ = 0xBB;
542         *wrb++ = 0xFF;
543         *wrb++ = 0xFF;
544         *wrb++ = 0xCC;
545         *wrb++ = 0xDD;
546         *wrb = 0xFF;
547
548         status = be_mbox_notify_wait(adapter);
549
550         mutex_unlock(&adapter->mbox_lock);
551         return status;
552 }
/* Create an event queue with the given interrupt-delay; uses the mbox
 * (may sleep). On success fills in eq->id and marks the queue created. */
int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	/* Context must be little endian before handing to hardware */
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
595
/* Query a MAC address from the firmware; uses the mbox (may sleep).
 * When permanent, asks for the factory MAC; otherwise queries the MAC
 * currently associated with the given interface handle.
 * On success copies the address into mac_addr (ETH_ALEN bytes). */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_MAC_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16) if_handle);
		req->permanent = 0;
	}

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
633
/* Add a MAC address to an interface; uses the synchronous MCC queue.
 * On success stores the firmware-assigned pmac id in *pmac_id.
 * Returns -EBUSY if no MCC wrb is available. */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_ADD);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
671
/* Delete a previously added MAC (by pmac id) from an interface; uses the
 * synchronous MCC queue. Returns -EBUSY if no MCC wrb is available. */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_DEL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
704
/* Create a completion queue bound to the given event queue; uses the mbox
 * (may sleep). Lancer chips use a v2 request with a different context
 * layout. On success fills in cq->id and marks the queue created. */
int be_cmd_cq_create(struct be_adapter *adapter,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_CQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */
		AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
								no_delay);
		AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
								ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
								ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
	} else {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
								coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
								ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, solevent,
								ctxt, sol_evts);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
	}

	/* Context must be little endian before handing to hardware */
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}
773
774 static u32 be_encoded_q_len(int q_len)
775 {
776         u32 len_encoded = fls(q_len); /* log2(len) + 1 */
777         if (len_encoded == 16)
778                 len_encoded = 0;
779         return len_encoded;
780 }
781
/* Create the MCC queue bound to the given completion queue; uses the mbox
 * (may sleep). Subscribes to the link-state and Group-5 async events.
 * On success fills in mccq->id and marks the queue created. */
int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MCC_CREATE_EXT);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		/* Lancer uses a v1 request and its own context layout */
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
								ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
								 ctxt, 1);

	} else {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	}

	/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}
841
/* Create a TX queue bound to the given completion queue; uses the mbox
 * (may sleep). Lancer chips use a v1 request that also carries the
 * interface id. On success fills in txq->id and marks the queue created. */
int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_TX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
		AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
					adapter->if_handle);
	}

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	/* Context must be little endian before handing to hardware */
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}
895
/* Create an ETH RX ring backed by rxq->dma_mem and bound to CQ @cq_id.
 * Uses the bootstrap mailbox; caller must be able to sleep.
 * On success fills in rxq->id, sets rxq->created and stores the
 * firmware-assigned RSS ring id in *rss_id.
 */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_RX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	/* Firmware takes the fragment size as log2; assumes power of 2 */
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}
938
/* Generic destroyer function for all types of queues.
 * Uses the bootstrap mailbox; caller must be able to sleep.
 * queue_type selects the (subsystem, opcode) pair; an unknown type is a
 * driver bug and triggers BUG().
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	/* Card in EEH error state: no commands can be issued */
	if (adapter->eeh_err)
		return -EIO;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);

	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);

	return status;
}
995
/* Create an rx filtering policy configuration on an i/f.
 * Uses the bootstrap mailbox; caller must be able to sleep.
 * On success stores the new interface handle in *if_handle and, unless
 * @pmac_invalid, the id of the programmed MAC in *pmac_id.
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
		u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = pmac_invalid;
	/* Program an initial MAC only when the caller supplied a valid one */
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (!pmac_invalid)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
1037
1038 /* Uses mbox */
1039 int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
1040 {
1041         struct be_mcc_wrb *wrb;
1042         struct be_cmd_req_if_destroy *req;
1043         int status;
1044
1045         if (adapter->eeh_err)
1046                 return -EIO;
1047
1048         if (mutex_lock_interruptible(&adapter->mbox_lock))
1049                 return -1;
1050
1051         wrb = wrb_from_mbox(adapter);
1052         req = embedded_payload(wrb);
1053
1054         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1055                         OPCODE_COMMON_NTWK_INTERFACE_DESTROY);
1056
1057         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1058                 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
1059
1060         req->hdr.domain = domain;
1061         req->interface_id = cpu_to_le32(interface_id);
1062
1063         status = be_mbox_notify_wait(adapter);
1064
1065         mutex_unlock(&adapter->mbox_lock);
1066
1067         return status;
1068 }
1069
/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block.
 * Uses asynchronous MCC: the command is only posted here; the MCC
 * completion handler consumes the response. adapter->stats_cmd_sent
 * records that a request is in flight.
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_stats *req;
	struct be_sge *sge;
	int status = 0;

	/* Piggy-back a die-temperature query every be_get_temp_freq calls */
	if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_GET_STATISTICS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	/* Tag lets the completion handler identify this command */
	wrb->tag1 = CMD_SUBSYSTEM_ETH;
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1111
/* Query the physical link state.
 * Uses synchronous MCC; safe in BH context.
 * *link_up is set true (and *mac_speed / *link_speed filled in) only
 * when firmware reports a non-zero PHY speed.
 */
int be_cmd_link_status_query(struct be_adapter *adapter,
			bool *link_up, u8 *mac_speed, u16 *link_speed, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	/* Default to link-down unless the response proves otherwise */
	*link_up = false;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
			*link_up = true;
			*link_speed = le16_to_cpu(resp->link_speed);
			*mac_speed = resp->mac_speed;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1151
/* Read the controller's on-die temperature into drv_stats.
 * Uses synchronous MCC; safe in BH context.
 * On the first failure be_get_temp_freq is zeroed so the periodic
 * trigger in be_cmd_get_stats() stops issuing this command.
 */
int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_cntl_addnl_attribs *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_cntl_addnl_attribs *resp =
						embedded_payload(wrb);
		adapter->drv_stats.be_on_die_temperature =
						resp->on_die_temperature;
	}
	/* If IOCTL fails once, do not bother issuing it again */
	else
		be_get_temp_freq = 0;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1189
/* Query the size of the firmware FAT log (QUERY_FAT operation).
 * Uses synchronous MCC; safe in BH context.
 * *log_size is set to the reported size minus the trailing u32, and only
 * when the caller passed a non-NULL pointer and firmware reported a
 * non-zero size.
 */
int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MANAGE_FAT);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MANAGE_FAT, sizeof(*req));
	req->fat_operation = cpu_to_le32(QUERY_FAT);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
		if (log_size && resp->log_size)
			*log_size = le32_to_cpu(resp->log_size) -
					sizeof(u32);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1223
1224 void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1225 {
1226         struct be_dma_mem get_fat_cmd;
1227         struct be_mcc_wrb *wrb;
1228         struct be_cmd_req_get_fat *req;
1229         struct be_sge *sge;
1230         u32 offset = 0, total_size, buf_size,
1231                                 log_offset = sizeof(u32), payload_len;
1232         int status;
1233
1234         if (buf_len == 0)
1235                 return;
1236
1237         total_size = buf_len;
1238
1239         get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1240         get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1241                         get_fat_cmd.size,
1242                         &get_fat_cmd.dma);
1243         if (!get_fat_cmd.va) {
1244                 status = -ENOMEM;
1245                 dev_err(&adapter->pdev->dev,
1246                 "Memory allocation failure while retrieving FAT data\n");
1247                 return;
1248         }
1249
1250         spin_lock_bh(&adapter->mcc_lock);
1251
1252         while (total_size) {
1253                 buf_size = min(total_size, (u32)60*1024);
1254                 total_size -= buf_size;
1255
1256                 wrb = wrb_from_mccq(adapter);
1257                 if (!wrb) {
1258                         status = -EBUSY;
1259                         goto err;
1260                 }
1261                 req = get_fat_cmd.va;
1262                 sge = nonembedded_sgl(wrb);
1263
1264                 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1265                 be_wrb_hdr_prepare(wrb, payload_len, false, 1,
1266                                 OPCODE_COMMON_MANAGE_FAT);
1267
1268                 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1269                                 OPCODE_COMMON_MANAGE_FAT, payload_len);
1270
1271                 sge->pa_hi = cpu_to_le32(upper_32_bits(get_fat_cmd.dma));
1272                 sge->pa_lo = cpu_to_le32(get_fat_cmd.dma & 0xFFFFFFFF);
1273                 sge->len = cpu_to_le32(get_fat_cmd.size);
1274
1275                 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1276                 req->read_log_offset = cpu_to_le32(log_offset);
1277                 req->read_log_length = cpu_to_le32(buf_size);
1278                 req->data_buffer_size = cpu_to_le32(buf_size);
1279
1280                 status = be_mcc_notify_wait(adapter);
1281                 if (!status) {
1282                         struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1283                         memcpy(buf + offset,
1284                                 resp->data_buffer,
1285                                 resp->read_log_length);
1286                 } else {
1287                         dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1288                         goto err;
1289                 }
1290                 offset += buf_size;
1291                 log_offset += buf_size;
1292         }
1293 err:
1294         pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1295                         get_fat_cmd.va,
1296                         get_fat_cmd.dma);
1297         spin_unlock_bh(&adapter->mcc_lock);
1298 }
1299
1300 /* Uses Mbox */
1301 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
1302 {
1303         struct be_mcc_wrb *wrb;
1304         struct be_cmd_req_get_fw_version *req;
1305         int status;
1306
1307         if (mutex_lock_interruptible(&adapter->mbox_lock))
1308                 return -1;
1309
1310         wrb = wrb_from_mbox(adapter);
1311         req = embedded_payload(wrb);
1312
1313         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1314                         OPCODE_COMMON_GET_FW_VERSION);
1315
1316         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1317                 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
1318
1319         status = be_mbox_notify_wait(adapter);
1320         if (!status) {
1321                 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1322                 strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
1323         }
1324
1325         mutex_unlock(&adapter->mbox_lock);
1326         return status;
1327 }
1328
/* Set the EQ delay interval (interrupt moderation multiplier) of the EQ
 * identified by @eq_id to @eqd.
 * Uses asynchronous MCC: the command is posted but not waited on, so a
 * zero return only means it was queued successfully.
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MODIFY_EQ_DELAY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

	/* Only a single EQ is updated per command */
	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1364
/* Configure the VLAN filter table of interface @if_id.
 * Uses synchronous MCC; safe in BH context.
 * When @promiscuous is set the vtag_array is ignored (all VLANs pass);
 * otherwise the first @num entries of vtag_array are programmed.
 */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_VLAN_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1403
/* Enable/disable promiscuous mode via the NTWK_RX_FILTER command.
 * Uses MCC for this command as it may be called in BH context.
 * Uses synchronous MCC. The request is non-embedded, so a small
 * coherent DMA buffer is allocated (and freed) per call; the allocation
 * happens before the mcc_lock is taken.
 */
int be_cmd_promiscuous_config(struct be_adapter *adapter, bool en)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_rx_filter *req;
	struct be_dma_mem promiscous_cmd;
	struct be_sge *sge;
	int status;

	memset(&promiscous_cmd, 0, sizeof(struct be_dma_mem));
	promiscous_cmd.size = sizeof(struct be_cmd_req_rx_filter);
	promiscous_cmd.va = pci_alloc_consistent(adapter->pdev,
				promiscous_cmd.size, &promiscous_cmd.dma);
	if (!promiscous_cmd.va) {
		dev_err(&adapter->pdev->dev,
				"Memory allocation failure\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = promiscous_cmd.va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
					OPCODE_COMMON_NTWK_RX_FILTER);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req));

	req->if_id = cpu_to_le32(adapter->if_handle);
	/* Only the promiscuous bit is modified; other filter bits are
	 * left untouched because only this bit is set in the mask.
	 */
	req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS);
	if (en)
		req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS);

	sge->pa_hi = cpu_to_le32(upper_32_bits(promiscous_cmd.dma));
	sge->pa_lo = cpu_to_le32(promiscous_cmd.dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(promiscous_cmd.size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	pci_free_consistent(adapter->pdev, promiscous_cmd.size,
			promiscous_cmd.va, promiscous_cmd.dma);
	return status;
}
1458
/*
 * Program the multicast MAC filter of interface @if_id from @netdev's
 * multicast list. Uses MCC for this command as it may be called in BH
 * context; synchronous. (netdev == NULL) => multicast promiscuous.
 * @mem is a caller-provided DMA buffer large enough for the request.
 */
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
		struct net_device *netdev, struct be_dma_mem *mem)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcast_mac_config *req = mem->va;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	sge = nonembedded_sgl(wrb);
	memset(req, 0, sizeof(*req));

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_NTWK_MULTICAST_SET);
	sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
	sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(mem->size);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

	req->interface_id = if_id;
	if (netdev) {
		int i;
		struct netdev_hw_addr *ha;

		req->num_mac = cpu_to_le16(netdev_mc_count(netdev));

		/* Copy every multicast address registered on the netdev */
		i = 0;
		netdev_for_each_mc_addr(ha, netdev)
			memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
	} else {
		req->promiscuous = 1;
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1510
1511 /* Uses synchrounous mcc */
1512 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1513 {
1514         struct be_mcc_wrb *wrb;
1515         struct be_cmd_req_set_flow_control *req;
1516         int status;
1517
1518         spin_lock_bh(&adapter->mcc_lock);
1519
1520         wrb = wrb_from_mccq(adapter);
1521         if (!wrb) {
1522                 status = -EBUSY;
1523                 goto err;
1524         }
1525         req = embedded_payload(wrb);
1526
1527         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1528                         OPCODE_COMMON_SET_FLOW_CONTROL);
1529
1530         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1531                 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
1532
1533         req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1534         req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1535
1536         status = be_mcc_notify_wait(adapter);
1537
1538 err:
1539         spin_unlock_bh(&adapter->mcc_lock);
1540         return status;
1541 }
1542
/* Read the current TX/RX pause-frame (flow control) settings into
 * *tx_fc and *rx_fc. Uses synchronous MCC; safe in BH context.
 */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1577
/* Query firmware configuration: physical port number, function mode and
 * function capabilities. Uses the bootstrap mailbox; caller must be
 * able to sleep. Output parameters are set only on success.
 */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
		u32 *mode, u32 *caps)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
		*mode = le32_to_cpu(resp->function_mode);
		*caps = le32_to_cpu(resp->function_caps);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
1609
1610 /* Uses mbox */
1611 int be_cmd_reset_function(struct be_adapter *adapter)
1612 {
1613         struct be_mcc_wrb *wrb;
1614         struct be_cmd_req_hdr *req;
1615         int status;
1616
1617         if (mutex_lock_interruptible(&adapter->mbox_lock))
1618                 return -1;
1619
1620         wrb = wrb_from_mbox(adapter);
1621         req = embedded_payload(wrb);
1622
1623         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1624                         OPCODE_COMMON_FUNCTION_RESET);
1625
1626         be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1627                 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
1628
1629         status = be_mbox_notify_wait(adapter);
1630
1631         mutex_unlock(&adapter->mbox_lock);
1632         return status;
1633 }
1634
1635 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1636 {
1637         struct be_mcc_wrb *wrb;
1638         struct be_cmd_req_rss_config *req;
1639         u32 myhash[10];
1640         int status;
1641
1642         if (mutex_lock_interruptible(&adapter->mbox_lock))
1643                 return -1;
1644
1645         wrb = wrb_from_mbox(adapter);
1646         req = embedded_payload(wrb);
1647
1648         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1649                 OPCODE_ETH_RSS_CONFIG);
1650
1651         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1652                 OPCODE_ETH_RSS_CONFIG, sizeof(*req));
1653
1654         req->if_id = cpu_to_le32(adapter->if_handle);
1655         req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
1656         req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
1657         memcpy(req->cpu_table, rsstable, table_size);
1658         memcpy(req->hash, myhash, sizeof(myhash));
1659         be_dws_cpu_to_le(req->hash, sizeof(req->hash));
1660
1661         status = be_mbox_notify_wait(adapter);
1662
1663         mutex_unlock(&adapter->mbox_lock);
1664         return status;
1665 }
1666
/* Set the beacon LED state of physical port @port_num.
 * Uses synchronous MCC; safe in BH context.
 * @bcn and @sts are the beacon/status durations; @state is the new
 * beacon state as understood by firmware.
 */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			u8 bcn, u8 sts, u8 state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_ENABLE_DISABLE_BEACON);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1701
/* Read the current beacon LED state of physical port @port_num into
 * *state. Uses synchronous MCC; safe in BH context. *state is written
 * only on success.
 */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_BEACON_STATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);
		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1737
/* Program the adapter flash ROM.
 * The write payload lives in the non-embedded DMA buffer @cmd.
 * The command is posted on the MCC queue under mcc_lock and then this
 * function sleeps (lock released) on adapter->flash_compl, which the
 * MCC completion handler signals, setting adapter->flash_status.
 * A 12-second timeout returns -1.
 */
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
			OPCODE_COMMON_WRITE_FLASHROM);
	/* Tag lets the completion handler identify this command */
	wrb->tag1 = CMD_SUBSYSTEM_COMMON;

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	/* Post asynchronously, then wait for the completion handler to
	 * signal flash_compl with the final status.
	 */
	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
			msecs_to_jiffies(12000)))
		status = -1;
	else
		status = adapter->flash_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1786
/* Uses sync mcc.
 * Reads 4 bytes (a stored CRC) of the redboot image at the given flash
 * offset into flashed_crc. Reuses the write_flashrom request layout for
 * the READ_FLASHROM opcode.
 */
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
			 int offset)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	/* +4: room for the 4 data bytes returned after the params */
	be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
			OPCODE_COMMON_READ_FLASHROM);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);

	req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	req->params.offset = cpu_to_le32(offset);
	req->params.data_buf_size = cpu_to_le32(0x4);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		/* Firmware returned the CRC in the embedded data buffer */
		memcpy(flashed_crc, req->params.data_buf, 4);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1822
/* Uses sync mcc.
 * Configures magic-packet Wake-on-LAN for the given MAC address. The
 * request is non-embedded: it lives in the caller-provided DMA buffer
 * @nonemb_cmd and is referenced through a scatter-gather entry.
 */
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
	/* MAC the firmware should match in incoming magic packets */
	memcpy(req->magic_mac, mac, ETH_ALEN);

	/* Point the wrb's sgl at the DMA buffer holding the request */
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1858
/* Uses sync mcc.
 * Enables or disables the given loopback mode on a port (used by the
 * ethtool self-test path). Source and destination are the same port.
 */
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
			u8 loopback_type, u8 enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_lmode *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
				OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
			sizeof(*req));

	/* Loop the port back onto itself */
	req->src_port = port_num;
	req->dest_port = port_num;
	req->loopback_type = loopback_type;
	req->loopback_state = enable;

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1893
/* Uses sync mcc.
 * Asks the firmware to run a loopback packet test: num_pkts packets of
 * pkt_size bytes carrying the given 64-bit pattern are looped on the port.
 * Returns the firmware's test status (0 on pass) or a driver errno.
 */
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
		u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_loopback_test *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
				OPCODE_LOWLEVEL_LOOPBACK_TEST);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
	/* Per-command firmware timeout of 4 -- presumably seconds;
	 * confirm against the firmware command spec */
	req->hdr.timeout = cpu_to_le32(4);

	req->pattern = cpu_to_le64(pattern);
	req->src_port = cpu_to_le32(port_num);
	req->dest_port = cpu_to_le32(port_num);
	req->pkt_size = cpu_to_le32(pkt_size);
	req->num_pkts = cpu_to_le32(num_pkts);
	req->loopback_type = cpu_to_le32(loopback_type);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		/* Command delivered OK; report the test's own result */
		struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
		status = le32_to_cpu(resp->status);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1935
1936 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
1937                                 u32 byte_cnt, struct be_dma_mem *cmd)
1938 {
1939         struct be_mcc_wrb *wrb;
1940         struct be_cmd_req_ddrdma_test *req;
1941         struct be_sge *sge;
1942         int status;
1943         int i, j = 0;
1944
1945         spin_lock_bh(&adapter->mcc_lock);
1946
1947         wrb = wrb_from_mccq(adapter);
1948         if (!wrb) {
1949                 status = -EBUSY;
1950                 goto err;
1951         }
1952         req = cmd->va;
1953         sge = nonembedded_sgl(wrb);
1954         be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
1955                                 OPCODE_LOWLEVEL_HOST_DDR_DMA);
1956         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1957                         OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);
1958
1959         sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
1960         sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
1961         sge->len = cpu_to_le32(cmd->size);
1962
1963         req->pattern = cpu_to_le64(pattern);
1964         req->byte_count = cpu_to_le32(byte_cnt);
1965         for (i = 0; i < byte_cnt; i++) {
1966                 req->snd_buff[i] = (u8)(pattern >> (j*8));
1967                 j++;
1968                 if (j > 7)
1969                         j = 0;
1970         }
1971
1972         status = be_mcc_notify_wait(adapter);
1973
1974         if (!status) {
1975                 struct be_cmd_resp_ddrdma_test *resp;
1976                 resp = cmd->va;
1977                 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
1978                                 resp->snd_err) {
1979                         status = -1;
1980                 }
1981         }
1982
1983 err:
1984         spin_unlock_bh(&adapter->mcc_lock);
1985         return status;
1986 }
1987
/* Uses sync mcc.
 * Reads the adapter's SEEPROM contents into the caller-provided DMA
 * buffer @nonemb_cmd (non-embedded command; the buffer holds both the
 * request header and, on completion, the response data).
 */
int be_cmd_get_seeprom_data(struct be_adapter *adapter,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_seeprom_read *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_SEEPROM_READ);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SEEPROM_READ, sizeof(*req));

	/* Point the wrb's sgl at the DMA buffer */
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
2022
/* Uses sync mcc.
 * Fetches PHY details from the firmware into the caller-provided DMA
 * buffer @cmd (non-embedded command; caller parses the response from
 * cmd->va after a successful return).
 */
int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_phy_info *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
				OPCODE_COMMON_GET_PHY_DETAILS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_GET_PHY_DETAILS,
			sizeof(*req));

	/* Point the wrb's sgl at the DMA buffer */
	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
2057
/* Uses sync mcc.
 * Sets the NIC maximum bandwidth (bps) QoS parameter for the given
 * function domain.
 */
int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_qos *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
				OPCODE_COMMON_SET_QOS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SET_QOS, sizeof(*req));

	req->hdr.domain = domain;
	/* Only the NIC bps field of the QoS request is being set */
	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
	req->max_bps_nic = cpu_to_le32(bps);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
2090
2091 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2092 {
2093         struct be_mcc_wrb *wrb;
2094         struct be_cmd_req_cntl_attribs *req;
2095         struct be_cmd_resp_cntl_attribs *resp;
2096         struct be_sge *sge;
2097         int status;
2098         int payload_len = max(sizeof(*req), sizeof(*resp));
2099         struct mgmt_controller_attrib *attribs;
2100         struct be_dma_mem attribs_cmd;
2101
2102         memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2103         attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2104         attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2105                                                 &attribs_cmd.dma);
2106         if (!attribs_cmd.va) {
2107                 dev_err(&adapter->pdev->dev,
2108                                 "Memory allocation failure\n");
2109                 return -ENOMEM;
2110         }
2111
2112         if (mutex_lock_interruptible(&adapter->mbox_lock))
2113                 return -1;
2114
2115         wrb = wrb_from_mbox(adapter);
2116         if (!wrb) {
2117                 status = -EBUSY;
2118                 goto err;
2119         }
2120         req = attribs_cmd.va;
2121         sge = nonembedded_sgl(wrb);
2122
2123         be_wrb_hdr_prepare(wrb, payload_len, false, 1,
2124                         OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
2125         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2126                          OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
2127         sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
2128         sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
2129         sge->len = cpu_to_le32(attribs_cmd.size);
2130
2131         status = be_mbox_notify_wait(adapter);
2132         if (!status) {
2133                 attribs = (struct mgmt_controller_attrib *)( attribs_cmd.va +
2134                                         sizeof(struct be_cmd_resp_hdr));
2135                 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2136         }
2137
2138 err:
2139         mutex_unlock(&adapter->mbox_lock);
2140         pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
2141                                         attribs_cmd.dma);
2142         return status;
2143 }
2144
/* Uses mbox.
 * Advertises the driver's capabilities (BE3 native ERX API) to the
 * firmware and records in adapter->be3_native whether the firmware
 * granted native-mode operation.
 */
int be_cmd_check_native_mode(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_func_cap *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
		OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req));

	/* valid_cap_flags marks which capability bits we are setting;
	 * we claim only the BE3 native ERX API */
	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
				CAPABILITY_BE3_NATIVE_ERX_API);
	req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		/* Firmware echoes back the capabilities it accepted */
		struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
		adapter->be3_native = le32_to_cpu(resp->cap_flags) &
					CAPABILITY_BE3_NATIVE_ERX_API;
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}