drivers/net/benet/be_cmds.c
/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"

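/* Ring the MCC doorbell to tell the hardware that one new entry has been
 * posted on the MCC queue. */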
static void be_mcc_notify(struct be_ctrl_info *ctrl)
{
        struct be_queue_info *mccq = &ctrl->mcc_obj.q;
        u32 val = 0;

        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
        val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
        iowrite32(val, ctrl->db + DB_MCCQ_OFFSET);
}

/* To check if the valid bit is set, look at the entire word: we don't know
 * the endianness of the data (an old entry is host endian while a new entry
 * is little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_cq_entry *compl)
{
        if (compl->flags != 0) {
                compl->flags = le32_to_cpu(compl->flags);
                BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
                return true;
        } else {
                return false;
        }
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_cq_entry *compl)
{
        compl->flags = 0;
}

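/* Process one command completion: swap the status words to host endianness
 * and warn on any non-success completion status. */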
static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
        struct be_mcc_cq_entry *compl)
{
        u16 compl_status, extd_status;

        /* Just swap the status to host endian; mcc tag is opaquely copied
         * from mcc_wrb */
        be_dws_le_to_cpu(compl, 4);

        compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
                                CQE_STATUS_COMPL_MASK;
        if (compl_status != MCC_STATUS_SUCCESS) {
                extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                                CQE_STATUS_EXTD_MASK;
                printk(KERN_WARNING DRV_NAME
                        " error in cmd completion: status(compl/extd)=%d/%d\n",
                        compl_status, extd_status);
                return -1;
        }
        return 0;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_ctrl_info *ctrl,
                struct be_async_event_link_state *evt)
{
        ctrl->async_cb(ctrl->adapter_ctxt,
                evt->port_link_status == ASYNC_EVENT_LINK_UP);
}

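/* True if the async-event trailer carries a link-state event code */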
static inline bool is_link_state_evt(u32 trailer)
{
        return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
                ASYNC_TRAILER_EVENT_CODE_MASK) ==
                                ASYNC_EVENT_CODE_LINK_STATE);
}

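/* Return the next unprocessed completion on the MCC CQ, or NULL if none */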
static struct be_mcc_cq_entry *be_mcc_compl_get(struct be_ctrl_info *ctrl)
{
        struct be_queue_info *mcc_cq = &ctrl->mcc_obj.cq;
        struct be_mcc_cq_entry *compl = queue_tail_node(mcc_cq);

        if (be_mcc_compl_is_new(compl)) {
                queue_tail_inc(mcc_cq);
                return compl;
        }
        return NULL;
}

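/* Drain the MCC completion queue: dispatch async link-state events and
 * command completions, then re-arm the CQ for the entries consumed. */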
void be_process_mcc(struct be_ctrl_info *ctrl)
{
        struct be_mcc_cq_entry *compl;
        int num = 0;

        spin_lock_bh(&ctrl->mcc_cq_lock);
        while ((compl = be_mcc_compl_get(ctrl))) {
                if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
                        /* Interpret flags as an async trailer */
                        BUG_ON(!is_link_state_evt(compl->flags));

                        /* Interpret compl as an async link evt */
                        be_async_link_state_process(ctrl,
                                (struct be_async_event_link_state *) compl);
                } else {
                        be_mcc_compl_process(ctrl, compl);
                        atomic_dec(&ctrl->mcc_obj.q.used);
                }
                be_mcc_compl_use(compl);
                num++;
        }
        if (num)
                be_cq_notify(ctrl, ctrl->mcc_obj.cq.id, true, num);
        spin_unlock_bh(&ctrl->mcc_cq_lock);
}

/* Wait until no pending MCC requests remain */
static void be_mcc_wait_compl(struct be_ctrl_info *ctrl)
{
#define mcc_timeout             50000 /* 5s timeout */
        int i;
        for (i = 0; i < mcc_timeout; i++) {
                be_process_mcc(ctrl);
                if (atomic_read(&ctrl->mcc_obj.q.used) == 0)
                        break;
                udelay(100);
        }
        if (i == mcc_timeout)
                printk(KERN_WARNING DRV_NAME ": mcc poll timed out\n");
}

/* Notify MCC requests and wait for completion */
static void be_mcc_notify_wait(struct be_ctrl_info *ctrl)
{
        be_mcc_notify(ctrl);
        be_mcc_wait_compl(ctrl);
}

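/* Poll the mailbox doorbell's ready bit, backing off from 5us to 200us
 * between reads; gives up after roughly 200ms. */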
static int be_mbox_db_ready_wait(void __iomem *db)
{
        int cnt = 0, wait = 5;
        u32 ready;

        do {
                ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
                if (ready)
                        break;

                if (cnt > 200000) {
                        printk(KERN_WARNING DRV_NAME
                                ": mbox_db poll timed out\n");
                        return -1;
                }

                if (cnt > 50)
                        wait = 200;
                cnt += wait;
                udelay(wait);
        } while (true);

        return 0;
}

/*
 * Write the mailbox address into the doorbell in two steps, then poll the
 * doorbell until a command completion (or a timeout) occurs.
 */
static int be_mbox_db_ring(struct be_ctrl_info *ctrl)
{
        int status;
        u32 val = 0;
        void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
        struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
        struct be_mcc_mailbox *mbox = mbox_mem->va;
        struct be_mcc_cq_entry *cqe = &mbox->cqe;

        memset(cqe, 0, sizeof(*cqe));

        val &= ~MPU_MAILBOX_DB_RDY_MASK;
        val |= MPU_MAILBOX_DB_HI_MASK;
        /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
        iowrite32(val, db);

        /* wait for ready to be set */
        status = be_mbox_db_ready_wait(db);
        if (status != 0)
                return status;

        val = 0;
        val &= ~MPU_MAILBOX_DB_RDY_MASK;
        val &= ~MPU_MAILBOX_DB_HI_MASK;
        /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
        val |= (u32)(mbox_mem->dma >> 4) << 2;
        iowrite32(val, db);

        status = be_mbox_db_ready_wait(db);
        if (status != 0)
                return status;

        /* A cq entry has been made now */
        if (be_mcc_compl_is_new(cqe)) {
                status = be_mcc_compl_process(ctrl, &mbox->cqe);
                be_mcc_compl_use(cqe);
                if (status)
                        return status;
        } else {
                printk(KERN_WARNING DRV_NAME ": invalid mailbox completion\n");
                return -1;
        }
        return 0;
}

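/* Read the current POST stage from the EP semaphore register; fails if
 * the error bit is set. */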
static int be_POST_stage_get(struct be_ctrl_info *ctrl, u16 *stage)
{
        u32 sem = ioread32(ctrl->csr + MPU_EP_SEMAPHORE_OFFSET);

        *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
        if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
                return -1;
        else
                return 0;
}

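/* Poll (for up to ~5 seconds) until POST reaches poll_stage */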
static int be_POST_stage_poll(struct be_ctrl_info *ctrl, u16 poll_stage)
{
        u16 stage, cnt;
        int error;

        for (cnt = 0; cnt < 5000; cnt++) {
                error = be_POST_stage_get(ctrl, &stage);
                if (error)
                        return -1;

                if (stage == poll_stage)
                        break;
                udelay(1000);
        }
        if (stage != poll_stage)
                return -1;
        return 0;
}
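/* Drive the adapter's POST state machine: if the firmware is awaiting
 * host readiness, reset it and signal host-ready, then wait for the ARM
 * firmware to report ready. */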
int be_cmd_POST(struct be_ctrl_info *ctrl)
{
        u16 stage;
        int error;

        error = be_POST_stage_get(ctrl, &stage);
        if (error)
                goto err;

        if (stage == POST_STAGE_ARMFW_RDY)
                return 0;

        if (stage != POST_STAGE_AWAITING_HOST_RDY)
                goto err;

        /* On awaiting host rdy, reset and again poll on awaiting host rdy */
        iowrite32(POST_STAGE_BE_RESET, ctrl->csr + MPU_EP_SEMAPHORE_OFFSET);
        error = be_POST_stage_poll(ctrl, POST_STAGE_AWAITING_HOST_RDY);
        if (error)
                goto err;

        /* Now kickoff POST and poll on armfw ready */
        iowrite32(POST_STAGE_HOST_RDY, ctrl->csr + MPU_EP_SEMAPHORE_OFFSET);
        error = be_POST_stage_poll(ctrl, POST_STAGE_ARMFW_RDY);
        if (error)
                goto err;

        return 0;
err:
        printk(KERN_WARNING DRV_NAME ": ERROR, stage=%d\n", stage);
        return -1;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
        return wrb->payload.embedded_payload;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
        return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
                                bool embedded, u8 sge_cnt)
{
        if (embedded)
                wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
        else
                wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
                                MCC_WRB_SGE_CNT_SHIFT;
        wrb->payload_length = payload_len;
        be_dws_cpu_to_le(wrb, 20);
}

/* Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
                                u8 subsystem, u8 opcode, int cmd_len)
{
        req_hdr->opcode = opcode;
        req_hdr->subsystem = subsystem;
        req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
}

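/* Fill a command's page-address array with the 4K pages spanned by a
 * DMA buffer, capped at max_pages. */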
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
                        struct be_dma_mem *mem)
{
        int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
        u64 dma = (u64)mem->dma;

        for (i = 0; i < buf_pages; i++) {
                pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
                pages[i].hi = cpu_to_le32(upper_32_bits(dma));
                dma += PAGE_SIZE_4K;
        }
}

/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE                   651042
        const u32 round = 10;
        u32 multiplier;

        if (usec_delay == 0)
                multiplier = 0;
        else {
                u32 interrupt_rate = 1000000 / usec_delay;
                /* Max delay, corresponding to the lowest interrupt rate */
                if (interrupt_rate == 0)
                        multiplier = 1023;
                else {
                        multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
                        multiplier /= interrupt_rate;
                        /* Round the multiplier to the closest value. */
                        multiplier = (multiplier + round/2) / round;
                        multiplier = min(multiplier, (u32)1023);
                }
        }
        return multiplier;
}

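/* The mailbox holds a single WRB; MCC WRBs are carved from the ring head
 * while the queue has room. */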
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
        return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}

static inline struct be_mcc_wrb *wrb_from_mcc(struct be_queue_info *mccq)
{
        struct be_mcc_wrb *wrb = NULL;
        if (atomic_read(&mccq->used) < mccq->len) {
                wrb = queue_head_node(mccq);
                queue_head_inc(mccq);
                atomic_inc(&mccq->used);
                memset(wrb, 0, sizeof(*wrb));
        }
        return wrb;
}

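/* Create an event queue: program its pages, entry size/count and
 * interrupt-delay multiplier, then ring the mailbox. */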
int be_cmd_eq_create(struct be_ctrl_info *ctrl,
                struct be_queue_info *eq, int eq_delay)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_eq_create *req = embedded_payload(wrb);
        struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &eq->dma_mem;
        int status;

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_EQ_CREATE, sizeof(*req));

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_eq_context, func, req->context,
                        ctrl->pci_func);
        AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
        /* 4 byte eqe */
        AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
        AMAP_SET_BITS(struct amap_eq_context, count, req->context,
                        __ilog2_u32(eq->len/256));
        AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
                        eq_delay_to_mult(eq_delay));
        be_dws_cpu_to_le(req->context, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_db_ring(ctrl);
        if (!status) {
                eq->id = le16_to_cpu(resp->eq_id);
                eq->created = true;
        }
        spin_unlock(&ctrl->mbox_lock);
        return status;
}

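/* Query a MAC address: either the permanent MAC or the address currently
 * programmed on the given interface handle. */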
int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr,
                        u8 type, bool permanent, u32 if_handle)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_mac_query *req = embedded_payload(wrb);
        struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

        req->type = type;
        if (permanent) {
                req->permanent = 1;
        } else {
                req->if_id = cpu_to_le16((u16)if_handle);
                req->permanent = 0;
        }

        status = be_mbox_db_ring(ctrl);
        if (!status)
                memcpy(mac_addr, resp->mac.addr, ETH_ALEN);

        spin_unlock(&ctrl->mbox_lock);
        return status;
}

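/* Add a MAC address to an interface; *pmac_id receives the handle needed
 * to delete it later. */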
int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr,
                u32 if_id, u32 *pmac_id)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_pmac_add *req = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

        req->if_id = cpu_to_le32(if_id);
        memcpy(req->mac_address, mac_addr, ETH_ALEN);

        status = be_mbox_db_ring(ctrl);
        if (!status) {
                struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
                *pmac_id = le32_to_cpu(resp->pmac_id);
        }

        spin_unlock(&ctrl->mbox_lock);
        return status;
}

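/* Remove the MAC address identified by pmac_id from an interface */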
int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_pmac_del *req = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

        req->if_id = cpu_to_le32(if_id);
        req->pmac_id = cpu_to_le32(pmac_id);

        status = be_mbox_db_ring(ctrl);
        spin_unlock(&ctrl->mbox_lock);

        return status;
}

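/* Create a completion queue bound to the given event queue, with the
 * requested coalescing watermark and delay behaviour. */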
int be_cmd_cq_create(struct be_ctrl_info *ctrl,
                struct be_queue_info *cq, struct be_queue_info *eq,
                bool sol_evts, bool no_delay, int coalesce_wm)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_cq_create *req = embedded_payload(wrb);
        struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &cq->dma_mem;
        void *ctxt = &req->context;
        int status;

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_CQ_CREATE, sizeof(*req));

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
        AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
        AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
                        __ilog2_u32(cq->len/256));
        AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
        AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
        AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, func, ctxt, ctrl->pci_func);
        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_db_ring(ctrl);
        if (!status) {
                cq->id = le16_to_cpu(resp->cq_id);
                cq->created = true;
        }
        spin_unlock(&ctrl->mbox_lock);

        return status;
}

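/* Ring lengths are encoded as fls(len), i.e. log2(len) + 1 for a power
 * of two; an encoded value of 16 wraps to 0. */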
static u32 be_encoded_q_len(int q_len)
{
        u32 len_encoded = fls(q_len); /* log2(len) + 1 */
        if (len_encoded == 16)
                len_encoded = 0;
        return len_encoded;
}

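/* Create the MCC queue on top of an already-created completion queue */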
int be_cmd_mccq_create(struct be_ctrl_info *ctrl,
                        struct be_queue_info *mccq,
                        struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_mcc_create *req = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &mccq->dma_mem;
        void *ctxt = &req->context;
        int status;

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_MCC_CREATE, sizeof(*req));

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

        AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, ctrl->pci_func);
        AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
                be_encoded_q_len(mccq->len));
        AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_db_ring(ctrl);
        if (!status) {
                struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
                mccq->id = le16_to_cpu(resp->id);
                mccq->created = true;
        }
        spin_unlock(&ctrl->mbox_lock);

        return status;
}

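/* Create an ethernet TX queue whose send completions are posted to cq */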
int be_cmd_txq_create(struct be_ctrl_info *ctrl,
                        struct be_queue_info *txq,
                        struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_eth_tx_create *req = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &txq->dma_mem;
        void *ctxt = &req->context;
        int status;

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
                sizeof(*req));

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        req->ulp_num = BE_ULP1_NUM;
        req->type = BE_ETH_TX_RING_TYPE_STANDARD;

        AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
                        be_encoded_q_len(txq->len));
        AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
                        ctrl->pci_func);
        AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_db_ring(ctrl);
        if (!status) {
                struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
                txq->id = le16_to_cpu(resp->cid);
                txq->created = true;
        }
        spin_unlock(&ctrl->mbox_lock);

        return status;
}

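/* Create an ethernet RX queue: frag_size (a power of two) is programmed
 * as its log2, and completions are directed to cq_id. */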
int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
                struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
                u16 max_frame_size, u32 if_id, u32 rss)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_eth_rx_create *req = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &rxq->dma_mem;
        int status;

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
                sizeof(*req));

        req->cq_id = cpu_to_le16(cq_id);
        req->frag_size = fls(frag_size) - 1;
        req->num_pages = 2;
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
        req->interface_id = cpu_to_le32(if_id);
        req->max_frame_size = cpu_to_le16(max_frame_size);
        req->rss_queue = cpu_to_le32(rss);

        status = be_mbox_db_ring(ctrl);
        if (!status) {
                struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
                rxq->id = le16_to_cpu(resp->id);
                rxq->created = true;
        }
        spin_unlock(&ctrl->mbox_lock);

        return status;
}

/* Generic destroy function for all types of queues */
int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
                int queue_type)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
        u8 subsys = 0, opcode = 0;
        int status;

        spin_lock(&ctrl->mbox_lock);

        memset(wrb, 0, sizeof(*wrb));
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        switch (queue_type) {
        case QTYPE_EQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_EQ_DESTROY;
                break;
        case QTYPE_CQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_CQ_DESTROY;
                break;
        case QTYPE_TXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_TX_DESTROY;
                break;
        case QTYPE_RXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_RX_DESTROY;
                break;
        case QTYPE_MCCQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_MCC_DESTROY;
                break;
        default:
                printk(KERN_WARNING DRV_NAME ": bad Q type in Q destroy cmd\n");
                status = -1;
                goto err;
        }
        be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
        req->id = cpu_to_le16(q->id);

        status = be_mbox_db_ring(ctrl);
err:
        spin_unlock(&ctrl->mbox_lock);

        return status;
}

/* Create an RX filtering policy configuration on an interface */
int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac,
                bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_if_create *req = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

        req->capability_flags = cpu_to_le32(flags);
        req->enable_flags = cpu_to_le32(flags);
        if (!pmac_invalid)
                memcpy(req->mac_addr, mac, ETH_ALEN);

        status = be_mbox_db_ring(ctrl);
        if (!status) {
                struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
                *if_handle = le32_to_cpu(resp->interface_id);
                if (!pmac_invalid)
                        *pmac_id = le32_to_cpu(resp->pmac_id);
        }

        spin_unlock(&ctrl->mbox_lock);
        return status;
}

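/* Tear down an interface created by be_cmd_if_create() */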
int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_if_destroy *req = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

        req->interface_id = cpu_to_le32(interface_id);
        status = be_mbox_db_ring(ctrl);

        spin_unlock(&ctrl->mbox_lock);

        return status;
}

/* Get-stats is a non-embedded command: the request is not carried inside
 * the WRB but in a separate DMA memory block, described to hardware by an SGE.
 */
int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_get_stats *req = nonemb_cmd->va;
        struct be_sge *sge = nonembedded_sgl(wrb);
        int status;

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        memset(req, 0, sizeof(*req));

        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_GET_STATISTICS, sizeof(*req));
        sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
        sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(nonemb_cmd->size);

        status = be_mbox_db_ring(ctrl);
        if (!status) {
                struct be_cmd_resp_get_stats *resp = nonemb_cmd->va;
                be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats));
        }

        spin_unlock(&ctrl->mbox_lock);
        return status;
}

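/* Query link state; *link_up is set when the reported MAC speed is
 * non-zero. */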
int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
                        bool *link_up)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_link_status *req = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->mbox_lock);

        *link_up = false;
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

        status = be_mbox_db_ring(ctrl);
        if (!status) {
                struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
                if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
                        *link_up = true;
        }

        spin_unlock(&ctrl->mbox_lock);
        return status;
}

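/* Copy the firmware version string (up to FW_VER_LEN bytes) into fw_ver */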
int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_get_fw_version *req = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

        status = be_mbox_db_ring(ctrl);
        if (!status) {
                struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
                strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
        }

        spin_unlock(&ctrl->mbox_lock);
        return status;
}

/* Set the EQ delay interval of an EQ to the specified value */
int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

        req->num_eq = cpu_to_le32(1);
        req->delay[0].eq_id = cpu_to_le32(eq_id);
        req->delay[0].phase = 0;
        req->delay[0].delay_multiplier = cpu_to_le32(eqd);

        status = be_mbox_db_ring(ctrl);

        spin_unlock(&ctrl->mbox_lock);
        return status;
}

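/* Configure an interface's VLAN filter: program the tag array, or accept
 * all VLANs when promiscuous is set. */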
int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array,
                        u32 num, bool untagged, bool promiscuous)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_vlan_config *req = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

        req->interface_id = if_id;
        req->promiscuous = promiscuous;
        req->untagged = untagged;
        req->num_vlan = num;
        if (!promiscuous) {
                memcpy(req->normal_vlan, vtag_array,
                        req->num_vlan * sizeof(vtag_array[0]));
        }

        status = be_mbox_db_ring(ctrl);

        spin_unlock(&ctrl->mbox_lock);
        return status;
}

/* Use MCC for this command as it may be called in BH context */
int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_promiscuous_config *req;

        spin_lock_bh(&ctrl->mcc_lock);

        wrb = wrb_from_mcc(&ctrl->mcc_obj.q);
        BUG_ON(!wrb);

        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_PROMISCUOUS, sizeof(*req));

        if (port_num)
                req->port1_promiscuous = en;
        else
                req->port0_promiscuous = en;

        be_mcc_notify_wait(ctrl);

        spin_unlock_bh(&ctrl->mcc_lock);
        return 0;
}

/*
 * Use MCC for this command as it may be called in BH context
 * (mc_list == NULL) => multicast promiscuous
 */
int be_cmd_multicast_set(struct be_ctrl_info *ctrl, u32 if_id,
                struct dev_mc_list *mc_list, u32 mc_count)
{
#define BE_MAX_MC               32 /* set mcast promisc if > 32 */
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcast_mac_config *req;

        spin_lock_bh(&ctrl->mcc_lock);

        wrb = wrb_from_mcc(&ctrl->mcc_obj.q);
        BUG_ON(!wrb);

        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

        req->interface_id = if_id;
        if (mc_list && mc_count <= BE_MAX_MC) {
                int i;
                struct dev_mc_list *mc;

                req->num_mac = cpu_to_le16(mc_count);

                for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
                        memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
        } else {
                req->promiscuous = 1;
        }

        be_mcc_notify_wait(ctrl);

        spin_unlock_bh(&ctrl->mcc_lock);

        return 0;
}

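/* Set the adapter's TX and RX pause-frame (flow control) settings */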
int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_set_flow_control *req = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->mbox_lock);

        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

        req->tx_flow_control = cpu_to_le16((u16)tx_fc);
        req->rx_flow_control = cpu_to_le16((u16)rx_fc);

        status = be_mbox_db_ring(ctrl);

        spin_unlock(&ctrl->mbox_lock);
        return status;
}

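/* Query the adapter's current TX and RX flow control settings */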
int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_get_flow_control *req = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->mbox_lock);

        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

        status = be_mbox_db_ring(ctrl);
        if (!status) {
                struct be_cmd_resp_get_flow_control *resp =
                                                embedded_payload(wrb);
                *tx_fc = le16_to_cpu(resp->tx_flow_control);
                *rx_fc = le16_to_cpu(resp->rx_flow_control);
        }

        spin_unlock(&ctrl->mbox_lock);
        return status;
}

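/* Query the firmware config; used here to retrieve the physical port number */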
int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->mbox_lock);

        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

        status = be_mbox_db_ring(ctrl);
        if (!status) {
                struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
                *port_num = le32_to_cpu(resp->phys_port);
        }

        spin_unlock(&ctrl->mbox_lock);
        return status;
}