/* bnx2x: VF RSS support - PF side
 * drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
 */
1 /* bnx2x_sp.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2011-2013 Broadcom Corporation
4  *
5  * Unless you and Broadcom execute a separate written software license
6  * agreement governing use of this software, this software is licensed to you
7  * under the terms of the GNU General Public License version 2, available
8  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9  *
10  * Notwithstanding the above, under no circumstances may you combine this
11  * software in any way with any other Broadcom software provided under a
12  * license other than the GPL, without Broadcom's express prior written
13  * consent.
14  *
15  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16  * Written by: Vladislav Zolotarov
17  *
18  */
19
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22 #include <linux/module.h>
23 #include <linux/crc32.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/crc32c.h>
27 #include "bnx2x.h"
28 #include "bnx2x_cmn.h"
29 #include "bnx2x_sp.h"
30
31 #define BNX2X_MAX_EMUL_MULTI            16
32
33 /**** Exe Queue interfaces ****/
34
35 /**
36  * bnx2x_exe_queue_init - init the Exe Queue object
37  *
38  * @o:          pointer to the object
39  * @exe_len:    length
40  * @owner:      pointer to the owner
41  * @validate:   validate function pointer
42  * @optimize:   optimize function pointer
43  * @exec:       execute function pointer
44  * @get:        get function pointer
45  */
46 static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
47                                         struct bnx2x_exe_queue_obj *o,
48                                         int exe_len,
49                                         union bnx2x_qable_obj *owner,
50                                         exe_q_validate validate,
51                                         exe_q_remove remove,
52                                         exe_q_optimize optimize,
53                                         exe_q_execute exec,
54                                         exe_q_get get)
55 {
56         memset(o, 0, sizeof(*o));
57
58         INIT_LIST_HEAD(&o->exe_queue);
59         INIT_LIST_HEAD(&o->pending_comp);
60
61         spin_lock_init(&o->lock);
62
63         o->exe_chunk_len = exe_len;
64         o->owner         = owner;
65
66         /* Owner specific callbacks */
67         o->validate      = validate;
68         o->remove        = remove;
69         o->optimize      = optimize;
70         o->execute       = exec;
71         o->get           = get;
72
73         DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
74            exe_len);
75 }
76
/* Free a single execution queue element (counterpart of
 * bnx2x_exe_queue_alloc_elem()).
 */
static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
                                             struct bnx2x_exeq_elem *elem)
{
        DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
        kfree(elem);
}
83
84 static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
85 {
86         struct bnx2x_exeq_elem *elem;
87         int cnt = 0;
88
89         spin_lock_bh(&o->lock);
90
91         list_for_each_entry(elem, &o->exe_queue, link)
92                 cnt++;
93
94         spin_unlock_bh(&o->lock);
95
96         return cnt;
97 }
98
99 /**
100  * bnx2x_exe_queue_add - add a new element to the execution queue
101  *
102  * @bp:         driver handle
103  * @o:          queue
104  * @cmd:        new command to add
105  * @restore:    true - do not optimize the command
106  *
107  * If the element is optimized or is illegal, frees it.
108  */
109 static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
110                                       struct bnx2x_exe_queue_obj *o,
111                                       struct bnx2x_exeq_elem *elem,
112                                       bool restore)
113 {
114         int rc;
115
116         spin_lock_bh(&o->lock);
117
118         if (!restore) {
119                 /* Try to cancel this element queue */
120                 rc = o->optimize(bp, o->owner, elem);
121                 if (rc)
122                         goto free_and_exit;
123
124                 /* Check if this request is ok */
125                 rc = o->validate(bp, o->owner, elem);
126                 if (rc) {
127                         DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
128                         goto free_and_exit;
129                 }
130         }
131
132         /* If so, add it to the execution queue */
133         list_add_tail(&elem->link, &o->exe_queue);
134
135         spin_unlock_bh(&o->lock);
136
137         return 0;
138
139 free_and_exit:
140         bnx2x_exe_queue_free_elem(bp, elem);
141
142         spin_unlock_bh(&o->lock);
143
144         return rc;
145 }
146
147 static inline void __bnx2x_exe_queue_reset_pending(
148         struct bnx2x *bp,
149         struct bnx2x_exe_queue_obj *o)
150 {
151         struct bnx2x_exeq_elem *elem;
152
153         while (!list_empty(&o->pending_comp)) {
154                 elem = list_first_entry(&o->pending_comp,
155                                         struct bnx2x_exeq_elem, link);
156
157                 list_del(&elem->link);
158                 bnx2x_exe_queue_free_elem(bp, elem);
159         }
160 }
161
162 /**
163  * bnx2x_exe_queue_step - execute one execution chunk atomically
164  *
165  * @bp:                 driver handle
166  * @o:                  queue
167  * @ramrod_flags:       flags
168  *
169  * (Should be called while holding the exe_queue->lock).
170  */
171 static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
172                                        struct bnx2x_exe_queue_obj *o,
173                                        unsigned long *ramrod_flags)
174 {
175         struct bnx2x_exeq_elem *elem, spacer;
176         int cur_len = 0, rc;
177
178         memset(&spacer, 0, sizeof(spacer));
179
180         /* Next step should not be performed until the current is finished,
181          * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
182          * properly clear object internals without sending any command to the FW
183          * which also implies there won't be any completion to clear the
184          * 'pending' list.
185          */
186         if (!list_empty(&o->pending_comp)) {
187                 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
188                         DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
189                         __bnx2x_exe_queue_reset_pending(bp, o);
190                 } else {
191                         return 1;
192                 }
193         }
194
195         /* Run through the pending commands list and create a next
196          * execution chunk.
197          */
198         while (!list_empty(&o->exe_queue)) {
199                 elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
200                                         link);
201                 WARN_ON(!elem->cmd_len);
202
203                 if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
204                         cur_len += elem->cmd_len;
205                         /* Prevent from both lists being empty when moving an
206                          * element. This will allow the call of
207                          * bnx2x_exe_queue_empty() without locking.
208                          */
209                         list_add_tail(&spacer.link, &o->pending_comp);
210                         mb();
211                         list_move_tail(&elem->link, &o->pending_comp);
212                         list_del(&spacer.link);
213                 } else
214                         break;
215         }
216
217         /* Sanity check */
218         if (!cur_len)
219                 return 0;
220
221         rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
222         if (rc < 0)
223                 /* In case of an error return the commands back to the queue
224                  * and reset the pending_comp.
225                  */
226                 list_splice_init(&o->pending_comp, &o->exe_queue);
227         else if (!rc)
228                 /* If zero is returned, means there are no outstanding pending
229                  * completions and we may dismiss the pending list.
230                  */
231                 __bnx2x_exe_queue_reset_pending(bp, o);
232
233         return rc;
234 }
235
236 static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
237 {
238         bool empty = list_empty(&o->exe_queue);
239
240         /* Don't reorder!!! */
241         mb();
242
243         return empty && list_empty(&o->pending_comp);
244 }
245
246 static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
247         struct bnx2x *bp)
248 {
249         DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
250         return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
251 }
252
253 /************************ raw_obj functions ***********************************/
254 static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
255 {
256         return !!test_bit(o->state, o->pstate);
257 }
258
/* Clear the object's pending bit with full barriers around the bit-op so
 * the change is ordered against surrounding accesses on other CPUs.
 */
static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
        smp_mb__before_clear_bit();
        clear_bit(o->state, o->pstate);
        smp_mb__after_clear_bit();
}
265
/* Set the object's pending bit.
 * NOTE(review): the *_clear_bit() barrier helpers are used around set_bit()
 * here - presumably equivalent to full barriers on this kernel; confirm
 * against the era's smp_mb__before/after_clear_bit() semantics.
 */
static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
        smp_mb__before_clear_bit();
        set_bit(o->state, o->pstate);
        smp_mb__after_clear_bit();
}
272
273 /**
274  * bnx2x_state_wait - wait until the given bit(state) is cleared
275  *
276  * @bp:         device handle
277  * @state:      state which is to be cleared
278  * @state_p:    state buffer
279  *
280  */
281 static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
282                                    unsigned long *pstate)
283 {
284         /* can take a while if any port is running */
285         int cnt = 5000;
286
287         if (CHIP_REV_IS_EMUL(bp))
288                 cnt *= 20;
289
290         DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
291
292         might_sleep();
293         while (cnt--) {
294                 if (!test_bit(state, pstate)) {
295 #ifdef BNX2X_STOP_ON_ERROR
296                         DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
297 #endif
298                         return 0;
299                 }
300
301                 usleep_range(1000, 2000);
302
303                 if (bp->panic)
304                         return -EIO;
305         }
306
307         /* timeout! */
308         BNX2X_ERR("timeout waiting for state %d\n", state);
309 #ifdef BNX2X_STOP_ON_ERROR
310         bnx2x_panic();
311 #endif
312
313         return -EBUSY;
314 }
315
/* Wait for the raw object's pending bit to clear (see bnx2x_state_wait()). */
static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
        return bnx2x_state_wait(bp, raw->state, raw->pstate);
}
320
321 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
322 /* credit handling callbacks */
323 static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
324 {
325         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
326
327         WARN_ON(!mp);
328
329         return mp->get_entry(mp, offset);
330 }
331
332 static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
333 {
334         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
335
336         WARN_ON(!mp);
337
338         return mp->get(mp, 1);
339 }
340
341 static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
342 {
343         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
344
345         WARN_ON(!vp);
346
347         return vp->get_entry(vp, offset);
348 }
349
350 static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
351 {
352         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
353
354         WARN_ON(!vp);
355
356         return vp->get(vp, 1);
357 }
358
359 static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
360 {
361         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
362         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
363
364         if (!mp->get(mp, 1))
365                 return false;
366
367         if (!vp->get(vp, 1)) {
368                 mp->put(mp, 1);
369                 return false;
370         }
371
372         return true;
373 }
374
375 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
376 {
377         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
378
379         return mp->put_entry(mp, offset);
380 }
381
382 static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
383 {
384         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
385
386         return mp->put(mp, 1);
387 }
388
389 static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
390 {
391         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
392
393         return vp->put_entry(vp, offset);
394 }
395
396 static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
397 {
398         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
399
400         return vp->put(vp, 1);
401 }
402
403 static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
404 {
405         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
406         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
407
408         if (!mp->put(mp, 1))
409                 return false;
410
411         if (!vp->put(vp, 1)) {
412                 mp->get(mp, 1);
413                 return false;
414         }
415
416         return true;
417 }
418
419 /**
420  * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
421  *
422  * @bp:         device handle
423  * @o:          vlan_mac object
424  *
425  * @details: Non-blocking implementation; should be called under execution
426  *           queue lock.
427  */
428 static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
429                                             struct bnx2x_vlan_mac_obj *o)
430 {
431         if (o->head_reader) {
432                 DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n");
433                 return -EBUSY;
434         }
435
436         DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n");
437         return 0;
438 }
439
440 /**
441  * __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step
442  *
443  * @bp:         device handle
444  * @o:          vlan_mac object
445  *
446  * @details Should be called under execution queue lock; notice it might release
447  *          and reclaim it during its run.
448  */
449 static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
450                                             struct bnx2x_vlan_mac_obj *o)
451 {
452         int rc;
453         unsigned long ramrod_flags = o->saved_ramrod_flags;
454
455         DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
456            ramrod_flags);
457         o->head_exe_request = false;
458         o->saved_ramrod_flags = 0;
459         rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
460         if (rc != 0) {
461                 BNX2X_ERR("execution of pending commands failed with rc %d\n",
462                           rc);
463 #ifdef BNX2X_STOP_ON_ERROR
464                 bnx2x_panic();
465 #endif
466         }
467 }
468
469 /**
470  * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run
471  *
472  * @bp:                 device handle
473  * @o:                  vlan_mac object
474  * @ramrod_flags:       ramrod flags of missed execution
475  *
476  * @details Should be called under execution queue lock.
477  */
478 static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
479                                     struct bnx2x_vlan_mac_obj *o,
480                                     unsigned long ramrod_flags)
481 {
482         o->head_exe_request = true;
483         o->saved_ramrod_flags = ramrod_flags;
484         DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
485            ramrod_flags);
486 }
487
488 /**
489  * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
490  *
491  * @bp:                 device handle
492  * @o:                  vlan_mac object
493  *
494  * @details Should be called under execution queue lock. Notice if a pending
495  *          execution exists, it would perform it - possibly releasing and
496  *          reclaiming the execution queue lock.
497  */
498 static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
499                                             struct bnx2x_vlan_mac_obj *o)
500 {
501         /* It's possible a new pending execution was added since this writer
502          * executed. If so, execute again. [Ad infinitum]
503          */
504         while (o->head_exe_request) {
505                 DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
506                 __bnx2x_vlan_mac_h_exec_pending(bp, o);
507         }
508 }
509
510 /**
511  * bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
512  *
513  * @bp:                 device handle
514  * @o:                  vlan_mac object
515  *
516  * @details Notice if a pending execution exists, it would perform it -
517  *          possibly releasing and reclaiming the execution queue lock.
518  */
519 void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
520                                    struct bnx2x_vlan_mac_obj *o)
521 {
522         spin_lock_bh(&o->exe_queue.lock);
523         __bnx2x_vlan_mac_h_write_unlock(bp, o);
524         spin_unlock_bh(&o->exe_queue.lock);
525 }
526
527 /**
528  * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
529  *
530  * @bp:                 device handle
531  * @o:                  vlan_mac object
532  *
533  * @details Should be called under the execution queue lock. May sleep. May
534  *          release and reclaim execution queue lock during its run.
535  */
536 static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
537                                         struct bnx2x_vlan_mac_obj *o)
538 {
539         /* If we got here, we're holding lock --> no WRITER exists */
540         o->head_reader++;
541         DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
542            o->head_reader);
543
544         return 0;
545 }
546
547 /**
548  * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
549  *
550  * @bp:                 device handle
551  * @o:                  vlan_mac object
552  *
553  * @details May sleep. Claims and releases execution queue lock during its run.
554  */
555 int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
556                                struct bnx2x_vlan_mac_obj *o)
557 {
558         int rc;
559
560         spin_lock_bh(&o->exe_queue.lock);
561         rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
562         spin_unlock_bh(&o->exe_queue.lock);
563
564         return rc;
565 }
566
567 /**
568  * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
569  *
570  * @bp:                 device handle
571  * @o:                  vlan_mac object
572  *
573  * @details Should be called under execution queue lock. Notice if a pending
574  *          execution exists, it would be performed if this was the last
575  *          reader. possibly releasing and reclaiming the execution queue lock.
576  */
577 static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
578                                           struct bnx2x_vlan_mac_obj *o)
579 {
580         if (!o->head_reader) {
581                 BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
582 #ifdef BNX2X_STOP_ON_ERROR
583                 bnx2x_panic();
584 #endif
585         } else {
586                 o->head_reader--;
587                 DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
588                    o->head_reader);
589         }
590
591         /* It's possible a new pending execution was added, and that this reader
592          * was last - if so we need to execute the command.
593          */
594         if (!o->head_reader && o->head_exe_request) {
595                 DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");
596
597                 /* Writer release will do the trick */
598                 __bnx2x_vlan_mac_h_write_unlock(bp, o);
599         }
600 }
601
602 /**
603  * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
604  *
605  * @bp:                 device handle
606  * @o:                  vlan_mac object
607  *
608  * @details Notice if a pending execution exists, it would be performed if this
609  *          was the last reader. Claims and releases the execution queue lock
610  *          during its run.
611  */
612 void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
613                                   struct bnx2x_vlan_mac_obj *o)
614 {
615         spin_lock_bh(&o->exe_queue.lock);
616         __bnx2x_vlan_mac_h_read_unlock(bp, o);
617         spin_unlock_bh(&o->exe_queue.lock);
618 }
619
/* Copy up to @n registered classification entries into a flat buffer.
 *
 * @base:   destination buffer
 * @stride: extra bytes skipped between consecutive entries
 * @size:   bytes copied from each registry element
 *
 * Returns the number of copied elements times ETH_ALEN.
 * NOTE(review): the return value is counter * ETH_ALEN regardless of @size -
 * confirm callers only depend on it for MAC-sized entries.
 */
static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
                                int n, u8 *base, u8 stride, u8 size)
{
        struct bnx2x_vlan_mac_registry_elem *pos;
        u8 *next = base;
        int counter = 0;
        int read_lock;

        DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
        read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
        if (read_lock != 0)
                BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");

        /* traverse list */
        list_for_each_entry(pos, &o->head, link) {
                if (counter < n) {
                        memcpy(next, &pos->u, size);
                        counter++;
                        DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
                           counter, next);
                        next += stride + size;
                }
        }

        /* Only unlock if the reader lock was actually obtained above */
        if (read_lock == 0) {
                DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
                bnx2x_vlan_mac_h_read_unlock(bp, o);
        }

        return counter * ETH_ALEN;
}
651
652 /* check_add() callbacks */
653 static int bnx2x_check_mac_add(struct bnx2x *bp,
654                                struct bnx2x_vlan_mac_obj *o,
655                                union bnx2x_classification_ramrod_data *data)
656 {
657         struct bnx2x_vlan_mac_registry_elem *pos;
658
659         DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);
660
661         if (!is_valid_ether_addr(data->mac.mac))
662                 return -EINVAL;
663
664         /* Check if a requested MAC already exists */
665         list_for_each_entry(pos, &o->head, link)
666                 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
667                     (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
668                         return -EEXIST;
669
670         return 0;
671 }
672
673 static int bnx2x_check_vlan_add(struct bnx2x *bp,
674                                 struct bnx2x_vlan_mac_obj *o,
675                                 union bnx2x_classification_ramrod_data *data)
676 {
677         struct bnx2x_vlan_mac_registry_elem *pos;
678
679         DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
680
681         list_for_each_entry(pos, &o->head, link)
682                 if (data->vlan.vlan == pos->u.vlan.vlan)
683                         return -EEXIST;
684
685         return 0;
686 }
687
688 static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
689                                     struct bnx2x_vlan_mac_obj *o,
690                                    union bnx2x_classification_ramrod_data *data)
691 {
692         struct bnx2x_vlan_mac_registry_elem *pos;
693
694         DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
695            data->vlan_mac.mac, data->vlan_mac.vlan);
696
697         list_for_each_entry(pos, &o->head, link)
698                 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
699                     (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
700                                   ETH_ALEN)) &&
701                     (data->vlan_mac.is_inner_mac ==
702                      pos->u.vlan_mac.is_inner_mac))
703                         return -EEXIST;
704
705         return 0;
706 }
707
708 /* check_del() callbacks */
709 static struct bnx2x_vlan_mac_registry_elem *
710         bnx2x_check_mac_del(struct bnx2x *bp,
711                             struct bnx2x_vlan_mac_obj *o,
712                             union bnx2x_classification_ramrod_data *data)
713 {
714         struct bnx2x_vlan_mac_registry_elem *pos;
715
716         DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
717
718         list_for_each_entry(pos, &o->head, link)
719                 if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
720                     (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
721                         return pos;
722
723         return NULL;
724 }
725
726 static struct bnx2x_vlan_mac_registry_elem *
727         bnx2x_check_vlan_del(struct bnx2x *bp,
728                              struct bnx2x_vlan_mac_obj *o,
729                              union bnx2x_classification_ramrod_data *data)
730 {
731         struct bnx2x_vlan_mac_registry_elem *pos;
732
733         DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
734
735         list_for_each_entry(pos, &o->head, link)
736                 if (data->vlan.vlan == pos->u.vlan.vlan)
737                         return pos;
738
739         return NULL;
740 }
741
742 static struct bnx2x_vlan_mac_registry_elem *
743         bnx2x_check_vlan_mac_del(struct bnx2x *bp,
744                                  struct bnx2x_vlan_mac_obj *o,
745                                  union bnx2x_classification_ramrod_data *data)
746 {
747         struct bnx2x_vlan_mac_registry_elem *pos;
748
749         DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
750            data->vlan_mac.mac, data->vlan_mac.vlan);
751
752         list_for_each_entry(pos, &o->head, link)
753                 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
754                     (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
755                              ETH_ALEN)) &&
756                     (data->vlan_mac.is_inner_mac ==
757                      pos->u.vlan_mac.is_inner_mac))
758                         return pos;
759
760         return NULL;
761 }
762
763 /* check_move() callback */
764 static bool bnx2x_check_move(struct bnx2x *bp,
765                              struct bnx2x_vlan_mac_obj *src_o,
766                              struct bnx2x_vlan_mac_obj *dst_o,
767                              union bnx2x_classification_ramrod_data *data)
768 {
769         struct bnx2x_vlan_mac_registry_elem *pos;
770         int rc;
771
772         /* Check if we can delete the requested configuration from the first
773          * object.
774          */
775         pos = src_o->check_del(bp, src_o, data);
776
777         /*  check if configuration can be added */
778         rc = dst_o->check_add(bp, dst_o, data);
779
780         /* If this classification can not be added (is already set)
781          * or can't be deleted - return an error.
782          */
783         if (rc || !pos)
784                 return false;
785
786         return true;
787 }
788
/* check_move() callback that unconditionally rejects the move -
 * presumably installed for objects that don't support MOVE commands.
 */
static bool bnx2x_check_move_always_err(
        struct bnx2x *bp,
        struct bnx2x_vlan_mac_obj *src_o,
        struct bnx2x_vlan_mac_obj *dst_o,
        union bnx2x_classification_ramrod_data *data)
{
        return false;
}
797
798 static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
799 {
800         struct bnx2x_raw_obj *raw = &o->raw;
801         u8 rx_tx_flag = 0;
802
803         if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
804             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
805                 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
806
807         if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
808             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
809                 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
810
811         return rx_tx_flag;
812 }
813
/**
 * bnx2x_set_mac_in_nig - program a MAC into the NIG LLH CAM
 *
 * @bp:         device handle
 * @add:        true to write and enable the entry, false to only disable it
 * @dev_addr:   MAC address to program (used only when @add)
 * @index:      LLH CAM line
 *
 * Only takes effect in SI/AFEX multi-function modes; silently returns
 * otherwise or for out-of-range CAM lines.
 */
void bnx2x_set_mac_in_nig(struct bnx2x *bp,
                          bool add, unsigned char *dev_addr, int index)
{
        u32 wb_data[2];
        u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
                         NIG_REG_LLH0_FUNC_MEM;

        if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
                return;

        if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
                return;

        DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
                         (add ? "ADD" : "DELETE"), index);

        if (add) {
                /* LLH_FUNC_MEM is a u64 WB register */
                reg_offset += 8*index;

                /* Pack the 6-byte MAC into two 32-bit words */
                wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
                              (dev_addr[4] <<  8) |  dev_addr[5]);
                wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);

                REG_WR_DMAE(bp, reg_offset, wb_data, 2);
        }

        /* Enable (add) or disable (delete) the CAM entry */
        REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
                                  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}
844
845 /**
846  * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
847  *
848  * @bp:         device handle
849  * @o:          queue for which we want to configure this rule
850  * @add:        if true the command is an ADD command, DEL otherwise
851  * @opcode:     CLASSIFY_RULE_OPCODE_XXX
852  * @hdr:        pointer to a header to setup
853  *
854  */
855 static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
856         struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
857         struct eth_classify_cmd_header *hdr)
858 {
859         struct bnx2x_raw_obj *raw = &o->raw;
860
861         hdr->client_id = raw->cl_id;
862         hdr->func_id = raw->func_id;
863
864         /* Rx or/and Tx (internal switching) configuration ? */
865         hdr->cmd_general_data |=
866                 bnx2x_vlan_mac_get_rx_tx_flag(o);
867
868         if (add)
869                 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
870
871         hdr->cmd_general_data |=
872                 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
873 }
874
875 /**
876  * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
877  *
878  * @cid:        connection id
879  * @type:       BNX2X_FILTER_XXX_PENDING
880  * @hdr:        pointer to header to setup
881  * @rule_cnt:
882  *
883  * currently we always configure one rule and echo field to contain a CID and an
884  * opcode type.
885  */
886 static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
887                                 struct eth_classify_header *hdr, int rule_cnt)
888 {
889         hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
890                                 (type << BNX2X_SWCID_SHIFT));
891         hdr->rule_cnt = (u8)rule_cnt;
892 }
893
894 /* hw_config() callbacks */
895 static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
896                                  struct bnx2x_vlan_mac_obj *o,
897                                  struct bnx2x_exeq_elem *elem, int rule_idx,
898                                  int cam_offset)
899 {
900         struct bnx2x_raw_obj *raw = &o->raw;
901         struct eth_classify_rules_ramrod_data *data =
902                 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
903         int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
904         union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
905         bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
906         unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
907         u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
908
909         /* Set LLH CAM entry: currently only iSCSI and ETH macs are
910          * relevant. In addition, current implementation is tuned for a
911          * single ETH MAC.
912          *
913          * When multiple unicast ETH MACs PF configuration in switch
914          * independent mode is required (NetQ, multiple netdev MACs,
915          * etc.), consider better utilisation of 8 per function MAC
916          * entries in the LLH register. There is also
917          * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
918          * total number of CAM entries to 16.
919          *
920          * Currently we won't configure NIG for MACs other than a primary ETH
921          * MAC and iSCSI L2 MAC.
922          *
923          * If this MAC is moving from one Queue to another, no need to change
924          * NIG configuration.
925          */
926         if (cmd != BNX2X_VLAN_MAC_MOVE) {
927                 if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
928                         bnx2x_set_mac_in_nig(bp, add, mac,
929                                              BNX2X_LLH_CAM_ISCSI_ETH_LINE);
930                 else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
931                         bnx2x_set_mac_in_nig(bp, add, mac,
932                                              BNX2X_LLH_CAM_ETH_LINE);
933         }
934
935         /* Reset the ramrod data buffer for the first rule */
936         if (rule_idx == 0)
937                 memset(data, 0, sizeof(*data));
938
939         /* Setup a command header */
940         bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
941                                       &rule_entry->mac.header);
942
943         DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
944            (add ? "add" : "delete"), mac, raw->cl_id);
945
946         /* Set a MAC itself */
947         bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
948                               &rule_entry->mac.mac_mid,
949                               &rule_entry->mac.mac_lsb, mac);
950         rule_entry->mac.inner_mac =
951                 cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
952
953         /* MOVE: Add a rule that will add this MAC to the target Queue */
954         if (cmd == BNX2X_VLAN_MAC_MOVE) {
955                 rule_entry++;
956                 rule_cnt++;
957
958                 /* Setup ramrod data */
959                 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
960                                         elem->cmd_data.vlan_mac.target_obj,
961                                               true, CLASSIFY_RULE_OPCODE_MAC,
962                                               &rule_entry->mac.header);
963
964                 /* Set a MAC itself */
965                 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
966                                       &rule_entry->mac.mac_mid,
967                                       &rule_entry->mac.mac_lsb, mac);
968                 rule_entry->mac.inner_mac =
969                         cpu_to_le16(elem->cmd_data.vlan_mac.
970                                                 u.mac.is_inner_mac);
971         }
972
973         /* Set the ramrod data header */
974         /* TODO: take this to the higher level in order to prevent multiple
975                  writing */
976         bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
977                                         rule_cnt);
978 }
979
980 /**
981  * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
982  *
983  * @bp:         device handle
984  * @o:          queue
985  * @type:
986  * @cam_offset: offset in cam memory
987  * @hdr:        pointer to a header to setup
988  *
989  * E1/E1H
990  */
991 static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
992         struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
993         struct mac_configuration_hdr *hdr)
994 {
995         struct bnx2x_raw_obj *r = &o->raw;
996
997         hdr->length = 1;
998         hdr->offset = (u8)cam_offset;
999         hdr->client_id = cpu_to_le16(0xff);
1000         hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
1001                                 (type << BNX2X_SWCID_SHIFT));
1002 }
1003
1004 static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
1005         struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
1006         u16 vlan_id, struct mac_configuration_entry *cfg_entry)
1007 {
1008         struct bnx2x_raw_obj *r = &o->raw;
1009         u32 cl_bit_vec = (1 << r->cl_id);
1010
1011         cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
1012         cfg_entry->pf_id = r->func_id;
1013         cfg_entry->vlan_id = cpu_to_le16(vlan_id);
1014
1015         if (add) {
1016                 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
1017                          T_ETH_MAC_COMMAND_SET);
1018                 SET_FLAG(cfg_entry->flags,
1019                          MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
1020
1021                 /* Set a MAC in a ramrod data */
1022                 bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
1023                                       &cfg_entry->middle_mac_addr,
1024                                       &cfg_entry->lsb_mac_addr, mac);
1025         } else
1026                 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
1027                          T_ETH_MAC_COMMAND_INVALIDATE);
1028 }
1029
1030 static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
1031         struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
1032         u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
1033 {
1034         struct mac_configuration_entry *cfg_entry = &config->config_table[0];
1035         struct bnx2x_raw_obj *raw = &o->raw;
1036
1037         bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
1038                                          &config->hdr);
1039         bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
1040                                          cfg_entry);
1041
1042         DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
1043                          (add ? "setting" : "clearing"),
1044                          mac, raw->cl_id, cam_offset);
1045 }
1046
1047 /**
1048  * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
1049  *
1050  * @bp:         device handle
1051  * @o:          bnx2x_vlan_mac_obj
1052  * @elem:       bnx2x_exeq_elem
1053  * @rule_idx:   rule_idx
1054  * @cam_offset: cam_offset
1055  */
1056 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
1057                                   struct bnx2x_vlan_mac_obj *o,
1058                                   struct bnx2x_exeq_elem *elem, int rule_idx,
1059                                   int cam_offset)
1060 {
1061         struct bnx2x_raw_obj *raw = &o->raw;
1062         struct mac_configuration_cmd *config =
1063                 (struct mac_configuration_cmd *)(raw->rdata);
1064         /* 57710 and 57711 do not support MOVE command,
1065          * so it's either ADD or DEL
1066          */
1067         bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1068                 true : false;
1069
1070         /* Reset the ramrod data buffer */
1071         memset(config, 0, sizeof(*config));
1072
1073         bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
1074                                      cam_offset, add,
1075                                      elem->cmd_data.vlan_mac.u.mac.mac, 0,
1076                                      ETH_VLAN_FILTER_ANY_VLAN, config);
1077 }
1078
1079 static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
1080                                   struct bnx2x_vlan_mac_obj *o,
1081                                   struct bnx2x_exeq_elem *elem, int rule_idx,
1082                                   int cam_offset)
1083 {
1084         struct bnx2x_raw_obj *raw = &o->raw;
1085         struct eth_classify_rules_ramrod_data *data =
1086                 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
1087         int rule_cnt = rule_idx + 1;
1088         union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
1089         enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1090         bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
1091         u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
1092
1093         /* Reset the ramrod data buffer for the first rule */
1094         if (rule_idx == 0)
1095                 memset(data, 0, sizeof(*data));
1096
1097         /* Set a rule header */
1098         bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
1099                                       &rule_entry->vlan.header);
1100
1101         DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
1102                          vlan);
1103
1104         /* Set a VLAN itself */
1105         rule_entry->vlan.vlan = cpu_to_le16(vlan);
1106
1107         /* MOVE: Add a rule that will add this MAC to the target Queue */
1108         if (cmd == BNX2X_VLAN_MAC_MOVE) {
1109                 rule_entry++;
1110                 rule_cnt++;
1111
1112                 /* Setup ramrod data */
1113                 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
1114                                         elem->cmd_data.vlan_mac.target_obj,
1115                                               true, CLASSIFY_RULE_OPCODE_VLAN,
1116                                               &rule_entry->vlan.header);
1117
1118                 /* Set a VLAN itself */
1119                 rule_entry->vlan.vlan = cpu_to_le16(vlan);
1120         }
1121
1122         /* Set the ramrod data header */
1123         /* TODO: take this to the higher level in order to prevent multiple
1124                  writing */
1125         bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1126                                         rule_cnt);
1127 }
1128
1129 static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
1130                                       struct bnx2x_vlan_mac_obj *o,
1131                                       struct bnx2x_exeq_elem *elem,
1132                                       int rule_idx, int cam_offset)
1133 {
1134         struct bnx2x_raw_obj *raw = &o->raw;
1135         struct eth_classify_rules_ramrod_data *data =
1136                 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
1137         int rule_cnt = rule_idx + 1;
1138         union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
1139         enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1140         bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
1141         u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
1142         u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
1143
1144         /* Reset the ramrod data buffer for the first rule */
1145         if (rule_idx == 0)
1146                 memset(data, 0, sizeof(*data));
1147
1148         /* Set a rule header */
1149         bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
1150                                       &rule_entry->pair.header);
1151
1152         /* Set VLAN and MAC themselves */
1153         rule_entry->pair.vlan = cpu_to_le16(vlan);
1154         bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
1155                               &rule_entry->pair.mac_mid,
1156                               &rule_entry->pair.mac_lsb, mac);
1157         rule_entry->pair.inner_mac =
1158                 cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
1159         /* MOVE: Add a rule that will add this MAC to the target Queue */
1160         if (cmd == BNX2X_VLAN_MAC_MOVE) {
1161                 rule_entry++;
1162                 rule_cnt++;
1163
1164                 /* Setup ramrod data */
1165                 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
1166                                         elem->cmd_data.vlan_mac.target_obj,
1167                                               true, CLASSIFY_RULE_OPCODE_PAIR,
1168                                               &rule_entry->pair.header);
1169
1170                 /* Set a VLAN itself */
1171                 rule_entry->pair.vlan = cpu_to_le16(vlan);
1172                 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
1173                                       &rule_entry->pair.mac_mid,
1174                                       &rule_entry->pair.mac_lsb, mac);
1175                 rule_entry->pair.inner_mac =
1176                         cpu_to_le16(elem->cmd_data.vlan_mac.u.
1177                                                 vlan_mac.is_inner_mac);
1178         }
1179
1180         /* Set the ramrod data header */
1181         /* TODO: take this to the higher level in order to prevent multiple
1182                  writing */
1183         bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1184                                         rule_cnt);
1185 }
1186
1187 /**
1188  * bnx2x_set_one_vlan_mac_e1h -
1189  *
1190  * @bp:         device handle
1191  * @o:          bnx2x_vlan_mac_obj
1192  * @elem:       bnx2x_exeq_elem
1193  * @rule_idx:   rule_idx
1194  * @cam_offset: cam_offset
1195  */
1196 static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
1197                                        struct bnx2x_vlan_mac_obj *o,
1198                                        struct bnx2x_exeq_elem *elem,
1199                                        int rule_idx, int cam_offset)
1200 {
1201         struct bnx2x_raw_obj *raw = &o->raw;
1202         struct mac_configuration_cmd *config =
1203                 (struct mac_configuration_cmd *)(raw->rdata);
1204         /* 57710 and 57711 do not support MOVE command,
1205          * so it's either ADD or DEL
1206          */
1207         bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1208                 true : false;
1209
1210         /* Reset the ramrod data buffer */
1211         memset(config, 0, sizeof(*config));
1212
1213         bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
1214                                      cam_offset, add,
1215                                      elem->cmd_data.vlan_mac.u.vlan_mac.mac,
1216                                      elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
1217                                      ETH_VLAN_FILTER_CLASSIFY, config);
1218 }
1219
/* Local helper: step from @pos to the following element of its list.
 * NOTE(review): shadows/predates the generic list_next_entry() that later
 * moved into <linux/list.h> — drop this definition when rebasing forward.
 */
#define list_next_entry(pos, member) \
	list_entry((pos)->member.next, typeof(*(pos)), member)
1222
1223 /**
1224  * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
1225  *
1226  * @bp:         device handle
1227  * @p:          command parameters
1228  * @ppos:       pointer to the cookie
1229  *
1230  * reconfigure next MAC/VLAN/VLAN-MAC element from the
1231  * previously configured elements list.
1232  *
1233  * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
1234  * into an account
1235  *
1236  * pointer to the cookie  - that should be given back in the next call to make
1237  * function handle the next element. If *ppos is set to NULL it will restart the
1238  * iterator. If returned *ppos == NULL this means that the last element has been
1239  * handled.
1240  *
1241  */
1242 static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
1243                            struct bnx2x_vlan_mac_ramrod_params *p,
1244                            struct bnx2x_vlan_mac_registry_elem **ppos)
1245 {
1246         struct bnx2x_vlan_mac_registry_elem *pos;
1247         struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1248
1249         /* If list is empty - there is nothing to do here */
1250         if (list_empty(&o->head)) {
1251                 *ppos = NULL;
1252                 return 0;
1253         }
1254
1255         /* make a step... */
1256         if (*ppos == NULL)
1257                 *ppos = list_first_entry(&o->head,
1258                                          struct bnx2x_vlan_mac_registry_elem,
1259                                          link);
1260         else
1261                 *ppos = list_next_entry(*ppos, link);
1262
1263         pos = *ppos;
1264
1265         /* If it's the last step - return NULL */
1266         if (list_is_last(&pos->link, &o->head))
1267                 *ppos = NULL;
1268
1269         /* Prepare a 'user_req' */
1270         memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
1271
1272         /* Set the command */
1273         p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
1274
1275         /* Set vlan_mac_flags */
1276         p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1277
1278         /* Set a restore bit */
1279         __set_bit(RAMROD_RESTORE, &p->ramrod_flags);
1280
1281         return bnx2x_config_vlan_mac(bp, p);
1282 }
1283
1284 /* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1285  * pointer to an element with a specific criteria and NULL if such an element
1286  * hasn't been found.
1287  */
1288 static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1289         struct bnx2x_exe_queue_obj *o,
1290         struct bnx2x_exeq_elem *elem)
1291 {
1292         struct bnx2x_exeq_elem *pos;
1293         struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1294
1295         /* Check pending for execution commands */
1296         list_for_each_entry(pos, &o->exe_queue, link)
1297                 if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1298                               sizeof(*data)) &&
1299                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1300                         return pos;
1301
1302         return NULL;
1303 }
1304
1305 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1306         struct bnx2x_exe_queue_obj *o,
1307         struct bnx2x_exeq_elem *elem)
1308 {
1309         struct bnx2x_exeq_elem *pos;
1310         struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1311
1312         /* Check pending for execution commands */
1313         list_for_each_entry(pos, &o->exe_queue, link)
1314                 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1315                               sizeof(*data)) &&
1316                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1317                         return pos;
1318
1319         return NULL;
1320 }
1321
1322 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
1323         struct bnx2x_exe_queue_obj *o,
1324         struct bnx2x_exeq_elem *elem)
1325 {
1326         struct bnx2x_exeq_elem *pos;
1327         struct bnx2x_vlan_mac_ramrod_data *data =
1328                 &elem->cmd_data.vlan_mac.u.vlan_mac;
1329
1330         /* Check pending for execution commands */
1331         list_for_each_entry(pos, &o->exe_queue, link)
1332                 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1333                               sizeof(*data)) &&
1334                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1335                         return pos;
1336
1337         return NULL;
1338 }
1339
1340 /**
1341  * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
1342  *
1343  * @bp:         device handle
1344  * @qo:         bnx2x_qable_obj
1345  * @elem:       bnx2x_exeq_elem
1346  *
1347  * Checks that the requested configuration can be added. If yes and if
1348  * requested, consume CAM credit.
1349  *
1350  * The 'validate' is run after the 'optimize'.
1351  *
1352  */
1353 static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1354                                               union bnx2x_qable_obj *qo,
1355                                               struct bnx2x_exeq_elem *elem)
1356 {
1357         struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1358         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1359         int rc;
1360
1361         /* Check the registry */
1362         rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
1363         if (rc) {
1364                 DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
1365                 return rc;
1366         }
1367
1368         /* Check if there is a pending ADD command for this
1369          * MAC/VLAN/VLAN-MAC. Return an error if there is.
1370          */
1371         if (exeq->get(exeq, elem)) {
1372                 DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
1373                 return -EEXIST;
1374         }
1375
1376         /* TODO: Check the pending MOVE from other objects where this
1377          * object is a destination object.
1378          */
1379
1380         /* Consume the credit if not requested not to */
1381         if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1382                        &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1383             o->get_credit(o)))
1384                 return -EINVAL;
1385
1386         return 0;
1387 }
1388
1389 /**
1390  * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
1391  *
1392  * @bp:         device handle
1393  * @qo:         quable object to check
1394  * @elem:       element that needs to be deleted
1395  *
1396  * Checks that the requested configuration can be deleted. If yes and if
1397  * requested, returns a CAM credit.
1398  *
1399  * The 'validate' is run after the 'optimize'.
1400  */
1401 static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
1402                                               union bnx2x_qable_obj *qo,
1403                                               struct bnx2x_exeq_elem *elem)
1404 {
1405         struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1406         struct bnx2x_vlan_mac_registry_elem *pos;
1407         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1408         struct bnx2x_exeq_elem query_elem;
1409
1410         /* If this classification can not be deleted (doesn't exist)
1411          * - return a BNX2X_EXIST.
1412          */
1413         pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1414         if (!pos) {
1415                 DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
1416                 return -EEXIST;
1417         }
1418
1419         /* Check if there are pending DEL or MOVE commands for this
1420          * MAC/VLAN/VLAN-MAC. Return an error if so.
1421          */
1422         memcpy(&query_elem, elem, sizeof(query_elem));
1423
1424         /* Check for MOVE commands */
1425         query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
1426         if (exeq->get(exeq, &query_elem)) {
1427                 BNX2X_ERR("There is a pending MOVE command already\n");
1428                 return -EINVAL;
1429         }
1430
1431         /* Check for DEL commands */
1432         if (exeq->get(exeq, elem)) {
1433                 DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
1434                 return -EEXIST;
1435         }
1436
1437         /* Return the credit to the credit pool if not requested not to */
1438         if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1439                        &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1440             o->put_credit(o))) {
1441                 BNX2X_ERR("Failed to return a credit\n");
1442                 return -EINVAL;
1443         }
1444
1445         return 0;
1446 }
1447
1448 /**
1449  * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
1450  *
1451  * @bp:         device handle
1452  * @qo:         quable object to check (source)
1453  * @elem:       element that needs to be moved
1454  *
1455  * Checks that the requested configuration can be moved. If yes and if
1456  * requested, returns a CAM credit.
1457  *
1458  * The 'validate' is run after the 'optimize'.
1459  */
1460 static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
1461                                                union bnx2x_qable_obj *qo,
1462                                                struct bnx2x_exeq_elem *elem)
1463 {
1464         struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
1465         struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1466         struct bnx2x_exeq_elem query_elem;
1467         struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
1468         struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1469
1470         /* Check if we can perform this operation based on the current registry
1471          * state.
1472          */
1473         if (!src_o->check_move(bp, src_o, dest_o,
1474                                &elem->cmd_data.vlan_mac.u)) {
1475                 DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
1476                 return -EINVAL;
1477         }
1478
1479         /* Check if there is an already pending DEL or MOVE command for the
1480          * source object or ADD command for a destination object. Return an
1481          * error if so.
1482          */
1483         memcpy(&query_elem, elem, sizeof(query_elem));
1484
1485         /* Check DEL on source */
1486         query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1487         if (src_exeq->get(src_exeq, &query_elem)) {
1488                 BNX2X_ERR("There is a pending DEL command on the source queue already\n");
1489                 return -EINVAL;
1490         }
1491
1492         /* Check MOVE on source */
1493         if (src_exeq->get(src_exeq, elem)) {
1494                 DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
1495                 return -EEXIST;
1496         }
1497
1498         /* Check ADD on destination */
1499         query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1500         if (dest_exeq->get(dest_exeq, &query_elem)) {
1501                 BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
1502                 return -EINVAL;
1503         }
1504
1505         /* Consume the credit if not requested not to */
1506         if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
1507                        &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1508             dest_o->get_credit(dest_o)))
1509                 return -EINVAL;
1510
1511         if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1512                        &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1513             src_o->put_credit(src_o))) {
1514                 /* return the credit taken from dest... */
1515                 dest_o->put_credit(dest_o);
1516                 return -EINVAL;
1517         }
1518
1519         return 0;
1520 }
1521
/* Dispatch the 'validate' callback to the command-specific checker */
static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
	case BNX2X_VLAN_MAC_DEL:
		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
	case BNX2X_VLAN_MAC_MOVE:
		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
	default:
		/* Unknown command - cannot be validated */
		return -EINVAL;
	}
}
1537
1538 static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
1539                                   union bnx2x_qable_obj *qo,
1540                                   struct bnx2x_exeq_elem *elem)
1541 {
1542         int rc = 0;
1543
1544         /* If consumption wasn't required, nothing to do */
1545         if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1546                      &elem->cmd_data.vlan_mac.vlan_mac_flags))
1547                 return 0;
1548
1549         switch (elem->cmd_data.vlan_mac.cmd) {
1550         case BNX2X_VLAN_MAC_ADD:
1551         case BNX2X_VLAN_MAC_MOVE:
1552                 rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1553                 break;
1554         case BNX2X_VLAN_MAC_DEL:
1555                 rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1556                 break;
1557         default:
1558                 return -EINVAL;
1559         }
1560
1561         if (rc != true)
1562                 return -EINVAL;
1563
1564         return 0;
1565 }
1566
1567 /**
1568  * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1569  *
1570  * @bp:         device handle
1571  * @o:          bnx2x_vlan_mac_obj
1572  *
1573  */
1574 static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1575                                struct bnx2x_vlan_mac_obj *o)
1576 {
1577         int cnt = 5000, rc;
1578         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1579         struct bnx2x_raw_obj *raw = &o->raw;
1580
1581         while (cnt--) {
1582                 /* Wait for the current command to complete */
1583                 rc = raw->wait_comp(bp, raw);
1584                 if (rc)
1585                         return rc;
1586
1587                 /* Wait until there are no pending commands */
1588                 if (!bnx2x_exe_queue_empty(exeq))
1589                         usleep_range(1000, 2000);
1590                 else
1591                         return 0;
1592         }
1593
1594         return -EBUSY;
1595 }
1596
1597 static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
1598                                          struct bnx2x_vlan_mac_obj *o,
1599                                          unsigned long *ramrod_flags)
1600 {
1601         int rc = 0;
1602
1603         spin_lock_bh(&o->exe_queue.lock);
1604
1605         DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
1606         rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);
1607
1608         if (rc != 0) {
1609                 __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);
1610
1611                 /* Calling function should not diffrentiate between this case
1612                  * and the case in which there is already a pending ramrod
1613                  */
1614                 rc = 1;
1615         } else {
1616                 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1617         }
1618         spin_unlock_bh(&o->exe_queue.lock);
1619
1620         return rc;
1621 }
1622
/**
 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @bp:           device handle
 * @o:            bnx2x_vlan_mac_obj the completion belongs to
 * @cqe:          completion element taken from the event ring
 * @ramrod_flags: if RAMROD_CONT is set, the next execution chunk is
 *                scheduled from here
 *
 * Returns 0 when the object has no more work, 1 (PENDING) when further
 * commands remain queued, negative errno on failure.
 */
static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
				   struct bnx2x_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   unsigned long *ramrod_flags)
{
	struct bnx2x_raw_obj *r = &o->raw;
	int rc;

	/* Clearing the pending list & raw state should be made
	 * atomically (as execution flow assumes they represent the same).
	 */
	spin_lock_bh(&o->exe_queue.lock);

	/* Reset pending list */
	__bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);

	/* Clear pending */
	r->clear_pending(r);

	spin_unlock_bh(&o->exe_queue.lock);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return -EINVAL;

	/* Run the next bulk of pending commands if requested */
	if (test_bit(RAMROD_CONT, ramrod_flags)) {
		rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);

		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		return 1;

	return 0;
}
1671
1672 /**
1673  * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1674  *
1675  * @bp:         device handle
1676  * @o:          bnx2x_qable_obj
1677  * @elem:       bnx2x_exeq_elem
1678  */
1679 static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1680                                    union bnx2x_qable_obj *qo,
1681                                    struct bnx2x_exeq_elem *elem)
1682 {
1683         struct bnx2x_exeq_elem query, *pos;
1684         struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1685         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1686
1687         memcpy(&query, elem, sizeof(query));
1688
1689         switch (elem->cmd_data.vlan_mac.cmd) {
1690         case BNX2X_VLAN_MAC_ADD:
1691                 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1692                 break;
1693         case BNX2X_VLAN_MAC_DEL:
1694                 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1695                 break;
1696         default:
1697                 /* Don't handle anything other than ADD or DEL */
1698                 return 0;
1699         }
1700
1701         /* If we found the appropriate element - delete it */
1702         pos = exeq->get(exeq, &query);
1703         if (pos) {
1704
1705                 /* Return the credit of the optimized command */
1706                 if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1707                               &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1708                         if ((query.cmd_data.vlan_mac.cmd ==
1709                              BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
1710                                 BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
1711                                 return -EINVAL;
1712                         } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1713                                 BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
1714                                 return -EINVAL;
1715                         }
1716                 }
1717
1718                 DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1719                            (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1720                            "ADD" : "DEL");
1721
1722                 list_del(&pos->link);
1723                 bnx2x_exe_queue_free_elem(bp, pos);
1724                 return 1;
1725         }
1726
1727         return 0;
1728 }
1729
1730 /**
1731  * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
1732  *
1733  * @bp:   device handle
1734  * @o:
1735  * @elem:
1736  * @restore:
1737  * @re:
1738  *
1739  * prepare a registry element according to the current command request.
1740  */
1741 static inline int bnx2x_vlan_mac_get_registry_elem(
1742         struct bnx2x *bp,
1743         struct bnx2x_vlan_mac_obj *o,
1744         struct bnx2x_exeq_elem *elem,
1745         bool restore,
1746         struct bnx2x_vlan_mac_registry_elem **re)
1747 {
1748         enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1749         struct bnx2x_vlan_mac_registry_elem *reg_elem;
1750
1751         /* Allocate a new registry element if needed. */
1752         if (!restore &&
1753             ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1754                 reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1755                 if (!reg_elem)
1756                         return -ENOMEM;
1757
1758                 /* Get a new CAM offset */
1759                 if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1760                         /* This shall never happen, because we have checked the
1761                          * CAM availability in the 'validate'.
1762                          */
1763                         WARN_ON(1);
1764                         kfree(reg_elem);
1765                         return -EINVAL;
1766                 }
1767
1768                 DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1769
1770                 /* Set a VLAN-MAC data */
1771                 memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1772                           sizeof(reg_elem->u));
1773
1774                 /* Copy the flags (needed for DEL and RESTORE flows) */
1775                 reg_elem->vlan_mac_flags =
1776                         elem->cmd_data.vlan_mac.vlan_mac_flags;
1777         } else /* DEL, RESTORE */
1778                 reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1779
1780         *re = reg_elem;
1781         return 0;
1782 }
1783
/**
 * bnx2x_execute_vlan_mac - execute vlan mac command
 *
 * @bp:           device handle
 * @qo:           bnx2x_qable_obj (vlan_mac member is used)
 * @exe_chunk:    list of bnx2x_exeq_elem commands forming one ramrod
 * @ramrod_flags: RAMROD_RESTORE / RAMROD_DRV_CLR_ONLY are honoured here
 *
 * go and send a ramrod!
 *
 * Returns 1 when a ramrod was posted to FW (completion still pending),
 * 0 when only the driver registry was updated (RAMROD_DRV_CLR_ONLY),
 * negative errno on failure (registry changes are rolled back).
 */
static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
				  union bnx2x_qable_obj *qo,
				  struct list_head *exe_chunk,
				  unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	int rc, idx = 0;
	bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
	bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
	struct bnx2x_vlan_mac_registry_elem *reg_elem;
	enum bnx2x_vlan_mac_cmd cmd;

	/* If DRIVER_ONLY execution is requested, cleanup a registry
	 * and exit. Otherwise send a ramrod to FW.
	 */
	if (!drv_only) {
		/* There must not already be a ramrod in flight */
		WARN_ON(r->check_pending(r));

		/* Set pending */
		r->set_pending(r);

		/* Fill the ramrod data */
		list_for_each_entry(elem, exe_chunk, link) {
			cmd = elem->cmd_data.vlan_mac.cmd;
			/* We will add to the target object in MOVE command, so
			 * change the object for a CAM search.
			 */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				cam_obj = elem->cmd_data.vlan_mac.target_obj;
			else
				cam_obj = o;

			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
							      elem, restore,
							      &reg_elem);
			if (rc)
				goto error_exit;

			WARN_ON(!reg_elem);

			/* Push a new entry into the registry */
			if (!restore &&
			    ((cmd == BNX2X_VLAN_MAC_ADD) ||
			    (cmd == BNX2X_VLAN_MAC_MOVE)))
				list_add(&reg_elem->link, &cam_obj->head);

			/* Configure a single command in a ramrod data buffer */
			o->set_one_rule(bp, o, elem, idx,
					reg_elem->cam_offset);

			/* MOVE command consumes 2 entries in the ramrod data */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				idx += 2;
			else
				idx++;
		}

		/* No need for an explicit memory barrier here as long we would
		 * need to ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer which involves a memory
		 * read and we will have to put a full memory barrier there
		 * (inside bnx2x_sp_post()).
		 */

		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
				   U64_HI(r->rdata_mapping),
				   U64_LO(r->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			goto error_exit;
	}

	/* Now, when we are done with the ramrod - clean up the registry:
	 * DEL and MOVE drop the entry from the source object's registry.
	 */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;
		if ((cmd == BNX2X_VLAN_MAC_DEL) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE)) {
			reg_elem = o->check_del(bp, o,
						&elem->cmd_data.vlan_mac.u);

			WARN_ON(!reg_elem);

			o->put_cam_offset(o, reg_elem->cam_offset);
			list_del(&reg_elem->link);
			kfree(reg_elem);
		}
	}

	if (!drv_only)
		return 1;
	else
		return 0;

error_exit:
	r->clear_pending(r);

	/* Cleanup a registry in case of a failure */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;

		if (cmd == BNX2X_VLAN_MAC_MOVE)
			cam_obj = elem->cmd_data.vlan_mac.target_obj;
		else
			cam_obj = o;

		/* Delete all newly added above entries */
		if (!restore &&
		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE))) {
			reg_elem = o->check_del(bp, cam_obj,
						&elem->cmd_data.vlan_mac.u);
			if (reg_elem) {
				list_del(&reg_elem->link);
				kfree(reg_elem);
			}
		}
	}

	return rc;
}
1916
1917 static inline int bnx2x_vlan_mac_push_new_cmd(
1918         struct bnx2x *bp,
1919         struct bnx2x_vlan_mac_ramrod_params *p)
1920 {
1921         struct bnx2x_exeq_elem *elem;
1922         struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1923         bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1924
1925         /* Allocate the execution queue element */
1926         elem = bnx2x_exe_queue_alloc_elem(bp);
1927         if (!elem)
1928                 return -ENOMEM;
1929
1930         /* Set the command 'length' */
1931         switch (p->user_req.cmd) {
1932         case BNX2X_VLAN_MAC_MOVE:
1933                 elem->cmd_len = 2;
1934                 break;
1935         default:
1936                 elem->cmd_len = 1;
1937         }
1938
1939         /* Fill the object specific info */
1940         memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1941
1942         /* Try to add a new command to the pending list */
1943         return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1944 }
1945
/**
 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
 *
 * @bp:   device handle
 * @p:    ramrod parameters: object, user request and execution flags
 *
 * Returns 0 when everything requested has completed, 1 (PENDING) when
 * commands are still queued or in flight, negative errno on failure.
 */
int bnx2x_config_vlan_mac(struct bnx2x *bp,
			   struct bnx2x_vlan_mac_ramrod_params *p)
{
	int rc = 0;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	unsigned long *ramrod_flags = &p->ramrod_flags;
	/* RAMROD_CONT means "just continue draining", no new command */
	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
	struct bnx2x_raw_obj *raw = &o->raw;

	/*
	 * Add new elements to the execution list for commands that require it.
	 */
	if (!cont) {
		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
		if (rc)
			return rc;
	}

	/* If nothing will be executed further in this iteration we want to
	 * return PENDING if there are pending commands
	 */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		rc = 1;

	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags))  {
		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
		raw->clear_pending(raw);
	}

	/* Execute commands if required */
	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
		rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
						   &p->ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
	 * then user want to wait until the last command is done.
	 */
	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		/* Wait maximum for the current exe_queue length iterations plus
		 * one (for the current pending command).
		 */
		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;

		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
		       max_iterations--) {

			/* Wait for the current command to complete */
			rc = raw->wait_comp(bp, raw);
			if (rc)
				return rc;

			/* Make a next step */
			rc = __bnx2x_vlan_mac_execute_step(bp,
							   p->vlan_mac_obj,
							   &p->ramrod_flags);
			if (rc < 0)
				return rc;
		}

		return 0;
	}

	return rc;
}
2021
/**
 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
 *
 * @bp:                 device handle
 * @o:                  vlan_mac object whose entries are to be deleted
 * @vlan_mac_flags:     only entries whose flags match exactly are deleted
 * @ramrod_flags:       execution flags to be used for this deletion
 *
 * Returns 0 if the last operation has completed successfully and there are
 * no more elements left, positive value if the last operation has completed
 * successfully and there are more previously configured elements, negative
 * value if the current operation has failed.
 */
static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  unsigned long *vlan_mac_flags,
				  unsigned long *ramrod_flags)
{
	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
	struct bnx2x_vlan_mac_ramrod_params p;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
	int read_lock;
	int rc = 0;

	/* Clear pending commands first */

	spin_lock_bh(&exeq->lock);

	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
		    *vlan_mac_flags) {
			/* 'remove' returns the credits the queued command held */
			rc = exeq->remove(bp, exeq->owner, exeq_pos);
			if (rc) {
				BNX2X_ERR("Failed to remove command\n");
				spin_unlock_bh(&exeq->lock);
				return rc;
			}
			list_del(&exeq_pos->link);
			bnx2x_exe_queue_free_elem(bp, exeq_pos);
		}
	}

	spin_unlock_bh(&exeq->lock);

	/* Prepare a command request */
	memset(&p, 0, sizeof(p));
	p.vlan_mac_obj = o;
	p.ramrod_flags = *ramrod_flags;
	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;

	/* Add all but the last VLAN-MAC to the execution queue without actually
	 * execution anything.
	 */
	__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
	__clear_bit(RAMROD_CONT, &p.ramrod_flags);

	DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
	read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
	if (read_lock != 0)
		return read_lock;

	/* Walk the registry under the reader lock and queue one DEL per match */
	list_for_each_entry(pos, &o->head, link) {
		if (pos->vlan_mac_flags == *vlan_mac_flags) {
			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
			rc = bnx2x_config_vlan_mac(bp, &p);
			if (rc < 0) {
				BNX2X_ERR("Failed to add a new DEL command\n");
				bnx2x_vlan_mac_h_read_unlock(bp, o);
				return rc;
			}
		}
	}

	DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
	bnx2x_vlan_mac_h_read_unlock(bp, o);

	/* Kick off execution of everything just queued, with the caller's
	 * original flags restored.
	 */
	p.ramrod_flags = *ramrod_flags;
	__set_bit(RAMROD_CONT, &p.ramrod_flags);

	return bnx2x_config_vlan_mac(bp, &p);
}
2106
2107 static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
2108         u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
2109         unsigned long *pstate, bnx2x_obj_type type)
2110 {
2111         raw->func_id = func_id;
2112         raw->cid = cid;
2113         raw->cl_id = cl_id;
2114         raw->rdata = rdata;
2115         raw->rdata_mapping = rdata_mapping;
2116         raw->state = state;
2117         raw->pstate = pstate;
2118         raw->obj_type = type;
2119         raw->check_pending = bnx2x_raw_check_pending;
2120         raw->clear_pending = bnx2x_raw_clear_pending;
2121         raw->set_pending = bnx2x_raw_set_pending;
2122         raw->wait_comp = bnx2x_raw_wait;
2123 }
2124
2125 static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
2126         u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
2127         int state, unsigned long *pstate, bnx2x_obj_type type,
2128         struct bnx2x_credit_pool_obj *macs_pool,
2129         struct bnx2x_credit_pool_obj *vlans_pool)
2130 {
2131         INIT_LIST_HEAD(&o->head);
2132         o->head_reader = 0;
2133         o->head_exe_request = false;
2134         o->saved_ramrod_flags = 0;
2135
2136         o->macs_pool = macs_pool;
2137         o->vlans_pool = vlans_pool;
2138
2139         o->delete_all = bnx2x_vlan_mac_del_all;
2140         o->restore = bnx2x_vlan_mac_restore;
2141         o->complete = bnx2x_complete_vlan_mac;
2142         o->wait = bnx2x_wait_vlan_mac;
2143
2144         bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
2145                            state, pstate, type);
2146 }
2147
2148 void bnx2x_init_mac_obj(struct bnx2x *bp,
2149                         struct bnx2x_vlan_mac_obj *mac_obj,
2150                         u8 cl_id, u32 cid, u8 func_id, void *rdata,
2151                         dma_addr_t rdata_mapping, int state,
2152                         unsigned long *pstate, bnx2x_obj_type type,
2153                         struct bnx2x_credit_pool_obj *macs_pool)
2154 {
2155         union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
2156
2157         bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
2158                                    rdata_mapping, state, pstate, type,
2159                                    macs_pool, NULL);
2160
2161         /* CAM credit pool handling */
2162         mac_obj->get_credit = bnx2x_get_credit_mac;
2163         mac_obj->put_credit = bnx2x_put_credit_mac;
2164         mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2165         mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2166
2167         if (CHIP_IS_E1x(bp)) {
2168                 mac_obj->set_one_rule      = bnx2x_set_one_mac_e1x;
2169                 mac_obj->check_del         = bnx2x_check_mac_del;
2170                 mac_obj->check_add         = bnx2x_check_mac_add;
2171                 mac_obj->check_move        = bnx2x_check_move_always_err;
2172                 mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2173
2174                 /* Exe Queue */
2175                 bnx2x_exe_queue_init(bp,
2176                                      &mac_obj->exe_queue, 1, qable_obj,
2177                                      bnx2x_validate_vlan_mac,
2178                                      bnx2x_remove_vlan_mac,
2179                                      bnx2x_optimize_vlan_mac,
2180                                      bnx2x_execute_vlan_mac,
2181                                      bnx2x_exeq_get_mac);
2182         } else {
2183                 mac_obj->set_one_rule      = bnx2x_set_one_mac_e2;
2184                 mac_obj->check_del         = bnx2x_check_mac_del;
2185                 mac_obj->check_add         = bnx2x_check_mac_add;
2186                 mac_obj->check_move        = bnx2x_check_move;
2187                 mac_obj->ramrod_cmd        =
2188                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2189                 mac_obj->get_n_elements    = bnx2x_get_n_elements;
2190
2191                 /* Exe Queue */
2192                 bnx2x_exe_queue_init(bp,
2193                                      &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
2194                                      qable_obj, bnx2x_validate_vlan_mac,
2195                                      bnx2x_remove_vlan_mac,
2196                                      bnx2x_optimize_vlan_mac,
2197                                      bnx2x_execute_vlan_mac,
2198                                      bnx2x_exeq_get_mac);
2199         }
2200 }
2201
2202 void bnx2x_init_vlan_obj(struct bnx2x *bp,
2203                          struct bnx2x_vlan_mac_obj *vlan_obj,
2204                          u8 cl_id, u32 cid, u8 func_id, void *rdata,
2205                          dma_addr_t rdata_mapping, int state,
2206                          unsigned long *pstate, bnx2x_obj_type type,
2207                          struct bnx2x_credit_pool_obj *vlans_pool)
2208 {
2209         union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
2210
2211         bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
2212                                    rdata_mapping, state, pstate, type, NULL,
2213                                    vlans_pool);
2214
2215         vlan_obj->get_credit = bnx2x_get_credit_vlan;
2216         vlan_obj->put_credit = bnx2x_put_credit_vlan;
2217         vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
2218         vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
2219
2220         if (CHIP_IS_E1x(bp)) {
2221                 BNX2X_ERR("Do not support chips others than E2 and newer\n");
2222                 BUG();
2223         } else {
2224                 vlan_obj->set_one_rule      = bnx2x_set_one_vlan_e2;
2225                 vlan_obj->check_del         = bnx2x_check_vlan_del;
2226                 vlan_obj->check_add         = bnx2x_check_vlan_add;
2227                 vlan_obj->check_move        = bnx2x_check_move;
2228                 vlan_obj->ramrod_cmd        =
2229                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2230                 vlan_obj->get_n_elements    = bnx2x_get_n_elements;
2231
2232                 /* Exe Queue */
2233                 bnx2x_exe_queue_init(bp,
2234                                      &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2235                                      qable_obj, bnx2x_validate_vlan_mac,
2236                                      bnx2x_remove_vlan_mac,
2237                                      bnx2x_optimize_vlan_mac,
2238                                      bnx2x_execute_vlan_mac,
2239                                      bnx2x_exeq_get_vlan);
2240         }
2241 }
2242
2243 void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
2244                              struct bnx2x_vlan_mac_obj *vlan_mac_obj,
2245                              u8 cl_id, u32 cid, u8 func_id, void *rdata,
2246                              dma_addr_t rdata_mapping, int state,
2247                              unsigned long *pstate, bnx2x_obj_type type,
2248                              struct bnx2x_credit_pool_obj *macs_pool,
2249                              struct bnx2x_credit_pool_obj *vlans_pool)
2250 {
2251         union bnx2x_qable_obj *qable_obj =
2252                 (union bnx2x_qable_obj *)vlan_mac_obj;
2253
2254         bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2255                                    rdata_mapping, state, pstate, type,
2256                                    macs_pool, vlans_pool);
2257
2258         /* CAM pool handling */
2259         vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2260         vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
2261         /* CAM offset is relevant for 57710 and 57711 chips only which have a
2262          * single CAM for both MACs and VLAN-MAC pairs. So the offset
2263          * will be taken from MACs' pool object only.
2264          */
2265         vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2266         vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2267
2268         if (CHIP_IS_E1(bp)) {
2269                 BNX2X_ERR("Do not support chips others than E2\n");
2270                 BUG();
2271         } else if (CHIP_IS_E1H(bp)) {
2272                 vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e1h;
2273                 vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
2274                 vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
2275                 vlan_mac_obj->check_move        = bnx2x_check_move_always_err;
2276                 vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2277
2278                 /* Exe Queue */
2279                 bnx2x_exe_queue_init(bp,
2280                                      &vlan_mac_obj->exe_queue, 1, qable_obj,
2281                                      bnx2x_validate_vlan_mac,
2282                                      bnx2x_remove_vlan_mac,
2283                                      bnx2x_optimize_vlan_mac,
2284                                      bnx2x_execute_vlan_mac,
2285                                      bnx2x_exeq_get_vlan_mac);
2286         } else {
2287                 vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e2;
2288                 vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
2289                 vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
2290                 vlan_mac_obj->check_move        = bnx2x_check_move;
2291                 vlan_mac_obj->ramrod_cmd        =
2292                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2293
2294                 /* Exe Queue */
2295                 bnx2x_exe_queue_init(bp,
2296                                      &vlan_mac_obj->exe_queue,
2297                                      CLASSIFY_RULES_COUNT,
2298                                      qable_obj, bnx2x_validate_vlan_mac,
2299                                      bnx2x_remove_vlan_mac,
2300                                      bnx2x_optimize_vlan_mac,
2301                                      bnx2x_execute_vlan_mac,
2302                                      bnx2x_exeq_get_vlan_mac);
2303         }
2304 }
2305
2306 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2307 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2308                         struct tstorm_eth_mac_filter_config *mac_filters,
2309                         u16 pf_id)
2310 {
2311         size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2312
2313         u32 addr = BAR_TSTRORM_INTMEM +
2314                         TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2315
2316         __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2317 }
2318
/* Apply the requested RX mode on E1/E1H by writing the per-client
 * drop/accept masks directly into TSTORM internal memory.
 * Always returns 0; completion is signalled synchronously.
 */
static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
				 struct bnx2x_rx_mode_ramrod_params *p)
{
	/* update the bp MAC filter structure */
	u32 mask = (1 << p->cl_id);

	struct tstorm_eth_mac_filter_config *mac_filters =
		(struct tstorm_eth_mac_filter_config *)p->rdata;

	/* initial setting is drop-all */
	u8 drop_all_ucast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	u8 unmatched_unicast = 0;

	/* In e1x we only take the rx accept flags into account since tx
	 * switching isn't enabled.
	 */
	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
		/* accept matched ucast */
		drop_all_ucast = 0;

	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
		/* accept matched mcast */
		drop_all_mcast = 0;

	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
		/* accept (all) bcast */
		accp_all_bcast = 1;
	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
		/* accept unmatched unicasts */
		unmatched_unicast = 1;

	/* For each class set or clear this client's bit in the shared
	 * per-PF mask according to the mode computed above.
	 */
	mac_filters->ucast_drop_all = drop_all_ucast ?
		mac_filters->ucast_drop_all | mask :
		mac_filters->ucast_drop_all & ~mask;

	mac_filters->mcast_drop_all = drop_all_mcast ?
		mac_filters->mcast_drop_all | mask :
		mac_filters->mcast_drop_all & ~mask;

	mac_filters->ucast_accept_all = accp_all_ucast ?
		mac_filters->ucast_accept_all | mask :
		mac_filters->ucast_accept_all & ~mask;

	mac_filters->mcast_accept_all = accp_all_mcast ?
		mac_filters->mcast_accept_all | mask :
		mac_filters->mcast_accept_all & ~mask;

	mac_filters->bcast_accept_all = accp_all_bcast ?
		mac_filters->bcast_accept_all | mask :
		mac_filters->bcast_accept_all & ~mask;

	mac_filters->unmatched_unicast = unmatched_unicast ?
		mac_filters->unmatched_unicast | mask :
		mac_filters->unmatched_unicast & ~mask;

	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
	   mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
	   mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
	   mac_filters->bcast_accept_all);

	/* write the MAC filter structure*/
	__storm_memset_mac_filters(bp, mac_filters, p->func_id);

	/* The operation is completed */
	clear_bit(p->state, p->pstate);
	/* Make the cleared state bit visible before any waiter observes it */
	smp_mb__after_clear_bit();

	return 0;
}
2399
2400 /* Setup ramrod data */
2401 static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2402                                 struct eth_classify_header *hdr,
2403                                 u8 rule_cnt)
2404 {
2405         hdr->echo = cpu_to_le32(cid);
2406         hdr->rule_cnt = rule_cnt;
2407 }
2408
2409 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2410                                 unsigned long *accept_flags,
2411                                 struct eth_filter_rules_cmd *cmd,
2412                                 bool clear_accept_all)
2413 {
2414         u16 state;
2415
2416         /* start with 'drop-all' */
2417         state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2418                 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2419
2420         if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
2421                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2422
2423         if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
2424                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2425
2426         if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
2427                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2428                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2429         }
2430
2431         if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
2432                 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2433                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2434         }
2435
2436         if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
2437                 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2438
2439         if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
2440                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2441                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2442         }
2443
2444         if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
2445                 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2446
2447         /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2448         if (clear_accept_all) {
2449                 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2450                 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2451                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2452                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2453         }
2454
2455         cmd->state = cpu_to_le16(state);
2456 }
2457
2458 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2459                                 struct bnx2x_rx_mode_ramrod_params *p)
2460 {
2461         struct eth_filter_rules_ramrod_data *data = p->rdata;
2462         int rc;
2463         u8 rule_idx = 0;
2464
2465         /* Reset the ramrod data buffer */
2466         memset(data, 0, sizeof(*data));
2467
2468         /* Setup ramrod data */
2469
2470         /* Tx (internal switching) */
2471         if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2472                 data->rules[rule_idx].client_id = p->cl_id;
2473                 data->rules[rule_idx].func_id = p->func_id;
2474
2475                 data->rules[rule_idx].cmd_general_data =
2476                         ETH_FILTER_RULES_CMD_TX_CMD;
2477
2478                 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2479                                                &(data->rules[rule_idx++]),
2480                                                false);
2481         }
2482
2483         /* Rx */
2484         if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2485                 data->rules[rule_idx].client_id = p->cl_id;
2486                 data->rules[rule_idx].func_id = p->func_id;
2487
2488                 data->rules[rule_idx].cmd_general_data =
2489                         ETH_FILTER_RULES_CMD_RX_CMD;
2490
2491                 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2492                                                &(data->rules[rule_idx++]),
2493                                                false);
2494         }
2495
2496         /* If FCoE Queue configuration has been requested configure the Rx and
2497          * internal switching modes for this queue in separate rules.
2498          *
2499          * FCoE queue shell never be set to ACCEPT_ALL packets of any sort:
2500          * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2501          */
2502         if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2503                 /*  Tx (internal switching) */
2504                 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2505                         data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2506                         data->rules[rule_idx].func_id = p->func_id;
2507
2508                         data->rules[rule_idx].cmd_general_data =
2509                                                 ETH_FILTER_RULES_CMD_TX_CMD;
2510
2511                         bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2512                                                        &(data->rules[rule_idx]),
2513                                                        true);
2514                         rule_idx++;
2515                 }
2516
2517                 /* Rx */
2518                 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2519                         data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2520                         data->rules[rule_idx].func_id = p->func_id;
2521
2522                         data->rules[rule_idx].cmd_general_data =
2523                                                 ETH_FILTER_RULES_CMD_RX_CMD;
2524
2525                         bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2526                                                        &(data->rules[rule_idx]),
2527                                                        true);
2528                         rule_idx++;
2529                 }
2530         }
2531
2532         /* Set the ramrod header (most importantly - number of rules to
2533          * configure).
2534          */
2535         bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2536
2537         DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2538                          data->header.rule_cnt, p->rx_accept_flags,
2539                          p->tx_accept_flags);
2540
2541         /* No need for an explicit memory barrier here as long we would
2542          * need to ensure the ordering of writing to the SPQ element
2543          * and updating of the SPQ producer which involves a memory
2544          * read and we will have to put a full memory barrier there
2545          * (inside bnx2x_sp_post()).
2546          */
2547
2548         /* Send a ramrod */
2549         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2550                            U64_HI(p->rdata_mapping),
2551                            U64_LO(p->rdata_mapping),
2552                            ETH_CONNECTION_TYPE);
2553         if (rc)
2554                 return rc;
2555
2556         /* Ramrod completion is pending */
2557         return 1;
2558 }
2559
2560 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2561                                       struct bnx2x_rx_mode_ramrod_params *p)
2562 {
2563         return bnx2x_state_wait(bp, p->state, p->pstate);
2564 }
2565
/* No-op wait_comp callback: the E1x rx-mode flavor writes the MAC filters
 * and clears the pending bit inside config_rx_mode itself, so there is
 * nothing left to wait for here.
 */
static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
                                    struct bnx2x_rx_mode_ramrod_params *p)
{
        /* Do nothing */
        return 0;
}
2572
2573 int bnx2x_config_rx_mode(struct bnx2x *bp,
2574                          struct bnx2x_rx_mode_ramrod_params *p)
2575 {
2576         int rc;
2577
2578         /* Configure the new classification in the chip */
2579         rc = p->rx_mode_obj->config_rx_mode(bp, p);
2580         if (rc < 0)
2581                 return rc;
2582
2583         /* Wait for a ramrod completion if was requested */
2584         if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2585                 rc = p->rx_mode_obj->wait_comp(bp, p);
2586                 if (rc)
2587                         return rc;
2588         }
2589
2590         return rc;
2591 }
2592
2593 void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2594                             struct bnx2x_rx_mode_obj *o)
2595 {
2596         if (CHIP_IS_E1x(bp)) {
2597                 o->wait_comp      = bnx2x_empty_rx_mode_wait;
2598                 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2599         } else {
2600                 o->wait_comp      = bnx2x_wait_rx_mode_comp_e2;
2601                 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2602         }
2603 }
2604
2605 /********************* Multicast verbs: SET, CLEAR ****************************/
2606 static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2607 {
2608         return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2609 }
2610
/* A single multicast MAC address queued on a pending ADD command's list */
struct bnx2x_mcast_mac_elem {
        struct list_head link;
        u8 mac[ETH_ALEN];
        u8 pad[2]; /* For a natural alignment of the following buffer */
};
2616
/* A multicast configuration command queued on the object's
 * pending_cmds_head list. Which member of @data is meaningful
 * depends on @type.
 */
struct bnx2x_pending_mcast_cmd {
        struct list_head link;
        int type; /* BNX2X_MCAST_CMD_X */
        union {
                struct list_head macs_head; /* ADD: MACs still to configure */
                u32 macs_num; /* Needed for DEL command */
                int next_bin; /* Needed for RESTORE flow with aprox match */
        } data;

        bool done; /* set to true, when the command has been handled,
                    * practically used in 57712 handling only, where one pending
                    * command may be handled in a few operations. As long as for
                    * other chips every operation handling is completed in a
                    * single ramrod, there is no need to utilize this field.
                    */
};
2633
2634 static int bnx2x_mcast_wait(struct bnx2x *bp,
2635                             struct bnx2x_mcast_obj *o)
2636 {
2637         if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2638                         o->raw.wait_comp(bp, &o->raw))
2639                 return -EBUSY;
2640
2641         return 0;
2642 }
2643
2644 static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2645                                    struct bnx2x_mcast_obj *o,
2646                                    struct bnx2x_mcast_ramrod_params *p,
2647                                    enum bnx2x_mcast_cmd cmd)
2648 {
2649         int total_sz;
2650         struct bnx2x_pending_mcast_cmd *new_cmd;
2651         struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2652         struct bnx2x_mcast_list_elem *pos;
2653         int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2654                              p->mcast_list_len : 0);
2655
2656         /* If the command is empty ("handle pending commands only"), break */
2657         if (!p->mcast_list_len)
2658                 return 0;
2659
2660         total_sz = sizeof(*new_cmd) +
2661                 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2662
2663         /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2664         new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2665
2666         if (!new_cmd)
2667                 return -ENOMEM;
2668
2669         DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
2670            cmd, macs_list_len);
2671
2672         INIT_LIST_HEAD(&new_cmd->data.macs_head);
2673
2674         new_cmd->type = cmd;
2675         new_cmd->done = false;
2676
2677         switch (cmd) {
2678         case BNX2X_MCAST_CMD_ADD:
2679                 cur_mac = (struct bnx2x_mcast_mac_elem *)
2680                           ((u8 *)new_cmd + sizeof(*new_cmd));
2681
2682                 /* Push the MACs of the current command into the pending command
2683                  * MACs list: FIFO
2684                  */
2685                 list_for_each_entry(pos, &p->mcast_list, link) {
2686                         memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2687                         list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2688                         cur_mac++;
2689                 }
2690
2691                 break;
2692
2693         case BNX2X_MCAST_CMD_DEL:
2694                 new_cmd->data.macs_num = p->mcast_list_len;
2695                 break;
2696
2697         case BNX2X_MCAST_CMD_RESTORE:
2698                 new_cmd->data.next_bin = 0;
2699                 break;
2700
2701         default:
2702                 kfree(new_cmd);
2703                 BNX2X_ERR("Unknown command: %d\n", cmd);
2704                 return -EINVAL;
2705         }
2706
2707         /* Push the new pending command to the tail of the pending list: FIFO */
2708         list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2709
2710         o->set_sched(o);
2711
2712         return 1;
2713 }
2714
2715 /**
2716  * bnx2x_mcast_get_next_bin - get the next set bin (index)
2717  *
2718  * @o:
2719  * @last:       index to start looking from (including)
2720  *
2721  * returns the next found (set) bin or a negative value if none is found.
2722  */
2723 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2724 {
2725         int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2726
2727         for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2728                 if (o->registry.aprox_match.vec[i])
2729                         for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2730                                 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2731                                 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2732                                                        vec, cur_bit)) {
2733                                         return cur_bit;
2734                                 }
2735                         }
2736                 inner_start = 0;
2737         }
2738
2739         /* None found */
2740         return -1;
2741 }
2742
2743 /**
2744  * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2745  *
2746  * @o:
2747  *
2748  * returns the index of the found bin or -1 if none is found
2749  */
2750 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2751 {
2752         int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2753
2754         if (cur_bit >= 0)
2755                 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2756
2757         return cur_bit;
2758 }
2759
2760 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2761 {
2762         struct bnx2x_raw_obj *raw = &o->raw;
2763         u8 rx_tx_flag = 0;
2764
2765         if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2766             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2767                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2768
2769         if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2770             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2771                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2772
2773         return rx_tx_flag;
2774 }
2775
/* Fill one multicast rule in the E2 ramrod data buffer and keep the
 * approximate-match bins registry in sync.
 *
 * @idx:        index of the rule to fill in the ramrod data
 * @cfg_data:   carries the MAC (ADD) or the bin index (RESTORE);
 *              may be NULL for DEL
 * @cmd:        BNX2X_MCAST_CMD_ADD/DEL/RESTORE
 *
 * ADD sets the bin the MAC hashes into; DEL clears the first set bin;
 * RESTORE re-sets an explicitly given bin.
 */
static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
                                        struct bnx2x_mcast_obj *o, int idx,
                                        union bnx2x_mcast_config_data *cfg_data,
                                        enum bnx2x_mcast_cmd cmd)
{
        struct bnx2x_raw_obj *r = &o->raw;
        struct eth_multicast_rules_ramrod_data *data =
                (struct eth_multicast_rules_ramrod_data *)(r->rdata);
        u8 func_id = r->func_id;
        u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
        int bin;

        /* ADD and RESTORE both install a bin; only DEL removes one */
        if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
                rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;

        data->rules[idx].cmd_general_data |= rx_tx_add_flag;

        /* Get a bin and update a bins' vector */
        switch (cmd) {
        case BNX2X_MCAST_CMD_ADD:
                bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
                BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
                break;

        case BNX2X_MCAST_CMD_DEL:
                /* If there were no more bins to clear
                 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
                 * clear any (0xff) bin.
                 * See bnx2x_mcast_validate_e2() for explanation when it may
                 * happen.
                 */
                bin = bnx2x_mcast_clear_first_bin(o);
                break;

        case BNX2X_MCAST_CMD_RESTORE:
                bin = cfg_data->bin;
                break;

        default:
                BNX2X_ERR("Unknown command: %d\n", cmd);
                return;
        }

        DP(BNX2X_MSG_SP, "%s bin %d\n",
                         ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
                         "Setting"  : "Clearing"), bin);

        /* NOTE: a negative bin from the DEL path is truncated to 0xff here */
        data->rules[idx].bin_id    = (u8)bin;
        data->rules[idx].func_id   = func_id;
        data->rules[idx].engine_id = o->engine_id;
}
2827
2828 /**
2829  * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2830  *
2831  * @bp:         device handle
2832  * @o:
2833  * @start_bin:  index in the registry to start from (including)
2834  * @rdata_idx:  index in the ramrod data to start from
2835  *
2836  * returns last handled bin index or -1 if all bins have been handled
2837  */
2838 static inline int bnx2x_mcast_handle_restore_cmd_e2(
2839         struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
2840         int *rdata_idx)
2841 {
2842         int cur_bin, cnt = *rdata_idx;
2843         union bnx2x_mcast_config_data cfg_data = {NULL};
2844
2845         /* go through the registry and configure the bins from it */
2846         for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2847             cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2848
2849                 cfg_data.bin = (u8)cur_bin;
2850                 o->set_one_rule(bp, o, cnt, &cfg_data,
2851                                 BNX2X_MCAST_CMD_RESTORE);
2852
2853                 cnt++;
2854
2855                 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2856
2857                 /* Break if we reached the maximum number
2858                  * of rules.
2859                  */
2860                 if (cnt >= o->max_cmd_len)
2861                         break;
2862         }
2863
2864         *rdata_idx = cnt;
2865
2866         return cur_bin;
2867 }
2868
2869 static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2870         struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2871         int *line_idx)
2872 {
2873         struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2874         int cnt = *line_idx;
2875         union bnx2x_mcast_config_data cfg_data = {NULL};
2876
2877         list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2878                                  link) {
2879
2880                 cfg_data.mac = &pmac_pos->mac[0];
2881                 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2882
2883                 cnt++;
2884
2885                 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2886                    pmac_pos->mac);
2887
2888                 list_del(&pmac_pos->link);
2889
2890                 /* Break if we reached the maximum number
2891                  * of rules.
2892                  */
2893                 if (cnt >= o->max_cmd_len)
2894                         break;
2895         }
2896
2897         *line_idx = cnt;
2898
2899         /* if no more MACs to configure - we are done */
2900         if (list_empty(&cmd_pos->data.macs_head))
2901                 cmd_pos->done = true;
2902 }
2903
2904 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2905         struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2906         int *line_idx)
2907 {
2908         int cnt = *line_idx;
2909
2910         while (cmd_pos->data.macs_num) {
2911                 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2912
2913                 cnt++;
2914
2915                 cmd_pos->data.macs_num--;
2916
2917                   DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
2918                                    cmd_pos->data.macs_num, cnt);
2919
2920                 /* Break if we reached the maximum
2921                  * number of rules.
2922                  */
2923                 if (cnt >= o->max_cmd_len)
2924                         break;
2925         }
2926
2927         *line_idx = cnt;
2928
2929         /* If we cleared all bins - we are done */
2930         if (!cmd_pos->data.macs_num)
2931                 cmd_pos->done = true;
2932 }
2933
2934 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2935         struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2936         int *line_idx)
2937 {
2938         cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2939                                                 line_idx);
2940
2941         if (cmd_pos->data.next_bin < 0)
2942                 /* If o->set_restore returned -1 we are done */
2943                 cmd_pos->done = true;
2944         else
2945                 /* Start from the next bin next time */
2946                 cmd_pos->data.next_bin++;
2947 }
2948
2949 static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2950                                 struct bnx2x_mcast_ramrod_params *p)
2951 {
2952         struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2953         int cnt = 0;
2954         struct bnx2x_mcast_obj *o = p->mcast_obj;
2955
2956         list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2957                                  link) {
2958                 switch (cmd_pos->type) {
2959                 case BNX2X_MCAST_CMD_ADD:
2960                         bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2961                         break;
2962
2963                 case BNX2X_MCAST_CMD_DEL:
2964                         bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2965                         break;
2966
2967                 case BNX2X_MCAST_CMD_RESTORE:
2968                         bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2969                                                            &cnt);
2970                         break;
2971
2972                 default:
2973                         BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2974                         return -EINVAL;
2975                 }
2976
2977                 /* If the command has been completed - remove it from the list
2978                  * and free the memory
2979                  */
2980                 if (cmd_pos->done) {
2981                         list_del(&cmd_pos->link);
2982                         kfree(cmd_pos);
2983                 }
2984
2985                 /* Break if we reached the maximum number of rules */
2986                 if (cnt >= o->max_cmd_len)
2987                         break;
2988         }
2989
2990         return cnt;
2991 }
2992
2993 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2994         struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2995         int *line_idx)
2996 {
2997         struct bnx2x_mcast_list_elem *mlist_pos;
2998         union bnx2x_mcast_config_data cfg_data = {NULL};
2999         int cnt = *line_idx;
3000
3001         list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3002                 cfg_data.mac = mlist_pos->mac;
3003                 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
3004
3005                 cnt++;
3006
3007                 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3008                    mlist_pos->mac);
3009         }
3010
3011         *line_idx = cnt;
3012 }
3013
3014 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
3015         struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3016         int *line_idx)
3017 {
3018         int cnt = *line_idx, i;
3019
3020         for (i = 0; i < p->mcast_list_len; i++) {
3021                 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
3022
3023                 cnt++;
3024
3025                 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
3026                                  p->mcast_list_len - i - 1);
3027         }
3028
3029         *line_idx = cnt;
3030 }
3031
3032 /**
3033  * bnx2x_mcast_handle_current_cmd -
3034  *
3035  * @bp:         device handle
3036  * @p:
3037  * @cmd:
3038  * @start_cnt:  first line in the ramrod data that may be used
3039  *
3040  * This function is called iff there is enough place for the current command in
3041  * the ramrod data.
3042  * Returns number of lines filled in the ramrod data in total.
3043  */
3044 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
3045                         struct bnx2x_mcast_ramrod_params *p,
3046                         enum bnx2x_mcast_cmd cmd,
3047                         int start_cnt)
3048 {
3049         struct bnx2x_mcast_obj *o = p->mcast_obj;
3050         int cnt = start_cnt;
3051
3052         DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3053
3054         switch (cmd) {
3055         case BNX2X_MCAST_CMD_ADD:
3056                 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
3057                 break;
3058
3059         case BNX2X_MCAST_CMD_DEL:
3060                 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
3061                 break;
3062
3063         case BNX2X_MCAST_CMD_RESTORE:
3064                 o->hdl_restore(bp, o, 0, &cnt);
3065                 break;
3066
3067         default:
3068                 BNX2X_ERR("Unknown command: %d\n", cmd);
3069                 return -EINVAL;
3070         }
3071
3072         /* The current command has been handled */
3073         p->mcast_list_len = 0;
3074
3075         return cnt;
3076 }
3077
/* Validate an E2 multicast command and pre-compute the amount of work
 * (p->mcast_list_len) it implies; also updates the registry-size
 * bookkeeping that bnx2x_mcast_revert_e2() can undo on failure.
 */
static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
                                   struct bnx2x_mcast_ramrod_params *p,
                                   enum bnx2x_mcast_cmd cmd)
{
        struct bnx2x_mcast_obj *o = p->mcast_obj;
        int reg_sz = o->get_registry_size(o);

        switch (cmd) {
        /* DEL command deletes all currently configured MACs */
        case BNX2X_MCAST_CMD_DEL:
                o->set_registry_size(o, 0);
                /* Don't break - fall through to set mcast_list_len */

        /* RESTORE command will restore the entire multicast configuration */
        case BNX2X_MCAST_CMD_RESTORE:
                /* Here we set the approximate amount of work to do, which in
                 * fact may be only less as some MACs in postponed ADD
                 * command(s) scheduled before this command may fall into
                 * the same bin and the actual number of bins set in the
                 * registry would be less than we estimated here. See
                 * bnx2x_mcast_set_one_rule_e2() for further details.
                 */
                p->mcast_list_len = reg_sz;
                break;

        case BNX2X_MCAST_CMD_ADD:
        case BNX2X_MCAST_CMD_CONT:
                /* Here we assume that all new MACs will fall into new bins.
                 * However we will correct the real registry size after we
                 * handle all pending commands.
                 */
                o->set_registry_size(o, reg_sz + p->mcast_list_len);
                break;

        default:
                BNX2X_ERR("Unknown command: %d\n", cmd);
                return -EINVAL;
        }

        /* Increase the total number of MACs pending to be configured */
        o->total_pending_num += p->mcast_list_len;

        return 0;
}
3122
3123 static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
3124                                       struct bnx2x_mcast_ramrod_params *p,
3125                                       int old_num_bins)
3126 {
3127         struct bnx2x_mcast_obj *o = p->mcast_obj;
3128
3129         o->set_registry_size(o, old_num_bins);
3130         o->total_pending_num -= p->mcast_list_len;
3131 }
3132
3133 /**
3134  * bnx2x_mcast_set_rdata_hdr_e2 - sets a header values
3135  *
3136  * @bp:         device handle
3137  * @p:
3138  * @len:        number of rules to handle
3139  */
3140 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
3141                                         struct bnx2x_mcast_ramrod_params *p,
3142                                         u8 len)
3143 {
3144         struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3145         struct eth_multicast_rules_ramrod_data *data =
3146                 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
3147
3148         data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3149                                         (BNX2X_FILTER_MCAST_PENDING <<
3150                                          BNX2X_SWCID_SHIFT));
3151         data->header.rule_cnt = len;
3152 }
3153
3154 /**
3155  * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
3156  *
3157  * @bp:         device handle
3158  * @o:
3159  *
3160  * Recalculate the actual number of set bins in the registry using Brian
3161  * Kernighan's algorithm: it's execution complexity is as a number of set bins.
3162  *
3163  * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1().
3164  */
3165 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
3166                                                   struct bnx2x_mcast_obj *o)
3167 {
3168         int i, cnt = 0;
3169         u64 elem;
3170
3171         for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
3172                 elem = o->registry.aprox_match.vec[i];
3173                 for (; elem; cnt++)
3174                         elem &= elem - 1;
3175         }
3176
3177         o->set_registry_size(o, cnt);
3178
3179         return 0;
3180 }
3181
/**
 * bnx2x_mcast_setup_e2 - build and (unless CLEAR_ONLY) post a mcast ramrod
 *
 * @bp:         device handle
 * @p:          ramrod parameters; rdata is taken from p->mcast_obj->raw
 * @cmd:        the current command (ADD/DEL/RESTORE/...)
 *
 * Drains as many pending commands as fit into the ramrod data buffer,
 * appends the current command when there is still room, sets the ramrod
 * header and posts the ramrod on the SPQ.
 *
 * Returns 1 when a ramrod completion is pending, 0 when nothing was sent
 * (CLEAR_ONLY), or a negative error code from bnx2x_sp_post().
 */
static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p,
				enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
	int cnt = 0, rc;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	/* Fill the buffer with previously enqueued commands first */
	cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (list_empty(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The below may be true iff there was enough room in ramrod
	 * data for all pending commands and for the current
	 * command. Otherwise the current command would have been added
	 * to the pending commands and p->mcast_list_len would have been
	 * zeroed.
	 */
	if (p->mcast_list_len > 0)
		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);

	/* We've pulled out some MACs - update the total number of
	 * outstanding.
	 */
	o->total_pending_num -= cnt;

	/* send a ramrod */
	WARN_ON(o->total_pending_num < 0);
	WARN_ON(cnt > o->max_cmd_len);

	bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);

	/* Update a registry size if there are no more pending operations.
	 *
	 * We don't want to change the value of the registry size if there are
	 * pending operations because we want it to always be equal to the
	 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
	 * set bins after the last requested operation in order to properly
	 * evaluate the size of the next DEL/RESTORE operation.
	 *
	 * Note that we update the registry itself during command(s) handling
	 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
	 * with a limited amount of update commands (per MAC/bin) and we don't
	 * know in this scope what the actual state of bins configuration is
	 * going to be after this ramrod.
	 */
	if (!o->total_pending_num)
		bnx2x_mcast_refresh_registry_e2(bp, o);

	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
	 * RAMROD_PENDING status immediately.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return 0;
	} else {
		/* No need for an explicit memory barrier here as long we would
		 * need to ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer which involves a memory
		 * read and we will have to put a full memory barrier there
		 * (inside bnx2x_sp_post()).
		 */

		/* Send a ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
				   raw->cid, U64_HI(raw->rdata_mapping),
				   U64_LO(raw->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return 1;
	}
}
3265
3266 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3267                                     struct bnx2x_mcast_ramrod_params *p,
3268                                     enum bnx2x_mcast_cmd cmd)
3269 {
3270         /* Mark, that there is a work to do */
3271         if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3272                 p->mcast_list_len = 1;
3273
3274         return 0;
3275 }
3276
/* Revert callback for 57711: bnx2x_mcast_validate_e1h() changes no driver
 * state (it only marks work to do), so there is nothing to roll back.
 */
static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
                                       struct bnx2x_mcast_ramrod_params *p,
                                       int old_num_bins)
{
        /* Do nothing */
}
3283
/* Set bit @bit in @filter, an array of 32-bit words ((bit) >> 5 selects
 * the word, (bit) & 0x1f the bit within it) - the 57711 approximate-match
 * multicast filter image.
 */
#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
do { \
        (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
} while (0)
3288
3289 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3290                                            struct bnx2x_mcast_obj *o,
3291                                            struct bnx2x_mcast_ramrod_params *p,
3292                                            u32 *mc_filter)
3293 {
3294         struct bnx2x_mcast_list_elem *mlist_pos;
3295         int bit;
3296
3297         list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3298                 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3299                 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3300
3301                 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3302                    mlist_pos->mac, bit);
3303
3304                 /* bookkeeping... */
3305                 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3306                                   bit);
3307         }
3308 }
3309
3310 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3311         struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3312         u32 *mc_filter)
3313 {
3314         int bit;
3315
3316         for (bit = bnx2x_mcast_get_next_bin(o, 0);
3317              bit >= 0;
3318              bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3319                 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3320                 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3321         }
3322 }
3323
3324 /* On 57711 we write the multicast MACs' approximate match
3325  * table by directly into the TSTORM's internal RAM. So we don't
3326  * really need to handle any tricks to make it work.
3327  */
3328 static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3329                                  struct bnx2x_mcast_ramrod_params *p,
3330                                  enum bnx2x_mcast_cmd cmd)
3331 {
3332         int i;
3333         struct bnx2x_mcast_obj *o = p->mcast_obj;
3334         struct bnx2x_raw_obj *r = &o->raw;
3335
3336         /* If CLEAR_ONLY has been requested - clear the registry
3337          * and clear a pending bit.
3338          */
3339         if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3340                 u32 mc_filter[MC_HASH_SIZE] = {0};
3341
3342                 /* Set the multicast filter bits before writing it into
3343                  * the internal memory.
3344                  */
3345                 switch (cmd) {
3346                 case BNX2X_MCAST_CMD_ADD:
3347                         bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3348                         break;
3349
3350                 case BNX2X_MCAST_CMD_DEL:
3351                         DP(BNX2X_MSG_SP,
3352                            "Invalidating multicast MACs configuration\n");
3353
3354                         /* clear the registry */
3355                         memset(o->registry.aprox_match.vec, 0,
3356                                sizeof(o->registry.aprox_match.vec));
3357                         break;
3358
3359                 case BNX2X_MCAST_CMD_RESTORE:
3360                         bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3361                         break;
3362
3363                 default:
3364                         BNX2X_ERR("Unknown command: %d\n", cmd);
3365                         return -EINVAL;
3366                 }
3367
3368                 /* Set the mcast filter in the internal memory */
3369                 for (i = 0; i < MC_HASH_SIZE; i++)
3370                         REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3371         } else
3372                 /* clear the registry */
3373                 memset(o->registry.aprox_match.vec, 0,
3374                        sizeof(o->registry.aprox_match.vec));
3375
3376         /* We are done */
3377         r->clear_pending(r);
3378
3379         return 0;
3380 }
3381
3382 static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3383                                    struct bnx2x_mcast_ramrod_params *p,
3384                                    enum bnx2x_mcast_cmd cmd)
3385 {
3386         struct bnx2x_mcast_obj *o = p->mcast_obj;
3387         int reg_sz = o->get_registry_size(o);
3388
3389         switch (cmd) {
3390         /* DEL command deletes all currently configured MACs */
3391         case BNX2X_MCAST_CMD_DEL:
3392                 o->set_registry_size(o, 0);
3393                 /* Don't break */
3394
3395         /* RESTORE command will restore the entire multicast configuration */
3396         case BNX2X_MCAST_CMD_RESTORE:
3397                 p->mcast_list_len = reg_sz;
3398                   DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3399                                    cmd, p->mcast_list_len);
3400                 break;
3401
3402         case BNX2X_MCAST_CMD_ADD:
3403         case BNX2X_MCAST_CMD_CONT:
3404                 /* Multicast MACs on 57710 are configured as unicast MACs and
3405                  * there is only a limited number of CAM entries for that
3406                  * matter.
3407                  */
3408                 if (p->mcast_list_len > o->max_cmd_len) {
3409                         BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
3410                                   o->max_cmd_len);
3411                         return -EINVAL;
3412                 }
3413                 /* Every configured MAC should be cleared if DEL command is
3414                  * called. Only the last ADD command is relevant as long as
3415                  * every ADD commands overrides the previous configuration.
3416                  */
3417                 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3418                 if (p->mcast_list_len > 0)
3419                         o->set_registry_size(o, p->mcast_list_len);
3420
3421                 break;
3422
3423         default:
3424                 BNX2X_ERR("Unknown command: %d\n", cmd);
3425                 return -EINVAL;
3426         }
3427
3428         /* We want to ensure that commands are executed one by one for 57710.
3429          * Therefore each none-empty command will consume o->max_cmd_len.
3430          */
3431         if (p->mcast_list_len)
3432                 o->total_pending_num += o->max_cmd_len;
3433
3434         return 0;
3435 }
3436
3437 static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3438                                       struct bnx2x_mcast_ramrod_params *p,
3439                                       int old_num_macs)
3440 {
3441         struct bnx2x_mcast_obj *o = p->mcast_obj;
3442
3443         o->set_registry_size(o, old_num_macs);
3444
3445         /* If current command hasn't been handled yet and we are
3446          * here means that it's meant to be dropped and we have to
3447          * update the number of outstanding MACs accordingly.
3448          */
3449         if (p->mcast_list_len)
3450                 o->total_pending_num -= o->max_cmd_len;
3451 }
3452
3453 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3454                                         struct bnx2x_mcast_obj *o, int idx,
3455                                         union bnx2x_mcast_config_data *cfg_data,
3456                                         enum bnx2x_mcast_cmd cmd)
3457 {
3458         struct bnx2x_raw_obj *r = &o->raw;
3459         struct mac_configuration_cmd *data =
3460                 (struct mac_configuration_cmd *)(r->rdata);
3461
3462         /* copy mac */
3463         if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3464                 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3465                                       &data->config_table[idx].middle_mac_addr,
3466                                       &data->config_table[idx].lsb_mac_addr,
3467                                       cfg_data->mac);
3468
3469                 data->config_table[idx].vlan_id = 0;
3470                 data->config_table[idx].pf_id = r->func_id;
3471                 data->config_table[idx].clients_bit_vector =
3472                         cpu_to_le32(1 << r->cl_id);
3473
3474                 SET_FLAG(data->config_table[idx].flags,
3475                          MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3476                          T_ETH_MAC_COMMAND_SET);
3477         }
3478 }
3479
3480 /**
3481  * bnx2x_mcast_set_rdata_hdr_e1  - set header values in mac_configuration_cmd
3482  *
3483  * @bp:         device handle
3484  * @p:
3485  * @len:        number of rules to handle
3486  */
3487 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3488                                         struct bnx2x_mcast_ramrod_params *p,
3489                                         u8 len)
3490 {
3491         struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3492         struct mac_configuration_cmd *data =
3493                 (struct mac_configuration_cmd *)(r->rdata);
3494
3495         u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3496                      BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3497                      BNX2X_MAX_MULTICAST*(1 + r->func_id));
3498
3499         data->hdr.offset = offset;
3500         data->hdr.client_id = cpu_to_le16(0xff);
3501         data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3502                                      (BNX2X_FILTER_MCAST_PENDING <<
3503                                       BNX2X_SWCID_SHIFT));
3504         data->hdr.length = len;
3505 }
3506
3507 /**
3508  * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3509  *
3510  * @bp:         device handle
3511  * @o:
3512  * @start_idx:  index in the registry to start from
3513  * @rdata_idx:  index in the ramrod data to start from
3514  *
3515  * restore command for 57710 is like all other commands - always a stand alone
3516  * command - start_idx and rdata_idx will always be 0. This function will always
3517  * succeed.
3518  * returns -1 to comply with 57712 variant.
3519  */
3520 static inline int bnx2x_mcast_handle_restore_cmd_e1(
3521         struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
3522         int *rdata_idx)
3523 {
3524         struct bnx2x_mcast_mac_elem *elem;
3525         int i = 0;
3526         union bnx2x_mcast_config_data cfg_data = {NULL};
3527
3528         /* go through the registry and configure the MACs from it. */
3529         list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3530                 cfg_data.mac = &elem->mac[0];
3531                 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3532
3533                 i++;
3534
3535                   DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3536                      cfg_data.mac);
3537         }
3538
3539         *rdata_idx = i;
3540
3541         return -1;
3542 }
3543
/* Handle exactly one pending command (57710 executes commands one at a
 * time). The handled command is removed from the queue and freed.
 *
 * Returns the number of config-table entries written (for DEL: the number
 * of MACs to invalidate), 0 if the queue is empty, or -EINVAL for an
 * unknown command type.
 */
static inline int bnx2x_mcast_handle_pending_cmds_e1(
	struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
{
	struct bnx2x_pending_mcast_cmd *cmd_pos;
	struct bnx2x_mcast_mac_elem *pmac_pos;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	union bnx2x_mcast_config_data cfg_data = {NULL};
	int cnt = 0;

	/* If nothing to be done - return */
	if (list_empty(&o->pending_cmds_head))
		return 0;

	/* Handle the first command */
	cmd_pos = list_first_entry(&o->pending_cmds_head,
				   struct bnx2x_pending_mcast_cmd, link);

	switch (cmd_pos->type) {
	case BNX2X_MCAST_CMD_ADD:
		/* Write one config-table entry per queued MAC */
		list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
			cfg_data.mac = &pmac_pos->mac[0];
			o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);

			cnt++;

			DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
			   pmac_pos->mac);
		}
		break;

	case BNX2X_MCAST_CMD_DEL:
		/* Entries were pre-invalidated by the caller; only the count
		 * is needed here.
		 */
		cnt = cmd_pos->data.macs_num;
		DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		o->hdl_restore(bp, o, 0, &cnt);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
		return -EINVAL;
	}

	/* The command is consumed - unlink and free it */
	list_del(&cmd_pos->link);
	kfree(cmd_pos);

	return cnt;
}
3593
3594 /**
3595  * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
3596  *
3597  * @fw_hi:
3598  * @fw_mid:
3599  * @fw_lo:
3600  * @mac:
3601  */
3602 static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3603                                          __le16 *fw_lo, u8 *mac)
3604 {
3605         mac[1] = ((u8 *)fw_hi)[0];
3606         mac[0] = ((u8 *)fw_hi)[1];
3607         mac[3] = ((u8 *)fw_mid)[0];
3608         mac[2] = ((u8 *)fw_mid)[1];
3609         mac[5] = ((u8 *)fw_lo)[0];
3610         mac[4] = ((u8 *)fw_lo)[1];
3611 }
3612
3613 /**
3614  * bnx2x_mcast_refresh_registry_e1 -
3615  *
3616  * @bp:         device handle
3617  * @cnt:
3618  *
3619  * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
3620  * and update the registry correspondingly: if ADD - allocate a memory and add
3621  * the entries to the registry (list), if DELETE - clear the registry and free
3622  * the memory.
3623  */
3624 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3625                                                   struct bnx2x_mcast_obj *o)
3626 {
3627         struct bnx2x_raw_obj *raw = &o->raw;
3628         struct bnx2x_mcast_mac_elem *elem;
3629         struct mac_configuration_cmd *data =
3630                         (struct mac_configuration_cmd *)(raw->rdata);
3631
3632         /* If first entry contains a SET bit - the command was ADD,
3633          * otherwise - DEL_ALL
3634          */
3635         if (GET_FLAG(data->config_table[0].flags,
3636                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3637                 int i, len = data->hdr.length;
3638
3639                 /* Break if it was a RESTORE command */
3640                 if (!list_empty(&o->registry.exact_match.macs))
3641                         return 0;
3642
3643                 elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
3644                 if (!elem) {
3645                         BNX2X_ERR("Failed to allocate registry memory\n");
3646                         return -ENOMEM;
3647                 }
3648
3649                 for (i = 0; i < len; i++, elem++) {
3650                         bnx2x_get_fw_mac_addr(
3651                                 &data->config_table[i].msb_mac_addr,
3652                                 &data->config_table[i].middle_mac_addr,
3653                                 &data->config_table[i].lsb_mac_addr,
3654                                 elem->mac);
3655                         DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
3656                            elem->mac);
3657                         list_add_tail(&elem->link,
3658                                       &o->registry.exact_match.macs);
3659                 }
3660         } else {
3661                 elem = list_first_entry(&o->registry.exact_match.macs,
3662                                         struct bnx2x_mcast_mac_elem, link);
3663                 DP(BNX2X_MSG_SP, "Deleting a registry\n");
3664                 kfree(elem);
3665                 INIT_LIST_HEAD(&o->registry.exact_match.macs);
3666         }
3667
3668         return 0;
3669 }
3670
/**
 * bnx2x_mcast_setup_e1 - build and (unless CLEAR_ONLY) post a ramrod (57710)
 *
 * @bp:         device handle
 * @p:          ramrod parameters; rdata is taken from p->mcast_obj->raw
 * @cmd:        the current command
 *
 * Invalidates the whole config table, fills it from either the single
 * pending command or the current one, refreshes the registry and posts
 * a SET_MAC ramrod.
 *
 * Returns 1 when a ramrod completion is pending, 0 when nothing was sent
 * (CLEAR_ONLY), or a negative error code.
 */
static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p,
				enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(raw->rdata);
	int cnt = 0, i, rc;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	/* First set all entries as invalid */
	for (i = 0; i < o->max_cmd_len ; i++)
		SET_FLAG(data->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);

	/* Handle pending commands first */
	cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (list_empty(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The below may be true iff there were no pending commands */
	if (!cnt)
		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);

	/* For 57710 every command has o->max_cmd_len length to ensure that
	 * commands are done one at a time.
	 */
	o->total_pending_num -= o->max_cmd_len;

	/* send a ramrod */

	WARN_ON(cnt > o->max_cmd_len);

	/* Set ramrod header (in particular, a number of entries to update) */
	bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);

	/* update a registry: we need the registry contents to be always up
	 * to date in order to be able to execute a RESTORE opcode. Here
	 * we use the fact that for 57710 we sent one command at a time
	 * hence we may take the registry update out of the command handling
	 * and do it in a simpler way here.
	 */
	rc = bnx2x_mcast_refresh_registry_e1(bp, o);
	if (rc)
		return rc;

	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
	 * RAMROD_PENDING status immediately.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return 0;
	} else {
		/* No need for an explicit memory barrier here as long we would
		 * need to ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer which involves a memory
		 * read and we will have to put a full memory barrier there
		 * (inside bnx2x_sp_post()).
		 */

		/* Send a ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
				   U64_HI(raw->rdata_mapping),
				   U64_LO(raw->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return 1;
	}
}
3749
/* Number of exact-match MACs currently recorded in the registry (57710) */
static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
{
        return o->registry.exact_match.num_macs_set;
}
3754
/* Number of approximate-match bins currently recorded (57711/57712) */
static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
{
        return o->registry.aprox_match.num_bins_set;
}
3759
/* Record the exact-match MAC count in the registry (57710) */
static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
                                                int n)
{
        o->registry.exact_match.num_macs_set = n;
}
3765
/* Record the approximate-match bin count in the registry (57711/57712) */
static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
                                                int n)
{
        o->registry.aprox_match.num_bins_set = n;
}
3771
/**
 * bnx2x_config_mcast - configure the multicast MACs list
 *
 * @bp:         device handle
 * @p:          ramrod parameters
 * @cmd:        command to execute (ADD/DEL/RESTORE/CONT)
 *
 * Validates the command, enqueues it when it cannot be completed in this
 * iteration (a ramrod is already pending or the credit is exhausted) and
 * otherwise configures it in the chip via the per-chip config_mcast
 * callback. On failure the registry size and credit are reverted.
 *
 * Returns 0 on synchronous completion, a positive value when a ramrod
 * completion is pending, or a negative error code.
 */
int bnx2x_config_mcast(struct bnx2x *bp,
		       struct bnx2x_mcast_ramrod_params *p,
		       enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	int rc = 0, old_reg_size;

	/* This is needed to recover number of currently configured mcast macs
	 * in case of failure.
	 */
	old_reg_size = o->get_registry_size(o);

	/* Do some calculations and checks */
	rc = o->validate(bp, p, cmd);
	if (rc)
		return rc;

	/* Return if there is no work to do */
	if ((!p->mcast_list_len) && (!o->check_sched(o)))
		return 0;

	DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
	   o->total_pending_num, p->mcast_list_len, o->max_cmd_len);

	/* Enqueue the current command to the pending list if we can't complete
	 * it in the current iteration
	 */
	if (r->check_pending(r) ||
	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
		rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
		if (rc < 0)
			goto error_exit1;

		/* As long as the current command is in a command list we
		 * don't need to handle it separately.
		 */
		p->mcast_list_len = 0;
	}

	if (!r->check_pending(r)) {

		/* Set 'pending' state */
		r->set_pending(r);

		/* Configure the new classification in the chip */
		rc = o->config_mcast(bp, p, cmd);
		if (rc < 0)
			goto error_exit2;

		/* Wait for a ramrod completion if was requested */
		if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
			rc = o->wait_comp(bp, o);
	}

	return rc;

error_exit2:
	/* config failed after pending was set - undo both */
	r->clear_pending(r);

error_exit1:
	/* roll back registry size / credit changes made by validate() */
	o->revert(bp, p, old_reg_size);

	return rc;
}
3837
/* Atomically clear the SCHEDULED bit in the object's state, fenced on
 * both sides so the state change is ordered against surrounding accesses.
 */
static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
{
        smp_mb__before_clear_bit();
        clear_bit(o->sched_state, o->raw.pstate);
        smp_mb__after_clear_bit();
}
3844
/* Atomically set the SCHEDULED bit in the object's state.
 * NOTE(review): the barriers are the *_clear_bit() flavor around a
 * set_bit() - presumably they act as generic full barriers on this kernel
 * version; confirm against Documentation/memory-barriers.txt.
 */
static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
{
        smp_mb__before_clear_bit();
        set_bit(o->sched_state, o->raw.pstate);
        smp_mb__after_clear_bit();
}
3851
/* True iff the object's SCHEDULED bit is set (commands are queued) */
static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
{
        return !!test_bit(o->sched_state, o->raw.pstate);
}
3856
/* True iff a ramrod completion is outstanding or commands are scheduled */
static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
{
        return o->raw.check_pending(&o->raw) || o->check_sched(o);
}
3861
/**
 * bnx2x_init_mcast_obj - initialize a multicast object
 *
 * @bp:             device handle
 * @mcast_obj:      object to initialize (zeroed first)
 * @mcast_cl_id:    client id for the raw object
 * @mcast_cid:      CID for the raw object
 * @func_id:        function id
 * @engine_id:      engine id
 * @rdata:          ramrod data buffer
 * @rdata_mapping:  DMA address of @rdata
 * @state:          pending-state bit
 * @pstate:         pointer to the state word the bit lives in
 * @type:           object type
 *
 * Wires the per-chip callback table: 57710 (E1) uses exact-match CAM
 * entries, 57711 (E1H) writes an approximate-match filter directly to
 * internal RAM, and later chips (E2+) use multicast-rules ramrods.
 */
void bnx2x_init_mcast_obj(struct bnx2x *bp,
			  struct bnx2x_mcast_obj *mcast_obj,
			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
			  int state, unsigned long *pstate, bnx2x_obj_type type)
{
	memset(mcast_obj, 0, sizeof(*mcast_obj));

	bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
			   rdata, rdata_mapping, state, pstate, type);

	mcast_obj->engine_id = engine_id;

	INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);

	mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
	mcast_obj->check_sched = bnx2x_mcast_check_sched;
	mcast_obj->set_sched = bnx2x_mcast_set_sched;
	mcast_obj->clear_sched = bnx2x_mcast_clear_sched;

	if (CHIP_IS_E1(bp)) {
		mcast_obj->config_mcast      = bnx2x_mcast_setup_e1;
		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
		mcast_obj->hdl_restore       =
			bnx2x_mcast_handle_restore_cmd_e1;
		mcast_obj->check_pending     = bnx2x_mcast_check_pending;

		/* Emulation has a smaller CAM slice per function */
		if (CHIP_REV_IS_SLOW(bp))
			mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
		else
			mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;

		mcast_obj->wait_comp         = bnx2x_mcast_wait;
		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e1;
		mcast_obj->validate          = bnx2x_mcast_validate_e1;
		mcast_obj->revert            = bnx2x_mcast_revert_e1;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_exact;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_exact;

		/* 57710 is the only chip that uses the exact match for mcast
		 * at the moment.
		 */
		INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);

	} else if (CHIP_IS_E1H(bp)) {
		mcast_obj->config_mcast  = bnx2x_mcast_setup_e1h;
		mcast_obj->enqueue_cmd   = NULL;
		mcast_obj->hdl_restore   = NULL;
		mcast_obj->check_pending = bnx2x_mcast_check_pending;

		/* 57711 doesn't send a ramrod, so it has unlimited credit
		 * for one command.
		 */
		mcast_obj->max_cmd_len       = -1;
		mcast_obj->wait_comp         = bnx2x_mcast_wait;
		mcast_obj->set_one_rule      = NULL;
		mcast_obj->validate          = bnx2x_mcast_validate_e1h;
		mcast_obj->revert            = bnx2x_mcast_revert_e1h;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_aprox;
	} else {
		mcast_obj->config_mcast      = bnx2x_mcast_setup_e2;
		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
		mcast_obj->hdl_restore       =
			bnx2x_mcast_handle_restore_cmd_e2;
		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
		/* TODO: There should be a proper HSI define for this number!!!
		 */
		mcast_obj->max_cmd_len       = 16;
		mcast_obj->wait_comp         = bnx2x_mcast_wait;
		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e2;
		mcast_obj->validate          = bnx2x_mcast_validate_e2;
		mcast_obj->revert            = bnx2x_mcast_revert_e2;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_aprox;
	}
}
3945
3946 /*************************** Credit handling **********************************/
3947
3948 /**
3949  * atomic_add_ifless - add if the result is less than a given value.
3950  *
3951  * @v:  pointer of type atomic_t
3952  * @a:  the amount to add to v...
3953  * @u:  ...if (v + a) is less than u.
3954  *
3955  * returns true if (v + a) was less than u, and false otherwise.
3956  *
3957  */
3958 static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3959 {
3960         int c, old;
3961
3962         c = atomic_read(v);
3963         for (;;) {
3964                 if (unlikely(c + a >= u))
3965                         return false;
3966
3967                 old = atomic_cmpxchg((v), c, c + a);
3968                 if (likely(old == c))
3969                         break;
3970                 c = old;
3971         }
3972
3973         return true;
3974 }
3975
3976 /**
3977  * atomic_dec_ifmoe - dec if the result is more or equal than a given value.
3978  *
3979  * @v:  pointer of type atomic_t
3980  * @a:  the amount to dec from v...
3981  * @u:  ...if (v - a) is more or equal than u.
3982  *
3983  * returns true if (v - a) was more or equal than u, and false
3984  * otherwise.
3985  */
3986 static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3987 {
3988         int c, old;
3989
3990         c = atomic_read(v);
3991         for (;;) {
3992                 if (unlikely(c - a < u))
3993                         return false;
3994
3995                 old = atomic_cmpxchg((v), c, c - a);
3996                 if (likely(old == c))
3997                         break;
3998                 c = old;
3999         }
4000
4001         return true;
4002 }
4003
/* Consume @cnt credits from the pool.
 *
 * Returns true on success; returns false and leaves the pool untouched
 * if the credit count would drop below zero.
 */
static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
{
	bool rc;

	/* Full barriers order the credit update against the caller's
	 * surrounding memory accesses.
	 */
	smp_mb();
	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
	smp_mb();

	return rc;
}
4014
/* Return @cnt credits to the pool.
 *
 * Returns true on success; returns false without refilling if the result
 * would exceed the total pool size.
 */
static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
{
	bool rc;

	smp_mb();

	/* Don't let to refill if credit + cnt > pool_sz */
	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);

	smp_mb();

	return rc;
}
4028
/* Read and return the amount of credit currently left in the pool */
static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
{
	int cur_credit;

	smp_mb();
	cur_credit = atomic_read(&o->credit);

	return cur_credit;
}
4038
/* get/put callback for an unlimited pool - always succeeds */
static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
					  int cnt)
{
	return true;
}
4044
4045 static bool bnx2x_credit_pool_get_entry(
4046         struct bnx2x_credit_pool_obj *o,
4047         int *offset)
4048 {
4049         int idx, vec, i;
4050
4051         *offset = -1;
4052
4053         /* Find "internal cam-offset" then add to base for this object... */
4054         for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
4055
4056                 /* Skip the current vector if there are no free entries in it */
4057                 if (!o->pool_mirror[vec])
4058                         continue;
4059
4060                 /* If we've got here we are going to find a free entry */
4061                 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
4062                       i < BIT_VEC64_ELEM_SZ; idx++, i++)
4063
4064                         if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
4065                                 /* Got one!! */
4066                                 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
4067                                 *offset = o->base_pool_offset + idx;
4068                                 return true;
4069                         }
4070         }
4071
4072         return false;
4073 }
4074
4075 static bool bnx2x_credit_pool_put_entry(
4076         struct bnx2x_credit_pool_obj *o,
4077         int offset)
4078 {
4079         if (offset < o->base_pool_offset)
4080                 return false;
4081
4082         offset -= o->base_pool_offset;
4083
4084         if (offset >= o->pool_sz)
4085                 return false;
4086
4087         /* Return the entry to the pool */
4088         BIT_VEC64_SET_BIT(o->pool_mirror, offset);
4089
4090         return true;
4091 }
4092
/* put_entry callback used when CAM entry handling is disabled */
static bool bnx2x_credit_pool_put_entry_always_true(
	struct bnx2x_credit_pool_obj *o,
	int offset)
{
	return true;
}
4099
/* get_entry callback used when CAM entry handling is disabled;
 * always succeeds but reports no specific offset (-1).
 */
static bool bnx2x_credit_pool_get_entry_always_true(
	struct bnx2x_credit_pool_obj *o,
	int *offset)
{
	*offset = -1;
	return true;
}
4107 /**
4108  * bnx2x_init_credit_pool - initialize credit pool internals.
4109  *
4110  * @p:
4111  * @base:       Base entry in the CAM to use.
4112  * @credit:     pool size.
4113  *
4114  * If base is negative no CAM entries handling will be performed.
4115  * If credit is negative pool operations will always succeed (unlimited pool).
4116  *
4117  */
4118 static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
4119                                           int base, int credit)
4120 {
4121         /* Zero the object first */
4122         memset(p, 0, sizeof(*p));
4123
4124         /* Set the table to all 1s */
4125         memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
4126
4127         /* Init a pool as full */
4128         atomic_set(&p->credit, credit);
4129
4130         /* The total poll size */
4131         p->pool_sz = credit;
4132
4133         p->base_pool_offset = base;
4134
4135         /* Commit the change */
4136         smp_mb();
4137
4138         p->check = bnx2x_credit_pool_check;
4139
4140         /* if pool credit is negative - disable the checks */
4141         if (credit >= 0) {
4142                 p->put      = bnx2x_credit_pool_put;
4143                 p->get      = bnx2x_credit_pool_get;
4144                 p->put_entry = bnx2x_credit_pool_put_entry;
4145                 p->get_entry = bnx2x_credit_pool_get_entry;
4146         } else {
4147                 p->put      = bnx2x_credit_pool_always_true;
4148                 p->get      = bnx2x_credit_pool_always_true;
4149                 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
4150                 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
4151         }
4152
4153         /* If base is negative - disable entries handling */
4154         if (base < 0) {
4155                 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
4156                 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
4157         }
4158 }
4159
4160 void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
4161                                 struct bnx2x_credit_pool_obj *p, u8 func_id,
4162                                 u8 func_num)
4163 {
4164 /* TODO: this will be defined in consts as well... */
4165 #define BNX2X_CAM_SIZE_EMUL 5
4166
4167         int cam_sz;
4168
4169         if (CHIP_IS_E1(bp)) {
4170                 /* In E1, Multicast is saved in cam... */
4171                 if (!CHIP_REV_IS_SLOW(bp))
4172                         cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
4173                 else
4174                         cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
4175
4176                 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
4177
4178         } else if (CHIP_IS_E1H(bp)) {
4179                 /* CAM credit is equaly divided between all active functions
4180                  * on the PORT!.
4181                  */
4182                 if ((func_num > 0)) {
4183                         if (!CHIP_REV_IS_SLOW(bp))
4184                                 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
4185                         else
4186                                 cam_sz = BNX2X_CAM_SIZE_EMUL;
4187                         bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
4188                 } else {
4189                         /* this should never happen! Block MAC operations. */
4190                         bnx2x_init_credit_pool(p, 0, 0);
4191                 }
4192
4193         } else {
4194
4195                 /* CAM credit is equaly divided between all active functions
4196                  * on the PATH.
4197                  */
4198                 if ((func_num > 0)) {
4199                         if (!CHIP_REV_IS_SLOW(bp))
4200                                 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
4201                         else
4202                                 cam_sz = BNX2X_CAM_SIZE_EMUL;
4203
4204                         /* No need for CAM entries handling for 57712 and
4205                          * newer.
4206                          */
4207                         bnx2x_init_credit_pool(p, -1, cam_sz);
4208                 } else {
4209                         /* this should never happen! Block MAC operations. */
4210                         bnx2x_init_credit_pool(p, 0, 0);
4211                 }
4212         }
4213 }
4214
4215 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
4216                                  struct bnx2x_credit_pool_obj *p,
4217                                  u8 func_id,
4218                                  u8 func_num)
4219 {
4220         if (CHIP_IS_E1x(bp)) {
4221                 /* There is no VLAN credit in HW on 57710 and 57711 only
4222                  * MAC / MAC-VLAN can be set
4223                  */
4224                 bnx2x_init_credit_pool(p, 0, -1);
4225         } else {
4226                 /* CAM credit is equally divided between all active functions
4227                  * on the PATH.
4228                  */
4229                 if (func_num > 0) {
4230                         int credit = MAX_VLAN_CREDIT_E2 / func_num;
4231                         bnx2x_init_credit_pool(p, func_id * credit, credit);
4232                 } else
4233                         /* this should never happen! Block VLAN operations. */
4234                         bnx2x_init_credit_pool(p, 0, 0);
4235         }
4236 }
4237
4238 /****************** RSS Configuration ******************/
4239 /**
4240  * bnx2x_debug_print_ind_table - prints the indirection table configuration.
4241  *
4242  * @bp:         driver handle
4243  * @p:          pointer to rss configuration
4244  *
4245  * Prints it when NETIF_MSG_IFUP debug level is configured.
4246  */
4247 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4248                                         struct bnx2x_config_rss_params *p)
4249 {
4250         int i;
4251
4252         DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4253         DP(BNX2X_MSG_SP, "0x0000: ");
4254         for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4255                 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4256
4257                 /* Print 4 bytes in a line */
4258                 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4259                     (((i + 1) & 0x3) == 0)) {
4260                         DP_CONT(BNX2X_MSG_SP, "\n");
4261                         DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4262                 }
4263         }
4264
4265         DP_CONT(BNX2X_MSG_SP, "\n");
4266 }
4267
/**
 * bnx2x_setup_rss - configure RSS
 *
 * @bp:		device handle
 * @p:		rss configuration
 *
 * Fills an eth_rss_update_ramrod_data structure from @p and sends an
 * RSS_UPDATE ramrod for that matter.
 *
 * Returns 1 when the ramrod was posted (completion still pending),
 * negative error code if posting failed.
 */
static int bnx2x_setup_rss(struct bnx2x *bp,
			   struct bnx2x_config_rss_params *p)
{
	struct bnx2x_rss_config_obj *o = p->rss_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	struct eth_rss_update_ramrod_data *data =
		(struct eth_rss_update_ramrod_data *)(r->rdata);
	u8 rss_mode = 0;
	int rc;

	memset(data, 0, sizeof(*data));

	DP(BNX2X_MSG_SP, "Configuring RSS\n");

	/* Set an echo field (CID + state, returned in the completion) */
	data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
				 (r->state << BNX2X_SWCID_SHIFT));

	/* RSS mode */
	if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_DISABLED;
	else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_REGULAR;

	data->rss_mode = rss_mode;

	DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);

	/* RSS capabilities: translate each requested hash type flag into
	 * the corresponding ramrod capability bit.
	 */
	if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;

	/* Hashing mask */
	data->rss_result_mask = p->rss_result_mask;

	/* RSS engine ID */
	data->rss_engine_id = o->engine_id;

	DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);

	/* Indirection table */
	memcpy(data->indirection_table, p->ind_table,
		  T_ETH_INDIRECTION_TABLE_SIZE);

	/* Remember the last configuration (read back via
	 * bnx2x_get_rss_ind_table())
	 */
	memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);

	/* Print the indirection table */
	if (netif_msg_ifup(bp))
		bnx2x_debug_print_ind_table(bp, p);

	/* RSS keys - only updated when explicitly requested */
	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
		memcpy(&data->rss_key[0], &p->rss_key[0],
		       sizeof(data->rss_key));
		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
	}

	/* No need for an explicit memory barrier here as long we would
	 * need to ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read and we will have to put a full memory barrier there
	 * (inside bnx2x_sp_post()).
	 */

	/* Send a ramrod */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
			   U64_HI(r->rdata_mapping),
			   U64_LO(r->rdata_mapping),
			   ETH_CONNECTION_TYPE);

	if (rc < 0)
		return rc;

	return 1;
}
4373
/**
 * bnx2x_get_rss_ind_table - copy out the last configured indirection table
 *
 * @rss_obj:	RSS object the table was saved in by bnx2x_setup_rss()
 * @ind_table:	caller's buffer; must hold sizeof(rss_obj->ind_table) bytes
 */
void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
			     u8 *ind_table)
{
	memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
}
4379
4380 int bnx2x_config_rss(struct bnx2x *bp,
4381                      struct bnx2x_config_rss_params *p)
4382 {
4383         int rc;
4384         struct bnx2x_rss_config_obj *o = p->rss_obj;
4385         struct bnx2x_raw_obj *r = &o->raw;
4386
4387         /* Do nothing if only driver cleanup was requested */
4388         if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4389                 return 0;
4390
4391         r->set_pending(r);
4392
4393         rc = o->config_rss(bp, p);
4394         if (rc < 0) {
4395                 r->clear_pending(r);
4396                 return rc;
4397         }
4398
4399         if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4400                 rc = r->wait_comp(bp, r);
4401
4402         return rc;
4403 }
4404
/**
 * bnx2x_init_rss_config_obj - initialize an RSS configuration object
 *
 * @bp:			device handle
 * @rss_obj:		object to initialize
 * @cl_id:		client id
 * @cid:		connection id to send the RSS ramrod on
 * @func_id:		function id
 * @engine_id:		RSS engine id
 * @rdata:		ramrod data buffer (virtual address)
 * @rdata_mapping:	ramrod data buffer (DMA address)
 * @state:		"pending" state bit
 * @pstate:		pointer to the owner's state bitmap
 * @type:		object type
 */
void bnx2x_init_rss_config_obj(struct bnx2x *bp,
			       struct bnx2x_rss_config_obj *rss_obj,
			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
			       void *rdata, dma_addr_t rdata_mapping,
			       int state, unsigned long *pstate,
			       bnx2x_obj_type type)
{
	bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
			   rdata_mapping, state, pstate, type);

	rss_obj->engine_id  = engine_id;
	rss_obj->config_rss = bnx2x_setup_rss;
}
4418
4419 int validate_vlan_mac(struct bnx2x *bp,
4420                       struct bnx2x_vlan_mac_obj *vlan_mac)
4421 {
4422         if (!vlan_mac->get_n_elements) {
4423                 BNX2X_ERR("vlan mac object was not intialized\n");
4424                 return -EINVAL;
4425         }
4426         return 0;
4427 }
4428
4429 /********************** Queue state object ***********************************/
4430
/**
 * bnx2x_queue_state_change - perform Queue state change transition
 *
 * @bp:		device handle
 * @params:	parameters to perform the transition
 *
 * returns 0 in case of successfully completed transition, negative error
 * code in case of failure, positive (EBUSY) value if there is a completion
 * to that is still pending (possible only if RAMROD_COMP_WAIT is
 * not set in params->ramrod_flags for asynchronous commands).
 *
 */
int bnx2x_queue_state_change(struct bnx2x *bp,
			     struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	int rc, pending_bit;
	unsigned long *pending = &o->pending;

	/* Check that the requested transition is legal */
	rc = o->check_transition(bp, o, params);
	if (rc) {
		BNX2X_ERR("check transition returned an error. rc %d\n", rc);
		return -EINVAL;
	}

	/* Set "pending" bit */
	DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
	pending_bit = o->set_pending(o, params);
	DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);

	/* Don't send a command if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
		o->complete_cmd(bp, o, pending_bit);
	else {
		/* Send a ramrod */
		rc = o->send_cmd(bp, params);
		if (rc) {
			/* Posting failed - roll back the pending bit and
			 * the planned next state before reporting the error.
			 */
			o->next_state = BNX2X_Q_STATE_MAX;
			clear_bit(pending_bit, pending);
			smp_mb__after_clear_bit();
			return rc;
		}

		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			rc = o->wait_comp(bp, o, pending_bit);
			if (rc)
				return rc;

			return 0;
		}
	}

	/* Non-zero here means the command's completion is still pending */
	return !!test_bit(pending_bit, pending);
}
4486
4487 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4488                                    struct bnx2x_queue_state_params *params)
4489 {
4490         enum bnx2x_queue_cmd cmd = params->cmd, bit;
4491
4492         /* ACTIVATE and DEACTIVATE commands are implemented on top of
4493          * UPDATE command.
4494          */
4495         if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4496             (cmd == BNX2X_Q_CMD_DEACTIVATE))
4497                 bit = BNX2X_Q_CMD_UPDATE;
4498         else
4499                 bit = cmd;
4500
4501         set_bit(bit, &obj->pending);
4502         return bit;
4503 }
4504
/* Wait for the @cmd pending bit of the queue object to be cleared by the
 * corresponding ramrod completion (delegates to bnx2x_state_wait()).
 */
static int bnx2x_queue_wait_comp(struct bnx2x *bp,
				 struct bnx2x_queue_sp_obj *o,
				 enum bnx2x_queue_cmd cmd)
{
	return bnx2x_state_wait(bp, cmd, &o->pending);
}
4511
/**
 * bnx2x_queue_comp_cmd - complete the state change command.
 *
 * @bp:		device handle
 * @o:		queue state object
 * @cmd:	the command whose completion arrived
 *
 * Checks that the arrived completion is expected, then moves the object
 * into its planned next state and clears the command's pending bit.
 */
static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
				struct bnx2x_queue_sp_obj *o,
				enum bnx2x_queue_cmd cmd)
{
	unsigned long cur_pending = o->pending;

	/* Test against a local snapshot first - the real pending bit is
	 * only cleared further below, after the state is published.
	 */
	if (!test_and_clear_bit(cmd, &cur_pending)) {
		BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
			  cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
			  o->state, cur_pending, o->next_state);
		return -EINVAL;
	}

	if (o->next_tx_only >= o->max_cos)
		/* >= because tx only must always be smaller than cos since the
		 * primary connection supports COS 0
		 */
		BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
			   o->next_tx_only, o->max_cos);

	DP(BNX2X_MSG_SP,
	   "Completing command %d for queue %d, setting state to %d\n",
	   cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);

	if (o->next_tx_only)  /* print num tx-only if any exist */
		DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
		   o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);

	o->state = o->next_state;
	o->num_tx_only = o->next_tx_only;
	o->next_state = BNX2X_Q_STATE_MAX;

	/* It's important that o->state and o->next_state are
	 * updated before o->pending.
	 */
	wmb();

	clear_bit(cmd, &o->pending);
	smp_mb__after_clear_bit();

	return 0;
}
4563
4564 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4565                                 struct bnx2x_queue_state_params *cmd_params,
4566                                 struct client_init_ramrod_data *data)
4567 {
4568         struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4569
4570         /* Rx data */
4571
4572         /* IPv6 TPA supported for E2 and above only */
4573         data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
4574                                 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4575 }
4576
4577 static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4578                                 struct bnx2x_queue_sp_obj *o,
4579                                 struct bnx2x_general_setup_params *params,
4580                                 struct client_init_general_data *gen_data,
4581                                 unsigned long *flags)
4582 {
4583         gen_data->client_id = o->cl_id;
4584
4585         if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4586                 gen_data->statistics_counter_id =
4587                                         params->stat_id;
4588                 gen_data->statistics_en_flg = 1;
4589                 gen_data->statistics_zero_flg =
4590                         test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4591         } else
4592                 gen_data->statistics_counter_id =
4593                                         DISABLE_STATISTIC_COUNTER_ID_VALUE;
4594
4595         gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4596         gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4597         gen_data->sp_client_id = params->spcl_id;
4598         gen_data->mtu = cpu_to_le16(params->mtu);
4599         gen_data->func_id = o->func_id;
4600
4601         gen_data->cos = params->cos;
4602
4603         gen_data->traffic_type =
4604                 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4605                 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4606
4607         DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4608            gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4609 }
4610
/* Fill the Tx section of a queue SETUP / TX-only SETUP ramrod from the
 * Tx queue parameters and queue flags.
 */
static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
				struct bnx2x_txq_setup_params *params,
				struct client_init_tx_data *tx_data,
				unsigned long *flags)
{
	tx_data->enforce_security_flg =
		test_bit(BNX2X_Q_FLG_TX_SEC, flags);
	tx_data->default_vlan =
		cpu_to_le16(params->default_vlan);
	tx_data->default_vlan_flg =
		test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
	tx_data->tx_switching_flg =
		test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
	tx_data->anti_spoofing_flg =
		test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
	tx_data->force_default_pri_flg =
		test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);

	/* Tunneled-packet offload knobs */
	tx_data->tunnel_lso_inc_ip_id =
		test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
	tx_data->tunnel_non_lso_pcsum_location =
		test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
								  PCSUM_ON_BD;

	/* Status block / CQ binding for this Tx queue */
	tx_data->tx_status_block_id = params->fw_sb_id;
	tx_data->tx_sb_index_number = params->sb_cq_index;
	tx_data->tss_leading_client_id = params->tss_leading_cl_id;

	/* DMA address of the Tx BD ring */
	tx_data->tx_bd_page_base.lo =
		cpu_to_le32(U64_LO(params->dscr_map));
	tx_data->tx_bd_page_base.hi =
		cpu_to_le32(U64_HI(params->dscr_map));

	/* Don't configure any Tx switching mode during queue SETUP */
	tx_data->state = 0;
}
4647
/* Fill the Rx flow-control (pause) thresholds of a queue SETUP ramrod */
static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
				struct rxq_pause_params *params,
				struct client_init_rx_data *rx_data)
{
	/* flow control data */
	rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
	rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
	rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
	rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
	rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
	rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
	rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
}
4661
/* Fill the Rx section of a queue SETUP ramrod from the Rx queue
 * parameters and queue flags.
 */
static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
				struct bnx2x_rxq_setup_params *params,
				struct client_init_rx_data *rx_data,
				unsigned long *flags)
{
	/* TPA (LRO/GRO aggregation) enablement */
	rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
	rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
				CLIENT_INIT_RX_DATA_TPA_MODE;
	rx_data->vmqueue_mode_en_flg = 0;

	rx_data->cache_line_alignment_log_size =
		params->cache_line_log;
	rx_data->enable_dynamic_hc =
		test_bit(BNX2X_Q_FLG_DHC, flags);
	rx_data->max_sges_for_packet = params->max_sges_pkt;
	rx_data->client_qzone_id = params->cl_qzone_id;
	rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);

	/* Always start in DROP_ALL mode */
	rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);

	/* We don't set drop flags */
	rx_data->drop_ip_cs_err_flg = 0;
	rx_data->drop_tcp_cs_err_flg = 0;
	rx_data->drop_ttl0_flg = 0;
	rx_data->drop_udp_cs_err_flg = 0;
	rx_data->inner_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_FLG_VLAN, flags);
	rx_data->outer_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_FLG_OV, flags);
	rx_data->status_block_id = params->fw_sb_id;
	rx_data->rx_sb_index_number = params->sb_cq_index;
	rx_data->max_tpa_queues = params->max_tpa_queues;
	rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
	rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
	/* DMA addresses of the BD, SGE and CQE rings */
	rx_data->bd_page_base.lo =
		cpu_to_le32(U64_LO(params->dscr_map));
	rx_data->bd_page_base.hi =
		cpu_to_le32(U64_HI(params->dscr_map));
	rx_data->sge_page_base.lo =
		cpu_to_le32(U64_LO(params->sge_map));
	rx_data->sge_page_base.hi =
		cpu_to_le32(U64_HI(params->sge_map));
	rx_data->cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rcq_map));
	rx_data->cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rcq_map));
	rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);

	if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
		rx_data->approx_mcast_engine_id = params->mcast_engine_id;
		rx_data->is_approx_mcast = 1;
	}

	rx_data->rss_engine_id = params->rss_engine_id;

	/* silent vlan removal */
	rx_data->silent_vlan_removal_flg =
		test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
	rx_data->silent_vlan_value =
		cpu_to_le16(params->silent_removal_value);
	rx_data->silent_vlan_mask =
		cpu_to_le16(params->silent_removal_mask);
}
4728
/* Initialize the general, tx and rx parts of a queue (client init) ramrod
 * from the SETUP command parameters.
 */
static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
                                struct bnx2x_queue_state_params *cmd_params,
                                struct client_init_ramrod_data *data)
{
        /* General part: client/function ids, cos, statistics, etc. */
        bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
                                       &cmd_params->params.setup.gen_params,
                                       &data->general,
                                       &cmd_params->params.setup.flags);

        /* Tx queue part */
        bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
                                  &cmd_params->params.setup.txq_params,
                                  &data->tx,
                                  &cmd_params->params.setup.flags);

        /* Rx queue part */
        bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
                                  &cmd_params->params.setup.rxq_params,
                                  &data->rx,
                                  &cmd_params->params.setup.flags);

        /* Pause parameters are written into the rx section of the ramrod */
        bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
                                     &cmd_params->params.setup.pause_params,
                                     &data->rx);
}
4753
4754 /* initialize the general and tx parts of a tx-only queue object */
4755 static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4756                                 struct bnx2x_queue_state_params *cmd_params,
4757                                 struct tx_queue_init_ramrod_data *data)
4758 {
4759         bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4760                                        &cmd_params->params.tx_only.gen_params,
4761                                        &data->general,
4762                                        &cmd_params->params.tx_only.flags);
4763
4764         bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4765                                   &cmd_params->params.tx_only.txq_params,
4766                                   &data->tx,
4767                                   &cmd_params->params.tx_only.flags);
4768
4769         DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",
4770                          cmd_params->q_obj->cids[0],
4771                          data->tx.tx_bd_page_base.lo,
4772                          data->tx.tx_bd_page_base.hi);
4773 }
4774
/**
 * bnx2x_q_init - init HW/FW queue
 *
 * @bp:         device handle
 * @params:     queue state parameters; params->params.init is used
 *
 * HW/FW initial Queue configuration:
 *      - HC: Rx and Tx
 *      - CDU context validation
 *
 * No ramrod is sent for this command, so the completion is raised
 * directly from here. Always returns 0.
 */
static inline int bnx2x_q_init(struct bnx2x *bp,
                               struct bnx2x_queue_state_params *params)
{
        struct bnx2x_queue_sp_obj *o = params->q_obj;
        struct bnx2x_queue_init_params *init = &params->params.init;
        u16 hc_usec;
        u8 cos;

        /* Tx HC configuration: convert the requested interrupt rate
         * (interrupts/sec) to a period in usec; 0 disables coalescing.
         */
        if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
            test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
                hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;

                bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
                        init->tx.sb_cq_index,
                        !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
                        hc_usec);
        }

        /* Rx HC configuration: same rate-to-usec conversion as for Tx */
        if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
            test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
                hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;

                bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
                        init->rx.sb_cq_index,
                        !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
                        hc_usec);
        }

        /* Set CDU context validation values for each CoS connection */
        for (cos = 0; cos < o->max_cos; cos++) {
                DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
                                 o->cids[cos], cos);
                DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
                bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
        }

        /* As no ramrod is sent, complete the command immediately */
        o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);

        mmiowb();
        smp_mb();

        return 0;
}
4832
4833 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4834                                         struct bnx2x_queue_state_params *params)
4835 {
4836         struct bnx2x_queue_sp_obj *o = params->q_obj;
4837         struct client_init_ramrod_data *rdata =
4838                 (struct client_init_ramrod_data *)o->rdata;
4839         dma_addr_t data_mapping = o->rdata_mapping;
4840         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4841
4842         /* Clear the ramrod data */
4843         memset(rdata, 0, sizeof(*rdata));
4844
4845         /* Fill the ramrod data */
4846         bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4847
4848         /* No need for an explicit memory barrier here as long we would
4849          * need to ensure the ordering of writing to the SPQ element
4850          * and updating of the SPQ producer which involves a memory
4851          * read and we will have to put a full memory barrier there
4852          * (inside bnx2x_sp_post()).
4853          */
4854
4855         return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4856                              U64_HI(data_mapping),
4857                              U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4858 }
4859
4860 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4861                                         struct bnx2x_queue_state_params *params)
4862 {
4863         struct bnx2x_queue_sp_obj *o = params->q_obj;
4864         struct client_init_ramrod_data *rdata =
4865                 (struct client_init_ramrod_data *)o->rdata;
4866         dma_addr_t data_mapping = o->rdata_mapping;
4867         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4868
4869         /* Clear the ramrod data */
4870         memset(rdata, 0, sizeof(*rdata));
4871
4872         /* Fill the ramrod data */
4873         bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4874         bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4875
4876         /* No need for an explicit memory barrier here as long we would
4877          * need to ensure the ordering of writing to the SPQ element
4878          * and updating of the SPQ producer which involves a memory
4879          * read and we will have to put a full memory barrier there
4880          * (inside bnx2x_sp_post()).
4881          */
4882
4883         return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4884                              U64_HI(data_mapping),
4885                              U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4886 }
4887
/* Send a TX_QUEUE_SETUP ramrod: sets up an additional tx-only connection
 * (one per extra CoS) on top of an already configured queue.
 */
static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
                                  struct bnx2x_queue_state_params *params)
{
        struct bnx2x_queue_sp_obj *o = params->q_obj;
        struct tx_queue_init_ramrod_data *rdata =
                (struct tx_queue_init_ramrod_data *)o->rdata;
        dma_addr_t data_mapping = o->rdata_mapping;
        int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
        struct bnx2x_queue_setup_tx_only_params *tx_only_params =
                &params->params.tx_only;
        u8 cid_index = tx_only_params->cid_index;

        /* Each CoS has its own CID - the index must be within max_cos */
        if (cid_index >= o->max_cos) {
                BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
                          o->cl_id, cid_index);
                return -EINVAL;
        }

        DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
                         tx_only_params->gen_params.cos,
                         tx_only_params->gen_params.spcl_id);

        /* Clear the ramrod data */
        memset(rdata, 0, sizeof(*rdata));

        /* Fill the ramrod data */
        bnx2x_q_fill_setup_tx_only(bp, params, rdata);

        DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
                         o->cids[cid_index], rdata->general.client_id,
                         rdata->general.sp_client_id, rdata->general.cos);

        /* No need for an explicit memory barrier here as long we would
         * need to ensure the ordering of writing to the SPQ element
         * and updating of the SPQ producer which involves a memory
         * read and we will have to put a full memory barrier there
         * (inside bnx2x_sp_post()).
         */

        return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
                             U64_HI(data_mapping),
                             U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}
4931
/* Fill a client UPDATE ramrod from the UPDATE command parameters.
 * Each updatable property comes as a value flag plus a "change" flag;
 * only properties with the "change" flag set are applied by the FW.
 */
static void bnx2x_q_fill_update_data(struct bnx2x *bp,
                                     struct bnx2x_queue_sp_obj *obj,
                                     struct bnx2x_queue_update_params *params,
                                     struct client_update_ramrod_data *data)
{
        /* Client ID of the client to update */
        data->client_id = obj->cl_id;

        /* Function ID of the client to update */
        data->func_id = obj->func_id;

        /* Default VLAN value */
        data->default_vlan = cpu_to_le16(params->def_vlan);

        /* Inner VLAN stripping */
        data->inner_vlan_removal_enable_flg =
                test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
        data->inner_vlan_removal_change_flg =
                test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
                         &params->update_flags);

        /* Outer VLAN stripping */
        data->outer_vlan_removal_enable_flg =
                test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
        data->outer_vlan_removal_change_flg =
                test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
                         &params->update_flags);

        /* Drop packets that have source MAC that doesn't belong to this
         * Queue.
         */
        data->anti_spoofing_enable_flg =
                test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
        data->anti_spoofing_change_flg =
                test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);

        /* Activate/Deactivate */
        data->activate_flg =
                test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
        data->activate_change_flg =
                test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);

        /* Enable default VLAN */
        data->default_vlan_enable_flg =
                test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
        data->default_vlan_change_flg =
                test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
                         &params->update_flags);

        /* silent vlan removal */
        data->silent_vlan_change_flg =
                test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
                         &params->update_flags);
        data->silent_vlan_removal_flg =
                test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
        data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
        data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
}
4990
/* Send a CLIENT_UPDATE ramrod on the connection selected by
 * params->params.update.cid_index.
 */
static inline int bnx2x_q_send_update(struct bnx2x *bp,
                                      struct bnx2x_queue_state_params *params)
{
        struct bnx2x_queue_sp_obj *o = params->q_obj;
        struct client_update_ramrod_data *rdata =
                (struct client_update_ramrod_data *)o->rdata;
        dma_addr_t data_mapping = o->rdata_mapping;
        struct bnx2x_queue_update_params *update_params =
                &params->params.update;
        u8 cid_index = update_params->cid_index;

        /* Each CoS has its own CID - the index must be within max_cos */
        if (cid_index >= o->max_cos) {
                BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
                          o->cl_id, cid_index);
                return -EINVAL;
        }

        /* Clear the ramrod data */
        memset(rdata, 0, sizeof(*rdata));

        /* Fill the ramrod data */
        bnx2x_q_fill_update_data(bp, o, update_params, rdata);

        /* No need for an explicit memory barrier here as long we would
         * need to ensure the ordering of writing to the SPQ element
         * and updating of the SPQ producer which involves a memory
         * read and we will have to put a full memory barrier there
         * (inside bnx2x_sp_post()).
         */

        return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
                             o->cids[cid_index], U64_HI(data_mapping),
                             U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}
5025
/**
 * bnx2x_q_send_deactivate - send DEACTIVATE command
 *
 * @bp:         device handle
 * @params:     queue state parameters; params->params.update is
 *              overwritten by this function
 *
 * implemented using the UPDATE command.
 */
static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
                                        struct bnx2x_queue_state_params *params)
{
        struct bnx2x_queue_update_params *update = &params->params.update;

        memset(update, 0, sizeof(*update));

        /* Setting only the "change" flag while leaving the ACTIVATE bit
         * clear requests deactivation (see bnx2x_q_fill_update_data()).
         */
        __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);

        return bnx2x_q_send_update(bp, params);
}
5045
/**
 * bnx2x_q_send_activate - send ACTIVATE command
 *
 * @bp:         device handle
 * @params:     queue state parameters; params->params.update is
 *              overwritten by this function
 *
 * implemented using the UPDATE command.
 */
static inline int bnx2x_q_send_activate(struct bnx2x *bp,
                                        struct bnx2x_queue_state_params *params)
{
        struct bnx2x_queue_update_params *update = &params->params.update;

        memset(update, 0, sizeof(*update));

        /* Both the ACTIVATE value and its "change" flag must be set for
         * the FW to apply the activation.
         */
        __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
        __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);

        return bnx2x_q_send_update(bp, params);
}
5066
5067 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
5068                                         struct bnx2x_queue_state_params *params)
5069 {
5070         /* TODO: Not implemented yet. */
5071         return -1;
5072 }
5073
/* Send a HALT ramrod on the queue's primary connection. The client id is
 * passed in the data_lo dword of the SPQ element (no separate ramrod data
 * buffer is used).
 */
static inline int bnx2x_q_send_halt(struct bnx2x *bp,
                                    struct bnx2x_queue_state_params *params)
{
        struct bnx2x_queue_sp_obj *o = params->q_obj;

        return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
                             o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
                             ETH_CONNECTION_TYPE);
}
5083
/* Send a CFC_DEL ramrod (connection context delete) on the connection
 * selected by params->params.cfc_del.cid_index. Note it goes on the
 * NONE (common) connection type, unlike the other queue ramrods.
 */
static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
                                       struct bnx2x_queue_state_params *params)
{
        struct bnx2x_queue_sp_obj *o = params->q_obj;
        u8 cid_idx = params->params.cfc_del.cid_index;

        /* Each CoS has its own CID - the index must be within max_cos */
        if (cid_idx >= o->max_cos) {
                BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
                          o->cl_id, cid_idx);
                return -EINVAL;
        }

        return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
                             o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
}
5099
/* Send a TERMINATE ramrod on the connection selected by
 * params->params.terminate.cid_index.
 */
static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
                                        struct bnx2x_queue_state_params *params)
{
        struct bnx2x_queue_sp_obj *o = params->q_obj;
        u8 cid_index = params->params.terminate.cid_index;

        /* Each CoS has its own CID - the index must be within max_cos */
        if (cid_index >= o->max_cos) {
                BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
                          o->cl_id, cid_index);
                return -EINVAL;
        }

        return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
                             o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
}
5115
/* Send an EMPTY ramrod on the queue's primary connection. */
static inline int bnx2x_q_send_empty(struct bnx2x *bp,
                                     struct bnx2x_queue_state_params *params)
{
        struct bnx2x_queue_sp_obj *o = params->q_obj;

        return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
                             o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
                             ETH_CONNECTION_TYPE);
}
5125
/* Dispatch a queue command that is handled identically on all chip
 * families. The chip-specific SETUP command is handled by the per-chip
 * send_cmd callbacks (bnx2x_queue_send_cmd_e1x/e2).
 */
static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
                                        struct bnx2x_queue_state_params *params)
{
        switch (params->cmd) {
        case BNX2X_Q_CMD_INIT:
                return bnx2x_q_init(bp, params);
        case BNX2X_Q_CMD_SETUP_TX_ONLY:
                return bnx2x_q_send_setup_tx_only(bp, params);
        case BNX2X_Q_CMD_DEACTIVATE:
                return bnx2x_q_send_deactivate(bp, params);
        case BNX2X_Q_CMD_ACTIVATE:
                return bnx2x_q_send_activate(bp, params);
        case BNX2X_Q_CMD_UPDATE:
                return bnx2x_q_send_update(bp, params);
        case BNX2X_Q_CMD_UPDATE_TPA:
                return bnx2x_q_send_update_tpa(bp, params);
        case BNX2X_Q_CMD_HALT:
                return bnx2x_q_send_halt(bp, params);
        case BNX2X_Q_CMD_CFC_DEL:
                return bnx2x_q_send_cfc_del(bp, params);
        case BNX2X_Q_CMD_TERMINATE:
                return bnx2x_q_send_terminate(bp, params);
        case BNX2X_Q_CMD_EMPTY:
                return bnx2x_q_send_empty(bp, params);
        default:
                BNX2X_ERR("Unknown command: %d\n", params->cmd);
                return -EINVAL;
        }
}
5155
/* E1x flavor of the queue send_cmd callback: SETUP uses the E1x ramrod
 * data layout, everything else falls through to the common dispatcher.
 */
static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
                                    struct bnx2x_queue_state_params *params)
{
        switch (params->cmd) {
        case BNX2X_Q_CMD_SETUP:
                return bnx2x_q_send_setup_e1x(bp, params);
        case BNX2X_Q_CMD_INIT:
        case BNX2X_Q_CMD_SETUP_TX_ONLY:
        case BNX2X_Q_CMD_DEACTIVATE:
        case BNX2X_Q_CMD_ACTIVATE:
        case BNX2X_Q_CMD_UPDATE:
        case BNX2X_Q_CMD_UPDATE_TPA:
        case BNX2X_Q_CMD_HALT:
        case BNX2X_Q_CMD_CFC_DEL:
        case BNX2X_Q_CMD_TERMINATE:
        case BNX2X_Q_CMD_EMPTY:
                return bnx2x_queue_send_cmd_cmn(bp, params);
        default:
                BNX2X_ERR("Unknown command: %d\n", params->cmd);
                return -EINVAL;
        }
}
5178
/* E2 (and newer) flavor of the queue send_cmd callback: SETUP uses the
 * E2 ramrod data layout, everything else falls through to the common
 * dispatcher.
 */
static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
                                   struct bnx2x_queue_state_params *params)
{
        switch (params->cmd) {
        case BNX2X_Q_CMD_SETUP:
                return bnx2x_q_send_setup_e2(bp, params);
        case BNX2X_Q_CMD_INIT:
        case BNX2X_Q_CMD_SETUP_TX_ONLY:
        case BNX2X_Q_CMD_DEACTIVATE:
        case BNX2X_Q_CMD_ACTIVATE:
        case BNX2X_Q_CMD_UPDATE:
        case BNX2X_Q_CMD_UPDATE_TPA:
        case BNX2X_Q_CMD_HALT:
        case BNX2X_Q_CMD_CFC_DEL:
        case BNX2X_Q_CMD_TERMINATE:
        case BNX2X_Q_CMD_EMPTY:
                return bnx2x_queue_send_cmd_cmn(bp, params);
        default:
                BNX2X_ERR("Unknown command: %d\n", params->cmd);
                return -EINVAL;
        }
}
5201
/**
 * bnx2x_queue_chk_transition - check state machine of a regular Queue
 *
 * @bp:         device handle
 * @o:          queue state object
 * @params:     queue state parameters carrying the requested command
 *
 * (not Forwarding)
 * It both checks if the requested command is legal in a current
 * state and, if it's legal, sets a `next_state' in the object
 * that will be used in the completion flow to set the `state'
 * of the object.
 *
 * returns 0 if a requested command is a legal transition,
 *         -EINVAL otherwise.
 */
static int bnx2x_queue_chk_transition(struct bnx2x *bp,
                                      struct bnx2x_queue_sp_obj *o,
                                      struct bnx2x_queue_state_params *params)
{
        enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
        enum bnx2x_queue_cmd cmd = params->cmd;
        struct bnx2x_queue_update_params *update_params =
                 &params->params.update;
        /* Tracks the number of tx-only connections after this transition */
        u8 next_tx_only = o->num_tx_only;

        /* Forget all pending for completion commands if a driver only state
         * transition has been requested.
         */
        if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
                o->pending = 0;
                o->next_state = BNX2X_Q_STATE_MAX;
        }

        /* Don't allow a next state transition if we are in the middle of
         * the previous one.
         */
        if (o->pending) {
                BNX2X_ERR("Blocking transition since pending was %lx\n",
                          o->pending);
                return -EBUSY;
        }

        switch (state) {
        case BNX2X_Q_STATE_RESET:
                if (cmd == BNX2X_Q_CMD_INIT)
                        next_state = BNX2X_Q_STATE_INITIALIZED;

                break;
        case BNX2X_Q_STATE_INITIALIZED:
                if (cmd == BNX2X_Q_CMD_SETUP) {
                        if (test_bit(BNX2X_Q_FLG_ACTIVE,
                                     &params->params.setup.flags))
                                next_state = BNX2X_Q_STATE_ACTIVE;
                        else
                                next_state = BNX2X_Q_STATE_INACTIVE;
                }

                break;
        case BNX2X_Q_STATE_ACTIVE:
                if (cmd == BNX2X_Q_CMD_DEACTIVATE)
                        next_state = BNX2X_Q_STATE_INACTIVE;

                else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
                         (cmd == BNX2X_Q_CMD_UPDATE_TPA))
                        next_state = BNX2X_Q_STATE_ACTIVE;

                /* The first tx-only connection moves the queue to MULTI_COS */
                else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
                        next_state = BNX2X_Q_STATE_MULTI_COS;
                        next_tx_only = 1;
                }

                else if (cmd == BNX2X_Q_CMD_HALT)
                        next_state = BNX2X_Q_STATE_STOPPED;

                else if (cmd == BNX2X_Q_CMD_UPDATE) {
                        /* If "active" state change is requested, update the
                         *  state accordingly.
                         */
                        if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
                                     &update_params->update_flags) &&
                            !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
                                      &update_params->update_flags))
                                next_state = BNX2X_Q_STATE_INACTIVE;
                        else
                                next_state = BNX2X_Q_STATE_ACTIVE;
                }

                break;
        case BNX2X_Q_STATE_MULTI_COS:
                if (cmd == BNX2X_Q_CMD_TERMINATE)
                        next_state = BNX2X_Q_STATE_MCOS_TERMINATED;

                else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
                        next_state = BNX2X_Q_STATE_MULTI_COS;
                        next_tx_only = o->num_tx_only + 1;
                }

                else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
                         (cmd == BNX2X_Q_CMD_UPDATE_TPA))
                        next_state = BNX2X_Q_STATE_MULTI_COS;

                else if (cmd == BNX2X_Q_CMD_UPDATE) {
                        /* If "active" state change is requested, update the
                         *  state accordingly.
                         */
                        if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
                                     &update_params->update_flags) &&
                            !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
                                      &update_params->update_flags))
                                next_state = BNX2X_Q_STATE_INACTIVE;
                        else
                                next_state = BNX2X_Q_STATE_MULTI_COS;
                }

                break;
        case BNX2X_Q_STATE_MCOS_TERMINATED:
                /* Deleting the last tx-only connection returns the queue to
                 * ACTIVE; otherwise it stays in MULTI_COS.
                 */
                if (cmd == BNX2X_Q_CMD_CFC_DEL) {
                        next_tx_only = o->num_tx_only - 1;
                        if (next_tx_only == 0)
                                next_state = BNX2X_Q_STATE_ACTIVE;
                        else
                                next_state = BNX2X_Q_STATE_MULTI_COS;
                }

                break;
        case BNX2X_Q_STATE_INACTIVE:
                if (cmd == BNX2X_Q_CMD_ACTIVATE)
                        next_state = BNX2X_Q_STATE_ACTIVE;

                else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
                         (cmd == BNX2X_Q_CMD_UPDATE_TPA))
                        next_state = BNX2X_Q_STATE_INACTIVE;

                else if (cmd == BNX2X_Q_CMD_HALT)
                        next_state = BNX2X_Q_STATE_STOPPED;

                else if (cmd == BNX2X_Q_CMD_UPDATE) {
                        /* If "active" state change is requested, update the
                         * state accordingly.
                         */
                        if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
                                     &update_params->update_flags) &&
                            test_bit(BNX2X_Q_UPDATE_ACTIVATE,
                                     &update_params->update_flags)){
                                if (o->num_tx_only == 0)
                                        next_state = BNX2X_Q_STATE_ACTIVE;
                                else /* tx only queues exist for this queue */
                                        next_state = BNX2X_Q_STATE_MULTI_COS;
                        } else
                                next_state = BNX2X_Q_STATE_INACTIVE;
                }

                break;
        case BNX2X_Q_STATE_STOPPED:
                if (cmd == BNX2X_Q_CMD_TERMINATE)
                        next_state = BNX2X_Q_STATE_TERMINATED;

                break;
        case BNX2X_Q_STATE_TERMINATED:
                if (cmd == BNX2X_Q_CMD_CFC_DEL)
                        next_state = BNX2X_Q_STATE_RESET;

                break;
        default:
                BNX2X_ERR("Illegal state: %d\n", state);
        }

        /* Transition is assured */
        if (next_state != BNX2X_Q_STATE_MAX) {
                DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
                                 state, cmd, next_state);
                o->next_state = next_state;
                o->next_tx_only = next_tx_only;
                return 0;
        }

        DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);

        return -EINVAL;
}
5383
/* Initialize a queue state object: copy in the per-CoS CIDs, set the
 * identity fields and plug in the chip-appropriate callbacks.
 */
void bnx2x_init_queue_obj(struct bnx2x *bp,
                          struct bnx2x_queue_sp_obj *obj,
                          u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
                          void *rdata,
                          dma_addr_t rdata_mapping, unsigned long type)
{
        memset(obj, 0, sizeof(*obj));

        /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
        BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);

        memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
        obj->max_cos = cid_cnt;
        obj->cl_id = cl_id;
        obj->func_id = func_id;
        obj->rdata = rdata;
        obj->rdata_mapping = rdata_mapping;
        obj->type = type;
        /* No transition is in progress yet */
        obj->next_state = BNX2X_Q_STATE_MAX;

        /* SETUP ramrod data layout differs between E1x and E2+ chips */
        if (CHIP_IS_E1x(bp))
                obj->send_cmd = bnx2x_queue_send_cmd_e1x;
        else
                obj->send_cmd = bnx2x_queue_send_cmd_e2;

        obj->check_transition = bnx2x_queue_chk_transition;

        obj->complete_cmd = bnx2x_queue_comp_cmd;
        obj->wait_comp = bnx2x_queue_wait_comp;
        obj->set_pending = bnx2x_queue_set_pending;
}
5415
5416 /* return a queue object's logical state*/
5417 int bnx2x_get_q_logical_state(struct bnx2x *bp,
5418                                struct bnx2x_queue_sp_obj *obj)
5419 {
5420         switch (obj->state) {
5421         case BNX2X_Q_STATE_ACTIVE:
5422         case BNX2X_Q_STATE_MULTI_COS:
5423                 return BNX2X_Q_LOGICAL_STATE_ACTIVE;
5424         case BNX2X_Q_STATE_RESET:
5425         case BNX2X_Q_STATE_INITIALIZED:
5426         case BNX2X_Q_STATE_MCOS_TERMINATED:
5427         case BNX2X_Q_STATE_INACTIVE:
5428         case BNX2X_Q_STATE_STOPPED:
5429         case BNX2X_Q_STATE_TERMINATED:
5430         case BNX2X_Q_STATE_FLRED:
5431                 return BNX2X_Q_LOGICAL_STATE_STOPPED;
5432         default:
5433                 return -EINVAL;
5434         }
5435 }
5436
/********************** Function state object *********************************/

/* Return the function's current state, or BNX2X_F_STATE_MAX when a
 * transition is still in flight (o->pending is non-zero).
 */
enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
                                           struct bnx2x_func_sp_obj *o)
{
        /* in the middle of transaction - return INVALID state */
        if (o->pending)
                return BNX2X_F_STATE_MAX;

        /* ensure the order of reading of o->pending and o->state
         * o->pending should be read first
         */
        rmb();

        return o->state;
}
5452
/* Wait until the 'cmd' bit clears in the function object's pending mask,
 * i.e. until the corresponding ramrod completion arrives.
 */
static int bnx2x_func_wait_comp(struct bnx2x *bp,
                                struct bnx2x_func_sp_obj *o,
                                enum bnx2x_func_cmd cmd)
{
        return bnx2x_state_wait(bp, cmd, &o->pending);
}
5459
/**
 * bnx2x_func_state_change_comp - complete the state machine transition
 *
 * @bp:         device handle
 * @o:          function state object
 * @cmd:        the command whose completion has arrived
 *
 * Called on state change transition. Completes the state
 * machine transition only - no HW interaction.
 *
 * Returns 0 on success, -EINVAL if @cmd was not pending.
 */
static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
                                               struct bnx2x_func_sp_obj *o,
                                               enum bnx2x_func_cmd cmd)
{
        unsigned long cur_pending = o->pending;

        /* A completion for a command that was never issued is a bug */
        if (!test_and_clear_bit(cmd, &cur_pending)) {
                BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
                          cmd, BP_FUNC(bp), o->state,
                          cur_pending, o->next_state);
                return -EINVAL;
        }

        DP(BNX2X_MSG_SP,
           "Completing command %d for func %d, setting state to %d\n",
           cmd, BP_FUNC(bp), o->next_state);

        o->state = o->next_state;
        o->next_state = BNX2X_F_STATE_MAX;

        /* It's important that o->state and o->next_state are
         * updated before o->pending.
         */
        wmb();

        /* Clearing this bit may release a waiter in bnx2x_func_wait_comp() */
        clear_bit(cmd, &o->pending);
        smp_mb__after_clear_bit();

        return 0;
}
5500
5501 /**
5502  * bnx2x_func_comp_cmd - complete the state change command
5503  *
5504  * @bp:         device handle
5505  * @o:
5506  * @cmd:
5507  *
5508  * Checks that the arrived completion is expected.
5509  */
5510 static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5511                                struct bnx2x_func_sp_obj *o,
5512                                enum bnx2x_func_cmd cmd)
5513 {
5514         /* Complete the state machine part first, check if it's a
5515          * legal completion.
5516          */
5517         int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5518         return rc;
5519 }
5520
/**
 * bnx2x_func_chk_transition - perform function state machine transition
 *
 * @bp:		device handle
 * @o:		function state-machine object
 * @params:	params->cmd is the requested command; params->ramrod_flags
 *		may carry RAMROD_DRV_CLR_ONLY
 *
 * It both checks if the requested command is legal in a current
 * state and, if it's legal, sets a `next_state' in the object
 * that will be used in the completion flow to set the `state'
 * of the object.
 *
 * returns 0 if a requested command is a legal transition,
 *         -EINVAL otherwise.
 */
static int bnx2x_func_chk_transition(struct bnx2x *bp,
				     struct bnx2x_func_sp_obj *o,
				     struct bnx2x_func_state_params *params)
{
	/* BNX2X_F_STATE_MAX doubles as "no transition found" */
	enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
	enum bnx2x_func_cmd cmd = params->cmd;

	/* Forget all pending for completion commands if a driver only state
	 * transition has been requested.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		o->pending = 0;
		o->next_state = BNX2X_F_STATE_MAX;
	}

	/* Don't allow a next state transition if we are in the middle of
	 * the previous one.
	 */
	if (o->pending)
		return -EBUSY;

	switch (state) {
	case BNX2X_F_STATE_RESET:
		if (cmd == BNX2X_F_CMD_HW_INIT)
			next_state = BNX2X_F_STATE_INITIALIZED;

		break;
	case BNX2X_F_STATE_INITIALIZED:
		if (cmd == BNX2X_F_CMD_START)
			next_state = BNX2X_F_STATE_STARTED;

		else if (cmd == BNX2X_F_CMD_HW_RESET)
			next_state = BNX2X_F_STATE_RESET;

		break;
	case BNX2X_F_STATE_STARTED:
		if (cmd == BNX2X_F_CMD_STOP)
			next_state = BNX2X_F_STATE_INITIALIZED;
		/* afex ramrods can be sent only in started mode, and only
		 * if not pending for function_stop ramrod completion
		 * for these events - next state remained STARTED.
		 */
		else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_STARTED;

		else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_STARTED;

		/* Switch_update ramrod can be sent in either started or
		 * tx_stopped state, and it doesn't change the state.
		 */
		else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_STARTED;

		else if (cmd == BNX2X_F_CMD_TX_STOP)
			next_state = BNX2X_F_STATE_TX_STOPPED;

		break;
	case BNX2X_F_STATE_TX_STOPPED:
		if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
		    (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_TX_STOPPED;

		else if (cmd == BNX2X_F_CMD_TX_START)
			next_state = BNX2X_F_STATE_STARTED;

		break;
	default:
		BNX2X_ERR("Unknown state: %d\n", state);
	}

	/* Transition is assured */
	if (next_state != BNX2X_F_STATE_MAX) {
		DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
				 state, cmd, next_state);
		o->next_state = next_state;
		return 0;
	}

	DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
			 state, cmd);

	return -EINVAL;
}
5623
/**
 * bnx2x_func_init_func - performs HW init at function stage
 *
 * @bp:		device handle
 * @drv:	driver-specific HW init callbacks
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
 * HW blocks.
 */
static inline int bnx2x_func_init_func(struct bnx2x *bp,
				       const struct bnx2x_func_sp_drv_ops *drv)
{
	return drv->init_hw_func(bp);
}
5639
5640 /**
5641  * bnx2x_func_init_port - performs HW init at port stage
5642  *
5643  * @bp:         device handle
5644  * @drv:
5645  *
5646  * Init HW when the current phase is
5647  * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5648  * FUNCTION-only HW blocks.
5649  *
5650  */
5651 static inline int bnx2x_func_init_port(struct bnx2x *bp,
5652                                        const struct bnx2x_func_sp_drv_ops *drv)
5653 {
5654         int rc = drv->init_hw_port(bp);
5655         if (rc)
5656                 return rc;
5657
5658         return bnx2x_func_init_func(bp, drv);
5659 }
5660
5661 /**
5662  * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5663  *
5664  * @bp:         device handle
5665  * @drv:
5666  *
5667  * Init HW when the current phase is
5668  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5669  * PORT-only and FUNCTION-only HW blocks.
5670  */
5671 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5672                                         const struct bnx2x_func_sp_drv_ops *drv)
5673 {
5674         int rc = drv->init_hw_cmn_chip(bp);
5675         if (rc)
5676                 return rc;
5677
5678         return bnx2x_func_init_port(bp, drv);
5679 }
5680
/**
 * bnx2x_func_init_cmn - performs HW init at common stage
 *
 * @bp:		device handle
 * @drv:	driver-specific HW init callbacks
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
 * PORT-only and FUNCTION-only HW blocks.
 */
static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
				      const struct bnx2x_func_sp_drv_ops *drv)
{
	int rc = drv->init_hw_cmn(bp);
	if (rc)
		return rc;

	/* Cascade down to the PORT and FUNCTION stages */
	return bnx2x_func_init_port(bp, drv);
}
5700
/* Perform the HW_INIT command: unzip and load FW, then run the HW init
 * stage matching the load phase granted by the MCP. No ramrods are sent,
 * so on success the command is completed immediately.
 */
static int bnx2x_func_hw_init(struct bnx2x *bp,
			      struct bnx2x_func_state_params *params)
{
	u32 load_code = params->params.hw_init.load_phase;
	struct bnx2x_func_sp_obj *o = params->f_obj;
	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
	int rc = 0;

	DP(BNX2X_MSG_SP, "function %d  load_code %x\n",
			 BP_ABS_FUNC(bp), load_code);

	/* Prepare buffers for unzipping the FW */
	rc = drv->gunzip_init(bp);
	if (rc)
		return rc;

	/* Prepare FW */
	rc = drv->init_fw(bp);
	if (rc) {
		BNX2X_ERR("Error loading firmware\n");
		goto init_err;
	}

	/* Handle the beginning of COMMON_XXX phases separately... */
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_func_init_cmn_chip(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_func_init_cmn(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_func_init_port(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_func_init_func(bp, drv);
		if (rc)
			goto init_err;

		break;
	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		rc = -EINVAL;
	}

init_err:
	/* Release the gunzip buffers on all paths, success or failure */
	drv->gunzip_end(bp);

	/* In case of success, complete the command immediately: no ramrods
	 * have been sent.
	 */
	if (!rc)
		o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);

	return rc;
}
5766
/**
 * bnx2x_func_reset_func - reset HW at function stage
 *
 * @bp:		device handle
 * @drv:	driver-specific HW reset callbacks
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
 * FUNCTION-only HW blocks.
 */
static inline void bnx2x_func_reset_func(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	drv->reset_hw_func(bp);
}
5781
/**
 * bnx2x_func_reset_port - reset HW at port stage
 *
 * @bp:		device handle
 * @drv:	driver-specific HW reset callbacks
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
 * FUNCTION-only and PORT-only HW blocks.
 *
 *                 !!!IMPORTANT!!!
 *
 * It's important to call reset_port before reset_func() as the last thing
 * reset_func does is pf_disable() thus disabling PGLUE_B, which
 * makes impossible any DMAE transactions.
 */
static inline void bnx2x_func_reset_port(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	drv->reset_hw_port(bp);
	bnx2x_func_reset_func(bp, drv);
}
5803
/**
 * bnx2x_func_reset_cmn - reset HW at common stage
 *
 * @bp:		device handle
 * @drv:	driver-specific HW reset callbacks
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
 */
static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	/* PORT and FUNCTION blocks must go first - see the ordering note
	 * in bnx2x_func_reset_port().
	 */
	bnx2x_func_reset_port(bp, drv);
	drv->reset_hw_cmn(bp);
}
5820
5821 static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5822                                       struct bnx2x_func_state_params *params)
5823 {
5824         u32 reset_phase = params->params.hw_reset.reset_phase;
5825         struct bnx2x_func_sp_obj *o = params->f_obj;
5826         const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5827
5828         DP(BNX2X_MSG_SP, "function %d  reset_phase %x\n", BP_ABS_FUNC(bp),
5829                          reset_phase);
5830
5831         switch (reset_phase) {
5832         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5833                 bnx2x_func_reset_cmn(bp, drv);
5834                 break;
5835         case FW_MSG_CODE_DRV_UNLOAD_PORT:
5836                 bnx2x_func_reset_port(bp, drv);
5837                 break;
5838         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5839                 bnx2x_func_reset_func(bp, drv);
5840                 break;
5841         default:
5842                 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5843                            reset_phase);
5844                 break;
5845         }
5846
5847         /* Complete the command immediately: no ramrods have been sent. */
5848         o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5849
5850         return 0;
5851 }
5852
/* Post a FUNCTION_START ramrod with the configuration from
 * params->params.start (multi-function mode, SD vlan tag, cos and GRE
 * tunnel settings). Returns the bnx2x_sp_post() result.
 */
static inline int bnx2x_func_send_start(struct bnx2x *bp,
					struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct function_start_data *rdata =
		(struct function_start_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_func_start_params *start_params = &params->params.start;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->function_mode	= (u8)start_params->mf_mode;
	rdata->sd_vlan_tag	= cpu_to_le16(start_params->sd_vlan_tag);
	rdata->path_id		= BP_PATH(bp);
	rdata->network_cos_mode	= start_params->network_cos_mode;
	rdata->gre_tunnel_mode	= start_params->gre_tunnel_mode;
	rdata->gre_tunnel_rss	= start_params->gre_tunnel_rss;

	/* No need for an explicit memory barrier here as long we would
	 * need to ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read and we will have to put a full memory barrier there
	 * (inside bnx2x_sp_post()).
	 */

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}
5883
/* Post a FUNCTION_UPDATE ramrod that only toggles tx-switching suspend
 * according to params->params.switch_update.
 */
static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
					struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct function_update_data *rdata =
		(struct function_update_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_func_switch_update_params *switch_update_params =
		&params->params.switch_update;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->tx_switch_suspend_change_flg = 1;
	rdata->tx_switch_suspend = switch_update_params->suspend;
	/* echo tells the completion handler which update this was */
	rdata->echo = SWITCH_UPDATE;

	/* No need for an explicit memory barrier here as long we would
	 * need to ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read and we will have to put a full memory barrier there
	 * (inside bnx2x_sp_post()).
	 */

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}
5905
5906 static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5907                                          struct bnx2x_func_state_params *params)
5908 {
5909         struct bnx2x_func_sp_obj *o = params->f_obj;
5910         struct function_update_data *rdata =
5911                 (struct function_update_data *)o->afex_rdata;
5912         dma_addr_t data_mapping = o->afex_rdata_mapping;
5913         struct bnx2x_func_afex_update_params *afex_update_params =
5914                 &params->params.afex_update;
5915
5916         memset(rdata, 0, sizeof(*rdata));
5917
5918         /* Fill the ramrod data with provided parameters */
5919         rdata->vif_id_change_flg = 1;
5920         rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
5921         rdata->afex_default_vlan_change_flg = 1;
5922         rdata->afex_default_vlan =
5923                 cpu_to_le16(afex_update_params->afex_default_vlan);
5924         rdata->allowed_priorities_change_flg = 1;
5925         rdata->allowed_priorities = afex_update_params->allowed_priorities;
5926         rdata->echo = AFEX_UPDATE;
5927
5928         /*  No need for an explicit memory barrier here as long we would
5929          *  need to ensure the ordering of writing to the SPQ element
5930          *  and updating of the SPQ producer which involves a memory
5931          *  read and we will have to put a full memory barrier there
5932          *  (inside bnx2x_sp_post()).
5933          */
5934         DP(BNX2X_MSG_SP,
5935            "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
5936            rdata->vif_id,
5937            rdata->afex_default_vlan, rdata->allowed_priorities);
5938
5939         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5940                              U64_HI(data_mapping),
5941                              U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5942 }
5943
/* Post an AFEX_VIF_LISTS ramrod. Unlike the other send helpers, the
 * ramrod data is passed by value in the SPQ element's data_hi/data_lo
 * words (the whole struct afex_vif_list_ramrod_data fits in 64 bits),
 * not via a DMA mapping.
 */
static
inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
					 struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct afex_vif_list_ramrod_data *rdata =
		(struct afex_vif_list_ramrod_data *)o->afex_rdata;
	struct bnx2x_func_afex_viflists_params *afex_vif_params =
		&params->params.afex_viflists;
	u64 *p_rdata = (u64 *)rdata;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
	rdata->func_bit_map          = afex_vif_params->func_bit_map;
	rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
	rdata->func_to_clear         = afex_vif_params->func_to_clear;

	/* send in echo type of sub command */
	rdata->echo = afex_vif_params->afex_vif_list_command;

	/*  No need for an explicit memory barrier here as long we would
	 *  need to ensure the ordering of writing to the SPQ element
	 *  and updating of the SPQ producer which involves a memory
	 *  read and we will have to put a full memory barrier there
	 *  (inside bnx2x_sp_post()).
	 */

	DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
	   rdata->afex_vif_list_command, rdata->vif_list_index,
	   rdata->func_bit_map, rdata->func_to_clear);

	/* this ramrod sends data directly and not through DMA mapping */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
			     U64_HI(*p_rdata), U64_LO(*p_rdata),
			     NONE_CONNECTION_TYPE);
}
5982
/* Post a FUNCTION_STOP ramrod. The ramrod carries no data, hence the
 * zeroed data_hi/data_lo.
 */
static inline int bnx2x_func_send_stop(struct bnx2x *bp,
				       struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}
5989
/* Post a STOP_TRAFFIC ramrod (no data). */
static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
				       struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}
/* Post a START_TRAFFIC ramrod with the DCB/flow-control configuration
 * from params->params.tx_start.
 */
static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
				       struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct flow_control_configuration *rdata =
		(struct flow_control_configuration *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_func_tx_start_params *tx_start_params =
		&params->params.tx_start;
	int i;

	memset(rdata, 0, sizeof(*rdata));

	rdata->dcb_enabled = tx_start_params->dcb_enabled;
	rdata->dcb_version = tx_start_params->dcb_version;
	rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;

	/* Copy the per-traffic-type priority/cos mapping as-is */
	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
		rdata->traffic_type_to_priority_cos[i] =
			tx_start_params->traffic_type_to_priority_cos[i];

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}
6021
/* Dispatch a validated function command to its send/init/reset handler.
 * Returns the handler's result, or -EINVAL for an unknown command.
 */
static int bnx2x_func_send_cmd(struct bnx2x *bp,
			       struct bnx2x_func_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_F_CMD_HW_INIT:
		return bnx2x_func_hw_init(bp, params);
	case BNX2X_F_CMD_START:
		return bnx2x_func_send_start(bp, params);
	case BNX2X_F_CMD_STOP:
		return bnx2x_func_send_stop(bp, params);
	case BNX2X_F_CMD_HW_RESET:
		return bnx2x_func_hw_reset(bp, params);
	case BNX2X_F_CMD_AFEX_UPDATE:
		return bnx2x_func_send_afex_update(bp, params);
	case BNX2X_F_CMD_AFEX_VIFLISTS:
		return bnx2x_func_send_afex_viflists(bp, params);
	case BNX2X_F_CMD_TX_STOP:
		return bnx2x_func_send_tx_stop(bp, params);
	case BNX2X_F_CMD_TX_START:
		return bnx2x_func_send_tx_start(bp, params);
	case BNX2X_F_CMD_SWITCH_UPDATE:
		return bnx2x_func_send_switch_update(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}
6049
6050 void bnx2x_init_func_obj(struct bnx2x *bp,
6051                          struct bnx2x_func_sp_obj *obj,
6052                          void *rdata, dma_addr_t rdata_mapping,
6053                          void *afex_rdata, dma_addr_t afex_rdata_mapping,
6054                          struct bnx2x_func_sp_drv_ops *drv_iface)
6055 {
6056         memset(obj, 0, sizeof(*obj));
6057
6058         mutex_init(&obj->one_pending_mutex);
6059
6060         obj->rdata = rdata;
6061         obj->rdata_mapping = rdata_mapping;
6062         obj->afex_rdata = afex_rdata;
6063         obj->afex_rdata_mapping = afex_rdata_mapping;
6064         obj->send_cmd = bnx2x_func_send_cmd;
6065         obj->check_transition = bnx2x_func_chk_transition;
6066         obj->complete_cmd = bnx2x_func_comp_cmd;
6067         obj->wait_comp = bnx2x_func_wait_comp;
6068
6069         obj->drv = drv_iface;
6070 }
6071
/**
 * bnx2x_func_state_change - perform Function state change transition
 *
 * @bp:         device handle
 * @params:     parameters to perform the transaction
 *
 * returns 0 in case of successfully completed transition,
 *         negative error code in case of failure, positive
 *         (EBUSY) value if there is a completion to that is
 *         still pending (possible only if RAMROD_COMP_WAIT is
 *         not set in params->ramrod_flags for asynchronous
 *         commands).
 */
int bnx2x_func_state_change(struct bnx2x *bp,
			    struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	int rc, cnt = 300;	/* up to 300 * 10ms retries with RAMROD_RETRY */
	enum bnx2x_func_cmd cmd = params->cmd;
	unsigned long *pending = &o->pending;

	/* Only one function command may be in flight at a time */
	mutex_lock(&o->one_pending_mutex);

	/* Check that the requested transition is legal */
	rc = o->check_transition(bp, o, params);
	if ((rc == -EBUSY) &&
	    (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
		/* Previous command still pending: drop the mutex while
		 * sleeping so its completion path can make progress.
		 */
		while ((rc == -EBUSY) && (--cnt > 0)) {
			mutex_unlock(&o->one_pending_mutex);
			msleep(10);
			mutex_lock(&o->one_pending_mutex);
			rc = o->check_transition(bp, o, params);
		}
		if (rc == -EBUSY) {
			mutex_unlock(&o->one_pending_mutex);
			BNX2X_ERR("timeout waiting for previous ramrod completion\n");
			return rc;
		}
	} else if (rc) {
		mutex_unlock(&o->one_pending_mutex);
		return rc;
	}

	/* Set "pending" bit */
	set_bit(cmd, pending);

	/* Don't send a command if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		bnx2x_func_state_change_comp(bp, o, cmd);
		mutex_unlock(&o->one_pending_mutex);
	} else {
		/* Send a ramrod */
		rc = o->send_cmd(bp, params);

		mutex_unlock(&o->one_pending_mutex);

		if (rc) {
			/* Undo the transition prepared by check_transition() */
			o->next_state = BNX2X_F_STATE_MAX;
			clear_bit(cmd, pending);
			smp_mb__after_clear_bit();
			return rc;
		}

		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			rc = o->wait_comp(bp, o, cmd);
			if (rc)
				return rc;

			return 0;
		}
	}

	/* Asynchronous path: non-zero means the completion is still pending */
	return !!test_bit(cmd, pending);
}