/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

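/* PCI IDs claimed by this driver: BladeEngine functions under the
 * ServerEngines vendor ID and OneConnect functions under the Emulex
 * vendor ID.
 */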
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static char *ue_status_low_desc[] = {

/* UE Status High CSR */
static char *ue_status_hi_desc[] = {

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

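/* The doorbell helpers below compose a 32-bit doorbell word (ring id in
 * the low bits, posted/popped counts and arm flags above it) and write it
 * to the adapter's doorbell BAR to tell hardware about newly posted
 * entries or consumed completions.
 */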
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

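/* Changing the MAC on a PF means replacing the programmed pmac entry:
 * the old pmac_id is deleted and the new address added, which hands back
 * a fresh pmac_id.
 */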
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v0 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);
	struct be_rxf_stats_v0 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);

	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors =
		port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
	struct be_rxf_stats_v1 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v1 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);

	drvs->rx_priority_pause_frames = 0;
	drvs->pmem_fifo_overflow_drop = 0;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors =
		port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop =
		port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_cmd_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	drvs->rx_priority_pause_frames = 0;
	drvs->pmem_fifo_overflow_drop = 0;
	drvs->rx_pause_frames =
		make_64bit_val(pport_stats->rx_pause_frames_hi,
			       pport_stats->rx_pause_frames_lo);
	drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
					     pport_stats->rx_crc_errors_lo);
	drvs->rx_control_frames =
		make_64bit_val(pport_stats->rx_control_frames_hi,
			       pport_stats->rx_control_frames_lo);
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long =
		make_64bit_val(pport_stats->rx_frames_too_long_hi,
			       pport_stats->rx_frames_too_long_lo);
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		make_64bit_val(pport_stats->rx_symbol_errors_hi,
			       pport_stats->rx_symbol_errors_lo);
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
					      pport_stats->tx_pause_frames_lo);
	drvs->tx_controlframes =
		make_64bit_val(pport_stats->tx_control_frames_hi,
			       pport_stats->tx_control_frames_lo);
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_no_pbuf = 0;
	drvs->rx_drops_no_txpb = 0;
	drvs->rx_drops_no_erx_descr = 0;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
						 pport_stats->num_forwards_lo);
	drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
					    pport_stats->rx_drops_mtu_lo);
	drvs->rx_drops_no_tpre_descr = 0;
	drvs->rx_drops_too_many_frags =
		make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
			       pport_stats->rx_drops_too_many_frags_lo);
}

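/* Dispatch to the stats parser matching the format this chip reports:
 * pport stats on Lancer, v1 stats on other GEN3 chips, v0 stats on BE2.
 */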
void be_parse_stats(struct be_adapter *adapter)
{
	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}
}

void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		if (adapter->generation == BE_GEN3) {
			if (!(lancer_chip(adapter))) {
				struct be_erx_stats_v1 *erx_stats =
					be_erx_stats_from_cmd(adapter);
				dev_stats->rx_dropped +=
				erx_stats->rx_drops_no_fragments[rxo->q.id];
			}
		} else {
			struct be_erx_stats_v0 *erx_stats =
				be_erx_stats_from_cmd(adapter);
			dev_stats->rx_dropped +=
				erx_stats->rx_drops_no_fragments[rxo->q.id];
		}
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt +
		drvs->rx_tcp_checksum_errs +
		drvs->rx_ip_checksum_errs +
		drvs->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	dev_stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

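/* Convert a byte count measured over 'ticks' jiffies into Mbits/sec */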
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
							bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
							tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
							udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

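/* DMA-map the skb header and each page frag and fill one WRB per mapped
 * piece, plus an optional dummy WRB to keep the WRB count even. The hdr
 * WRB (reserved first) is filled last with the totals. On a mapping
 * failure, every WRB filled so far is unwound via the dma_err path.
 */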
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

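/* Transmit entry point: build WRBs for the skb, record it for completion
 * processing, stop the queue if the ring may not fit another max-sized
 * request, and only then ring the TX doorbell.
 */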
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

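/* Push the netdev RX filter state to hardware: promiscuous, multicast
 * promiscuous (when IFF_ALLMULTI is set or the list exceeds BE_MAX_MC),
 * or the exact multicast list.
 */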
static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, true);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, false);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				      now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += rxcp->num_rcvd;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6);
}

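/* Look up the page_info for a posted RX frag; unmap the backing page on
 * its last user and account the frag as consumed from the RX queue.
 */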
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
					 rxcp->vlan_tag);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (likely(!rxcp->vlanf))
		napi_gro_frags(&eq_obj->napi);
	else
		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
			       rxcp->vlan_tag);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (((adapter->pvid & VLAN_VID_MASK) ==
		     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

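/* Walk the TX ring from the tail up to the completed index, unmapping
 * each frag WRB, then free the skb; returns the number of WRBs reclaimed.
 */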
static u16 be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

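/* Drain all pending entries from the event queue, re-arm it, and kick
 * NAPI if any events were found.
 */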
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			 "No support for multiple RX queues\n");
		return 1;
	}
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}

static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Ignore flush completions */
		if (rxcp->num_rcvd && rxcp->pkt_size) {
			if (do_gro(rxcp))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		} else if (rxcp->pkt_size == 0) {
			be_rx_compl_discard(adapter, rxo, rxcp);
		}

		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx, num_wrbs = 0;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		atomic_sub(num_wrbs, &txq->used);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}

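/* Check the PCI config-space UE (unrecoverable error) status registers
 * and, if any unmasked bit is set, flag the error condition and log the
 * name of every offending bit.
 */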
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}

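/* Periodic (1 sec) housekeeping: UE detection, stats refresh, tx/rx rate
 * and EQ-delay updates, and replenishing any rx queue that starved while
 * atomic allocations were failing.
 */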
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

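/* Ask for one MSI-X vector per desired RX queue plus one for TX/MCC; if
 * the OS grants fewer, retry with the granted count as long as it meets
 * the minimum of one RX and one TX vector.
 */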
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
}

static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSI-X */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);
done:
	adapter->isr_registered = false;
}
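/* Quiesce order on close: stop async MCC processing and NAPI first, then
 * make sure no interrupt handler is still running (synchronize_irq) before
 * freeing the vectors and reaping any in-flight tx completions.
 */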
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}
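/* Bring-up order on open is the mirror image: post rx buffers and enable
 * NAPI before registering interrupts, then arm the event and completion
 * queues (created unarmed) so the first interrupt can fire.
 */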
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in an unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
					&link_speed, 0);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
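/* Configures magic-packet wake-on-LAN. Enabling programs the current MAC
 * address as the magic pattern and arms PCI wake for D3hot/D3cold;
 * disabling programs an all-zero MAC, which the firmware presumably
 * interprets as clearing the pattern, and disarms PCI wake.
 */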
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size,
					  cmd.va, cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
/*
 * Generate a seed MAC address from the PF MAC address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}
static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}
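/* be_setup() creates everything needed for traffic in dependency order:
 * the interface (plus one per VF when SR-IOV is active), then tx, rx and
 * mcc queues. The error path unwinds in the reverse order via the labels
 * at the bottom of the function.
 */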
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf + 1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
							BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}
static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}
#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
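/* Walks the generation-specific component table below and flashes each
 * section of the UFI image in 32KB chunks; every chunk but the last is
 * sent with FLASHROM_OPER_SAVE, and the final chunk uses
 * FLASHROM_OPER_FLASH to commit the component.
 */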
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	return 0;
}
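/* Lancer firmware is not written through the flashrom op. It is streamed
 * to the LANCER_FW_DOWNLOAD_LOCATION object in 32KB chunks and then
 * committed with a zero-length write at the final offset.
 */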
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}
static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (lancer_chip(adapter))
		netdev->vlan_features |= NETIF_F_TSO6;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}
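/* BAR usage differs by family: Lancer exposes only a doorbell BAR, BE2/BE3
 * PFs additionally map CSR and PCI-config BARs (at generation-dependent BAR
 * indices), and VFs reach their pseudo config space at a fixed offset
 * within the doorbell BAR.
 */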
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else {
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
	}

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}
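/* The mailbox used for bootstrap commands must be 16-byte aligned, so a
 * buffer of sizeof(mailbox) + 16 is allocated and the aligned va/dma
 * pointers are derived from it with PTR_ALIGN.
 */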
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					    mc_cmd_mem->size, &mc_cmd_mem->dma,
					    GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	/* A default permanent address is given to each VF for Lancer */
	if (be_physfn(adapter) || lancer_chip(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);
		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	/* Bit 0x400 in function_mode apparently indicates a multi-channel
	 * (FLEX10-like) configuration, where only a quarter of the usual
	 * VLANs are available to this function.
	 */
	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	be_cmd_check_native_mode(adapter);
	return 0;
}
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}
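/* Probe sequence: PCI/DMA setup, optional SR-IOV, control structures,
 * firmware POST/init/reset, stats and config query, MSI-X, queue setup,
 * and finally netdev registration; each failure unwinds through the
 * labels at the bottom of the function.
 */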
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);
	if (adapter->sriov_enabled) {
		adapter->vf_cfg = kcalloc(num_vfs,
			sizeof(struct be_vf_cfg), GFP_KERNEL);

		if (!adapter->vf_cfg)
			goto free_netdev;
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_vf_cfg;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		u8 mac_speed;
		bool link_up;
		u16 vf, lnk_speed;

		if (!lancer_chip(adapter)) {
			status = be_vf_eth_addr_config(adapter);
			if (status)
				goto unreg_netdev;
		}

		for (vf = 0; vf < num_vfs; vf++) {
			status = be_cmd_link_status_query(adapter, &link_up,
					&mac_speed, &lnk_speed, vf + 1);
			if (!status)
				adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
			else
				goto unreg_netdev;
		}
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_vf_cfg:
	kfree(adapter->vf_cfg);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
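/* EEH recovery flow: error_detected quiesces the interface and tears down
 * the rings, slot_reset re-enables the device and re-runs POST to confirm
 * the card and firmware are ready, and resume rebuilds the rings and
 * reopens the interface.
 */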
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);