1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2007 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
* questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
* explanation of all the variables.
30 * rx_ring_num : This can be used to program the number of receive rings used
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
* tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 2(MSI_X). Default value is '2(MSI_X)'
41 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet
45 * napi: This parameter used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
47 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50 * Possible values '1' for enable , '0' for disable.
51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode.
53 ************************************************************************/
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
74 #include <linux/tcp.h>
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
80 #include <asm/div64.h>
85 #include "s2io-regs.h"
87 #define DRV_VERSION "2.0.26.2"
89 /* S2io Driver name & version. */
90 static char s2io_driver_name[] = "Neterion";
91 static char s2io_driver_version[] = DRV_VERSION;
/* Byte size of one receive descriptor, indexed by nic->rxd_mode. */
static int rxd_size[2] = {32,48};
/* RxDs per receive block, indexed by nic->rxd_mode; a block actually
 * holds rxd_count[mode] + 1 slots (see the num_rxd divisions below). */
static int rxd_count[2] = {127,85};
/*
 * RXD_IS_UP2DT - check whether a receive descriptor is "up to date".
 * True when the RXD_OWN_XENA ownership bit in Control_1 is clear and the
 * marker in Control_2 no longer equals THE_RXD_MARK.
 * NOTE(review): the declaration of 'ret' and the return statement fall
 * outside this sampled view.
 */
static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
107 * Cards with following subsystem_id have a link state indication
108 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
109 * macro below identifies these cards given the subsystem_id.
/*
 * Evaluates to 1 for Xframe I cards whose subsystem id falls in
 * 600B-600D or 640B-640D (faulty link-state indication), 0 otherwise.
 * Every parameter and the whole expansion are parenthesized so the
 * macro is safe with expression arguments and inside larger
 * expressions (CERT PRE01-C / PRE02-C).
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
	(((dev_type) == XFRAME_I_DEVICE) ? \
	 (((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
	   (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)
/* Link is up when neither the remote- nor the local-fault bit is set
 * in the adapter-status value. 'val64' is parenthesized so expression
 * arguments expand correctly (CERT PRE01-C). */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically claim the tasklet bit; relies on a local 'sp' in scope at
 * the expansion site. */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/*
 * rx_buffer_level - classify how full receive ring 'ring' currently is.
 * @sp: device private structure
 * @rxb_size: number of receive buffers currently outstanding
 * @ring: index into mac_control->rings[]
 * NOTE(review): the values returned by each branch are not visible in
 * this sampled view — confirm against the full source.
 */
static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
	struct mac_info *mac_control;
	mac_control = &sp->mac_control;
	/* At or below one block's worth of RxDs outstanding. */
	if (rxb_size <= rxd_count[sp->rxd_mode])
	/* Ring packet capacity minus outstanding buffers exceeds 16. */
	else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
/* Nonzero when the __S2IO_STATE_CARD_UP bit is set in sp->state. */
static inline int is_s2io_card_up(const struct s2io_nic * sp)
	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
/* Ethtool related variables and Macros. */
/* Names of the self-test phases reported through ethtool. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
/* Hardware statistics keys common to Xframe I and II (the table is
 * truncated in this sampled view). */
static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_data_octets"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"new_rd_req_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"rd_rtry_wr_ack_cnt"},
/* Additional statistics keys counted only toward XFRAME_II_STAT_LEN
 * (see the macro definitions below). */
static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
/* Driver-maintained (software) counters exposed through ethtool;
 * truncated in this sampled view. */
static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	("alarm_transceiver_temp_high"),
	("alarm_transceiver_temp_low"),
	("alarm_laser_bias_current_high"),
	("alarm_laser_bias_current_low"),
	("alarm_laser_output_power_high"),
	("alarm_laser_output_power_low"),
	("warn_transceiver_temp_high"),
	("warn_transceiver_temp_low"),
	("warn_laser_bias_current_high"),
	("warn_laser_bias_current_low"),
	("warn_laser_output_power_high"),
	("warn_laser_output_power_low"),
	("lro_aggregated_pkts"),
	("lro_flush_both_count"),
	("lro_out_of_sequence_pkts"),
	("lro_flush_due_to_max_pkts"),
	("lro_avg_aggr_pkts"),
	("mem_alloc_fail_cnt"),
	("pci_map_fail_cnt"),
	("watchdog_timer_cnt"),
	("tx_tcode_buf_abort_cnt"),
	("tx_tcode_desc_abort_cnt"),
	("tx_tcode_parity_err_cnt"),
	("tx_tcode_link_loss_cnt"),
	("tx_tcode_list_proc_err_cnt"),
	("rx_tcode_parity_err_cnt"),
	("rx_tcode_abort_cnt"),
	("rx_tcode_parity_abort_cnt"),
	("rx_tcode_rda_fail_cnt"),
	("rx_tcode_unkn_prot_cnt"),
	("rx_tcode_fcs_err_cnt"),
	("rx_tcode_buf_size_err_cnt"),
	("rx_tcode_rxd_corrupt_cnt"),
	("rx_tcode_unkn_err_cnt"),
	{"mac_tmac_err_cnt"},
	{"mac_rmac_err_cnt"},
	{"xgxs_txgxs_err_cnt"},
	{"xgxs_rxgxs_err_cnt"},
	{"prc_pcix_err_cnt"},
/* Element counts for the ethtool string tables above. Xframe II exposes
 * the Xena + driver + enhanced sets; Xframe I omits the enhanced set. */
#define S2IO_XENA_STAT_LEN sizeof(ethtool_xena_stats_keys)/ ETH_GSTRING_LEN
#define S2IO_ENHANCED_STAT_LEN sizeof(ethtool_enhanced_stats_keys)/ \
#define S2IO_DRIVER_STAT_LEN sizeof(ethtool_driver_stats_keys)/ ETH_GSTRING_LEN
#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
#define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
#define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
#define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
/* NOTE(review): expansion is unparenthesized; fine at current call
 * sites but fragile if ever used inside a larger expression. */
#define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
/*
 * S2IO_TIMER_CONF - initialize and arm a kernel timer in one step.
 * Wrapped in do { } while (0) so the multi-statement expansion behaves
 * as a single statement inside un-braced if/else bodies (CERT PRE10-C);
 * the original expansion would silently run only its first statement
 * conditionally. Callers still terminate the invocation with ';'.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	do {						\
		init_timer(&timer);			\
		timer.function = handle;		\
		timer.data = (unsigned long)(arg);	\
		mod_timer(&timer, (jiffies + (exp)));	\
	} while (0)
/*
 * s2io_vlan_rx_register - net_device VLAN-group registration callback.
 * Serializes against the transmit path via tx_lock (irqsave variant).
 * NOTE(review): the store of 'grp' and the 'flags' declaration fall
 * outside this sampled view — confirm against the full source.
 */
static void s2io_vlan_rx_register(struct net_device *dev,
	struct vlan_group *grp)
	struct s2io_nic *nic = dev->priv;
	spin_lock_irqsave(&nic->tx_lock, flags);
	spin_unlock_irqrestore(&nic->tx_lock, flags);
/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
static int vlan_strip_flag;

/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI (DTX) links. Xframe II variant; table is END_SIGN-terminated
 * in the full source.
 */
static const u64 herc_act_dtx_cfg[] = {
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
/* Xframe I variant of the DTX configuration table. */
static const u64 xena_dtx_cfg[] = {
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,

/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, 1);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(bimodal, 0);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */
S2IO_PARM_INT(lro, 0);
/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);
S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-FIFO / per-ring sizing arrays, overridable on the module
 * command line via module_param_array() below. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
	{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
	{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
	{[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
/*
 * This table lists all the devices that this driver supports.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
MODULE_DEVICE_TABLE(pci, s2io_tbl);

/* PCI error-recovery callbacks wired into the pci_driver below. */
static struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
static struct pci_driver s2io_driver = {
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = __devexit_p(s2io_rem_nic),
	.err_handler = &s2io_err_handler,
/*
 * TXD_MEM_PAGE_CNT - pages needed to hold 'len' TxD lists at
 * 'per_each' lists per page (ceiling division). Used by both
 * init_shared_mem() and free_shared_mem(). Arguments are parenthesized
 * so expression arguments expand correctly: the original
 * ((len+per_each - 1) / per_each) mis-evaluated calls such as
 * TXD_MEM_PAGE_CNT(10, 2 + 2) due to operator precedence.
 */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
520 * init_shared_mem - Allocation and Initialization of Memory
521 * @nic: Device private variable.
522 * Description: The function allocates all the memory areas shared
523 * between the NIC and the driver. This includes Tx descriptors,
524 * Rx descriptors and the statistics block.
/* Allocates every NIC/driver shared region: per-FIFO TxD lists, the
 * UFO in-band buffer, per-ring Rx blocks (circularly linked), the
 * buffer-address bookkeeping for 2-buffer mode, and the statistics
 * block. Tracks total bytes in 'mem_allocated' and folds it into
 * sw_stat at the end. On any failure free_shared_mem() is expected to
 * release whatever was allocated so far. */
static int init_shared_mem(struct s2io_nic *nic)
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	struct mac_info *mac_control;
	struct config_param *config;
	/* Running total of bytes allocated, reported via sw_stat below. */
	unsigned long long mem_allocated = 0;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Allocation and initialization of TXDLs in FIFOs */
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);

	lst_size = (sizeof(struct TxD) * config->max_txds);
	/* TxD lists are packed whole into pages; no list straddles pages. */
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
		mac_control->fifos[i].list_info = kzalloc(list_holder_size,
		if (!mac_control->fifos[i].list_info) {
			"Malloc failed for list_info\n");
		mem_allocated += list_holder_size;
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
			config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
			config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;

		for (j = 0; j < page_num; j++) {
			tmp_v = pci_alloc_consistent(nic->pdev,
				"pci_alloc_consistent ");
				DBG_PRINT(INFO_DBG, "failed for TxDL\n");
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 */
				mac_control->zerodma_virt_addr = tmp_v;
					"%s: Zero DMA address for TxDL. ", dev->name);
					"Virtual address %p\n", tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
					"pci_alloc_consistent ");
					DBG_PRINT(INFO_DBG, "failed for TxDL\n");
			mem_allocated += PAGE_SIZE;
			/* Hand out lst_size slices of this page to lists. */
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
				mac_control->fifos[i].list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);

	/* One u64 slot per TxD for the UFO in-band data. */
	nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
	if (!nic->ufo_in_band_v)
	mem_allocated += (size * sizeof(u64));

	/* Allocation and initialization of RXDs in Rings */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* num_rxd must be a whole number of blocks (count+1 slots). */
		if (config->rx_cfg[i].num_rxd %
		    (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
			DBG_PRINT(ERR_DBG, "RxDs per Block");
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
			config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1 );
		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
			mac_control->rings[i].block_count;
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
			config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
			config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;

		blk_cnt = config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			rx_blocks = &mac_control->rings[i].rx_blocks[j];
			size = SIZE_OF_BLOCK; /* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
			mem_allocated += size;
			memset(tmp_v_addr, 0, size);
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			/* Per-RxD virtual/DMA address lookup table. */
			rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
						  rxd_count[nic->rxd_mode],
			if (!rx_blocks->rxds)
				(sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
			for (l=0; l<rxd_count[nic->rxd_mode];l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
		/* Interlinking all Rx Blocks (circularly, via % blk_cnt). */
		for (j = 0; j < blk_cnt; j++) {
				mac_control->rings[i].rx_blocks[j].block_virt_addr;
				mac_control->rings[i].rx_blocks[(j + 1) %
						blk_cnt].block_virt_addr;
				mac_control->rings[i].rx_blocks[j].block_dma_addr;
				mac_control->rings[i].rx_blocks[(j + 1) %
						blk_cnt].block_dma_addr;
			pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long) tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64) tmp_p_addr_next;
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
				(rxd_count[nic->rxd_mode]+ 1);
			mac_control->rings[i].ba =
				kmalloc((sizeof(struct buffAdd *) * blk_cnt),
			if (!mac_control->rings[i].ba)
			mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
			for (j = 0; j < blk_cnt; j++) {
				mac_control->rings[i].ba[j] =
					kmalloc((sizeof(struct buffAdd) *
						(rxd_count[nic->rxd_mode] + 1)),
				if (!mac_control->rings[i].ba[j])
				mem_allocated += (sizeof(struct buffAdd) * \
					(rxd_count[nic->rxd_mode] + 1));
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &mac_control->rings[i].ba[j][k];
					/* Over-allocate by ALIGN_SIZE, then
					 * round down to an aligned pointer. */
					ba->ba_0_org = (void *) kmalloc
						(BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
						(BUF0_LEN + ALIGN_SIZE);
					tmp = (unsigned long)ba->ba_0_org;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_0 = (void *) tmp;
					ba->ba_1_org = (void *) kmalloc
						(BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
						+= (BUF1_LEN + ALIGN_SIZE);
					tmp = (unsigned long) ba->ba_1_org;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_1 = (void *) tmp;
	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem = pci_alloc_consistent
		(nic->pdev, size, &mac_control->stats_mem_phy);
	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 */
	mem_allocated += size;
	mac_control->stats_mem_sz = size;
	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (struct stat_block *) tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long) tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
813 * free_shared_mem - Free the allocated Memory
814 * @nic: Device private variable.
815 * Description: This function is to free all memory locations allocated by
816 * the init_shared_mem() function and return it to the kernel.
/* Releases everything init_shared_mem() allocated, in the same order:
 * TxD list pages (including the parked zero-DMA page), Rx blocks and
 * their rxd lookup tables, 2-buffer-mode bookkeeping and buffers, the
 * statistics block, and the UFO buffer. Each free is mirrored into
 * sw_stat.mem_freed. Tolerates partially-initialized state (NULL
 * checks) so it can run after a failed init. */
static void free_shared_mem(struct s2io_nic *nic)
	int i, j, blk_cnt, size;
	dma_addr_t tmp_p_addr;
	struct mac_info *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;
	struct net_device *dev;
	mac_control = &nic->mac_control;
	config = &nic->config;
	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;
	for (i = 0; i < config->tx_fifo_num; i++) {
		ufo_size += config->tx_cfg[i].fifo_len;
		page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			if (!mac_control->fifos[i].list_info)
			if (!mac_control->fifos[i].list_info[mem_blks].
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->fifos[i].
					    mac_control->fifos[i].
			nic->mac_control.stats_info->sw_stat.mem_freed
		/* If we got a zero DMA address during allocation,
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
				"%s: Freeing TxDL with zero DMA addr. ",
			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
				  mac_control->zerodma_virt_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed
		kfree(mac_control->fifos[i].list_info);
		nic->mac_control.stats_info->sw_stat.mem_freed +=
			(nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = mac_control->rings[i].block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
			if (tmp_v_addr == NULL)
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed += size;
			kfree(mac_control->rings[i].rx_blocks[j].rxds);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
				( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				if (!mac_control->rings[i].ba[j])
				while (k != rxd_count[nic->rxd_mode]) {
					&mac_control->rings[i].ba[j][k];
					nic->mac_control.stats_info->sw_stat.\
						mem_freed += (BUF0_LEN + ALIGN_SIZE);
					nic->mac_control.stats_info->sw_stat.\
						mem_freed += (BUF1_LEN + ALIGN_SIZE);
				kfree(mac_control->rings[i].ba[j]);
				nic->mac_control.stats_info->sw_stat.mem_freed +=
					(sizeof(struct buffAdd) *
					 (rxd_count[nic->rxd_mode] + 1));
			kfree(mac_control->rings[i].ba);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
				(sizeof(struct buffAdd *) * blk_cnt);
	if (mac_control->stats_mem) {
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
		nic->mac_control.stats_info->sw_stat.mem_freed +=
			mac_control->stats_mem_sz;
	if (nic->ufo_in_band_v) {
		kfree(nic->ufo_in_band_v);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (ufo_size * sizeof(u64));
949 * s2io_verify_pci_mode -
/*
 * s2io_verify_pci_mode - read the adapter's pci_mode register and
 * reject unknown modes. Returns -1 when PCI_MODE_UNKNOWN_MODE is set.
 * NOTE(review): the success-path return is outside this sampled view.
 */
static int s2io_verify_pci_mode(struct s2io_nic *nic)
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);
	if ( val64 & PCI_MODE_UNKNOWN_MODE)
		return -1; /* Unknown PCI mode */
/* NEC bridge identity used below to recognize a PCI-E-to-PCI-X setup. */
#define NEC_VENID 0x1033
#define NEC_DEVID 0x0125
/*
 * s2io_on_nec_bridge - detect whether this adapter sits behind a NEC
 * bridge (vendor 0x1033, device 0x0125) by scanning every PCI device
 * and comparing the bridge's bus with our parent bus.
 * NOTE(review): pci_get_device() returns a referenced device; the
 * reference release and both return statements are outside this
 * sampled view — verify the match-path reference is dropped.
 */
static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
	struct pci_dev *tdev = NULL;
	while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
			if (tdev->bus == s2io_pdev->bus->parent)
/* Bus clock in MHz, indexed by the GET_PCI_MODE() value. */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
/*
 * s2io_print_pci_mode - log the detected PCI/PCI-X bus width and speed
 * and record bus_speed[mode] in the config. Returns -1 for an unknown
 * mode or an unsupported bus speed. NOTE(review): the PCIX(M2) cases
 * appear to print the effective (doubled) transfer rate, e.g. M2_66
 * prints "133MHz" — confirm intent against the hardware manual.
 */
static int s2io_print_pci_mode(struct s2io_nic *nic)
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	struct config_param *config = &nic->config;
	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);
	if ( val64 & PCI_MODE_UNKNOWN_MODE)
		return -1; /* Unknown PCI mode */
	config->bus_speed = bus_speed[mode];
	/* Behind the NEC bridge the device is effectively on PCI-E. */
	if (s2io_on_nec_bridge(nic->pdev)) {
		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
	if (val64 & PCI_MODE_32_BITS) {
		DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
		DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
	case PCI_MODE_PCI_33:
		DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
	case PCI_MODE_PCI_66:
		DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
	case PCI_MODE_PCIX_M1_66:
		DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
	case PCI_MODE_PCIX_M1_100:
		DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
	case PCI_MODE_PCIX_M1_133:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
	case PCI_MODE_PCIX_M2_66:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
	case PCI_MODE_PCIX_M2_100:
		DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
	case PCI_MODE_PCIX_M2_133:
		DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
		return -1; /* Unsupported bus speed */
1045 * init_nic - Initialization of hardware
* @nic: device private variable
1047 * Description: The function sequentially configures every block
1048 * of the H/W from their reset values.
1049 * Return Value: SUCCESS on success and
1050 * '-1' on failure (endian settings incorrect).
1053 static int init_nic(struct s2io_nic *nic)
1055 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1056 struct net_device *dev = nic->dev;
1057 register u64 val64 = 0;
1061 struct mac_info *mac_control;
1062 struct config_param *config;
1064 unsigned long long mem_share;
1067 mac_control = &nic->mac_control;
1068 config = &nic->config;
1070 /* to set the swapper controle on the card */
1071 if(s2io_set_swapper(nic)) {
1072 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1077 * Herc requires EOI to be removed from reset before XGXS, so..
1079 if (nic->device_type & XFRAME_II_DEVICE) {
1080 val64 = 0xA500000000ULL;
1081 writeq(val64, &bar0->sw_reset);
1083 val64 = readq(&bar0->sw_reset);
1086 /* Remove XGXS from reset state */
1088 writeq(val64, &bar0->sw_reset);
1090 val64 = readq(&bar0->sw_reset);
1092 /* Enable Receiving broadcasts */
1093 add = &bar0->mac_cfg;
1094 val64 = readq(&bar0->mac_cfg);
1095 val64 |= MAC_RMAC_BCAST_ENABLE;
1096 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1097 writel((u32) val64, add);
1098 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1099 writel((u32) (val64 >> 32), (add + 4));
1101 /* Read registers in all blocks */
1102 val64 = readq(&bar0->mac_int_mask);
1103 val64 = readq(&bar0->mc_int_mask);
1104 val64 = readq(&bar0->xgxs_int_mask);
1108 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1110 if (nic->device_type & XFRAME_II_DEVICE) {
1111 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1112 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1113 &bar0->dtx_control, UF);
1115 msleep(1); /* Necessary!! */
1119 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1120 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1121 &bar0->dtx_control, UF);
1122 val64 = readq(&bar0->dtx_control);
1127 /* Tx DMA Initialization */
1129 writeq(val64, &bar0->tx_fifo_partition_0);
1130 writeq(val64, &bar0->tx_fifo_partition_1);
1131 writeq(val64, &bar0->tx_fifo_partition_2);
1132 writeq(val64, &bar0->tx_fifo_partition_3);
1135 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1137 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1138 13) | vBIT(config->tx_cfg[i].fifo_priority,
1141 if (i == (config->tx_fifo_num - 1)) {
1148 writeq(val64, &bar0->tx_fifo_partition_0);
1152 writeq(val64, &bar0->tx_fifo_partition_1);
1156 writeq(val64, &bar0->tx_fifo_partition_2);
1160 writeq(val64, &bar0->tx_fifo_partition_3);
1166 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1167 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1169 if ((nic->device_type == XFRAME_I_DEVICE) &&
1170 (nic->pdev->revision < 4))
1171 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1173 val64 = readq(&bar0->tx_fifo_partition_0);
1174 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1175 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1178 * Initialization of Tx_PA_CONFIG register to ignore packet
1179 * integrity checking.
1181 val64 = readq(&bar0->tx_pa_cfg);
1182 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1183 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1184 writeq(val64, &bar0->tx_pa_cfg);
1186 /* Rx DMA intialization. */
1188 for (i = 0; i < config->rx_ring_num; i++) {
1190 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1193 writeq(val64, &bar0->rx_queue_priority);
1196 * Allocating equal share of memory to all the
1200 if (nic->device_type & XFRAME_II_DEVICE)
1205 for (i = 0; i < config->rx_ring_num; i++) {
1208 mem_share = (mem_size / config->rx_ring_num +
1209 mem_size % config->rx_ring_num);
1210 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1213 mem_share = (mem_size / config->rx_ring_num);
1214 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1217 mem_share = (mem_size / config->rx_ring_num);
1218 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1221 mem_share = (mem_size / config->rx_ring_num);
1222 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1225 mem_share = (mem_size / config->rx_ring_num);
1226 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1229 mem_share = (mem_size / config->rx_ring_num);
1230 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1233 mem_share = (mem_size / config->rx_ring_num);
1234 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1237 mem_share = (mem_size / config->rx_ring_num);
1238 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1242 writeq(val64, &bar0->rx_queue_cfg);
1245 * Filling Tx round robin registers
1246 * as per the number of FIFOs
1248 switch (config->tx_fifo_num) {
1250 val64 = 0x0000000000000000ULL;
1251 writeq(val64, &bar0->tx_w_round_robin_0);
1252 writeq(val64, &bar0->tx_w_round_robin_1);
1253 writeq(val64, &bar0->tx_w_round_robin_2);
1254 writeq(val64, &bar0->tx_w_round_robin_3);
1255 writeq(val64, &bar0->tx_w_round_robin_4);
1258 val64 = 0x0000010000010000ULL;
1259 writeq(val64, &bar0->tx_w_round_robin_0);
1260 val64 = 0x0100000100000100ULL;
1261 writeq(val64, &bar0->tx_w_round_robin_1);
1262 val64 = 0x0001000001000001ULL;
1263 writeq(val64, &bar0->tx_w_round_robin_2);
1264 val64 = 0x0000010000010000ULL;
1265 writeq(val64, &bar0->tx_w_round_robin_3);
1266 val64 = 0x0100000000000000ULL;
1267 writeq(val64, &bar0->tx_w_round_robin_4);
1270 val64 = 0x0001000102000001ULL;
1271 writeq(val64, &bar0->tx_w_round_robin_0);
1272 val64 = 0x0001020000010001ULL;
1273 writeq(val64, &bar0->tx_w_round_robin_1);
1274 val64 = 0x0200000100010200ULL;
1275 writeq(val64, &bar0->tx_w_round_robin_2);
1276 val64 = 0x0001000102000001ULL;
1277 writeq(val64, &bar0->tx_w_round_robin_3);
1278 val64 = 0x0001020000000000ULL;
1279 writeq(val64, &bar0->tx_w_round_robin_4);
1282 val64 = 0x0001020300010200ULL;
1283 writeq(val64, &bar0->tx_w_round_robin_0);
1284 val64 = 0x0100000102030001ULL;
1285 writeq(val64, &bar0->tx_w_round_robin_1);
1286 val64 = 0x0200010000010203ULL;
1287 writeq(val64, &bar0->tx_w_round_robin_2);
1288 val64 = 0x0001020001000001ULL;
1289 writeq(val64, &bar0->tx_w_round_robin_3);
1290 val64 = 0x0203000100000000ULL;
1291 writeq(val64, &bar0->tx_w_round_robin_4);
1294 val64 = 0x0001000203000102ULL;
1295 writeq(val64, &bar0->tx_w_round_robin_0);
1296 val64 = 0x0001020001030004ULL;
1297 writeq(val64, &bar0->tx_w_round_robin_1);
1298 val64 = 0x0001000203000102ULL;
1299 writeq(val64, &bar0->tx_w_round_robin_2);
1300 val64 = 0x0001020001030004ULL;
1301 writeq(val64, &bar0->tx_w_round_robin_3);
1302 val64 = 0x0001000000000000ULL;
1303 writeq(val64, &bar0->tx_w_round_robin_4);
1306 val64 = 0x0001020304000102ULL;
1307 writeq(val64, &bar0->tx_w_round_robin_0);
1308 val64 = 0x0304050001020001ULL;
1309 writeq(val64, &bar0->tx_w_round_robin_1);
1310 val64 = 0x0203000100000102ULL;
1311 writeq(val64, &bar0->tx_w_round_robin_2);
1312 val64 = 0x0304000102030405ULL;
1313 writeq(val64, &bar0->tx_w_round_robin_3);
1314 val64 = 0x0001000200000000ULL;
1315 writeq(val64, &bar0->tx_w_round_robin_4);
1318 val64 = 0x0001020001020300ULL;
1319 writeq(val64, &bar0->tx_w_round_robin_0);
1320 val64 = 0x0102030400010203ULL;
1321 writeq(val64, &bar0->tx_w_round_robin_1);
1322 val64 = 0x0405060001020001ULL;
1323 writeq(val64, &bar0->tx_w_round_robin_2);
1324 val64 = 0x0304050000010200ULL;
1325 writeq(val64, &bar0->tx_w_round_robin_3);
1326 val64 = 0x0102030000000000ULL;
1327 writeq(val64, &bar0->tx_w_round_robin_4);
1330 val64 = 0x0001020300040105ULL;
1331 writeq(val64, &bar0->tx_w_round_robin_0);
1332 val64 = 0x0200030106000204ULL;
1333 writeq(val64, &bar0->tx_w_round_robin_1);
1334 val64 = 0x0103000502010007ULL;
1335 writeq(val64, &bar0->tx_w_round_robin_2);
1336 val64 = 0x0304010002060500ULL;
1337 writeq(val64, &bar0->tx_w_round_robin_3);
1338 val64 = 0x0103020400000000ULL;
1339 writeq(val64, &bar0->tx_w_round_robin_4);
1343 /* Enable all configured Tx FIFO partitions */
1344 val64 = readq(&bar0->tx_fifo_partition_0);
1345 val64 |= (TX_FIFO_PARTITION_EN);
1346 writeq(val64, &bar0->tx_fifo_partition_0);
1348 /* Filling the Rx round robin registers as per the
1349 * number of Rings and steering based on QoS.
1351 switch (config->rx_ring_num) {
1353 val64 = 0x8080808080808080ULL;
1354 writeq(val64, &bar0->rts_qos_steering);
1357 val64 = 0x0000010000010000ULL;
1358 writeq(val64, &bar0->rx_w_round_robin_0);
1359 val64 = 0x0100000100000100ULL;
1360 writeq(val64, &bar0->rx_w_round_robin_1);
1361 val64 = 0x0001000001000001ULL;
1362 writeq(val64, &bar0->rx_w_round_robin_2);
1363 val64 = 0x0000010000010000ULL;
1364 writeq(val64, &bar0->rx_w_round_robin_3);
1365 val64 = 0x0100000000000000ULL;
1366 writeq(val64, &bar0->rx_w_round_robin_4);
1368 val64 = 0x8080808040404040ULL;
1369 writeq(val64, &bar0->rts_qos_steering);
1372 val64 = 0x0001000102000001ULL;
1373 writeq(val64, &bar0->rx_w_round_robin_0);
1374 val64 = 0x0001020000010001ULL;
1375 writeq(val64, &bar0->rx_w_round_robin_1);
1376 val64 = 0x0200000100010200ULL;
1377 writeq(val64, &bar0->rx_w_round_robin_2);
1378 val64 = 0x0001000102000001ULL;
1379 writeq(val64, &bar0->rx_w_round_robin_3);
1380 val64 = 0x0001020000000000ULL;
1381 writeq(val64, &bar0->rx_w_round_robin_4);
1383 val64 = 0x8080804040402020ULL;
1384 writeq(val64, &bar0->rts_qos_steering);
1387 val64 = 0x0001020300010200ULL;
1388 writeq(val64, &bar0->rx_w_round_robin_0);
1389 val64 = 0x0100000102030001ULL;
1390 writeq(val64, &bar0->rx_w_round_robin_1);
1391 val64 = 0x0200010000010203ULL;
1392 writeq(val64, &bar0->rx_w_round_robin_2);
1393 val64 = 0x0001020001000001ULL;
1394 writeq(val64, &bar0->rx_w_round_robin_3);
1395 val64 = 0x0203000100000000ULL;
1396 writeq(val64, &bar0->rx_w_round_robin_4);
1398 val64 = 0x8080404020201010ULL;
1399 writeq(val64, &bar0->rts_qos_steering);
1402 val64 = 0x0001000203000102ULL;
1403 writeq(val64, &bar0->rx_w_round_robin_0);
1404 val64 = 0x0001020001030004ULL;
1405 writeq(val64, &bar0->rx_w_round_robin_1);
1406 val64 = 0x0001000203000102ULL;
1407 writeq(val64, &bar0->rx_w_round_robin_2);
1408 val64 = 0x0001020001030004ULL;
1409 writeq(val64, &bar0->rx_w_round_robin_3);
1410 val64 = 0x0001000000000000ULL;
1411 writeq(val64, &bar0->rx_w_round_robin_4);
1413 val64 = 0x8080404020201008ULL;
1414 writeq(val64, &bar0->rts_qos_steering);
1417 val64 = 0x0001020304000102ULL;
1418 writeq(val64, &bar0->rx_w_round_robin_0);
1419 val64 = 0x0304050001020001ULL;
1420 writeq(val64, &bar0->rx_w_round_robin_1);
1421 val64 = 0x0203000100000102ULL;
1422 writeq(val64, &bar0->rx_w_round_robin_2);
1423 val64 = 0x0304000102030405ULL;
1424 writeq(val64, &bar0->rx_w_round_robin_3);
1425 val64 = 0x0001000200000000ULL;
1426 writeq(val64, &bar0->rx_w_round_robin_4);
1428 val64 = 0x8080404020100804ULL;
1429 writeq(val64, &bar0->rts_qos_steering);
1432 val64 = 0x0001020001020300ULL;
1433 writeq(val64, &bar0->rx_w_round_robin_0);
1434 val64 = 0x0102030400010203ULL;
1435 writeq(val64, &bar0->rx_w_round_robin_1);
1436 val64 = 0x0405060001020001ULL;
1437 writeq(val64, &bar0->rx_w_round_robin_2);
1438 val64 = 0x0304050000010200ULL;
1439 writeq(val64, &bar0->rx_w_round_robin_3);
1440 val64 = 0x0102030000000000ULL;
1441 writeq(val64, &bar0->rx_w_round_robin_4);
1443 val64 = 0x8080402010080402ULL;
1444 writeq(val64, &bar0->rts_qos_steering);
1447 val64 = 0x0001020300040105ULL;
1448 writeq(val64, &bar0->rx_w_round_robin_0);
1449 val64 = 0x0200030106000204ULL;
1450 writeq(val64, &bar0->rx_w_round_robin_1);
1451 val64 = 0x0103000502010007ULL;
1452 writeq(val64, &bar0->rx_w_round_robin_2);
1453 val64 = 0x0304010002060500ULL;
1454 writeq(val64, &bar0->rx_w_round_robin_3);
1455 val64 = 0x0103020400000000ULL;
1456 writeq(val64, &bar0->rx_w_round_robin_4);
1458 val64 = 0x8040201008040201ULL;
1459 writeq(val64, &bar0->rts_qos_steering);
1465 for (i = 0; i < 8; i++)
1466 writeq(val64, &bar0->rts_frm_len_n[i]);
1468 /* Set the default rts frame length for the rings configured */
1469 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1470 for (i = 0 ; i < config->rx_ring_num ; i++)
1471 writeq(val64, &bar0->rts_frm_len_n[i]);
1473 /* Set the frame length for the configured rings
1474 * desired by the user
1476 for (i = 0; i < config->rx_ring_num; i++) {
1477 /* If rts_frm_len[i] == 0 then it is assumed that user not
1478 * specified frame length steering.
1479 * If the user provides the frame length then program
1480 * the rts_frm_len register for those values or else
1481 * leave it as it is.
1483 if (rts_frm_len[i] != 0) {
1484 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1485 &bar0->rts_frm_len_n[i]);
1489 /* Disable differentiated services steering logic */
1490 for (i = 0; i < 64; i++) {
1491 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1492 DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1494 DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1499 /* Program statistics memory */
1500 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1502 if (nic->device_type == XFRAME_II_DEVICE) {
1503 val64 = STAT_BC(0x320);
1504 writeq(val64, &bar0->stat_byte_cnt);
1508 * Initializing the sampling rate for the device to calculate the
1509 * bandwidth utilization.
1511 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1512 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1513 writeq(val64, &bar0->mac_link_util);
1517 * Initializing the Transmit and Receive Traffic Interrupt
1521 * TTI Initialization. Default Tx timer gets us about
1522 * 250 interrupts per sec. Continuous interrupts are enabled
1525 if (nic->device_type == XFRAME_II_DEVICE) {
1526 int count = (nic->config.bus_speed * 125)/2;
1527 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1530 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1532 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1533 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1534 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1535 if (use_continuous_tx_intrs)
1536 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1537 writeq(val64, &bar0->tti_data1_mem);
1539 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1540 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1541 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1542 writeq(val64, &bar0->tti_data2_mem);
1544 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1545 writeq(val64, &bar0->tti_command_mem);
1548 * Once the operation completes, the Strobe bit of the command
1549 * register will be reset. We poll for this particular condition
1550 * We wait for a maximum of 500ms for the operation to complete,
1551 * if it's not complete by then we return error.
1555 val64 = readq(&bar0->tti_command_mem);
1556 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1560 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1568 if (nic->config.bimodal) {
1570 for (k = 0; k < config->rx_ring_num; k++) {
1571 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1572 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1573 writeq(val64, &bar0->tti_command_mem);
1576 * Once the operation completes, the Strobe bit of the command
1577 * register will be reset. We poll for this particular condition
1578 * We wait for a maximum of 500ms for the operation to complete,
1579 * if it's not complete by then we return error.
1583 val64 = readq(&bar0->tti_command_mem);
1584 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1589 "%s: TTI init Failed\n",
1599 /* RTI Initialization */
1600 if (nic->device_type == XFRAME_II_DEVICE) {
1602 * Programmed to generate Apprx 500 Intrs per
1605 int count = (nic->config.bus_speed * 125)/4;
1606 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1608 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1610 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1611 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1612 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1614 writeq(val64, &bar0->rti_data1_mem);
1616 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1617 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1618 if (nic->config.intr_type == MSI_X)
1619 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1620 RTI_DATA2_MEM_RX_UFC_D(0x40));
1622 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1623 RTI_DATA2_MEM_RX_UFC_D(0x80));
1624 writeq(val64, &bar0->rti_data2_mem);
1626 for (i = 0; i < config->rx_ring_num; i++) {
1627 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1628 | RTI_CMD_MEM_OFFSET(i);
1629 writeq(val64, &bar0->rti_command_mem);
1632 * Once the operation completes, the Strobe bit of the
1633 * command register will be reset. We poll for this
1634 * particular condition. We wait for a maximum of 500ms
1635 * for the operation to complete, if it's not complete
1636 * by then we return error.
1640 val64 = readq(&bar0->rti_command_mem);
1641 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1645 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1656 * Initializing proper values as Pause threshold into all
1657 * the 8 Queues on Rx side.
1659 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1660 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1662 /* Disable RMAC PAD STRIPPING */
1663 add = &bar0->mac_cfg;
1664 val64 = readq(&bar0->mac_cfg);
1665 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1666 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1667 writel((u32) (val64), add);
1668 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1669 writel((u32) (val64 >> 32), (add + 4));
1670 val64 = readq(&bar0->mac_cfg);
1672 /* Enable FCS stripping by adapter */
1673 add = &bar0->mac_cfg;
1674 val64 = readq(&bar0->mac_cfg);
1675 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1676 if (nic->device_type == XFRAME_II_DEVICE)
1677 writeq(val64, &bar0->mac_cfg);
1679 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1680 writel((u32) (val64), add);
1681 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1682 writel((u32) (val64 >> 32), (add + 4));
1686 * Set the time value to be inserted in the pause frame
1687 * generated by xena.
1689 val64 = readq(&bar0->rmac_pause_cfg);
1690 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1691 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1692 writeq(val64, &bar0->rmac_pause_cfg);
1695 * Set the Threshold Limit for Generating the pause frame
1696 * If the amount of data in any Queue exceeds ratio of
1697 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1698 * pause frame is generated
1701 for (i = 0; i < 4; i++) {
1703 (((u64) 0xFF00 | nic->mac_control.
1704 mc_pause_threshold_q0q3)
1707 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1710 for (i = 0; i < 4; i++) {
1712 (((u64) 0xFF00 | nic->mac_control.
1713 mc_pause_threshold_q4q7)
1716 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1719 * TxDMA will stop Read request if the number of read split has
1720 * exceeded the limit pointed by shared_splits
1722 val64 = readq(&bar0->pic_control);
1723 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1724 writeq(val64, &bar0->pic_control);
1726 if (nic->config.bus_speed == 266) {
1727 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1728 writeq(0x0, &bar0->read_retry_delay);
1729 writeq(0x0, &bar0->write_retry_delay);
1733 * Programming the Herc to split every write transaction
1734 * that does not start on an ADB to reduce disconnects.
1736 if (nic->device_type == XFRAME_II_DEVICE) {
1737 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1738 MISC_LINK_STABILITY_PRD(3);
1739 writeq(val64, &bar0->misc_control);
1740 val64 = readq(&bar0->pic_control2);
1741 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1742 writeq(val64, &bar0->pic_control2);
1744 if (strstr(nic->product_name, "CX4")) {
1745 val64 = TMAC_AVG_IPG(0x17);
1746 writeq(val64, &bar0->tmac_avg_ipg);
1751 #define LINK_UP_DOWN_INTERRUPT 1
1752 #define MAC_RMAC_ERR_TIMER 2
1754 static int s2io_link_fault_indication(struct s2io_nic *nic)
1756 if (nic->config.intr_type != INTA)
1757 return MAC_RMAC_ERR_TIMER;
1758 if (nic->device_type == XFRAME_II_DEVICE)
1759 return LINK_UP_DOWN_INTERRUPT;
1761 return MAC_RMAC_ERR_TIMER;
1765 * do_s2io_write_bits - update alarm bits in alarm register
1766 * @value: alarm bits
1767 * @flag: interrupt status
1768 * @addr: address value
1769 * Description: update alarm bits in alarm register
1773 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1777 temp64 = readq(addr);
1779 if(flag == ENABLE_INTRS)
1780 temp64 &= ~((u64) value);
1782 temp64 |= ((u64) value);
1783 writeq(temp64, addr);
1786 void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1788 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1789 register u64 gen_int_mask = 0;
1791 if (mask & TX_DMA_INTR) {
1793 gen_int_mask |= TXDMA_INT_M;
1795 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1796 TXDMA_PCC_INT | TXDMA_TTI_INT |
1797 TXDMA_LSO_INT | TXDMA_TPA_INT |
1798 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1800 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1801 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1802 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1803 &bar0->pfc_err_mask);
1805 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1806 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1807 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1809 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1810 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1811 PCC_N_SERR | PCC_6_COF_OV_ERR |
1812 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1813 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1814 PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);
1816 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1817 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1819 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1820 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1821 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1822 flag, &bar0->lso_err_mask);
1824 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1825 flag, &bar0->tpa_err_mask);
1827 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1831 if (mask & TX_MAC_INTR) {
1832 gen_int_mask |= TXMAC_INT_M;
1833 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1834 &bar0->mac_int_mask);
1835 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1836 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1837 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1838 flag, &bar0->mac_tmac_err_mask);
1841 if (mask & TX_XGXS_INTR) {
1842 gen_int_mask |= TXXGXS_INT_M;
1843 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1844 &bar0->xgxs_int_mask);
1845 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1846 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1847 flag, &bar0->xgxs_txgxs_err_mask);
1850 if (mask & RX_DMA_INTR) {
1851 gen_int_mask |= RXDMA_INT_M;
1852 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1853 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1854 flag, &bar0->rxdma_int_mask);
1855 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1856 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1857 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1858 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1859 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1860 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1861 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1862 &bar0->prc_pcix_err_mask);
1863 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1864 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1865 &bar0->rpa_err_mask);
1866 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1867 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1868 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1869 RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
1870 flag, &bar0->rda_err_mask);
1871 do_s2io_write_bits(RTI_SM_ERR_ALARM |
1872 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
1873 flag, &bar0->rti_err_mask);
1876 if (mask & RX_MAC_INTR) {
1877 gen_int_mask |= RXMAC_INT_M;
1878 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
1879 &bar0->mac_int_mask);
1880 do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
1881 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
1882 RMAC_DOUBLE_ECC_ERR |
1883 RMAC_LINK_STATE_CHANGE_INT,
1884 flag, &bar0->mac_rmac_err_mask);
1887 if (mask & RX_XGXS_INTR)
1889 gen_int_mask |= RXXGXS_INT_M;
1890 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
1891 &bar0->xgxs_int_mask);
1892 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
1893 &bar0->xgxs_rxgxs_err_mask);
1896 if (mask & MC_INTR) {
1897 gen_int_mask |= MC_INT_M;
1898 do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
1899 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
1900 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
1901 &bar0->mc_err_mask);
1903 nic->general_int_mask = gen_int_mask;
1905 /* Remove this line when alarm interrupts are enabled */
1906 nic->general_int_mask = 0;
1909 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1910 * @nic: device private variable,
1911 * @mask: A mask indicating which Intr block must be modified and,
1912 * @flag: A flag indicating whether to enable or disable the Intrs.
1913 * Description: This function will either disable or enable the interrupts
1914 * depending on the flag argument. The mask argument can be used to
1915 * enable/disable any Intr block.
1916 * Return Value: NONE.
1919 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1921 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1922 register u64 temp64 = 0, intr_mask = 0;
1924 intr_mask = nic->general_int_mask;
1926 /* Top level interrupt classification */
1927 /* PIC Interrupts */
1928 if (mask & TX_PIC_INTR) {
1929 /* Enable PIC Intrs in the general intr mask register */
1930 intr_mask |= TXPIC_INT_M;
1931 if (flag == ENABLE_INTRS) {
1933 * If Hercules adapter enable GPIO otherwise
1934 * disable all PCIX, Flash, MDIO, IIC and GPIO
1935 * interrupts for now.
1938 if (s2io_link_fault_indication(nic) ==
1939 LINK_UP_DOWN_INTERRUPT ) {
1940 do_s2io_write_bits(PIC_INT_GPIO, flag,
1941 &bar0->pic_int_mask);
1942 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
1943 &bar0->gpio_int_mask);
1945 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1946 } else if (flag == DISABLE_INTRS) {
1948 * Disable PIC Intrs in the general
1949 * intr mask register
1951 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1955 /* Tx traffic interrupts */
1956 if (mask & TX_TRAFFIC_INTR) {
1957 intr_mask |= TXTRAFFIC_INT_M;
1958 if (flag == ENABLE_INTRS) {
1960 * Enable all the Tx side interrupts
1961 * writing 0 Enables all 64 TX interrupt levels
1963 writeq(0x0, &bar0->tx_traffic_mask);
1964 } else if (flag == DISABLE_INTRS) {
1966 * Disable Tx Traffic Intrs in the general intr mask
1969 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1973 /* Rx traffic interrupts */
1974 if (mask & RX_TRAFFIC_INTR) {
1975 intr_mask |= RXTRAFFIC_INT_M;
1976 if (flag == ENABLE_INTRS) {
1977 /* writing 0 Enables all 8 RX interrupt levels */
1978 writeq(0x0, &bar0->rx_traffic_mask);
1979 } else if (flag == DISABLE_INTRS) {
1981 * Disable Rx Traffic Intrs in the general intr mask
1984 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1988 temp64 = readq(&bar0->general_int_mask);
1989 if (flag == ENABLE_INTRS)
1990 temp64 &= ~((u64) intr_mask);
1992 temp64 = DISABLE_ALL_INTRS;
1993 writeq(temp64, &bar0->general_int_mask);
1995 nic->general_int_mask = readq(&bar0->general_int_mask);
1999 * verify_pcc_quiescent- Checks for PCC quiescent state
2000 * Return: 1 If PCC is quiescence
2001 * 0 If PCC is not quiescence
2003 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2006 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2007 u64 val64 = readq(&bar0->adapter_status);
2009 herc = (sp->device_type == XFRAME_II_DEVICE);
2011 if (flag == FALSE) {
2012 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2013 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2016 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2020 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2021 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2022 ADAPTER_STATUS_RMAC_PCC_IDLE))
2025 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2026 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2034 * verify_xena_quiescence - Checks whether the H/W is ready
2035 * Description: Returns whether the H/W is ready to go or not. Depending
2036 * on whether adapter enable bit was written or not the comparison
2037 * differs and the calling function passes the input argument flag to
2039 * Return: 1 If xena is quiescence
2040 * 0 If Xena is not quiescence
2043 static int verify_xena_quiescence(struct s2io_nic *sp)
2046 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2047 u64 val64 = readq(&bar0->adapter_status);
2048 mode = s2io_verify_pci_mode(sp);
2050 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2051 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2054 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2055 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2058 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2059 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2062 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2063 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2066 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2067 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2070 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2071 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2074 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2075 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2078 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2079 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2084 * In PCI 33 mode, the P_PLL is not used, and therefore,
2085 * the the P_PLL_LOCK bit in the adapter_status register will
2088 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2089 sp->device_type == XFRAME_II_DEVICE && mode !=
2091 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2094 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2095 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2096 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2103 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2104 * @sp: Pointer to device specifc structure
2106 * New procedure to clear mac address reading problems on Alpha platforms
2110 static void fix_mac_address(struct s2io_nic * sp)
2112 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2116 while (fix_mac[i] != END_SIGN) {
2117 writeq(fix_mac[i++], &bar0->gpio_control);
2119 val64 = readq(&bar0->gpio_control);
2124 * start_nic - Turns the device on
2125 * @nic : device private variable.
2127 * This function actually turns the device on. Before this function is
2128 * called,all Registers are configured from their reset states
2129 * and shared memory is allocated but the NIC is still quiescent. On
2130 * calling this function, the device interrupts are cleared and the NIC is
2131 * literally switched on by writing into the adapter control register.
2133 * SUCCESS on success and -1 on failure.
2136 static int start_nic(struct s2io_nic *nic)
2138 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2139 struct net_device *dev = nic->dev;
2140 register u64 val64 = 0;
2142 struct mac_info *mac_control;
2143 struct config_param *config;
2145 mac_control = &nic->mac_control;
2146 config = &nic->config;
2148 /* PRC Initialization and configuration */
2149 for (i = 0; i < config->rx_ring_num; i++) {
2150 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
2151 &bar0->prc_rxd0_n[i]);
2153 val64 = readq(&bar0->prc_ctrl_n[i]);
2154 if (nic->config.bimodal)
2155 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
2156 if (nic->rxd_mode == RXD_MODE_1)
2157 val64 |= PRC_CTRL_RC_ENABLED;
2159 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2160 if (nic->device_type == XFRAME_II_DEVICE)
2161 val64 |= PRC_CTRL_GROUP_READS;
2162 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2163 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2164 writeq(val64, &bar0->prc_ctrl_n[i]);
2167 if (nic->rxd_mode == RXD_MODE_3B) {
2168 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2169 val64 = readq(&bar0->rx_pa_cfg);
2170 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2171 writeq(val64, &bar0->rx_pa_cfg);
2174 if (vlan_tag_strip == 0) {
2175 val64 = readq(&bar0->rx_pa_cfg);
2176 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2177 writeq(val64, &bar0->rx_pa_cfg);
2178 vlan_strip_flag = 0;
2182 * Enabling MC-RLDRAM. After enabling the device, we timeout
2183 * for around 100ms, which is approximately the time required
2184 * for the device to be ready for operation.
2186 val64 = readq(&bar0->mc_rldram_mrs);
2187 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2188 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2189 val64 = readq(&bar0->mc_rldram_mrs);
2191 msleep(100); /* Delay by around 100 ms. */
2193 /* Enabling ECC Protection. */
2194 val64 = readq(&bar0->adapter_control);
2195 val64 &= ~ADAPTER_ECC_EN;
2196 writeq(val64, &bar0->adapter_control);
2199 * Verify if the device is ready to be enabled, if so enable
2202 val64 = readq(&bar0->adapter_status);
2203 if (!verify_xena_quiescence(nic)) {
2204 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2205 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2206 (unsigned long long) val64);
2211 * With some switches, link might be already up at this point.
2212 * Because of this weird behavior, when we enable laser,
2213 * we may not get link. We need to handle this. We cannot
2214 * figure out which switch is misbehaving. So we are forced to
2215 * make a global change.
2218 /* Enabling Laser. */
2219 val64 = readq(&bar0->adapter_control);
2220 val64 |= ADAPTER_EOI_TX_ON;
2221 writeq(val64, &bar0->adapter_control);
2223 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2225 * Dont see link state interrupts initally on some switches,
2226 * so directly scheduling the link state task here.
2228 schedule_work(&nic->set_link_task);
2230 /* SXE-002: Initialize link and activity LED */
2231 subid = nic->pdev->subsystem_device;
2232 if (((subid & 0xFF) >= 0x07) &&
2233 (nic->device_type == XFRAME_I_DEVICE)) {
2234 val64 = readq(&bar0->gpio_control);
2235 val64 |= 0x0000800000000000ULL;
2236 writeq(val64, &bar0->gpio_control);
2237 val64 = 0x0411040400000000ULL;
2238 writeq(val64, (void __iomem *)bar0 + 0x2700);
2244 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2246 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2247 TxD *txdlp, int get_off)
2249 struct s2io_nic *nic = fifo_data->nic;
2250 struct sk_buff *skb;
2255 if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2256 pci_unmap_single(nic->pdev, (dma_addr_t)
2257 txds->Buffer_Pointer, sizeof(u64),
2262 skb = (struct sk_buff *) ((unsigned long)
2263 txds->Host_Control);
2265 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2268 pci_unmap_single(nic->pdev, (dma_addr_t)
2269 txds->Buffer_Pointer,
2270 skb->len - skb->data_len,
2272 frg_cnt = skb_shinfo(skb)->nr_frags;
2275 for (j = 0; j < frg_cnt; j++, txds++) {
2276 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2277 if (!txds->Buffer_Pointer)
2279 pci_unmap_page(nic->pdev, (dma_addr_t)
2280 txds->Buffer_Pointer,
2281 frag->size, PCI_DMA_TODEVICE);
2284 memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2289 * free_tx_buffers - Free all queued Tx buffers
2290 * @nic : device private variable.
2292 * Free all queued Tx buffers.
2293 * Return Value: void
2296 static void free_tx_buffers(struct s2io_nic *nic)
2298 struct net_device *dev = nic->dev;
2299 struct sk_buff *skb;
2302 struct mac_info *mac_control;
2303 struct config_param *config;
2306 mac_control = &nic->mac_control;
2307 config = &nic->config;
2309 for (i = 0; i < config->tx_fifo_num; i++) {
2310 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2311 txdp = (struct TxD *) \
2312 mac_control->fifos[i].list_info[j].list_virt_addr;
2313 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2315 nic->mac_control.stats_info->sw_stat.mem_freed
2322 "%s:forcibly freeing %d skbs on FIFO%d\n",
2324 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2325 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2330 * stop_nic - To stop the nic
2331 * @nic ; device private variable.
2333 * This function does exactly the opposite of what the start_nic()
2334 * function does. This function is called to stop the device.
2339 static void stop_nic(struct s2io_nic *nic)
2341 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2342 register u64 val64 = 0;
2344 struct mac_info *mac_control;
2345 struct config_param *config;
2347 mac_control = &nic->mac_control;
2348 config = &nic->config;
2350 /* Disable all interrupts */
2351 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2352 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2353 interruptible |= TX_PIC_INTR;
2354 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2356 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2357 val64 = readq(&bar0->adapter_control);
2358 val64 &= ~(ADAPTER_CNTL_EN);
2359 writeq(val64, &bar0->adapter_control);
2363 * fill_rx_buffers - Allocates the Rx side skbs
2364 * @nic: device private variable
2365 * @ring_no: ring number
2367 * The function allocates Rx side skbs and puts the physical
2368 * address of these buffers into the RxD buffer pointers, so that the NIC
2369 * can DMA the received frame into these locations.
2370 * The NIC supports 3 receive modes, viz
2372 * 2. three buffer and
2373 * 3. Five buffer modes.
2374 * Each mode defines how many fragments the received frame will be split
2375 * up into by the NIC. The frame is split into L3 header, L4 Header,
2376 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2377 * is split into 3 fragments. As of now only single buffer mode is
2380 * SUCCESS on success or an appropriate -ve value on failure.
2383 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
/* NOTE(review): many interior source lines are missing from this extract
 * (declarations of 'alloc_cnt', 'alloc_tab', 'rxdp', 'rxdp1', 'rxdp3',
 * 'ba', 'tmp', several braces and statements); verify against the full
 * file before modifying. */
2385 struct net_device *dev = nic->dev;
2386 struct sk_buff *skb;
2388 int off, off1, size, block_no, block_no1;
2391 struct mac_info *mac_control;
2392 struct config_param *config;
2395 unsigned long flags;
2396 struct RxD_t *first_rxdp = NULL;
2397 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2400 struct swStat *stats = &nic->mac_control.stats_info->sw_stat;
2402 mac_control = &nic->mac_control;
2403 config = &nic->config;
/* Number of descriptors to replenish = ring capacity minus buffers the
 * host still holds for this ring. */
2404 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2405 atomic_read(&nic->rx_bufs_left[ring_no]);
2407 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
2408 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2409 while (alloc_tab < alloc_cnt) {
2410 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2412 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2414 rxdp = mac_control->rings[ring_no].
2415 rx_blocks[block_no].rxds[off].virt_addr;
/* Put pointer caught up with get pointer while the descriptor still
 * holds an skb: the ring is effectively full, stop refilling. */
2417 if ((block_no == block_no1) && (off == off1) &&
2418 (rxdp->Host_Control)) {
2419 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2421 DBG_PRINT(INTR_DBG, " info equated\n");
2424 if (off && (off == rxd_count[nic->rxd_mode])) {
2425 mac_control->rings[ring_no].rx_curr_put_info.
2427 if (mac_control->rings[ring_no].rx_curr_put_info.
2428 block_index == mac_control->rings[ring_no].
2430 mac_control->rings[ring_no].rx_curr_put_info.
2432 block_no = mac_control->rings[ring_no].
2433 rx_curr_put_info.block_index;
2434 if (off == rxd_count[nic->rxd_mode])
2436 mac_control->rings[ring_no].rx_curr_put_info.
2438 rxdp = mac_control->rings[ring_no].
2439 rx_blocks[block_no].block_virt_addr;
2440 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
/* put_pos is read by the Rx interrupt path; publish it under put_lock
 * on the locking variant of this path. */
2444 spin_lock_irqsave(&nic->put_lock, flags);
2445 mac_control->rings[ring_no].put_pos =
2446 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2447 spin_unlock_irqrestore(&nic->put_lock, flags);
2449 mac_control->rings[ring_no].put_pos =
2450 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
/* Descriptor still owned by the adapter — nothing to refill here. */
2452 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2453 ((nic->rxd_mode == RXD_MODE_3B) &&
2454 (rxdp->Control_2 & BIT(0)))) {
2455 mac_control->rings[ring_no].rx_curr_put_info.
2459 /* calculate size of skb based on ring mode */
2460 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2461 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2462 if (nic->rxd_mode == RXD_MODE_1)
2463 size += NET_IP_ALIGN;
2465 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2468 skb = dev_alloc_skb(size);
2470 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
2471 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
/* On allocation failure, hand the first prepared descriptor back to
 * the adapter so the work done so far is not lost. */
2474 first_rxdp->Control_1 |= RXD_OWN_XENA;
2476 nic->mac_control.stats_info->sw_stat. \
2477 mem_alloc_fail_cnt++;
2480 nic->mac_control.stats_info->sw_stat.mem_allocated
2482 if (nic->rxd_mode == RXD_MODE_1) {
2483 /* 1 buffer mode - normal operation mode */
2484 rxdp1 = (struct RxD1*)rxdp;
2485 memset(rxdp, 0, sizeof(struct RxD1));
2486 skb_reserve(skb, NET_IP_ALIGN);
2487 rxdp1->Buffer0_ptr = pci_map_single
2488 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2489 PCI_DMA_FROMDEVICE);
2490 if( (rxdp1->Buffer0_ptr == 0) ||
2491 (rxdp1->Buffer0_ptr ==
2493 goto pci_map_failed;
2496 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2498 } else if (nic->rxd_mode == RXD_MODE_3B) {
2501 * 2 buffer mode provides 128
2502 * byte aligned receive buffers.
2505 rxdp3 = (struct RxD3*)rxdp;
2506 /* save buffer pointers to avoid frequent dma mapping */
2507 Buffer0_ptr = rxdp3->Buffer0_ptr;
2508 Buffer1_ptr = rxdp3->Buffer1_ptr;
2509 memset(rxdp, 0, sizeof(struct RxD3));
2510 /* restore the buffer pointers for dma sync*/
2511 rxdp3->Buffer0_ptr = Buffer0_ptr;
2512 rxdp3->Buffer1_ptr = Buffer1_ptr;
2514 ba = &mac_control->rings[ring_no].ba[block_no][off];
2515 skb_reserve(skb, BUF0_LEN);
2516 tmp = (u64)(unsigned long) skb->data;
2519 skb->data = (void *) (unsigned long)tmp;
2520 skb_reset_tail_pointer(skb);
/* Map Buffer0 once; on subsequent refills only a dma sync is needed. */
2522 if (!(rxdp3->Buffer0_ptr))
2523 rxdp3->Buffer0_ptr =
2524 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2525 PCI_DMA_FROMDEVICE);
2527 pci_dma_sync_single_for_device(nic->pdev,
2528 (dma_addr_t) rxdp3->Buffer0_ptr,
2529 BUF0_LEN, PCI_DMA_FROMDEVICE);
2530 if( (rxdp3->Buffer0_ptr == 0) ||
2531 (rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
2532 goto pci_map_failed;
2534 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2535 if (nic->rxd_mode == RXD_MODE_3B) {
2536 /* Two buffer mode */
2539 * Buffer2 will have L3/L4 header plus
2542 rxdp3->Buffer2_ptr = pci_map_single
2543 (nic->pdev, skb->data, dev->mtu + 4,
2544 PCI_DMA_FROMDEVICE);
2546 if( (rxdp3->Buffer2_ptr == 0) ||
2547 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
2548 goto pci_map_failed;
2550 rxdp3->Buffer1_ptr =
2551 pci_map_single(nic->pdev,
2553 PCI_DMA_FROMDEVICE);
/* Buffer1 mapping failed: unmap the already-mapped Buffer2
 * before bailing out, to avoid leaking the DMA mapping. */
2554 if( (rxdp3->Buffer1_ptr == 0) ||
2555 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
2558 (dma_addr_t)rxdp3->Buffer2_ptr,
2560 PCI_DMA_FROMDEVICE);
2561 goto pci_map_failed;
2563 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2564 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2567 rxdp->Control_2 |= BIT(0);
2569 rxdp->Host_Control = (unsigned long) (skb);
/* Ownership of the first descriptor in a batch is withheld until the
 * batch is complete (see rxsync_frequency), then handed over at once. */
2570 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2571 rxdp->Control_1 |= RXD_OWN_XENA;
2573 if (off == (rxd_count[nic->rxd_mode] + 1))
2575 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2577 rxdp->Control_2 |= SET_RXD_MARKER;
2578 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2581 first_rxdp->Control_1 |= RXD_OWN_XENA;
2585 atomic_inc(&nic->rx_bufs_left[ring_no]);
2590 /* Transfer ownership of first descriptor to adapter just before
2591 * exiting. Before that, use memory barrier so that ownership
2592 * and other fields are seen by adapter correctly.
2596 first_rxdp->Control_1 |= RXD_OWN_XENA;
/* pci_map_failed error path: account the failure and free the skb. */
2601 stats->pci_map_fail_cnt++;
2602 stats->mem_freed += skb->truesize;
2603 dev_kfree_skb_irq(skb);
/* free_rxd_blk - unmap and free every skb held by one Rx descriptor block
 * of ring 'ring_no', clearing each descriptor and decrementing the ring's
 * outstanding-buffer count.
 * NOTE(review): interior lines are missing from this extract (declarations
 * of 'j', 'rxdp', 'rxdp1', 'rxdp3', 'ba', the skb-NULL check and the
 * dev_kfree_skb() call); verify against the complete file. */
2607 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2609 struct net_device *dev = sp->dev;
2611 struct sk_buff *skb;
2613 struct mac_info *mac_control;
2618 mac_control = &sp->mac_control;
2619 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2620 rxdp = mac_control->rings[ring_no].
2621 rx_blocks[blk].rxds[j].virt_addr;
/* Host_Control stores the skb pointer stashed by fill_rx_buffers(). */
2622 skb = (struct sk_buff *)
2623 ((unsigned long) rxdp->Host_Control);
2627 if (sp->rxd_mode == RXD_MODE_1) {
2628 rxdp1 = (struct RxD1*)rxdp;
2629 pci_unmap_single(sp->pdev, (dma_addr_t)
2632 HEADER_ETHERNET_II_802_3_SIZE
2633 + HEADER_802_2_SIZE +
2635 PCI_DMA_FROMDEVICE);
2636 memset(rxdp, 0, sizeof(struct RxD1));
2637 } else if(sp->rxd_mode == RXD_MODE_3B) {
/* 3-buffer mode: all three DMA mappings must be released. */
2638 rxdp3 = (struct RxD3*)rxdp;
2639 ba = &mac_control->rings[ring_no].
2641 pci_unmap_single(sp->pdev, (dma_addr_t)
2644 PCI_DMA_FROMDEVICE);
2645 pci_unmap_single(sp->pdev, (dma_addr_t)
2648 PCI_DMA_FROMDEVICE);
2649 pci_unmap_single(sp->pdev, (dma_addr_t)
2652 PCI_DMA_FROMDEVICE);
2653 memset(rxdp, 0, sizeof(struct RxD3));
2655 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2657 atomic_dec(&sp->rx_bufs_left[ring_no]);
2662 * free_rx_buffers - Frees all Rx buffers
2663 * @sp: device private variable.
2665 * This function will free all Rx buffers allocated by host.
2670 static void free_rx_buffers(struct s2io_nic *sp)
/* NOTE(review): function braces and some interior lines are not visible
 * in this extract; verify against the complete file. */
2672 struct net_device *dev = sp->dev;
2673 int i, blk = 0, buf_cnt = 0;
2674 struct mac_info *mac_control;
2675 struct config_param *config;
2677 mac_control = &sp->mac_control;
2678 config = &sp->config;
/* Free every descriptor block of every ring, then reset ring cursors. */
2680 for (i = 0; i < config->rx_ring_num; i++) {
2681 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2682 free_rxd_blk(sp,i,blk);
2684 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2685 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2686 mac_control->rings[i].rx_curr_put_info.offset = 0;
2687 mac_control->rings[i].rx_curr_get_info.offset = 0;
2688 atomic_set(&sp->rx_bufs_left[i], 0);
2689 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2690 dev->name, buf_cnt, i);
2695 * s2io_poll - Rx interrupt handler for NAPI support
2696 * @napi : pointer to the napi structure.
2697 * @budget : The number of packets that were budgeted to be processed
2698 * during one pass through the 'Poll' function.
2700 * Comes into picture only if NAPI support has been incorporated. It does
2701 * the same thing that rx_intr_handler does, but not in an interrupt context
2702 * also It will process only a given number of packets.
2704 * 0 on success and 1 if there are No Rx packets to be processed.
2707 static int s2io_poll(struct napi_struct *napi, int budget)
/* NOTE(review): interior lines (declaration of 'i', braces, the return
 * statements and the budget-exhausted exit path) are missing from this
 * extract; verify against the complete file. */
2709 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2710 struct net_device *dev = nic->dev;
2711 int pkt_cnt = 0, org_pkts_to_process;
2712 struct mac_info *mac_control;
2713 struct config_param *config;
2714 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2717 if (!is_s2io_card_up(nic))
2720 mac_control = &nic->mac_control;
2721 config = &nic->config;
2723 nic->pkts_to_process = budget;
2724 org_pkts_to_process = nic->pkts_to_process;
/* Acknowledge pending Rx traffic interrupts before polling the rings. */
2726 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2727 readl(&bar0->rx_traffic_int);
2729 for (i = 0; i < config->rx_ring_num; i++) {
2730 rx_intr_handler(&mac_control->rings[i]);
2731 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2732 if (!nic->pkts_to_process) {
2733 /* Quota for the current iteration has been met */
/* Budget not exhausted: complete NAPI, refill rings, re-enable Rx irqs. */
2738 netif_rx_complete(dev, napi);
2740 for (i = 0; i < config->rx_ring_num; i++) {
2741 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2742 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2743 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2747 /* Re enable the Rx interrupts. */
2748 writeq(0x0, &bar0->rx_traffic_mask);
2749 readl(&bar0->rx_traffic_mask);
2753 for (i = 0; i < config->rx_ring_num; i++) {
2754 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2755 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2756 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2763 #ifdef CONFIG_NET_POLL_CONTROLLER
2765 * s2io_netpoll - netpoll event handler entry point
2766 * @dev : pointer to the device structure.
2768 * This function will be called by upper layer to check for events on the
2769 * interface in situations where interrupts are disabled. It is used for
2770 * specific in-kernel networking tasks, such as remote consoles and kernel
2771 * debugging over the network (example netdump in RedHat).
2773 static void s2io_netpoll(struct net_device *dev)
/* NOTE(review): interior lines (declaration of 'i', braces, early return
 * for an offline PCI channel) are missing from this extract; verify
 * against the complete file. */
2775 struct s2io_nic *nic = dev->priv;
2776 struct mac_info *mac_control;
2777 struct config_param *config;
2778 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2779 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2782 if (pci_channel_offline(nic->pdev))
/* Run with the device IRQ disabled; netpoll may be called from any
 * context. */
2785 disable_irq(dev->irq);
2787 mac_control = &nic->mac_control;
2788 config = &nic->config;
/* Acknowledge all pending Rx and Tx traffic interrupts. */
2790 writeq(val64, &bar0->rx_traffic_int);
2791 writeq(val64, &bar0->tx_traffic_int);
2793 /* we need to free up the transmitted skbufs or else netpoll will
2794 * run out of skbs and will fail and eventually netpoll application such
2795 * as netdump will fail.
2797 for (i = 0; i < config->tx_fifo_num; i++)
2798 tx_intr_handler(&mac_control->fifos[i]);
2800 /* check for received packet and indicate up to network */
2801 for (i = 0; i < config->rx_ring_num; i++)
2802 rx_intr_handler(&mac_control->rings[i]);
2804 for (i = 0; i < config->rx_ring_num; i++) {
2805 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2806 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2807 DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2811 enable_irq(dev->irq);
2817 * rx_intr_handler - Rx interrupt handler
2818 * @nic: device private variable.
2820 * If the interrupt is because of a received frame or if the
2821 * receive ring contains fresh as yet un-processed frames,this function is
2822 * called. It picks out the RxD at which place the last Rx processing had
2823 * stopped and sends the skb to the OSM's Rx handler and then increments
2828 static void rx_intr_handler(struct ring_info *ring_data)
/* NOTE(review): interior lines are missing from this extract (declarations
 * of 'rxdp', 'rxdp1', 'rxdp3', 'i', 'pkt_cnt', 'napi', braces, and the
 * get_info.offset increment inside the loop); verify against the complete
 * file before editing. */
2830 struct s2io_nic *nic = ring_data->nic;
2831 struct net_device *dev = (struct net_device *) nic->dev;
2832 int get_block, put_block, put_offset;
2833 struct rx_curr_get_info get_info, put_info;
2835 struct sk_buff *skb;
/* rx_lock serialises this handler against other Rx-path users. */
2841 spin_lock(&nic->rx_lock);
2843 get_info = ring_data->rx_curr_get_info;
2844 get_block = get_info.block_index;
2845 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2846 put_block = put_info.block_index;
2847 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
/* Snapshot put_pos; one branch takes put_lock, the other reads it
 * directly (presumably depends on interrupt type — confirm in full file). */
2849 spin_lock(&nic->put_lock);
2850 put_offset = ring_data->put_pos;
2851 spin_unlock(&nic->put_lock);
2853 put_offset = ring_data->put_pos;
2855 while (RXD_IS_UP2DT(rxdp)) {
2857 * If you are next to put index then it's
2858 * FIFO full condition
2860 if ((get_block == put_block) &&
2861 (get_info.offset + 1) == put_info.offset) {
2862 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2865 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2867 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2869 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2870 spin_unlock(&nic->rx_lock);
2873 if (nic->rxd_mode == RXD_MODE_1) {
2874 rxdp1 = (struct RxD1*)rxdp;
2875 pci_unmap_single(nic->pdev, (dma_addr_t)
2878 HEADER_ETHERNET_II_802_3_SIZE +
2881 PCI_DMA_FROMDEVICE);
2882 } else if (nic->rxd_mode == RXD_MODE_3B) {
2883 rxdp3 = (struct RxD3*)rxdp;
/* Buffer0 stays mapped across refills; only sync it for the CPU. */
2884 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2886 BUF0_LEN, PCI_DMA_FROMDEVICE);
2887 pci_unmap_single(nic->pdev, (dma_addr_t)
2890 PCI_DMA_FROMDEVICE);
2892 prefetch(skb->data);
2893 rx_osm_handler(ring_data, rxdp);
2895 ring_data->rx_curr_get_info.offset = get_info.offset;
2896 rxdp = ring_data->rx_blocks[get_block].
2897 rxds[get_info.offset].virt_addr;
/* End of block reached: wrap offset and advance to the next block. */
2898 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2899 get_info.offset = 0;
2900 ring_data->rx_curr_get_info.offset = get_info.offset;
2902 if (get_block == ring_data->block_count)
2904 ring_data->rx_curr_get_info.block_index = get_block;
2905 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
/* Under NAPI, stop once the budget (pkts_to_process) is consumed. */
2908 nic->pkts_to_process -= 1;
2909 if ((napi) && (!nic->pkts_to_process))
2912 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2916 /* Clear all LRO sessions before exiting */
2917 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2918 struct lro *lro = &nic->lro0_n[i];
2920 update_L3L4_header(nic, lro);
2921 queue_rx_frame(lro->parent);
2922 clear_lro_session(lro);
2927 spin_unlock(&nic->rx_lock);
2931 * tx_intr_handler - Transmit interrupt handler
2932 * @nic : device private variable
2934 * If an interrupt was raised to indicate DMA complete of the
2935 * Tx packet, this function is called. It identifies the last TxD
2936 * whose buffer was freed and frees all skbs whose data have already
2937 * DMA'ed into the NICs internal memory.
2942 static void tx_intr_handler(struct fifo_info *fifo_data)
/* NOTE(review): interior lines are missing from this extract (declarations
 * of 'txdlp', 'err_mask', the switch statement over err_mask, braces, and
 * the get_info.offset increment); verify against the complete file. */
2944 struct s2io_nic *nic = fifo_data->nic;
2945 struct net_device *dev = (struct net_device *) nic->dev;
2946 struct tx_curr_get_info get_info, put_info;
2947 struct sk_buff *skb;
2951 get_info = fifo_data->tx_curr_get_info;
2952 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2953 txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
/* Walk completed descriptors: stop at the first one still owned by the
 * adapter, at the put cursor, or with no skb attached. */
2955 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2956 (get_info.offset != put_info.offset) &&
2957 (txdlp->Host_Control)) {
2958 /* Check for TxD errors */
2959 if (txdlp->Control_1 & TXD_T_CODE) {
2960 unsigned long long err;
2961 err = txdlp->Control_1 & TXD_T_CODE;
2963 nic->mac_control.stats_info->sw_stat.
2967 /* update t_code statistics */
2968 err_mask = err >> 48;
2971 nic->mac_control.stats_info->sw_stat.
2976 nic->mac_control.stats_info->sw_stat.
2977 tx_desc_abort_cnt++;
2981 nic->mac_control.stats_info->sw_stat.
2982 tx_parity_err_cnt++;
2986 nic->mac_control.stats_info->sw_stat.
2991 nic->mac_control.stats_info->sw_stat.
2992 tx_list_proc_err_cnt++;
2997 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2999 DBG_PRINT(ERR_DBG, "%s: Null skb ",
3001 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
3005 /* Updating the statistics block */
3006 nic->stats.tx_bytes += skb->len;
3007 nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
3008 dev_kfree_skb_irq(skb);
/* Wrap the get cursor at the end of the FIFO descriptor list. */
3011 if (get_info.offset == get_info.fifo_len + 1)
3012 get_info.offset = 0;
3013 txdlp = (struct TxD *) fifo_data->list_info
3014 [get_info.offset].list_virt_addr;
3015 fifo_data->tx_curr_get_info.offset =
/* Descriptors were reclaimed — wake the queue if it was stopped. */
3019 spin_lock(&nic->tx_lock);
3020 if (netif_queue_stopped(dev))
3021 netif_wake_queue(dev);
3022 spin_unlock(&nic->tx_lock);
3026 * s2io_mdio_write - Function to write in to MDIO registers
3027 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3028 * @addr : address value
3029 * @value : data value
3030 * @dev : pointer to net_device structure
3032 * This function is used to write values to the MDIO registers
3035 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
/* NOTE(review): interior lines (the declaration/reset of 'val64' before
 * each transaction and any udelay between them) are missing from this
 * extract; verify against the complete file. */
3038 struct s2io_nic *sp = dev->priv;
3039 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3041 //address transaction
3042 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3043 | MDIO_MMD_DEV_ADDR(mmd_type)
3044 | MDIO_MMS_PRT_ADDR(0x0);
3045 writeq(val64, &bar0->mdio_control);
3046 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3047 writeq(val64, &bar0->mdio_control);
/* Data transaction: write 'value' to the addressed MMD register. */
3052 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3053 | MDIO_MMD_DEV_ADDR(mmd_type)
3054 | MDIO_MMS_PRT_ADDR(0x0)
3055 | MDIO_MDIO_DATA(value)
3056 | MDIO_OP(MDIO_OP_WRITE_TRANS);
3057 writeq(val64, &bar0->mdio_control);
3058 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3059 writeq(val64, &bar0->mdio_control);
/* Read-back transaction — presumably to confirm the write completed. */
3063 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3064 | MDIO_MMD_DEV_ADDR(mmd_type)
3065 | MDIO_MMS_PRT_ADDR(0x0)
3066 | MDIO_OP(MDIO_OP_READ_TRANS);
3067 writeq(val64, &bar0->mdio_control);
3068 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3069 writeq(val64, &bar0->mdio_control);
3075 * s2io_mdio_read - Function to read from MDIO registers
3076 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3077 * @addr : address value
3078 * @dev : pointer to net_device structure
3080 * This function is used to read values from the MDIO registers
3083 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
/* NOTE(review): interior lines (declarations of 'val64' and 'rval64',
 * any udelay, and the return statement) are missing from this extract;
 * verify against the complete file. */
3087 struct s2io_nic *sp = dev->priv;
3088 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3090 /* address transaction */
3091 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3092 | MDIO_MMD_DEV_ADDR(mmd_type)
3093 | MDIO_MMS_PRT_ADDR(0x0);
3094 writeq(val64, &bar0->mdio_control);
3095 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3096 writeq(val64, &bar0->mdio_control);
3099 /* Data transaction */
3101 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3102 | MDIO_MMD_DEV_ADDR(mmd_type)
3103 | MDIO_MMS_PRT_ADDR(0x0)
3104 | MDIO_OP(MDIO_OP_READ_TRANS);
3105 writeq(val64, &bar0->mdio_control);
3106 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3107 writeq(val64, &bar0->mdio_control);
3110 /* Read the value from regs */
/* The 16-bit MDIO data lives in bits 31:16 of mdio_control. */
3111 rval64 = readq(&bar0->mdio_control);
3112 rval64 = rval64 & 0xFFFF0000;
3113 rval64 = rval64 >> 16;
3117 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
3118 * @counter : counter value to be updated
3119 * @flag : flag to indicate the status
3120 * @type : counter type
3122 * This function is to check the status of the xpak counters value
3126 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
/* NOTE(review): interior lines (declarations of 'mask', 'val64', 'i',
 * the flag check, and the switch over 'type') are missing from this
 * extract; verify against the complete file. */
3131 for(i = 0; i <index; i++)
3136 *counter = *counter + 1;
/* Extract the 2-bit per-index status field from *regs_stat. */
3137 val64 = *regs_stat & mask;
3138 val64 = val64 >> (index * 0x2);
3145 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3146 "service. Excessive temperatures may "
3147 "result in premature transceiver "
3151 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3152 "service Excessive bias currents may "
3153 "indicate imminent laser diode "
3157 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3158 "service Excessive laser output "
3159 "power may saturate far-end "
3163 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
/* Write the updated 2-bit status field back into *regs_stat. */
3168 val64 = val64 << (index * 0x2);
3169 *regs_stat = (*regs_stat & (~mask)) | (val64);
3172 *regs_stat = *regs_stat & (~mask);
3177 * s2io_updt_xpak_counter - Function to update the xpak counters
3178 * @dev : pointer to net_device struct
3180 * This function is to update the status of the xpak counters value
3183 static void s2io_updt_xpak_counter(struct net_device *dev)
/* NOTE(review): interior lines (declarations of 'val16', 'val64', 'addr',
 * 'flag', the early returns, and the address constants assigned to 'addr'
 * before each read) are missing from this extract; verify against the
 * complete file. */
3191 struct s2io_nic *sp = dev->priv;
3192 struct stat_block *stat_info = sp->mac_control.stats_info;
3194 /* Check the communication with the MDIO slave */
3197 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
/* All-ones or all-zeroes read-back indicates the slave did not respond. */
3198 if((val64 == 0xFFFF) || (val64 == 0x0000))
3200 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3201 "Returned %llx\n", (unsigned long long)val64);
3205 /* Check for the expected value of 2040 at PMA address 0x0000 */
3208 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3209 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3210 (unsigned long long)val64);
3214 /* Loading the DOM register to MDIO register */
3216 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3217 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3219 /* Reading the Alarm flags */
3222 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
/* Each CHECKBIT maps one alarm/warning bit to its statistics counter. */
3224 flag = CHECKBIT(val64, 0x7);
3226 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3227 &stat_info->xpak_stat.xpak_regs_stat,
3230 if(CHECKBIT(val64, 0x6))
3231 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3233 flag = CHECKBIT(val64, 0x3);
3235 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3236 &stat_info->xpak_stat.xpak_regs_stat,
3239 if(CHECKBIT(val64, 0x2))
3240 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3242 flag = CHECKBIT(val64, 0x1);
3244 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3245 &stat_info->xpak_stat.xpak_regs_stat,
3248 if(CHECKBIT(val64, 0x0))
3249 stat_info->xpak_stat.alarm_laser_output_power_low++;
3251 /* Reading the Warning flags */
3254 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3256 if(CHECKBIT(val64, 0x7))
3257 stat_info->xpak_stat.warn_transceiver_temp_high++;
3259 if(CHECKBIT(val64, 0x6))
3260 stat_info->xpak_stat.warn_transceiver_temp_low++;
3262 if(CHECKBIT(val64, 0x3))
3263 stat_info->xpak_stat.warn_laser_bias_current_high++;
3265 if(CHECKBIT(val64, 0x2))
3266 stat_info->xpak_stat.warn_laser_bias_current_low++;
3268 if(CHECKBIT(val64, 0x1))
3269 stat_info->xpak_stat.warn_laser_output_power_high++;
3271 if(CHECKBIT(val64, 0x0))
3272 stat_info->xpak_stat.warn_laser_output_power_low++;
3276 * wait_for_cmd_complete - waits for a command to complete.
3277 * @sp : private member of the device structure, which is a pointer to the
3278 * s2io_nic structure.
3279 * Description: Function that waits for a command to Write into RMAC
3280 * ADDR DATA registers to be completed and returns either success or
3281 * error depending on whether the command was complete or not.
3283 * SUCCESS on success and FAILURE on failure.
3286 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
/* NOTE(review): this extract is missing the remaining parameter(s)
 * (e.g. 'bit_state'), the polling loop body, delay handling and the
 * return; verify against the complete file. */
3289 int ret = FAILURE, cnt = 0, delay = 1;
/* Reject any bit_state other than the two supported polling targets. */
3292 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3296 val64 = readq(addr);
3297 if (bit_state == S2IO_BIT_RESET) {
3298 if (!(val64 & busy_bit)) {
3303 if (!(val64 & busy_bit)) {
3320 * check_pci_device_id - Checks if the device id is supported
3322 * Description: Function to check if the pci device id is supported by driver.
3323 * Return value: Actual device id if supported else PCI_ANY_ID
3325 static u16 check_pci_device_id(u16 id)
/* NOTE(review): the switch statement header, default case and closing
 * braces are missing from this extract; verify against the complete file. */
3328 case PCI_DEVICE_ID_HERC_WIN:
3329 case PCI_DEVICE_ID_HERC_UNI:
3330 return XFRAME_II_DEVICE;
3331 case PCI_DEVICE_ID_S2IO_UNI:
3332 case PCI_DEVICE_ID_S2IO_WIN:
3333 return XFRAME_I_DEVICE;
3340 * s2io_reset - Resets the card.
3341 * @sp : private member of the device structure.
3342 * Description: Function to Reset the card. This function then also
3343 * restores the previously saved PCI configuration space registers as
3344 * the card reset also resets the configuration space.
3349 static void s2io_reset(struct s2io_nic * sp)
/* NOTE(review): interior lines are missing from this extract (declarations
 * of 'val64', 'subid', 'pci_cmd', 'val16', 'i', the msleep after SW reset,
 * and several braces); verify against the complete file. */
3351 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3356 unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3357 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3359 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3360 __FUNCTION__, sp->dev->name);
3362 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3363 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3365 val64 = SW_RESET_ALL;
3366 writeq(val64, &bar0->sw_reset);
3367 if (strstr(sp->product_name, "CX4")) {
/* Re-read the device id until config space becomes accessible again. */
3371 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3373 /* Restore the PCI state saved during initialization. */
3374 pci_restore_state(sp->pdev);
3375 pci_read_config_word(sp->pdev, 0x2, &val16);
3376 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3381 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3382 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3385 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3389 /* Set swapper to enable I/O register access */
3390 s2io_set_swapper(sp);
3392 /* Restore the MSIX table entries from local variables */
3393 restore_xmsi_data(sp);
3395 /* Clear certain PCI/PCI-X fields after reset */
3396 if (sp->device_type == XFRAME_II_DEVICE) {
3397 /* Clear "detected parity error" bit */
3398 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3400 /* Clearing PCIX Ecc status register */
3401 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3403 /* Clearing PCI_STATUS error reflected here */
3404 writeq(BIT(62), &bar0->txpic_int_reg);
3407 /* Reset device statistics maintained by OS */
3408 memset(&sp->stats, 0, sizeof (struct net_device_stats));
/* Preserve a handful of software counters across the stats-block wipe. */
3410 up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
3411 down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
3412 up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
3413 down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
3414 reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
3415 mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
3416 mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
3417 watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
3418 /* save link up/down time/cnt, reset/memory/watchdog cnt */
3419 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
3420 /* restore link up/down time/cnt, reset/memory/watchdog cnt */
3421 sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
3422 sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
3423 sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
3424 sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
3425 sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
3426 sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
3427 sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
3428 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;
3430 /* SXE-002: Configure link and activity LED to turn it off */
3431 subid = sp->pdev->subsystem_device;
3432 if (((subid & 0xFF) >= 0x07) &&
3433 (sp->device_type == XFRAME_I_DEVICE)) {
3434 val64 = readq(&bar0->gpio_control);
3435 val64 |= 0x0000800000000000ULL;
3436 writeq(val64, &bar0->gpio_control);
3437 val64 = 0x0411040400000000ULL;
3438 writeq(val64, (void __iomem *)bar0 + 0x2700);
3442 * Clear spurious ECC interrupts that would have occurred on
3443 * XFRAME II cards after reset.
3445 if (sp->device_type == XFRAME_II_DEVICE) {
3446 val64 = readq(&bar0->pcc_err_reg);
3447 writeq(val64, &bar0->pcc_err_reg);
3450 /* restore the previously assigned mac address */
3451 s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);
3453 sp->device_enabled_once = FALSE;
3457 * s2io_set_swapper - to set the swapper control on the card
3458 * @sp : private member of the device structure,
3459 * pointer to the s2io_nic structure.
3460 * Description: Function to set the swapper control on the card
3461 * correctly depending on the 'endianness' of the system.
3463 * SUCCESS on success and FAILURE on failure.
3466 static int s2io_set_swapper(struct s2io_nic * sp)
/* NOTE(review): interior lines are missing from this extract (declaration
 * of 'i', loop headers over value[], several braces, the big/little-endian
 * #ifdef split and the return statements); verify against the complete
 * file before editing. */
3468 struct net_device *dev = sp->dev;
3469 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3470 u64 val64, valt, valr;
3473 * Set proper endian settings and verify the same by reading
3474 * the PIF Feed-back register.
3477 val64 = readq(&bar0->pif_rd_swapper_fb);
3478 if (val64 != 0x0123456789ABCDEFULL) {
/* Try each swapper-control candidate until the feedback register reads
 * back the expected magic pattern. */
3480 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3481 0x8100008181000081ULL, /* FE=1, SE=0 */
3482 0x4200004242000042ULL, /* FE=0, SE=1 */
3483 0}; /* FE=0, SE=0 */
3486 writeq(value[i], &bar0->swapper_ctrl);
3487 val64 = readq(&bar0->pif_rd_swapper_fb);
3488 if (val64 == 0x0123456789ABCDEFULL)
3493 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3495 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3496 (unsigned long long) val64);
3501 valr = readq(&bar0->swapper_ctrl);
/* Verify write-path swapping via the xmsi_address scratch register. */
3504 valt = 0x0123456789ABCDEFULL;
3505 writeq(valt, &bar0->xmsi_address);
3506 val64 = readq(&bar0->xmsi_address);
3510 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3511 0x0081810000818100ULL, /* FE=1, SE=0 */
3512 0x0042420000424200ULL, /* FE=0, SE=1 */
3513 0}; /* FE=0, SE=0 */
3516 writeq((value[i] | valr), &bar0->swapper_ctrl);
3517 writeq(valt, &bar0->xmsi_address);
3518 val64 = readq(&bar0->xmsi_address);
3524 unsigned long long x = val64;
3525 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3526 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3530 val64 = readq(&bar0->swapper_ctrl);
3531 val64 &= 0xFFFF000000000000ULL;
3535 * The device by default set to a big endian format, so a
3536 * big endian driver need not set anything.
3538 val64 |= (SWAPPER_CTRL_TXP_FE |
3539 SWAPPER_CTRL_TXP_SE |
3540 SWAPPER_CTRL_TXD_R_FE |
3541 SWAPPER_CTRL_TXD_W_FE |
3542 SWAPPER_CTRL_TXF_R_FE |
3543 SWAPPER_CTRL_RXD_R_FE |
3544 SWAPPER_CTRL_RXD_W_FE |
3545 SWAPPER_CTRL_RXF_W_FE |
3546 SWAPPER_CTRL_XMSI_FE |
3547 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3548 if (sp->config.intr_type == INTA)
3549 val64 |= SWAPPER_CTRL_XMSI_SE;
3550 writeq(val64, &bar0->swapper_ctrl);
3553 * Initially we enable all bits to make it accessible by the
3554 * driver, then we selectively enable only those bits that
3557 val64 |= (SWAPPER_CTRL_TXP_FE |
3558 SWAPPER_CTRL_TXP_SE |
3559 SWAPPER_CTRL_TXD_R_FE |
3560 SWAPPER_CTRL_TXD_R_SE |
3561 SWAPPER_CTRL_TXD_W_FE |
3562 SWAPPER_CTRL_TXD_W_SE |
3563 SWAPPER_CTRL_TXF_R_FE |
3564 SWAPPER_CTRL_RXD_R_FE |
3565 SWAPPER_CTRL_RXD_R_SE |
3566 SWAPPER_CTRL_RXD_W_FE |
3567 SWAPPER_CTRL_RXD_W_SE |
3568 SWAPPER_CTRL_RXF_W_FE |
3569 SWAPPER_CTRL_XMSI_FE |
3570 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3571 if (sp->config.intr_type == INTA)
3572 val64 |= SWAPPER_CTRL_XMSI_SE;
3573 writeq(val64, &bar0->swapper_ctrl);
3575 val64 = readq(&bar0->swapper_ctrl);
3578 * Verifying if endian settings are accurate by reading a
3579 * feedback register.
3581 val64 = readq(&bar0->pif_rd_swapper_fb);
3582 if (val64 != 0x0123456789ABCDEFULL) {
3583 /* Endian settings are incorrect, calls for another dekko. */
3584 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3586 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3587 (unsigned long long) val64);
/* wait_for_msix_trans - poll bit 15 of the xmsi_access register until the
 * MSI-X table transaction for entry 'i' completes; returns 0 on success
 * or 1 on timeout.
 * NOTE(review): the polling loop header, delay, counter limit and return
 * statement are missing from this extract; verify against the complete
 * file. */
3594 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3596 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3598 int ret = 0, cnt = 0;
3601 val64 = readq(&bar0->xmsi_access);
3602 if (!(val64 & BIT(15)))
3608 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
/* restore_xmsi_data - write the MSI-X address/data pairs saved in
 * nic->msix_info[] back into the adapter's MSI-X table (used after a
 * card reset, see s2io_reset()).
 * NOTE(review): the declaration of 'val64' and 'i' and the closing braces
 * are missing from this extract; verify against the complete file. */
3615 static void restore_xmsi_data(struct s2io_nic *nic)
3617 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3621 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3622 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3623 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
/* BIT(7) selects a write transaction; BIT(15) starts it (polled by
 * wait_for_msix_trans). */
3624 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3625 writeq(val64, &bar0->xmsi_access);
3626 if (wait_for_msix_trans(nic, i)) {
3627 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
/*
 * store_xmsi_data - read back each MSI-X vector's address/data pair from
 * the NIC's XMSI registers and cache them in nic->msix_info[], so that
 * restore_xmsi_data() can re-program them later (e.g. after reset).
 */
3633 static void store_xmsi_data(struct s2io_nic *nic)
3635 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3636 	u64 val64, addr, data;
3639 	/* Store and display */
3640 	for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
/* Read command: strobe BIT(15) plus the vector number in bits 26..31. */
3641 		val64 = (BIT(15) | vBIT(i, 26, 6));
3642 		writeq(val64, &bar0->xmsi_access);
3643 		if (wait_for_msix_trans(nic, i)) {
3644 			DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3647 		addr = readq(&bar0->xmsi_address);
3648 		data = readq(&bar0->xmsi_data);
3650 			nic->msix_info[i].addr = addr;
3651 			nic->msix_info[i].data = data;
/*
 * s2io_enable_msi_x - allocate the msix_entry/s2io_msix_entry tables,
 * program the TX/RX interrupt-steering matrices (tx_mat/rx_mat) so each
 * FIFO and ring maps to its own MSI-X vector, then enable MSI-X on the
 * PCI device. On allocation or enable failure the tables are freed and
 * the driver falls back (caller switches to INTA).
 * Vector 0 is reserved (msix_indx starts at 1); vector 1 is later used
 * for the MSI-X self-test in s2io_test_msi().
 * NOTE(review): intermediate lines are elided in this listing (e.g. the
 * GFP flags to kcalloc, some returns/braces).
 */
3656 static int s2io_enable_msi_x(struct s2io_nic *nic)
3658 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3660 	u16 msi_control; /* Temp variable */
3661 	int ret, i, j, msix_indx = 1;
/* Table handed to pci_enable_msix(); sw_stat tracks the allocation. */
3663 	nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry),
3665 	if (!nic->entries) {
3666 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3668 		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3671 	nic->mac_control.stats_info->sw_stat.mem_allocated
3672 		+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
/* Driver-private per-vector bookkeeping (arg/type/in_use). */
3675 		kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry),
3677 	if (!nic->s2io_entries) {
3678 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3680 		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3681 		kfree(nic->entries);
3682 		nic->mac_control.stats_info->sw_stat.mem_freed
3683 			+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3686 	nic->mac_control.stats_info->sw_stat.mem_allocated
3687 		+= (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3689 	for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3690 		nic->entries[i].entry = i;
3691 		nic->s2io_entries[i].entry = i;
3692 		nic->s2io_entries[i].arg = NULL;
3693 		nic->s2io_entries[i].in_use = 0;
/* Steer each TX FIFO's completion interrupt to its own vector. */
3696 	tx_mat = readq(&bar0->tx_mat0_n[0]);
3697 	for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3698 		tx_mat |= TX_MAT_SET(i, msix_indx);
3699 		nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3700 		nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3701 		nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3703 	writeq(tx_mat, &bar0->tx_mat0_n[0]);
/* Non-bimodal: steer RX rings via rx_mat; bimodal: via tx_mat0_n[7]. */
3705 	if (!nic->config.bimodal) {
3706 		rx_mat = readq(&bar0->rx_mat);
3707 		for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3708 			rx_mat |= RX_MAT_SET(j, msix_indx);
3709 			nic->s2io_entries[msix_indx].arg
3710 				= &nic->mac_control.rings[j];
3711 			nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3712 			nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3714 		writeq(rx_mat, &bar0->rx_mat);
3716 		tx_mat = readq(&bar0->tx_mat0_n[7]);
3717 		for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
/* NOTE(review): loop iterates over j but TX_MAT_SET uses i (left over
 * from the FIFO loop above) — looks like a latent bug; confirm against
 * the intended bimodal steering before changing. */
3718 			tx_mat |= TX_MAT_SET(i, msix_indx);
3719 			nic->s2io_entries[msix_indx].arg
3720 				= &nic->mac_control.rings[j];
3721 			nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3722 			nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3724 		writeq(tx_mat, &bar0->tx_mat0_n[7]);
3727 	nic->avail_msix_vectors = 0;
3728 	ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3729 	/* We fail init if error or we get less vectors than min required */
/* pci_enable_msix() > 0 means "only this many vectors available":
 * retry with exactly that count if it still covers fifos+rings+1. */
3730 	if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3731 		nic->avail_msix_vectors = ret;
3732 		ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3735 		DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3736 		kfree(nic->entries);
3737 		nic->mac_control.stats_info->sw_stat.mem_freed
3738 			+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3739 		kfree(nic->s2io_entries);
3740 		nic->mac_control.stats_info->sw_stat.mem_freed
3741 			+= (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3742 		nic->entries = NULL;
3743 		nic->s2io_entries = NULL;
3744 		nic->avail_msix_vectors = 0;
3747 	if (!nic->avail_msix_vectors)
3748 		nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3751 	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3752 	 * in the herc NIC. (Temp change, needs to be removed later)
/* 0x42 is the MSI control word in this device's PCI config space. */
3754 	pci_read_config_word(nic->pdev, 0x42, &msi_control);
3755 	msi_control |= 0x1; /* Enable MSI */
3756 	pci_write_config_word(nic->pdev, 0x42, msi_control);
3761 /* Handle software interrupt used during MSI(X) test */
/*
 * s2io_test_intr - IRQ handler installed only for the MSI-X self-test:
 * records that the test interrupt fired and wakes the waiter in
 * s2io_test_msi(). Return statement elided in this listing.
 */
3762 static irqreturn_t __devinit s2io_test_intr(int irq, void *dev_id)
3764 	struct s2io_nic *sp = dev_id;
3766 	sp->msi_detected = 1;
3767 	wake_up(&sp->msi_wait);
3772 /* Test interrupt path by forcing a software IRQ */
/*
 * s2io_test_msi - verify the MSI-X delivery path actually works:
 * hook s2io_test_intr on vector 1, force a one-shot scheduled interrupt
 * routed to MSI 1, and wait up to HZ/10 for the handler to set
 * sp->msi_detected. On timeout the caller rolls back to INTx.
 * The original scheduled_int_ctrl value is restored on exit.
 */
3773 static int __devinit s2io_test_msi(struct s2io_nic *sp)
3775 	struct pci_dev *pdev = sp->pdev;
3776 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3780 	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3783 		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3784 		       sp->dev->name, pci_name(pdev), pdev->irq);
3788 	init_waitqueue_head (&sp->msi_wait);
3789 	sp->msi_detected = 0;
/* Save current ctrl value so it can be restored after the test. */
3791 	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3792 	val64 |= SCHED_INT_CTRL_ONE_SHOT;
3793 	val64 |= SCHED_INT_CTRL_TIMER_EN;
3794 	val64 |= SCHED_INT_CTRL_INT2MSI(1);
3795 	writeq(val64, &bar0->scheduled_int_ctrl);
3797 	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3799 	if (!sp->msi_detected) {
3800 		/* MSI(X) test failed, go back to INTx mode */
/* NOTE(review): the two adjacent string literals concatenate with no
 * space between "generated" and "using" in the logged message. */
3801 		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated"
3802 			  "using MSI(X) during test\n", sp->dev->name,
3808 	free_irq(sp->entries[1].vector, sp);
3810 	writeq(saved64, &bar0->scheduled_int_ctrl);
3814 /* ********************************************************* *
3815 * Functions defined below concern the OS part of the driver *
3816 * ********************************************************* */
3819 * s2io_open - open entry point of the driver
3820 * @dev : pointer to the device structure.
3822 * This function is the open entry point of the driver. It mainly calls a
3823 * function to allocate Rx buffers and inserts them into the buffer
3824 * descriptors and then enables the Rx part of the NIC.
3826 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * s2io_open - net_device open entry point. Brings the carrier down,
 * enables NAPI, sets up MSI-X (with a self-test and full rollback to
 * INTA if it fails), initializes the hardware via s2io_card_up(), sets
 * the MAC address and starts the TX queue. The tail of the function is
 * the hw_init_failed unwind path.
 * NOTE(review): several lines (error checks, labels, returns) are
 * elided in this listing.
 */
3830 static int s2io_open(struct net_device *dev)
3832 	struct s2io_nic *sp = dev->priv;
3836 	 * Make sure you have link off by default every time
3837 	 * Nic is initialized
3839 	netif_carrier_off(dev);
3840 	sp->last_link_state = 0;
3842 	napi_enable(&sp->napi);
3844 	if (sp->config.intr_type == MSI_X) {
3845 		int ret = s2io_enable_msi_x(sp);
/* Verify the MSI-X path actually delivers interrupts before trusting it. */
3850 			ret = s2io_test_msi(sp);
3852 			/* rollback MSI-X, will re-enable during add_isr() */
3854 				sp->mac_control.stats_info->sw_stat.mem_freed +=
3855 					(MAX_REQUESTED_MSI_X *
3856 					sizeof(struct msix_entry));
3857 				kfree(sp->s2io_entries);
3858 				sp->mac_control.stats_info->sw_stat.mem_freed +=
3859 					(MAX_REQUESTED_MSI_X *
3860 					sizeof(struct s2io_msix_entry));
3862 				sp->s2io_entries = NULL;
/* Undo the herc MSI workaround applied in s2io_enable_msi_x(). */
3864 				pci_read_config_word(sp->pdev, 0x42, &msi_control);
3865 				msi_control &= 0xFFFE; /* Disable MSI */
3866 				pci_write_config_word(sp->pdev, 0x42, msi_control);
3868 				pci_disable_msix(sp->pdev);
3874 				"%s: MSI-X requested but failed to enable\n",
3876 			sp->config.intr_type = INTA;
3880 	/* NAPI doesn't work well with MSI(X) */
3881 	if (sp->config.intr_type != INTA) {
3883 			sp->config.napi = 0;
3886 	/* Initialize H/W and enable interrupts */
3887 	err = s2io_card_up(sp);
3889 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3891 		goto hw_init_failed;
3894 	if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3895 		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3898 		goto hw_init_failed;
3901 	netif_start_queue(dev);
/* hw_init_failed unwind: disable NAPI and free MSI-X tables if present. */
3905 	napi_disable(&sp->napi);
3906 	if (sp->config.intr_type == MSI_X) {
3909 			sp->mac_control.stats_info->sw_stat.mem_freed
3910 			+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3912 		if (sp->s2io_entries) {
3913 			kfree(sp->s2io_entries);
3914 			sp->mac_control.stats_info->sw_stat.mem_freed
3915 			+= (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3922 * s2io_close -close entry point of the driver
3923 * @dev : device pointer.
3925 * This is the stop entry point of the driver. It needs to undo exactly
3926 * whatever was done by the open entry point, thus it's usually referred to
3927 * as the close function. Among other things this function mainly stops the
3928 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3930 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * s2io_close - net_device stop entry point: stops the TX queue, disables
 * NAPI, then (in lines elided from this listing) resets the card and
 * frees buffers.
 */
3934 static int s2io_close(struct net_device *dev)
3936 	struct s2io_nic *sp = dev->priv;
3938 	netif_stop_queue(dev);
3939 	napi_disable(&sp->napi);
3940 	/* Reset card, kill tasklet and free Tx and Rx buffers. */
3947 * s2io_xmit - Tx entry point of the driver
3948 * @skb : the socket buffer containing the Tx data.
3949 * @dev : device pointer.
3951 * This function is the Tx entry point of the driver. S2IO NIC supports
3952 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3953 * NOTE: when the device can't queue the pkt, just the trans_start variable will
3956 * 0 on success & 1 on failure.
/*
 * s2io_xmit - hard_start_xmit entry point. Under sp->tx_lock: picks a
 * TX FIFO (by VLAN priority when tagging is active), fills a TxD chain
 * for the linear part and each skb fragment (with LSO/UFO/checksum
 * offload bits as appropriate), DMA-maps the buffers, writes the TxDL
 * pointer and List_Control to kick the hardware, and advances the "put"
 * offset. Stops the queue when no free TxDs remain. The tail is the
 * pci_map_failed error path.
 * NOTE(review): many lines (returns, some closing braces, several
 * assignments) are elided in this listing.
 */
3959 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3961 	struct s2io_nic *sp = dev->priv;
3962 	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3965 	struct TxFIFO_element __iomem *tx_fifo;
3966 	unsigned long flags;
3968 	int vlan_priority = 0;
3969 	struct mac_info *mac_control;
3970 	struct config_param *config;
3972 	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
3974 	mac_control = &sp->mac_control;
3975 	config = &sp->config;
3977 	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
3979 	if (unlikely(skb->len <= 0)) {
3980 		DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3981 		dev_kfree_skb_any(skb);
3985 	spin_lock_irqsave(&sp->tx_lock, flags);
3986 	if (!is_s2io_card_up(sp)) {
3987 		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3989 		spin_unlock_irqrestore(&sp->tx_lock, flags);
3995 	/* Get Fifo number to Transmit based on vlan priority */
3996 	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
/* 802.1p priority lives in the top 3 bits of the VLAN tag. */
3997 		vlan_tag = vlan_tx_tag_get(skb);
3998 		vlan_priority = vlan_tag >> 13;
3999 		queue = config->fifo_mapping[vlan_priority];
4002 	put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
4003 	get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
4004 	txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
4007 	queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
4008 	/* Avoid "put" pointer going beyond "get" pointer */
/* Host_Control non-zero means the descriptor is still owned by a
 * previous skb, i.e. the ring is full. */
4009 	if (txdp->Host_Control ||
4010 		   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4011 		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4012 		netif_stop_queue(dev);
4014 		spin_unlock_irqrestore(&sp->tx_lock, flags);
4018 	offload_type = s2io_offload_type(skb);
4019 	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4020 		txdp->Control_1 |= TXD_TCP_LSO_EN;
4021 		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4023 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
4025 		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
4028 	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4029 	txdp->Control_1 |= TXD_LIST_OWN_XENA;
4030 	txdp->Control_2 |= config->tx_intr_type;
4032 	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
4033 		txdp->Control_2 |= TXD_VLAN_ENABLE;
4034 		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
/* Linear (non-paged) part of the skb. */
4037 	frg_len = skb->len - skb->data_len;
4038 	if (offload_type == SKB_GSO_UDP) {
4041 		ufo_size = s2io_udp_mss(skb);
4043 		txdp->Control_1 |= TXD_UFO_EN;
4044 		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4045 		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
/* UFO: TxD0 carries an 8-byte in-band header with the ip6 frag id;
 * placement differs by endianness (lines around it are elided here). */
4047 			sp->ufo_in_band_v[put_off] =
4048 				(u64)skb_shinfo(skb)->ip6_frag_id;
4050 			sp->ufo_in_band_v[put_off] =
4051 				(u64)skb_shinfo(skb)->ip6_frag_id << 32;
4053 		txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
4054 		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4056 					sizeof(u64), PCI_DMA_TODEVICE);
4057 		if((txdp->Buffer_Pointer == 0) ||
4058 			(txdp->Buffer_Pointer == DMA_ERROR_CODE))
4059 			goto pci_map_failed;
4063 	txdp->Buffer_Pointer = pci_map_single
4064 	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
4065 	if((txdp->Buffer_Pointer == 0) ||
4066 		(txdp->Buffer_Pointer == DMA_ERROR_CODE))
4067 		goto pci_map_failed;
4069 	txdp->Host_Control = (unsigned long) skb;
4070 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4071 	if (offload_type == SKB_GSO_UDP)
4072 		txdp->Control_1 |= TXD_UFO_EN;
4074 	frg_cnt = skb_shinfo(skb)->nr_frags;
4075 	/* For fragmented SKB. */
4076 	for (i = 0; i < frg_cnt; i++) {
4077 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4078 		/* A '0' length fragment will be ignored */
4082 		txdp->Buffer_Pointer = (u64) pci_map_page
4083 		    (sp->pdev, frag->page, frag->page_offset,
4084 		     frag->size, PCI_DMA_TODEVICE);
4085 		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4086 		if (offload_type == SKB_GSO_UDP)
4087 			txdp->Control_1 |= TXD_UFO_EN;
4089 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4091 	if (offload_type == SKB_GSO_UDP)
4092 		frg_cnt++; /* as Txd0 was used for inband header */
/* Kick the hardware: point it at the TxD list, then write List_Control. */
4094 	tx_fifo = mac_control->tx_FIFO_start[queue];
4095 	val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
4096 	writeq(val64, &tx_fifo->TxDL_Pointer);
4098 	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4101 		val64 |= TX_FIFO_SPECIAL_FUNC;
4103 	writeq(val64, &tx_fifo->List_Control);
/* Advance and wrap the "put" offset. */
4108 	if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
4110 	mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
4112 	/* Avoid "put" pointer going beyond "get" pointer */
4113 	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4114 		sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
4116 			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4118 		netif_stop_queue(dev);
4120 	mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
4121 	dev->trans_start = jiffies;
4122 	spin_unlock_irqrestore(&sp->tx_lock, flags);
/* pci_map_failed error path. */
4126 	stats->pci_map_fail_cnt++;
4127 	netif_stop_queue(dev);
4128 	stats->mem_freed += skb->truesize;
4130 	spin_unlock_irqrestore(&sp->tx_lock, flags);
/*
 * s2io_alarm_handle - periodic timer callback (return type on a line
 * elided from this listing): runs the error/alarm handler and re-arms
 * itself to fire again in half a second.
 */
4135 s2io_alarm_handle(unsigned long data)
4137 	struct s2io_nic *sp = (struct s2io_nic *)data;
4138 	struct net_device *dev = sp->dev;
4140 	s2io_handle_errors(dev);
4141 	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
/*
 * s2io_chk_rx_buffers - check ring @rng_n's free-buffer level and refill.
 * At PANIC level (and when the refill tasklet isn't already running)
 * refill synchronously via fill_rx_buffers(); at LOW level defer the
 * refill to the tasklet; otherwise attempt an opportunistic refill.
 * ENOMEM from fill_rx_buffers() is only logged.
 */
4144 static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
4146 	int rxb_size, level;
4149 		rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4150 		level = rx_buffer_level(sp, rxb_size, rng_n);
4152 		if ((level == PANIC) && (!TASKLET_IN_USE)) {
4154 			DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4155 			DBG_PRINT(INTR_DBG, "PANIC levels\n");
4156 			if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4157 				DBG_PRINT(INFO_DBG, "Out of memory in %s",
4159 				clear_bit(0, (&sp->tasklet_status));
/* Release the tasklet-in-use bit after a successful synchronous refill. */
4162 			clear_bit(0, (&sp->tasklet_status));
4163 		} else if (level == LOW)
4164 			tasklet_schedule(&sp->task);
4166 	} else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4167 			DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
4168 			DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
/*
 * s2io_msix_ring_handle - per-RX-ring MSI-X interrupt handler: processes
 * received packets for this ring and tops up its buffers. Bails out
 * (return lines elided here) if the card is going down.
 */
4173 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4175 	struct ring_info *ring = (struct ring_info *)dev_id;
4176 	struct s2io_nic *sp = ring->nic;
4178 	if (!is_s2io_card_up(sp))
4181 	rx_intr_handler(ring);
4182 	s2io_chk_rx_buffers(sp, ring->ring_no);
/*
 * s2io_msix_fifo_handle - per-TX-FIFO MSI-X interrupt handler: reaps
 * completed transmit descriptors for this FIFO. Bails out (return lines
 * elided here) if the card is going down.
 */
4187 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4189 	struct fifo_info *fifo = (struct fifo_info *)dev_id;
4190 	struct s2io_nic *sp = fifo->nic;
4192 	if (!is_s2io_card_up(sp))
4195 	tx_intr_handler(fifo);
/*
 * s2io_txpic_intr_handle - handle TXPIC/GPIO interrupts, primarily link
 * state changes: if both up and down are flagged the state is unstable
 * and both are cleared/unmasked for re-evaluation; on link-up the
 * adapter and LED are enabled and the link-down interrupt is unmasked;
 * on link-down the reverse. Mask bits are 0 = unmasked, 1 = masked.
 */
4198 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4200 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4203 	val64 = readq(&bar0->pic_int_status);
4204 	if (val64 & PIC_INT_GPIO) {
4205 		val64 = readq(&bar0->gpio_int_reg);
4206 		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4207 		    (val64 & GPIO_INT_REG_LINK_UP)) {
4209 			 * This is unstable state so clear both up/down
4210 			 * interrupt and adapter to re-evaluate the link state.
/* Write-1-to-clear both link interrupt bits. */
4212 			val64 |= GPIO_INT_REG_LINK_DOWN;
4213 			val64 |= GPIO_INT_REG_LINK_UP;
4214 			writeq(val64, &bar0->gpio_int_reg);
4215 			val64 = readq(&bar0->gpio_int_mask);
4216 			val64 &= ~(GPIO_INT_MASK_LINK_UP |
4217 				   GPIO_INT_MASK_LINK_DOWN);
4218 			writeq(val64, &bar0->gpio_int_mask);
4220 		else if (val64 & GPIO_INT_REG_LINK_UP) {
4221 			val64 = readq(&bar0->adapter_status);
4222 				/* Enable Adapter */
4223 			val64 = readq(&bar0->adapter_control);
4224 			val64 |= ADAPTER_CNTL_EN;
4225 			writeq(val64, &bar0->adapter_control);
4226 				val64 |= ADAPTER_LED_ON;
4227 				writeq(val64, &bar0->adapter_control);
4228 			if (!sp->device_enabled_once)
4229 				sp->device_enabled_once = 1;
4231 			s2io_link(sp, LINK_UP);
4233 			 * unmask link down interrupt and mask link-up
4236 			val64 = readq(&bar0->gpio_int_mask);
4237 			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4238 			val64 |= GPIO_INT_MASK_LINK_UP;
4239 			writeq(val64, &bar0->gpio_int_mask);
4241 		}else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4242 			val64 = readq(&bar0->adapter_status);
4243 			s2io_link(sp, LINK_DOWN);
4244 			/* Link is down so unmask link up interrupt */
4245 			val64 = readq(&bar0->gpio_int_mask);
4246 			val64 &= ~GPIO_INT_MASK_LINK_UP;
4247 			val64 |= GPIO_INT_MASK_LINK_DOWN;
4248 			writeq(val64, &bar0->gpio_int_mask);
/* Turn off the link LED now that the link is down. */
4251 			val64 = readq(&bar0->adapter_control);
4252 			val64 = val64 &(~ADAPTER_LED_ON);
4253 			writeq(val64, &bar0->adapter_control);
4256 	val64 = readq(&bar0->gpio_int_mask);
4260 * do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4261 * @value: alarm bits
4262 * @addr: address value
4263 * @cnt: counter variable
4264 * Description: Check for alarm and increment the counter
4266 * 1 - if alarm bit set
4267 * 0 - if alarm bit is not set
/*
 * do_s2io_chk_alarm_bit - read the alarm register at @addr and, if any
 * of the bits in @value are set, write the value back (these registers
 * clear on write-back) — counter increment and return lines are elided
 * in this listing. Returns 1 if an alarm bit was set, 0 otherwise.
 */
4269 int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4270 				unsigned long long *cnt)
4273 	val64 = readq(addr);
4274 	if ( val64 & value ) {
4275 		writeq(val64, addr);
4284 * s2io_handle_errors - Xframe error indication handler
4285 * @nic: device private variable
4286 * Description: Handle alarms such as loss of link, single or
4287 * double ECC errors, critical and serious errors.
/*
 * s2io_handle_errors - central alarm handler called from the periodic
 * alarm timer: updates XPAK counters hourly, handles link state change
 * interrupts, then walks every error source (serr, parity, TXDMA,
 * TMAC/TXGXS, RXDMA, RMAC/RXGXS, memory controller) via
 * do_s2io_chk_alarm_bit(). A set "fatal" alarm bit falls through to
 * the reset path at the end (stop queue, schedule rst_timer_task);
 * non-fatal single-bit errors only bump their counters.
 * NOTE(review): the goto targets/labels between the checks are elided
 * in this listing.
 */
4291 static void s2io_handle_errors(void * dev_id)
4293 	struct net_device *dev = (struct net_device *) dev_id;
4294 	struct s2io_nic *sp = dev->priv;
4295 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4296 	u64 temp64 = 0,val64=0;
4299 	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4300 	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4302 	if (!is_s2io_card_up(sp))
4305 	if (pci_channel_offline(sp->pdev))
4308 	memset(&sw_stat->ring_full_cnt, 0,
4309 		sizeof(sw_stat->ring_full_cnt));
4311 	/* Handling the XPAK counters update */
/* The alarm timer fires every HZ/2; 72000 ticks ~= one hour. */
4312 	if(stats->xpak_timer_count < 72000) {
4313 		/* waiting for an hour */
4314 		stats->xpak_timer_count++;
4316 		s2io_updt_xpak_counter(dev);
4317 		/* reset the count to zero */
4318 		stats->xpak_timer_count = 0;
4321 	/* Handling link status change error Intr */
4322 	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4323 		val64 = readq(&bar0->mac_rmac_err_reg);
4324 		writeq(val64, &bar0->mac_rmac_err_reg);
4325 		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4326 			schedule_work(&sp->set_link_task);
4329 	/* In case of a serious error, the device will be Reset. */
4330 	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4331 				&sw_stat->serious_err_cnt))
4334 	/* Check for data parity error */
4335 	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4336 				&sw_stat->parity_err_cnt))
4339 	/* Check for ring full counter */
4340 	if (sp->device_type == XFRAME_II_DEVICE) {
4341 		val64 = readq(&bar0->ring_bump_counter1);
/* Each 64-bit bump counter packs four 16-bit per-ring counts. */
4342 		for (i=0; i<4; i++) {
4343 			temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4344 			temp64 >>= 64 - ((i+1)*16);
4345 			sw_stat->ring_full_cnt[i] += temp64;
4348 		val64 = readq(&bar0->ring_bump_counter2);
4349 		for (i=0; i<4; i++) {
4350 			temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4351 			temp64 >>= 64 - ((i+1)*16);
4352 			sw_stat->ring_full_cnt[i+4] += temp64;
4356 	val64 = readq(&bar0->txdma_int_status);
4357 	/*check for pfc_err*/
4358 	if (val64 & TXDMA_PFC_INT) {
4359 		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4360 				PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4361 				PFC_PCIX_ERR, &bar0->pfc_err_reg,
4362 				&sw_stat->pfc_err_cnt))
4364 		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4365 				&sw_stat->pfc_err_cnt);
4368 	/*check for tda_err*/
4369 	if (val64 & TXDMA_TDA_INT) {
4370 		if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4371 				TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4372 				&sw_stat->tda_err_cnt))
4374 		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4375 				&bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4377 	/*check for pcc_err*/
4378 	if (val64 & TXDMA_PCC_INT) {
4379 		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4380 				| PCC_N_SERR | PCC_6_COF_OV_ERR
4381 				| PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4382 				| PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4383 				| PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4384 				&sw_stat->pcc_err_cnt))
4386 		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4387 				&bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4390 	/*check for tti_err*/
4391 	if (val64 & TXDMA_TTI_INT) {
4392 		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4393 				&sw_stat->tti_err_cnt))
4395 		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4396 				&bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4399 	/*check for lso_err*/
4400 	if (val64 & TXDMA_LSO_INT) {
4401 		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4402 				| LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4403 				&bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4405 		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4406 				&bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4409 	/*check for tpa_err*/
4410 	if (val64 & TXDMA_TPA_INT) {
4411 		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4412 			&sw_stat->tpa_err_cnt))
4414 		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4415 			&sw_stat->tpa_err_cnt);
4418 	/*check for sm_err*/
4419 	if (val64 & TXDMA_SM_INT) {
4420 		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4421 			&sw_stat->sm_err_cnt))
/* TX MAC alarms. */
4425 	val64 = readq(&bar0->mac_int_status);
4426 	if (val64 & MAC_INT_STATUS_TMAC_INT) {
4427 		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4428 				&bar0->mac_tmac_err_reg,
4429 				&sw_stat->mac_tmac_err_cnt))
4431 		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4432 				| TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4433 				&bar0->mac_tmac_err_reg,
4434 				&sw_stat->mac_tmac_err_cnt);
4437 	val64 = readq(&bar0->xgxs_int_status);
4438 	if (val64 & XGXS_INT_STATUS_TXGXS) {
4439 		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4440 				&bar0->xgxs_txgxs_err_reg,
4441 				&sw_stat->xgxs_txgxs_err_cnt))
4443 		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4444 				&bar0->xgxs_txgxs_err_reg,
4445 				&sw_stat->xgxs_txgxs_err_cnt);
/* RX DMA alarms. */
4448 	val64 = readq(&bar0->rxdma_int_status);
4449 	if (val64 & RXDMA_INT_RC_INT_M) {
4450 		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4451 				| RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4452 				&bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4454 		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4455 				| RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4456 				&sw_stat->rc_err_cnt);
4457 		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4458 				| PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4459 				&sw_stat->prc_pcix_err_cnt))
4461 		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4462 				| PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4463 				&sw_stat->prc_pcix_err_cnt);
4466 	if (val64 & RXDMA_INT_RPA_INT_M) {
4467 		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4468 				&bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4470 		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4471 				&bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4474 	if (val64 & RXDMA_INT_RDA_INT_M) {
4475 		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4476 				| RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4477 				| RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4478 				&bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4480 		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4481 				| RDA_MISC_ERR | RDA_PCIX_ERR,
4482 				&bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4485 	if (val64 & RXDMA_INT_RTI_INT_M) {
4486 		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4487 				&sw_stat->rti_err_cnt))
4489 		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4490 				&bar0->rti_err_reg, &sw_stat->rti_err_cnt);
/* RX MAC alarms. */
4493 	val64 = readq(&bar0->mac_int_status);
4494 	if (val64 & MAC_INT_STATUS_RMAC_INT) {
4495 		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4496 				&bar0->mac_rmac_err_reg,
4497 				&sw_stat->mac_rmac_err_cnt))
4499 		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4500 				RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4501 				&sw_stat->mac_rmac_err_cnt);
4504 	val64 = readq(&bar0->xgxs_int_status);
4505 	if (val64 & XGXS_INT_STATUS_RXGXS) {
4506 		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4507 				&bar0->xgxs_rxgxs_err_reg,
4508 				&sw_stat->xgxs_rxgxs_err_cnt))
/* Memory controller alarms (ECC). */
4512 	val64 = readq(&bar0->mc_int_status);
4513 	if(val64 & MC_INT_STATUS_MC_INT) {
4514 		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4515 				&sw_stat->mc_err_cnt))
4518 		/* Handling Ecc errors */
4519 		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4520 			writeq(val64, &bar0->mc_err_reg);
4521 			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4522 				sw_stat->double_ecc_errs++;
4523 				if (sp->device_type != XFRAME_II_DEVICE) {
4525 					 * Reset XframeI only if critical error
4528 						(MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4529 						MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4533 				sw_stat->single_ecc_errs++;
/* Fatal-alarm reset path: stop TX and schedule the reset task. */
4539 	netif_stop_queue(dev);
4540 	schedule_work(&sp->rst_timer_task);
4541 	sw_stat->soft_reset_cnt++;
4546 * s2io_isr - ISR handler of the device .
4547 * @irq: the irq of the device.
4548 * @dev_id: a void pointer to the dev structure of the NIC.
4549 * Description: This function is the ISR handler of the device. It
4550 * identifies the reason for the interrupt and calls the relevant
4551 * service routines. As a contingency measure, this ISR allocates the
4552 * recv buffers, if their numbers are below the panic value which is
4553 * presently set to 25% of the original number of rcv buffers allocated.
4555 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4556 * IRQ_NONE: will be returned if interrupt is not from our device
/*
 * s2io_isr - INTA interrupt service routine: reads general_int_status to
 * find the cause, masks further interrupts while handling, dispatches
 * RX traffic (to NAPI or to rx_intr_handler directly), TX traffic and
 * TXPIC/GPIO events, optionally refills RX buffers in-line (non-NAPI),
 * then restores the interrupt mask. Returns IRQ_HANDLED/IRQ_NONE on
 * lines elided from this listing.
 */
4558 static irqreturn_t s2io_isr(int irq, void *dev_id)
4560 	struct net_device *dev = (struct net_device *) dev_id;
4561 	struct s2io_nic *sp = dev->priv;
4562 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4565 	struct mac_info *mac_control;
4566 	struct config_param *config;
4568 	/* Pretend we handled any irq's from a disconnected card */
4569 	if (pci_channel_offline(sp->pdev))
4572 	if (!is_s2io_card_up(sp))
4575 	mac_control = &sp->mac_control;
4576 	config = &sp->config;
4579 	 * Identify the cause for interrupt and call the appropriate
4580 	 * interrupt handler. Causes for the interrupt could be;
4585 	reason = readq(&bar0->general_int_status);
/* All-ones readback usually means the device is gone/unreadable. */
4587 	if (unlikely(reason == S2IO_MINUS_ONE) ) {
4588 		/* Nothing much can be done. Get out */
4592 	if (reason & (GEN_INTR_RXTRAFFIC |
4593 		GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
/* Mask everything while we service this interrupt. */
4595 		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4598 			if (reason & GEN_INTR_RXTRAFFIC) {
4599 				if (likely(netif_rx_schedule_prep(dev,
4601 					__netif_rx_schedule(dev, &sp->napi);
4602 					writeq(S2IO_MINUS_ONE,
4603 					       &bar0->rx_traffic_mask);
4605 					writeq(S2IO_MINUS_ONE,
4606 					       &bar0->rx_traffic_int);
4610 			 * rx_traffic_int reg is an R1 register, writing all 1's
4611 			 * will ensure that the actual interrupt causing bit
4612 			 * get's cleared and hence a read can be avoided.
4614 			if (reason & GEN_INTR_RXTRAFFIC)
4615 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4617 			for (i = 0; i < config->rx_ring_num; i++)
4618 				rx_intr_handler(&mac_control->rings[i]);
4622 		 * tx_traffic_int reg is an R1 register, writing all 1's
4623 		 * will ensure that the actual interrupt causing bit get's
4624 		 * cleared and hence a read can be avoided.
4626 		if (reason & GEN_INTR_TXTRAFFIC)
4627 			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4629 		for (i = 0; i < config->tx_fifo_num; i++)
4630 			tx_intr_handler(&mac_control->fifos[i]);
4632 		if (reason & GEN_INTR_TXPIC)
4633 			s2io_txpic_intr_handle(sp);
4636 		 * Reallocate the buffers from the interrupt handler itself.
4638 		if (!config->napi) {
4639 			for (i = 0; i < config->rx_ring_num; i++)
4640 				s2io_chk_rx_buffers(sp, i);
/* Restore the mask; read back to flush the posted write. */
4642 		writeq(sp->general_int_mask, &bar0->general_int_mask);
4643 		readl(&bar0->general_int_status);
4649 	/* The interrupt was not raised by us */
/*
 * s2io_updt_stats - request an immediate one-shot statistics DMA from
 * the NIC into the host stats block, then poll stat_cfg BIT(0) until
 * the update completes (polling loop/limit lines elided here).
 */
4659 static void s2io_updt_stats(struct s2io_nic *sp)
4661 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4665 	if (is_s2io_card_up(sp)) {
4666 		/* Apprx 30us on a 133 MHz bus */
4667 		val64 = SET_UPDT_CLICKS(10) |
4668 			STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4669 		writeq(val64, &bar0->stat_cfg);
4672 			val64 = readq(&bar0->stat_cfg);
4673 			if (!(val64 & BIT(0)))
4677 				break; /* Updt failed */
4683 * s2io_get_stats - Updates the device statistics structure.
4684 * @dev : pointer to the device structure.
4686 * This function updates the device statistics structure in the s2io_nic
4687 * structure and returns a pointer to the same.
4689 * pointer to the updated net_device_stats structure.
/*
 * s2io_get_stats - net_device get_stats entry point: triggers an
 * immediate hardware stats update via s2io_updt_stats(), copies the
 * relevant MAC counters (little-endian in the stats block) into
 * sp->stats and returns a pointer to it.
 */
4692 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4694 	struct s2io_nic *sp = dev->priv;
4695 	struct mac_info *mac_control;
4696 	struct config_param *config;
4699 	mac_control = &sp->mac_control;
4700 	config = &sp->config;
4702 	/* Configure Stats for immediate updt */
4703 	s2io_updt_stats(sp);
4705 	sp->stats.tx_packets =
4706 		le32_to_cpu(mac_control->stats_info->tmac_frms);
4707 	sp->stats.tx_errors =
4708 		le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4709 	sp->stats.rx_errors =
4710 		le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4711 	sp->stats.multicast =
4712 		le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4713 	sp->stats.rx_length_errors =
4714 		le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4716 	return (&sp->stats);
4720 * s2io_set_multicast - entry point for multicast address enable/disable.
4721 * @dev : pointer to the device structure
4723 * This function is a driver entry point which gets called by the kernel
4724 * whenever multicast addresses must be enabled/disabled. This also gets
4725 * called to set/reset promiscuous mode. Depending on the device flag, we
4726 * determine, if multicast address must be enabled or if promiscuous mode
4727 * is to be disabled etc.
4732 static void s2io_set_multicast(struct net_device *dev)
/*
 * Reprograms the Xframe RMAC address CMD/DATA memory to reflect the
 * net_device flags: all-multi enable/disable, promiscuous on/off, and
 * the per-address multicast filter list.
 * NOTE(review): this chunk is a gappy extraction - the opening brace,
 * some local declarations (e.g. 'add', 'i', 'j', 'prev_cnt') and several
 * closing braces are not visible here.
 */
4735 struct dev_mc_list *mclist;
4736 struct s2io_nic *sp = dev->priv;
4737 struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* multi_mac is the filter pattern written for the "all multicast" slot */
4738 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4740 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
4743 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4744 /* Enable all Multicast addresses */
4745 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4746 &bar0->rmac_addr_data0_mem);
4747 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4748 &bar0->rmac_addr_data1_mem);
4749 val64 = RMAC_ADDR_CMD_MEM_WE |
4750 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4751 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4752 writeq(val64, &bar0->rmac_addr_cmd_mem);
4753 /* Wait till command completes */
4754 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4755 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
/* remember the offset used so the filter can be disabled later */
4759 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4760 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4761 /* Disable all Multicast addresses */
4762 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4763 &bar0->rmac_addr_data0_mem);
4764 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4765 &bar0->rmac_addr_data1_mem);
4766 val64 = RMAC_ADDR_CMD_MEM_WE |
4767 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4768 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4769 writeq(val64, &bar0->rmac_addr_cmd_mem);
4770 /* Wait till command completes */
4771 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4772 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4776 sp->all_multi_pos = 0;
4779 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4780 /* Put the NIC into promiscuous mode */
4781 add = &bar0->mac_cfg;
4782 val64 = readq(&bar0->mac_cfg);
4783 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg is key-protected: the key is rewritten before each 32-bit half */
4785 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4786 writel((u32) val64, add);
4787 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4788 writel((u32) (val64 >> 32), (add + 4));
/* promiscuous mode also turns VLAN-tag stripping off unless forced on */
4790 if (vlan_tag_strip != 1) {
4791 val64 = readq(&bar0->rx_pa_cfg);
4792 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4793 writeq(val64, &bar0->rx_pa_cfg);
4794 vlan_strip_flag = 0;
4797 val64 = readq(&bar0->mac_cfg);
4798 sp->promisc_flg = 1;
4799 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4801 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4802 /* Remove the NIC from promiscuous mode */
4803 add = &bar0->mac_cfg;
4804 val64 = readq(&bar0->mac_cfg);
4805 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4807 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4808 writel((u32) val64, add);
4809 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4810 writel((u32) (val64 >> 32), (add + 4));
4812 if (vlan_tag_strip != 0) {
4813 val64 = readq(&bar0->rx_pa_cfg);
4814 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4815 writeq(val64, &bar0->rx_pa_cfg);
4816 vlan_strip_flag = 1;
4819 val64 = readq(&bar0->mac_cfg);
4820 sp->promisc_flg = 0;
4821 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4825 /* Update individual M_CAST address list */
4826 if ((!sp->m_cast_flg) && dev->mc_count) {
/* the hardware holds a bounded number of filters; refuse overflow */
4828 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4829 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4831 DBG_PRINT(ERR_DBG, "can be added, please enable ");
4832 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4836 prev_cnt = sp->mc_addr_count;
4837 sp->mc_addr_count = dev->mc_count;
4839 /* Clear out the previous list of Mc in the H/W. */
4840 for (i = 0; i < prev_cnt; i++) {
4841 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4842 &bar0->rmac_addr_data0_mem);
4843 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4844 &bar0->rmac_addr_data1_mem);
4845 val64 = RMAC_ADDR_CMD_MEM_WE |
4846 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4847 RMAC_ADDR_CMD_MEM_OFFSET
4848 (MAC_MC_ADDR_START_OFFSET + i);
4849 writeq(val64, &bar0->rmac_addr_cmd_mem);
4851 /* Wait for command completes */
4852 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4853 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4855 DBG_PRINT(ERR_DBG, "%s: Adding ",
4857 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4862 /* Create the new Rx filter list and update the same in H/W. */
4863 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4864 i++, mclist = mclist->next) {
4865 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
/* pack the 6 address bytes into the low 48 bits of mac_addr */
4868 for (j = 0; j < ETH_ALEN; j++) {
4869 mac_addr |= mclist->dmi_addr[j];
4873 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4874 &bar0->rmac_addr_data0_mem);
4875 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4876 &bar0->rmac_addr_data1_mem);
4877 val64 = RMAC_ADDR_CMD_MEM_WE |
4878 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4879 RMAC_ADDR_CMD_MEM_OFFSET
4880 (i + MAC_MC_ADDR_START_OFFSET);
4881 writeq(val64, &bar0->rmac_addr_cmd_mem);
4883 /* Wait for command completes */
4884 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4885 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4887 DBG_PRINT(ERR_DBG, "%s: Adding ",
4889 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4897 * s2io_set_mac_addr - Programs the Xframe mac address
4898 * @dev : pointer to the device structure.
4899 * @addr: a uchar pointer to the new mac address which is to be set.
4900 * Description : This procedure will program the Xframe to receive
4901 * frames with new Mac Address
4902 * Return value: SUCCESS on success and an appropriate (-)ve integer
4903 * as defined in errno.h file on failure.
4906 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
/*
 * Programs 'addr' as the Xframe unicast filter at RMAC offset 0 and
 * mirrors it into sp->def_mac_addr[0]. Returns SUCCESS, or FAILURE when
 * the RMAC command-memory write does not complete.
 * NOTE(review): gaps in this extracted chunk - the opening brace, the
 * 'int i' declaration, the shifts inside the pack loop and the success
 * return are not visible here.
 */
4908 struct s2io_nic *sp = dev->priv;
4909 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4910 register u64 val64, mac_addr = 0;
4912 u64 old_mac_addr = 0;
4915 * Set the new MAC address as the new unicast filter and reflect this
4916 * change on the device address registered with the OS. It will be
/* pack the new and the current address into 48-bit integers */
4919 for (i = 0; i < ETH_ALEN; i++) {
4921 mac_addr |= addr[i];
4923 old_mac_addr |= sp->def_mac_addr[0].mac_addr[i];
4929 /* Update the internal structure with this new mac address */
4930 if(mac_addr != old_mac_addr) {
/* NOTE(review): sizeof(ETH_ALEN) is sizeof(int), not 6 bytes - only 4
 * bytes are cleared. Harmless only because every byte is overwritten
 * just below; memset(..., 0, ETH_ALEN) was presumably intended.
 */
4931 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
4932 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr);
4933 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8);
4934 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16);
4935 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24);
4936 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32);
4937 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40);
4940 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4941 &bar0->rmac_addr_data0_mem);
4944 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4945 RMAC_ADDR_CMD_MEM_OFFSET(0);
4946 writeq(val64, &bar0->rmac_addr_cmd_mem);
4947 /* Wait till command completes */
4948 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4949 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
4950 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4958 * s2io_ethtool_sset - Sets different link parameters.
4959 * @sp : private member of the device structure, which is a pointer to
 * the s2io_nic structure.
4960 * @info: pointer to the structure with parameters given by ethtool to set
4963 * The function sets different link parameters provided by the user onto
4969 static int s2io_ethtool_sset(struct net_device *dev,
4970 struct ethtool_cmd *info)
/*
 * ethtool set_settings: the Xframe link is fixed at 10Gb/s full duplex
 * with autoneg off, so any other request is rejected; an accepted
 * request bounces the interface (close, then reopen) to apply it.
 * NOTE(review): gaps - the reject return, the reopen call and the final
 * return are not visible in this extracted chunk.
 */
4972 struct s2io_nic *sp = dev->priv;
4973 if ((info->autoneg == AUTONEG_ENABLE) ||
4974 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4977 s2io_close(sp->dev);
4985 * s2io_ethtool_gset - Return link specific information.
4986 * @sp : private member of the device structure, pointer to the
4987 * s2io_nic structure.
4988 * @info : pointer to the structure with parameters given by ethtool
4989 * to return link information.
4991 * Returns link specific information like speed, duplex etc.. to ethtool.
4993 * return 0 on success.
4996 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
/*
 * ethtool get_settings: reports the fixed 10GbaseT/FIBRE capabilities
 * and the current link state; speed/duplex are meaningful only while
 * the carrier is up.
 * NOTE(review): gaps - the opening brace, the carrier-down else branch
 * and the return are not visible in this extracted chunk.
 */
4998 struct s2io_nic *sp = dev->priv;
4999 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
/* NOTE(review): SUPPORTED_* bits are used for the advertising mask
 * instead of the ADVERTISED_* equivalents - confirm the values match.
 */
5000 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5001 info->port = PORT_FIBRE;
5002 /* info->transceiver?? TODO */
5004 if (netif_carrier_ok(sp->dev)) {
5005 info->speed = 10000;
5006 info->duplex = DUPLEX_FULL;
5012 info->autoneg = AUTONEG_DISABLE;
5017 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5018 * @sp : private member of the device structure, which is a pointer to the
5019 * s2io_nic structure.
5020 * @info : pointer to the structure with parameters given by ethtool to
5021 * return driver information.
5023 * Returns driver specific information like name, version etc.. to ethtool.
5028 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5029 struct ethtool_drvinfo *info)
/*
 * ethtool get_drvinfo: fills in the driver name/version, an empty
 * firmware version, the PCI bus id, and the register/EEPROM dump sizes.
 */
5031 struct s2io_nic *sp = dev->priv;
/* NOTE(review): strncpy does not NUL-terminate when the source exactly
 * fills the destination - strlcpy would be safer; left as-is here.
 */
5033 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5034 strncpy(info->version, s2io_driver_version, sizeof(info->version));
5035 strncpy(info->fw_version, "", sizeof(info->fw_version));
5036 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5037 info->regdump_len = XENA_REG_SPACE;
5038 info->eedump_len = XENA_EEPROM_SPACE;
5042 * s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
5043 * @sp: private member of the device structure, which is a pointer to the
5044 * s2io_nic structure.
5045 * @regs : pointer to the structure with parameters given by ethtool for
5046 * dumping the registers.
5047 * @reg_space: The input argument into which all the registers are dumped.
5049 * Dumps the entire register space of xFrame NIC into the user given
5055 static void s2io_ethtool_gregs(struct net_device *dev,
5056 struct ethtool_regs *regs, void *space)
5060 u8 *reg_space = (u8 *) space;
5061 struct s2io_nic *sp = dev->priv;
5063 regs->len = XENA_REG_SPACE;
5064 regs->version = sp->pdev->subsystem_device;
5066 for (i = 0; i < regs->len; i += 8) {
5067 reg = readq(sp->bar0 + i);
5068 memcpy((reg_space + i), ®, 8);
5073 * s2io_phy_id - timer function that alternates adapter LED.
5074 * @data : address of the private member of the device structure, which
5075 * is a pointer to the s2io_nic structure, provided as an u32.
5076 * Description: This is actually the timer function that alternates the
5077 * adapter LED bit of the adapter control bit to set/reset every time on
5078 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
5079 * once every second.
5081 static void s2io_phy_id(unsigned long data)
/*
 * Timer callback that toggles the adapter identification LED on each
 * tick and re-arms itself every HZ/2 jiffies (~1 Hz blink).
 * NOTE(review): gaps - the opening brace, val64/subid declarations and
 * the '} else {' between the two toggle paths are not visible here.
 */
5083 struct s2io_nic *sp = (struct s2io_nic *) data;
5084 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5088 subid = sp->pdev->subsystem_device;
/* Xframe II (and subsystem ids >= 0x07) toggle GPIO 0; older boards
 * toggle the LED bit in adapter_control instead.
 */
5089 if ((sp->device_type == XFRAME_II_DEVICE) ||
5090 ((subid & 0xFF) >= 0x07)) {
5091 val64 = readq(&bar0->gpio_control);
5092 val64 ^= GPIO_CTRL_GPIO_0;
5093 writeq(val64, &bar0->gpio_control);
5095 val64 = readq(&bar0->adapter_control);
5096 val64 ^= ADAPTER_LED_ON;
5097 writeq(val64, &bar0->adapter_control);
5100 mod_timer(&sp->id_timer, jiffies + HZ / 2);
5104 * s2io_ethtool_idnic - To physically identify the nic on the system.
5105 * @sp : private member of the device structure, which is a pointer to the
5106 * s2io_nic structure.
5107 * @id : pointer to the structure with identification parameters given by
5109 * Description: Used to physically identify the NIC on the system.
5110 * The Link LED will blink for a time specified by the user for
5112 * NOTE: The Link has to be Up to be able to blink the LED. Hence
5113 * identification is possible only if it's link is up.
5115 * int , returns 0 on success
5118 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
/*
 * ethtool phys_id: blinks the adapter LED for 'data' seconds (or
 * MAX_FLICKER_TIME when unspecified) by (re)arming the s2io_phy_id
 * timer; old Xframe I boards additionally require the adapter to be
 * enabled. Restores gpio_control afterwards on boards with faulty link
 * indicators.
 * NOTE(review): gaps - the opening brace, the subid declaration, the
 * error return after the "cannot blink" message, the data==0 branch and
 * the final return are not visible in this extracted chunk.
 */
5120 u64 val64 = 0, last_gpio_ctrl_val;
5121 struct s2io_nic *sp = dev->priv;
5122 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5125 subid = sp->pdev->subsystem_device;
/* snapshot GPIO state so it can be restored after blinking */
5126 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5127 if ((sp->device_type == XFRAME_I_DEVICE) &&
5128 ((subid & 0xFF) < 0x07)) {
5129 val64 = readq(&bar0->adapter_control);
5130 if (!(val64 & ADAPTER_CNTL_EN)) {
5132 "Adapter Link down, cannot blink LED\n");
/* lazily initialise the blink timer the first time through */
5136 if (sp->id_timer.function == NULL) {
5137 init_timer(&sp->id_timer);
5138 sp->id_timer.function = s2io_phy_id;
5139 sp->id_timer.data = (unsigned long) sp;
5141 mod_timer(&sp->id_timer, jiffies);
5143 msleep_interruptible(data * HZ);
5145 msleep_interruptible(MAX_FLICKER_TIME);
5146 del_timer_sync(&sp->id_timer);
5148 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
5149 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5150 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5156 static void s2io_ethtool_gringparam(struct net_device *dev,
5157 struct ethtool_ringparam *ering)
/*
 * ethtool get_ringparam: reports the per-RxD-mode descriptor maxima and
 * the total number of Tx/Rx descriptors configured across all FIFOs and
 * rings. Mini rings are unsupported (reported as 0); jumbo numbers
 * mirror the normal Rx numbers since there is no separate jumbo ring.
 */
5159 struct s2io_nic *sp = dev->priv;
5160 int i,tx_desc_count=0,rx_desc_count=0;
5162 if (sp->rxd_mode == RXD_MODE_1)
5163 ering->rx_max_pending = MAX_RX_DESC_1;
5164 else if (sp->rxd_mode == RXD_MODE_3B)
5165 ering->rx_max_pending = MAX_RX_DESC_2;
5167 ering->tx_max_pending = MAX_TX_DESC;
/* sum configured Tx descriptors over every FIFO */
5168 for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5169 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5171 DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5172 ering->tx_pending = tx_desc_count;
/* sum configured Rx descriptors over every ring */
5174 for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5175 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5177 ering->rx_pending = rx_desc_count;
5179 ering->rx_mini_max_pending = 0;
5180 ering->rx_mini_pending = 0;
5181 if(sp->rxd_mode == RXD_MODE_1)
5182 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5183 else if (sp->rxd_mode == RXD_MODE_3B)
5184 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5185 ering->rx_jumbo_pending = rx_desc_count;
5189 * s2io_ethtool_getpause_data - Pause frame generation and reception.
5190 * @sp : private member of the device structure, which is a pointer to the
5191 * s2io_nic structure.
5192 * @ep : pointer to the structure with pause parameters given by ethtool.
5194 * Returns the Pause frame generation and reception capability of the NIC.
5198 static void s2io_ethtool_getpause_data(struct net_device *dev,
5199 struct ethtool_pauseparam *ep)
/*
 * ethtool get_pauseparam: reads rmac_pause_cfg and reports whether Tx
 * pause generation and Rx pause handling are enabled; flow-control
 * autonegotiation is always reported as off.
 * NOTE(review): the opening brace and the 'u64 val64' declaration are
 * not visible in this extracted chunk.
 */
5202 struct s2io_nic *sp = dev->priv;
5203 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5205 val64 = readq(&bar0->rmac_pause_cfg);
5206 if (val64 & RMAC_PAUSE_GEN_ENABLE)
5207 ep->tx_pause = TRUE;
5208 if (val64 & RMAC_PAUSE_RX_ENABLE)
5209 ep->rx_pause = TRUE;
5210 ep->autoneg = FALSE;
5214 * s2io_ethtool_setpause_data - set/reset pause frame generation.
5215 * @sp : private member of the device structure, which is a pointer to the
5216 * s2io_nic structure.
5217 * @ep : pointer to the structure with pause parameters given by ethtool.
5219 * It can be used to set or reset Pause frame generation or reception
5220 * support of the NIC.
5222 * int, returns 0 on Success
5225 static int s2io_ethtool_setpause_data(struct net_device *dev,
5226 struct ethtool_pauseparam *ep)
/*
 * ethtool set_pauseparam: sets or clears the pause generation and
 * reception bits in rmac_pause_cfg according to the user request, then
 * writes the register back.
 * NOTE(review): gaps - the if/else tests on ep->tx_pause / ep->rx_pause
 * that select between these four assignments, and the return, are not
 * visible in this extracted chunk.
 */
5229 struct s2io_nic *sp = dev->priv;
5230 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5232 val64 = readq(&bar0->rmac_pause_cfg);
5234 val64 |= RMAC_PAUSE_GEN_ENABLE;
5236 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5238 val64 |= RMAC_PAUSE_RX_ENABLE;
5240 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5241 writeq(val64, &bar0->rmac_pause_cfg);
5246 * read_eeprom - reads 4 bytes of data from user given offset.
5247 * @sp : private member of the device structure, which is a pointer to the
5248 * s2io_nic structure.
5249 * @off : offset at which the data must be written
5250 * @data : Its an output parameter where the data read at the given
5253 * Will read 4 bytes of data from the user given offset and return the
5255 * NOTE: Will allow to read only part of the EEPROM visible through the
5258 * -1 on failure and 0 on success.
5261 #define S2IO_DEV_ID 5
5262 static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
/*
 * Reads 4 bytes from the serial EEPROM at offset 'off' into *data.
 * Xframe I goes through the I2C interface, Xframe II through the SPI
 * interface; both poll (up to 5 tries) for command completion.
 * Returns 0 on success and -1 on failure (per the header comment).
 * NOTE(review): gaps - the ret/exit_cnt/val64 declarations, the poll
 * delays, the NACK break and the final return are not visible here.
 */
5267 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5269 if (sp->device_type == XFRAME_I_DEVICE) {
5270 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5271 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
5272 I2C_CONTROL_CNTL_START;
5273 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5275 while (exit_cnt < 5) {
5276 val64 = readq(&bar0->i2c_control);
5277 if (I2C_CONTROL_CNTL_END(val64)) {
5278 *data = I2C_CONTROL_GET_DATA(val64);
5287 if (sp->device_type == XFRAME_II_DEVICE) {
5288 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5289 SPI_CONTROL_BYTECNT(0x3) |
5290 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5291 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
/* the request bit is raised in a second write, after the command word
 * has been latched
 */
5292 val64 |= SPI_CONTROL_REQ;
5293 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5294 while (exit_cnt < 5) {
5295 val64 = readq(&bar0->spi_control);
5296 if (val64 & SPI_CONTROL_NACK) {
5299 } else if (val64 & SPI_CONTROL_DONE) {
5300 *data = readq(&bar0->spi_data);
5313 * write_eeprom - actually writes the relevant part of the data value.
5314 * @sp : private member of the device structure, which is a pointer to the
5315 * s2io_nic structure.
5316 * @off : offset at which the data must be written
5317 * @data : The data that is to be written
5318 * @cnt : Number of bytes of the data that are actually to be written into
5319 * the Eeprom. (max of 3)
5321 * Actually writes the relevant part of the data value into the Eeprom
5322 * through the I2C bus.
5324 * 0 on success, -1 on failure.
5327 static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
/*
 * Writes 'cnt' bytes of 'data' to the serial EEPROM at offset 'off'
 * (I2C on Xframe I, SPI on Xframe II), polling up to 5 times for
 * completion. Returns 0 on success and -1 on failure (per the header
 * comment).
 * NOTE(review): gaps - the val64 declaration, poll delays, the ret
 * assignments and the final return are not visible in this chunk.
 */
5329 int exit_cnt = 0, ret = -1;
5331 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5333 if (sp->device_type == XFRAME_I_DEVICE) {
5334 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5335 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
5336 I2C_CONTROL_CNTL_START;
5337 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5339 while (exit_cnt < 5) {
5340 val64 = readq(&bar0->i2c_control);
5341 if (I2C_CONTROL_CNTL_END(val64)) {
/* success only when the device did not NACK the transfer */
5342 if (!(val64 & I2C_CONTROL_NACK))
5351 if (sp->device_type == XFRAME_II_DEVICE) {
/* a full 8-byte write is encoded as a SPI byte count of 0 */
5352 int write_cnt = (cnt == 8) ? 0 : cnt;
5353 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
5355 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5356 SPI_CONTROL_BYTECNT(write_cnt) |
5357 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5358 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5359 val64 |= SPI_CONTROL_REQ;
5360 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5361 while (exit_cnt < 5) {
5362 val64 = readq(&bar0->spi_control);
5363 if (val64 & SPI_CONTROL_NACK) {
5366 } else if (val64 & SPI_CONTROL_DONE) {
5376 static void s2io_vpd_read(struct s2io_nic *nic)
/*
 * Reads the adapter's PCI Vital Product Data (256 bytes, one dword at a
 * time through the VPD capability at config offset 0x80) and extracts
 * the serial number ("SN" tag) and product name into the nic structure.
 * Defaults are installed first so failure leaves usable strings.
 * NOTE(review): gaps - the vpd_data/data declarations, the kmalloc NULL
 * check, the poll delay and 'fail' setting, and the kfree are not
 * visible in this extracted chunk.
 */
5380 int i=0, cnt, fail = 0;
5381 int vpd_addr = 0x80;
5383 if (nic->device_type == XFRAME_II_DEVICE) {
5384 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5388 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5391 strcpy(nic->serial_num, "NOT AVAILABLE");
5393 vpd_data = kmalloc(256, GFP_KERNEL);
5395 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5398 nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5400 for (i = 0; i < 256; i +=4 ) {
/* request dword i, then poll the flag byte - presumably a completion
 * indicator; confirm against the PCI VPD capability layout
 */
5401 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5402 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5403 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5404 for (cnt = 0; cnt <5; cnt++) {
5406 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5411 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5415 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5416 (u32 *)&vpd_data[i]);
5420 /* read serial number of adapter */
5421 for (cnt = 0; cnt < 256; cnt++) {
5422 if ((vpd_data[cnt] == 'S') &&
5423 (vpd_data[cnt+1] == 'N') &&
5424 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5425 memset(nic->serial_num, 0, VPD_STRING_LEN);
5426 memcpy(nic->serial_num, &vpd_data[cnt + 3],
/* vpd_data[1] holds the product-name length; vpd_data[3] its start */
5433 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5434 memset(nic->product_name, 0, vpd_data[1]);
5435 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5438 nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5442 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5443 * @sp : private member of the device structure, which is a pointer to
 * the s2io_nic structure.
5444 * @eeprom : pointer to the user level structure provided by ethtool,
5445 * containing all relevant information.
5446 * @data_buf : user defined value to be written into Eeprom.
5447 * Description: Reads the values stored in the Eeprom at given offset
5448 * for a given length. Stores these values in the input argument data
5449 * buffer 'data_buf' and returns these to the caller (ethtool.)
5454 static int s2io_ethtool_geeprom(struct net_device *dev,
5455 struct ethtool_eeprom *eeprom, u8 * data_buf)
/*
 * ethtool get_eeprom: reads eeprom->len bytes starting at eeprom->offset
 * into data_buf, four bytes per read_eeprom() call; the length is first
 * clamped to the visible EEPROM space.
 * NOTE(review): gaps - the i/valid/data declarations, the error return
 * after the failed read, and the step converting 'data' into 'valid'
 * are not visible in this extracted chunk.
 */
5459 struct s2io_nic *sp = dev->priv;
/* magic lets userland verify it is talking to the expected device */
5461 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5463 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5464 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5466 for (i = 0; i < eeprom->len; i += 4) {
5467 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5468 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5472 memcpy((data_buf + i), &valid, 4);
5478 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5479 * @sp : private member of the device structure, which is a pointer to the
5480 * s2io_nic structure.
5481 * @eeprom : pointer to the user level structure provided by ethtool,
5482 * containing all relevant information.
5483 * @data_buf ; user defined value to be written into Eeprom.
5485 * Tries to write the user provided value in the Eeprom, at the offset
5486 * given by the user.
5488 * 0 on success, -EFAULT on failure.
5491 static int s2io_ethtool_seeprom(struct net_device *dev,
5492 struct ethtool_eeprom *eeprom,
/*
 * ethtool set_eeprom: verifies eeprom->magic against the device's
 * vendor/device ids, then writes the user buffer into the EEPROM one
 * byte at a time via write_eeprom().
 * NOTE(review): gaps - the data_buf parameter line, the error returns,
 * the loop header/tail and the success return are not visible in this
 * extracted chunk.
 */
5495 int len = eeprom->len, cnt = 0;
5496 u64 valid = 0, data;
5497 struct s2io_nic *sp = dev->priv;
5499 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5501 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5502 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5508 data = (u32) data_buf[cnt] & 0x000000FF;
/* the byte is positioned in the top octet of the 32-bit write word */
5510 valid = (u32) (data << 24);
5514 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5516 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5518 "write into the specified offset\n");
5529 * s2io_register_test - reads and writes into all clock domains.
5530 * @sp : private member of the device structure, which is a pointer to the
5531 * s2io_nic structure.
5532 * @data : variable that returns the result of each of the test conducted b
5535 * Read and write into all clock domains. The NIC has 3 clock domains,
5536 * see that registers in all the three regions are accessible.
5541 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
/*
 * Offline self-test: reads registers with known expected values in each
 * of the NIC's clock domains (four read "levels") and then performs two
 * write/read-back patterns on xmsi_data (two write "levels").
 * NOTE(review): gaps - the fail-flag/*data updates inside each failure
 * branch and the final return are not visible in this extracted chunk.
 */
5543 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5544 u64 val64 = 0, exp_val;
5547 val64 = readq(&bar0->pif_rd_swapper_fb);
5548 if (val64 != 0x123456789abcdefULL) {
5550 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5553 val64 = readq(&bar0->rmac_pause_cfg);
5554 if (val64 != 0xc000ffff00000000ULL) {
5556 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5559 val64 = readq(&bar0->rx_queue_cfg);
/* the expected rx_queue_cfg value differs between Xframe I and II */
5560 if (sp->device_type == XFRAME_II_DEVICE)
5561 exp_val = 0x0404040404040404ULL;
5563 exp_val = 0x0808080808080808ULL;
5564 if (val64 != exp_val) {
5566 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5569 val64 = readq(&bar0->xgxs_efifo_cfg);
5570 if (val64 != 0x000000001923141EULL) {
5572 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
/* write/read-back with complementary patterns to catch stuck bits */
5575 val64 = 0x5A5A5A5A5A5A5A5AULL;
5576 writeq(val64, &bar0->xmsi_data);
5577 val64 = readq(&bar0->xmsi_data);
5578 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5580 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5583 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5584 writeq(val64, &bar0->xmsi_data);
5585 val64 = readq(&bar0->xmsi_data);
5586 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5588 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5596 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5597 * @sp : private member of the device structure, which is a pointer to the
5598 * s2io_nic structure.
5599 * @data:variable that returns the result of each of the test conducted by
5602 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5608 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
/*
 * Offline self-test of the serial EEPROM: negative write tests on
 * protected offsets (Xframe I only - the SPI interface on Xframe II can
 * write anywhere), plus write/read-back verification at 0x4F0 and
 * 0x7F0, with the original contents restored at the end.
 * NOTE(review): gaps - the fail-counter updates, saved_4F0/saved_7F0
 * bookkeeping and the *data assignment/return are not visible in this
 * extracted chunk.
 */
5611 u64 ret_data, org_4F0, org_7F0;
5612 u8 saved_4F0 = 0, saved_7F0 = 0;
5613 struct net_device *dev = sp->dev;
5615 /* Test Write Error at offset 0 */
5616 /* Note that SPI interface allows write access to all areas
5617 * of EEPROM. Hence doing all negative testing only for Xframe I.
5619 if (sp->device_type == XFRAME_I_DEVICE)
/* a successful write to a protected offset is itself a failure */
5620 if (!write_eeprom(sp, 0, 0, 3))
5623 /* Save current values at offsets 0x4F0 and 0x7F0 */
5624 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5626 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5629 /* Test Write at offset 4f0 */
5630 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5632 if (read_eeprom(sp, 0x4F0, &ret_data))
5635 if (ret_data != 0x012345) {
5636 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5637 "Data written %llx Data read %llx\n",
5638 dev->name, (unsigned long long)0x12345,
5639 (unsigned long long)ret_data);
5643 /* Reset the EEPROM data go FFFF */
5644 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5646 /* Test Write Request Error at offset 0x7c */
5647 if (sp->device_type == XFRAME_I_DEVICE)
5648 if (!write_eeprom(sp, 0x07C, 0, 3))
5651 /* Test Write Request at offset 0x7f0 */
5652 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5654 if (read_eeprom(sp, 0x7F0, &ret_data))
5657 if (ret_data != 0x012345) {
5658 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5659 "Data written %llx Data read %llx\n",
5660 dev->name, (unsigned long long)0x12345,
5661 (unsigned long long)ret_data);
5665 /* Reset the EEPROM data go FFFF */
5666 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5668 if (sp->device_type == XFRAME_I_DEVICE) {
5669 /* Test Write Error at offset 0x80 */
5670 if (!write_eeprom(sp, 0x080, 0, 3))
5673 /* Test Write Error at offset 0xfc */
5674 if (!write_eeprom(sp, 0x0FC, 0, 3))
5677 /* Test Write Error at offset 0x100 */
5678 if (!write_eeprom(sp, 0x100, 0, 3))
5681 /* Test Write Error at offset 4ec */
5682 if (!write_eeprom(sp, 0x4EC, 0, 3))
5686 /* Restore values at offsets 0x4F0 and 0x7F0 */
5688 write_eeprom(sp, 0x4F0, org_4F0, 3);
5690 write_eeprom(sp, 0x7F0, org_7F0, 3);
5697 * s2io_bist_test - invokes the MemBist test of the card .
5698 * @sp : private member of the device structure, which is a pointer to the
5699 * s2io_nic structure.
5700 * @data:variable that returns the result of each of the test conducted by
5703 * This invokes the MemBist test of the card. We give around
5704 * 2 secs time for the Test to complete. If it's still not complete
5705 * within this period, we consider that the test failed.
5707 * 0 on success and -1 on failure.
5710 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
/*
 * Offline self-test: starts the PCI built-in self test and polls the
 * PCI_BIST config register until the start bit clears, storing the
 * completion code in *data.
 * NOTE(review): gaps - the 'u8 bist' declaration, the poll loop bounds,
 * the ret assignment and the final return are not visible here.
 */
5715 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5716 bist |= PCI_BIST_START;
/* NOTE(review): PCI_BIST is a one-byte config register but this issues
 * a 16-bit write - looks like it should be pci_write_config_byte();
 * confirm against the PCI spec before changing.
 */
5717 pci_write_config_word(sp->pdev, PCI_BIST, bist);
5720 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5721 if (!(bist & PCI_BIST_START)) {
5722 *data = (bist & PCI_BIST_CODE_MASK);
5734 * s2io_link_test - verifies the link state of the nic
5735 * @sp : private member of the device structure, which is a pointer to the
5736 * s2io_nic structure.
5737 * @data: variable that returns the result of each of the test conducted by
5740 * The function verifies the link state of the NIC and updates the input
5741 * argument 'data' appropriately.
5746 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
/*
 * Online self-test: samples adapter_status and records the link state
 * in *data.
 * NOTE(review): the opening brace, the val64 declaration, the *data
 * assignments and the return are not visible in this extracted chunk.
 */
5748 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5751 val64 = readq(&bar0->adapter_status);
5752 if(!(LINK_IS_UP(val64)))
5761 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5762 * @sp - private member of the device structure, which is a pointer to the
5763 * s2io_nic structure.
5764 * @data - variable that returns the result of each of the test
5765 * conducted by the driver.
5767 * This is one of the offline test that tests the read and write
5768 * access to the RldRam chip on the NIC.
5773 static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
/*
 * Offline self-test of the on-board RLDRAM: disables ECC, puts the
 * memory controller into test mode, writes three 64-bit data patterns
 * (bit-inverted on the second iteration), triggers a test write then a
 * test read/compare, and checks MC_RLDRAM_TEST_PASS; finally leaves
 * test mode.
 * NOTE(review): gaps - the val64 declaration, test_fail bookkeeping,
 * poll-loop delays/tails, the iteration increment and the final return
 * are not visible in this extracted chunk.
 */
5775 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5777 int cnt, iteration = 0, test_fail = 0;
/* ECC must be off while raw test patterns are driven */
5779 val64 = readq(&bar0->adapter_control);
5780 val64 &= ~ADAPTER_ECC_EN;
5781 writeq(val64, &bar0->adapter_control);
5783 val64 = readq(&bar0->mc_rldram_test_ctrl);
5784 val64 |= MC_RLDRAM_TEST_MODE;
5785 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5787 val64 = readq(&bar0->mc_rldram_mrs);
5788 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5789 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5791 val64 |= MC_RLDRAM_MRS_ENABLE;
5792 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
/* two passes: original patterns, then their bitwise complements */
5794 while (iteration < 2) {
5795 val64 = 0x55555555aaaa0000ULL;
5796 if (iteration == 1) {
5797 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5799 writeq(val64, &bar0->mc_rldram_test_d0);
5801 val64 = 0xaaaa5a5555550000ULL;
5802 if (iteration == 1) {
5803 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5805 writeq(val64, &bar0->mc_rldram_test_d1);
5807 val64 = 0x55aaaaaaaa5a0000ULL;
5808 if (iteration == 1) {
5809 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5811 writeq(val64, &bar0->mc_rldram_test_d2);
5813 val64 = (u64) (0x0000003ffffe0100ULL);
5814 writeq(val64, &bar0->mc_rldram_test_add);
5816 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5818 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
/* poll (up to 5 tries) for the write phase to finish */
5820 for (cnt = 0; cnt < 5; cnt++) {
5821 val64 = readq(&bar0->mc_rldram_test_ctrl);
5822 if (val64 & MC_RLDRAM_TEST_DONE)
5830 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5831 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
/* poll again for the read/compare phase */
5833 for (cnt = 0; cnt < 5; cnt++) {
5834 val64 = readq(&bar0->mc_rldram_test_ctrl);
5835 if (val64 & MC_RLDRAM_TEST_DONE)
5843 val64 = readq(&bar0->mc_rldram_test_ctrl);
5844 if (!(val64 & MC_RLDRAM_TEST_PASS))
5852 /* Bring the adapter out of test mode */
5853 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5859 * s2io_ethtool_test - conducts 6 tests to determine the health of card.
5860 * @sp : private member of the device structure, which is a pointer to the
5861 * s2io_nic structure.
5862 * @ethtest : pointer to a ethtool command specific structure that will be
5863 * returned to the user.
5864 * @data : variable that returns the result of each of the test
5865 * conducted by the driver.
5867 * This function conducts 6 tests ( 4 offline and 2 online) to determine
5868 * the health of the card.
5873 static void s2io_ethtool_test(struct net_device *dev,
5874 struct ethtool_test *ethtest,
/*
 * ethtool self_test dispatcher: for offline tests the interface is
 * closed first, then the register, RLDRAM, EEPROM and BIST tests run;
 * the online link test runs when the interface is up. Any failure sets
 * ETH_TEST_FL_FAILED and the per-test results land in data[].
 * NOTE(review): gaps - the 'data' parameter line, the interface-reopen
 * path after offline tests and the "not up" online branch are only
 * partly visible in this extracted chunk.
 */
5877 struct s2io_nic *sp = dev->priv;
/* remember whether the interface was running so it can be restored */
5878 int orig_state = netif_running(sp->dev);
5880 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5881 /* Offline Tests. */
5883 s2io_close(sp->dev);
5885 if (s2io_register_test(sp, &data[0]))
5886 ethtest->flags |= ETH_TEST_FL_FAILED;
5890 if (s2io_rldram_test(sp, &data[3]))
5891 ethtest->flags |= ETH_TEST_FL_FAILED;
5895 if (s2io_eeprom_test(sp, &data[1]))
5896 ethtest->flags |= ETH_TEST_FL_FAILED;
5898 if (s2io_bist_test(sp, &data[4]))
5899 ethtest->flags |= ETH_TEST_FL_FAILED;
5909 "%s: is not up, cannot run test\n",
5918 if (s2io_link_test(sp, &data[2]))
5919 ethtest->flags |= ETH_TEST_FL_FAILED;
5928 static void s2io_get_ethtool_stats(struct net_device *dev,
5929 struct ethtool_stats *estats,
5933 struct s2io_nic *sp = dev->priv;
5934 struct stat_block *stat_info = sp->mac_control.stats_info;
5936 s2io_updt_stats(sp);
5938 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
5939 le32_to_cpu(stat_info->tmac_frms);
5941 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5942 le32_to_cpu(stat_info->tmac_data_octets);
5943 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
5945 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5946 le32_to_cpu(stat_info->tmac_mcst_frms);
5948 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5949 le32_to_cpu(stat_info->tmac_bcst_frms);
5950 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
5952 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5953 le32_to_cpu(stat_info->tmac_ttl_octets);
5955 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5956 le32_to_cpu(stat_info->tmac_ucst_frms);
5958 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5959 le32_to_cpu(stat_info->tmac_nucst_frms);
5961 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5962 le32_to_cpu(stat_info->tmac_any_err_frms);
5963 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
5964 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5966 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5967 le32_to_cpu(stat_info->tmac_vld_ip);
5969 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5970 le32_to_cpu(stat_info->tmac_drop_ip);
5972 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5973 le32_to_cpu(stat_info->tmac_icmp);
5975 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5976 le32_to_cpu(stat_info->tmac_rst_tcp);
5977 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5978 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5979 le32_to_cpu(stat_info->tmac_udp);
5981 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5982 le32_to_cpu(stat_info->rmac_vld_frms);
5984 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5985 le32_to_cpu(stat_info->rmac_data_octets);
5986 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5987 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5989 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5990 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5992 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5993 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
5994 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
5995 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
5996 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5997 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
5998 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
6000 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
6001 le32_to_cpu(stat_info->rmac_ttl_octets);
6003 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
6004 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
6006 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
6007 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
6009 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
6010 le32_to_cpu(stat_info->rmac_discarded_frms);
6012 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
6013 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
6014 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
6015 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
6017 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
6018 le32_to_cpu(stat_info->rmac_usized_frms);
6020 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
6021 le32_to_cpu(stat_info->rmac_osized_frms);
6023 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
6024 le32_to_cpu(stat_info->rmac_frag_frms);
6026 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
6027 le32_to_cpu(stat_info->rmac_jabber_frms);
6028 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
6029 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
6030 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
6031 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
6032 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
6033 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
6035 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
6036 le32_to_cpu(stat_info->rmac_ip);
6037 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
6038 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
6040 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
6041 le32_to_cpu(stat_info->rmac_drop_ip);
6043 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
6044 le32_to_cpu(stat_info->rmac_icmp);
6045 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
6047 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
6048 le32_to_cpu(stat_info->rmac_udp);
6050 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
6051 le32_to_cpu(stat_info->rmac_err_drp_udp);
6052 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
6053 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
6054 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
6055 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
6056 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
6057 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
6058 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
6059 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
6060 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
6061 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
6062 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
6063 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
6064 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
6065 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
6066 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
6067 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
6068 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
6070 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
6071 le32_to_cpu(stat_info->rmac_pause_cnt);
6072 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
6073 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
6075 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
6076 le32_to_cpu(stat_info->rmac_accepted_ip);
6077 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
6078 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
6079 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
6080 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
6081 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
6082 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
6083 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
6084 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
6085 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
6086 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
6087 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
6088 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
6089 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
6090 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
6091 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
6092 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
6093 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
6094 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
6095 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
6097 /* Enhanced statistics exist only for Hercules */
6098 if(sp->device_type == XFRAME_II_DEVICE) {
6100 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
6102 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
6104 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
6105 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
6106 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
6107 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
6108 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
6109 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
6110 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
6111 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
6112 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
6113 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
6114 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
6115 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
6116 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
6117 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
6121 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
6122 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
6123 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
6124 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
6125 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
6126 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
6127 for (k = 0; k < MAX_RX_RINGS; k++)
6128 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
6129 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
6130 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
6131 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
6132 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
6133 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
6134 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
6135 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
6136 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
6137 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
6138 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
6139 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
6140 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
6141 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
6142 tmp_stats[i++] = stat_info->sw_stat.sending_both;
6143 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
6144 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
6145 if (stat_info->sw_stat.num_aggregations) {
6146 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
6149 * Since 64-bit divide does not work on all platforms,
6150 * do repeated subtraction.
6152 while (tmp >= stat_info->sw_stat.num_aggregations) {
6153 tmp -= stat_info->sw_stat.num_aggregations;
6156 tmp_stats[i++] = count;
6160 tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
6161 tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
6162 tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
6163 tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
6164 tmp_stats[i++] = stat_info->sw_stat.mem_freed;
6165 tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
6166 tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
6167 tmp_stats[i++] = stat_info->sw_stat.link_up_time;
6168 tmp_stats[i++] = stat_info->sw_stat.link_down_time;
6170 tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
6171 tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
6172 tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
6173 tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
6174 tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
6176 tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
6177 tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
6178 tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
6179 tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
6180 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
6181 tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
6182 tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
6183 tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
6184 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
6185 tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
6186 tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
6187 tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
6188 tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
6189 tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
6190 tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
6191 tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
6192 tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
6193 tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
6194 tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
6195 tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
6196 tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
6197 tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
6198 tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
6199 tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
6200 tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
6201 tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
6204 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6206 return (XENA_REG_SPACE);
6210 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6212 struct s2io_nic *sp = dev->priv;
6214 return (sp->rx_csum);
/*
 * s2io_ethtool_set_rx_csum - enable/disable Rx checksum offload.
 * NOTE(review): the body (lines 6220-6227) is elided in this listing;
 * only the signature and the private-data lookup are visible here.
 */
6217 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6219 struct s2io_nic *sp = dev->priv;
6229 static int s2io_get_eeprom_len(struct net_device *dev)
6231 return (XENA_EEPROM_SPACE);
/*
 * s2io_get_sset_count - number of entries in an ethtool string set.
 * NOTE(review): the switch on 'sset', the ETH_SS_TEST/ETH_SS_STATS case
 * labels and the default return are elided in this listing; do not infer
 * them from the visible lines alone.
 */
6234 static int s2io_get_sset_count(struct net_device *dev, int sset)
6236 struct s2io_nic *sp = dev->priv;
6240 return S2IO_TEST_LEN;
/* Xframe II exposes extra "enhanced" statistics, hence a larger count. */
6242 switch(sp->device_type) {
6243 case XFRAME_I_DEVICE:
6244 return XFRAME_I_STAT_LEN;
6245 case XFRAME_II_DEVICE:
6246 return XFRAME_II_STAT_LEN;
6255 static void s2io_ethtool_get_strings(struct net_device *dev,
6256 u32 stringset, u8 * data)
6259 struct s2io_nic *sp = dev->priv;
6261 switch (stringset) {
6263 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6266 stat_size = sizeof(ethtool_xena_stats_keys);
6267 memcpy(data, ðtool_xena_stats_keys,stat_size);
6268 if(sp->device_type == XFRAME_II_DEVICE) {
6269 memcpy(data + stat_size,
6270 ðtool_enhanced_stats_keys,
6271 sizeof(ethtool_enhanced_stats_keys));
6272 stat_size += sizeof(ethtool_enhanced_stats_keys);
6275 memcpy(data + stat_size, ðtool_driver_stats_keys,
6276 sizeof(ethtool_driver_stats_keys));
/*
 * s2io_ethtool_op_set_tx_csum - toggle IPv4 Tx checksum offload.
 * NOTE(review): the if (data) / else / return lines are elided in this
 * listing; only the two feature-flag updates are visible.
 */
6280 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6283 dev->features |= NETIF_F_IP_CSUM;
6285 dev->features &= ~NETIF_F_IP_CSUM;
6290 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6292 return (dev->features & NETIF_F_TSO) != 0;
/*
 * s2io_ethtool_op_set_tso - toggle TSO (IPv4 and IPv6) offload.
 * NOTE(review): the if (data) / else / return lines are elided in this
 * listing; only the two feature-flag updates are visible.
 */
6294 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6297 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6299 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
/* ethtool operations vector registered for the s2io net device.
 * NOTE(review): the closing "};" line is elided in this listing.
 */
6304 static const struct ethtool_ops netdev_ethtool_ops = {
6305 .get_settings = s2io_ethtool_gset,
6306 .set_settings = s2io_ethtool_sset,
6307 .get_drvinfo = s2io_ethtool_gdrvinfo,
6308 .get_regs_len = s2io_ethtool_get_regs_len,
6309 .get_regs = s2io_ethtool_gregs,
6310 .get_link = ethtool_op_get_link,
6311 .get_eeprom_len = s2io_get_eeprom_len,
6312 .get_eeprom = s2io_ethtool_geeprom,
6313 .set_eeprom = s2io_ethtool_seeprom,
6314 .get_ringparam = s2io_ethtool_gringparam,
6315 .get_pauseparam = s2io_ethtool_getpause_data,
6316 .set_pauseparam = s2io_ethtool_setpause_data,
6317 .get_rx_csum = s2io_ethtool_get_rx_csum,
6318 .set_rx_csum = s2io_ethtool_set_rx_csum,
6319 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
6320 .set_sg = ethtool_op_set_sg,
6321 .get_tso = s2io_ethtool_op_get_tso,
6322 .set_tso = s2io_ethtool_op_set_tso,
6323 .set_ufo = ethtool_op_set_ufo,
6324 .self_test = s2io_ethtool_test,
6325 .get_strings = s2io_ethtool_get_strings,
6326 .phys_id = s2io_ethtool_idnic,
6327 .get_ethtool_stats = s2io_get_ethtool_stats,
6328 .get_sset_count = s2io_get_sset_count,
6332 * s2io_ioctl - Entry point for the Ioctl
6333 * @dev : Device pointer.
6334 * @ifr : An IOCTL specific structure, that can contain a pointer to
6335 * a proprietary structure used to pass information to the driver.
6336 * @cmd : This is used to distinguish between the different commands that
6337 * can be passed to the IOCTL functions.
6339 * Currently there are no special functionality supported in IOCTL, hence
6340 * this function always returns EOPNOTSUPPORTED
6343 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
/* NOTE(review): body elided in this listing; per the comment above, no
 * private ioctls are supported by this driver.
 */
6349 * s2io_change_mtu - entry point to change MTU size for the device.
6350 * @dev : device pointer.
6351 * @new_mtu : the new MTU size for the device.
6352 * Description: A driver entry point to change MTU size for the device.
6353 * Before changing the MTU the device must be stopped.
6355 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * NOTE(review): several lines (the dev->mtu assignment, the card-down
 * call and the return statements) are elided in this listing.
 */
6359 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6361 struct s2io_nic *sp = dev->priv;
/* Reject MTUs outside the supported [MIN_MTU, S2IO_JUMBO_SIZE] range. */
6363 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6364 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
/* Interface up: quiesce traffic and re-bring the card up with the new MTU. */
6370 if (netif_running(dev)) {
6372 netif_stop_queue(dev);
6373 if (s2io_card_up(sp)) {
6374 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6377 if (netif_queue_stopped(dev))
6378 netif_wake_queue(dev);
6379 } else { /* Device is down */
/* Card down: program the Rx max payload length register directly. */
6380 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6381 u64 val64 = new_mtu;
6383 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6390 * s2io_tasklet - Bottom half of the ISR.
6391 * @dev_adr : address of the device structure in dma_addr_t format.
6393 * This is the tasklet or the bottom half of the ISR. This is
6394 * an extension of the ISR which is scheduled by the scheduler to be run
6395 * when the load on the CPU is low. All low priority tasks of the ISR can
6396 * be pushed into the tasklet. For now the tasklet is used only to
6397 * replenish the Rx buffers in the Rx buffer descriptors.
6402 static void s2io_tasklet(unsigned long dev_addr)
6404 struct net_device *dev = (struct net_device *) dev_addr;
6405 struct s2io_nic *sp = dev->priv;
6407 struct mac_info *mac_control;
6408 struct config_param *config;
6410 mac_control = &sp->mac_control;
6411 config = &sp->config;
/* Re-entrancy guard: skip entirely if another instance is already running. */
6413 if (!TASKLET_IN_USE) {
/* Replenish the Rx descriptors of every configured ring. */
6414 for (i = 0; i < config->rx_ring_num; i++) {
6415 ret = fill_rx_buffers(sp, i);
6416 if (ret == -ENOMEM) {
6417 DBG_PRINT(INFO_DBG, "%s: Out of ",
6419 DBG_PRINT(INFO_DBG, "memory in tasklet\n");
6421 } else if (ret == -EFILL) {
6423 "%s: Rx Ring %d is full\n",
/* Release the guard so the tasklet may run again. */
6428 clear_bit(0, (&sp->tasklet_status));
6433 * s2io_set_link - Set the Link status
6434 * @data: long pointer to device private structure
6435 * Description: Sets the link status for the adapter
/*
 * Work-queue handler that brings the adapter's MAC/LED state in line with
 * the physical link. NOTE(review): numerous lines (returns, closing
 * braces, msleep calls) are elided in this listing.
 */
6438 static void s2io_set_link(struct work_struct *work)
6440 struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
6441 struct net_device *dev = nic->dev;
6442 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6448 if (!netif_running(dev))
/* Serialize with reset: only one link task may touch the HW at a time. */
6451 if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6452 /* The card is being reset, no point doing anything */
6456 subid = nic->pdev->subsystem_device;
6457 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6459 * Allow a small delay for the NICs self initiated
6460 * cleanup to complete.
6465 val64 = readq(&bar0->adapter_status);
6466 if (LINK_IS_UP(val64)) {
/* Link up but adapter disabled: enable it if the HW is quiescent. */
6467 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6468 if (verify_xena_quiescence(nic)) {
6469 val64 = readq(&bar0->adapter_control);
6470 val64 |= ADAPTER_CNTL_EN;
6471 writeq(val64, &bar0->adapter_control);
6472 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6473 nic->device_type, subid)) {
6474 val64 = readq(&bar0->gpio_control);
6475 val64 |= GPIO_CTRL_GPIO_0;
6476 writeq(val64, &bar0->gpio_control);
6477 val64 = readq(&bar0->gpio_control);
6479 val64 |= ADAPTER_LED_ON;
6480 writeq(val64, &bar0->adapter_control);
6482 nic->device_enabled_once = TRUE;
6484 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6485 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6486 netif_stop_queue(dev);
6489 val64 = readq(&bar0->adapter_control);
6490 val64 |= ADAPTER_LED_ON;
6491 writeq(val64, &bar0->adapter_control);
6492 s2io_link(nic, LINK_UP);
/* Link down: turn the LED/GPIO indicators off and report link loss. */
6494 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6496 val64 = readq(&bar0->gpio_control);
6497 val64 &= ~GPIO_CTRL_GPIO_0;
6498 writeq(val64, &bar0->gpio_control);
6499 val64 = readq(&bar0->gpio_control);
6502 val64 = readq(&bar0->adapter_control);
6503 val64 = val64 &(~ADAPTER_LED_ON);
6504 writeq(val64, &bar0->adapter_control);
6505 s2io_link(nic, LINK_DOWN);
6507 clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
/*
 * set_rxd_buffer_pointer - re-arm one Rx descriptor without processing it.
 * Reuses the previously mapped DMA address when the RxD still owns an skb;
 * otherwise allocates and maps a fresh skb (plus the fixed header buffers
 * in 3-buffer mode). On mapping failure, already-mapped buffers are
 * unmapped and the skb freed via the memalloc_failed path.
 * NOTE(review): several lines (if/else keywords, returns, braces) are
 * elided in this listing.
 */
6513 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6515 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6516 u64 *temp2, int size)
6518 struct net_device *dev = sp->dev;
6519 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
/* 1-buffer mode: a single data buffer per descriptor. */
6521 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6522 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6525 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6527 * As Rx frame are not going to be processed,
6528 * using same mapped address for the Rxd
6531 rxdp1->Buffer0_ptr = *temp0;
6533 *skb = dev_alloc_skb(size);
6535 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6536 DBG_PRINT(INFO_DBG, "memory to allocate ");
6537 DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
6538 sp->mac_control.stats_info->sw_stat. \
6539 mem_alloc_fail_cnt++;
6542 sp->mac_control.stats_info->sw_stat.mem_allocated
6543 += (*skb)->truesize;
6544 /* storing the mapped addr in a temp variable
6545 * such it will be used for next rxd whose
6546 * Host Control is NULL
6548 rxdp1->Buffer0_ptr = *temp0 =
6549 pci_map_single( sp->pdev, (*skb)->data,
6550 size - NET_IP_ALIGN,
6551 PCI_DMA_FROMDEVICE);
6552 if( (rxdp1->Buffer0_ptr == 0) ||
6553 (rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
6554 goto memalloc_failed;
6556 rxdp->Host_Control = (unsigned long) (*skb);
/* 3-buffer mode: header buffer 0, dummy buffer 1, payload buffer 2. */
6558 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6559 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6560 /* Two buffer Mode */
6562 rxdp3->Buffer2_ptr = *temp2;
6563 rxdp3->Buffer0_ptr = *temp0;
6564 rxdp3->Buffer1_ptr = *temp1;
6566 *skb = dev_alloc_skb(size);
6568 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6569 DBG_PRINT(INFO_DBG, "memory to allocate ");
6570 DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
6571 sp->mac_control.stats_info->sw_stat. \
6572 mem_alloc_fail_cnt++;
6575 sp->mac_control.stats_info->sw_stat.mem_allocated
6576 += (*skb)->truesize;
6577 rxdp3->Buffer2_ptr = *temp2 =
6578 pci_map_single(sp->pdev, (*skb)->data,
6580 PCI_DMA_FROMDEVICE);
6581 if( (rxdp3->Buffer2_ptr == 0) ||
6582 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
6583 goto memalloc_failed;
6585 rxdp3->Buffer0_ptr = *temp0 =
6586 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6587 PCI_DMA_FROMDEVICE);
/* Unwind buffer 2's mapping if buffer 0 fails to map. */
6588 if( (rxdp3->Buffer0_ptr == 0) ||
6589 (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
6590 pci_unmap_single (sp->pdev,
6591 (dma_addr_t)rxdp3->Buffer2_ptr,
6592 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6593 goto memalloc_failed;
6595 rxdp->Host_Control = (unsigned long) (*skb);
6597 /* Buffer-1 will be dummy buffer not used */
6598 rxdp3->Buffer1_ptr = *temp1 =
6599 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6600 PCI_DMA_FROMDEVICE);
/* Unwind buffers 0 and 2 if the dummy buffer fails to map. */
6601 if( (rxdp3->Buffer1_ptr == 0) ||
6602 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
6603 pci_unmap_single (sp->pdev,
6604 (dma_addr_t)rxdp3->Buffer0_ptr,
6605 BUF0_LEN, PCI_DMA_FROMDEVICE);
6606 pci_unmap_single (sp->pdev,
6607 (dma_addr_t)rxdp3->Buffer2_ptr,
6608 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6609 goto memalloc_failed;
/* Shared failure path: account the loss and free the skb. */
6615 stats->pci_map_fail_cnt++;
6616 stats->mem_freed += (*skb)->truesize;
6617 dev_kfree_skb(*skb);
/*
 * set_rxd_buffer_size - program per-buffer sizes into an Rx descriptor
 * according to the ring mode. NOTE(review): the 'int size' parameter line
 * and the opening brace are elided in this listing.
 */
6621 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6624 struct net_device *dev = sp->dev;
6625 if (sp->rxd_mode == RXD_MODE_1) {
6626 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6627 } else if (sp->rxd_mode == RXD_MODE_3B) {
/* 3-buffer mode: fixed header buffer, 1-byte dummy, MTU+4 payload. */
6628 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6629 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6630 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
/*
 * rxd_owner_bit_reset - hand every Rx descriptor back to the hardware.
 * Walks all rings/blocks/descriptors, re-arms each RxD's buffers and
 * sizes, then sets the ownership bit so the NIC can use them; the frames
 * themselves are never processed (used during card-down to avoid a ring
 * bump). NOTE(review): the failure return and closing braces are elided
 * in this listing.
 */
6634 static int rxd_owner_bit_reset(struct s2io_nic *sp)
6636 int i, j, k, blk_cnt = 0, size;
6637 struct mac_info * mac_control = &sp->mac_control;
6638 struct config_param *config = &sp->config;
6639 struct net_device *dev = sp->dev;
6640 struct RxD_t *rxdp = NULL;
6641 struct sk_buff *skb = NULL;
6642 struct buffAdd *ba = NULL;
6643 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6645 /* Calculate the size based on ring mode */
6646 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6647 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6648 if (sp->rxd_mode == RXD_MODE_1)
6649 size += NET_IP_ALIGN;
6650 else if (sp->rxd_mode == RXD_MODE_3B)
6651 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6653 for (i = 0; i < config->rx_ring_num; i++) {
6654 blk_cnt = config->rx_cfg[i].num_rxd /
6655 (rxd_count[sp->rxd_mode] +1);
6657 for (j = 0; j < blk_cnt; j++) {
6658 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6659 rxdp = mac_control->rings[i].
6660 rx_blocks[j].rxds[k].virt_addr;
6661 if(sp->rxd_mode == RXD_MODE_3B)
6662 ba = &mac_control->rings[i].ba[j][k];
6663 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6664 &skb,(u64 *)&temp0_64,
6671 set_rxd_buffer_size(sp, rxdp, size);
6673 /* flip the Ownership bit to Hardware */
6674 rxdp->Control_1 |= RXD_OWN_XENA;
/*
 * s2io_add_isr - enable MSI-X if configured (falling back to INTA on
 * failure) and register the interrupt handler(s). NOTE(review): many
 * lines (error branches, else keywords, counters, returns) are elided in
 * this listing.
 */
6682 static int s2io_add_isr(struct s2io_nic * sp)
6685 struct net_device *dev = sp->dev;
6688 if (sp->config.intr_type == MSI_X)
6689 ret = s2io_enable_msi_x(sp);
/* MSI-X enable failed: fall back to legacy INTA interrupts. */
6691 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6692 sp->config.intr_type = INTA;
6695 /* Store the values of the MSIX table in the struct s2io_nic structure */
6696 store_xmsi_data(sp);
6698 /* After proper initialization of H/W, register ISR */
6699 if (sp->config.intr_type == MSI_X) {
6700 int i, msix_tx_cnt=0,msix_rx_cnt=0;
/* Request one vector per in-use entry: FIFO (Tx) or ring (Rx) handlers. */
6702 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
6703 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
6704 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
6706 err = request_irq(sp->entries[i].vector,
6707 s2io_msix_fifo_handle, 0, sp->desc[i],
6708 sp->s2io_entries[i].arg);
6709 /* If either data or addr is zero print it */
6710 if(!(sp->msix_info[i].addr &&
6711 sp->msix_info[i].data)) {
6712 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6713 "Data:0x%lx\n",sp->desc[i],
6714 (unsigned long long)
6715 sp->msix_info[i].addr,
6717 ntohl(sp->msix_info[i].data));
6722 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
6724 err = request_irq(sp->entries[i].vector,
6725 s2io_msix_ring_handle, 0, sp->desc[i],
6726 sp->s2io_entries[i].arg);
6727 /* If either data or addr is zero print it */
6728 if(!(sp->msix_info[i].addr &&
6729 sp->msix_info[i].data)) {
6730 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6731 "Data:0x%lx\n",sp->desc[i],
6732 (unsigned long long)
6733 sp->msix_info[i].addr,
6735 ntohl(sp->msix_info[i].data));
6741 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
6742 "failed\n", dev->name, i);
6743 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
6746 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
6748 printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
6749 printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
/* Legacy path: a single shared interrupt line for the whole device. */
6751 if (sp->config.intr_type == INTA) {
6752 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
6755 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
/*
 * s2io_rem_isr - tear down interrupt handling: free every registered
 * MSI-X vector (and the vector/entry tables), disable MSI/MSI-X in PCI
 * config space, or free the single INTA line. NOTE(review): several
 * lines (else branch, kfree of sp->entries, mem_freed accounting) are
 * elided in this listing.
 */
6762 static void s2io_rem_isr(struct s2io_nic * sp)
6764 struct net_device *dev = sp->dev;
6765 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6767 if (sp->config.intr_type == MSI_X) {
/* Quiesce then release each successfully registered vector. */
6771 for (i=1; (sp->s2io_entries[i].in_use ==
6772 MSIX_REGISTERED_SUCCESS); i++) {
6773 int vector = sp->entries[i].vector;
6774 void *arg = sp->s2io_entries[i].arg;
6776 synchronize_irq(vector);
6777 free_irq(vector, arg);
6782 (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
6783 kfree(sp->s2io_entries);
6785 (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
6787 sp->s2io_entries = NULL;
/* Clear the MSI enable bit in PCI config space (offset 0x42). */
6789 pci_read_config_word(sp->pdev, 0x42, &msi_control);
6790 msi_control &= 0xFFFE; /* Disable MSI */
6791 pci_write_config_word(sp->pdev, 0x42, msi_control);
6793 pci_disable_msix(sp->pdev);
/* INTA path: quiesce and release the shared line. */
6795 synchronize_irq(sp->pdev->irq);
6796 free_irq(sp->pdev->irq, dev);
/*
 * do_s2io_card_down - stop the adapter: cancel timers/tasklets, quiesce
 * the hardware (when @do_io is set), and release all Tx/Rx buffers.
 * NOTE(review): several lines (stop_nic call, s2io_rem_isr, reset, loop
 * bodies) are elided in this listing.
 */
6800 static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
6803 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6804 unsigned long flags;
6805 register u64 val64 = 0;
6807 del_timer_sync(&sp->alarm_timer);
6808 /* If s2io_set_link task is executing, wait till it completes. */
6809 while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
6812 clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
6814 /* disable Tx and Rx traffic on the NIC */
6821 tasklet_kill(&sp->task);
6823 /* Check if the device is Quiescent and then Reset the NIC */
6825 /* As per the HW requirement we need to replenish the
6826 * receive buffer to avoid the ring bump. Since there is
6827 * no intention of processing the Rx frame at this point we are
6828 * just setting the ownership bit of rxd in Each Rx
6829 * ring to HW and set the appropriate buffer size
6830 * based on the ring mode
6832 rxd_owner_bit_reset(sp);
6834 val64 = readq(&bar0->adapter_status);
6835 if (verify_xena_quiescence(sp)) {
6836 if(verify_pcc_quiescent(sp, sp->device_enabled_once))
6844 "s2io_close:Device not Quiescent ");
6845 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
6846 (unsigned long long) val64);
6853 spin_lock_irqsave(&sp->tx_lock, flags);
6854 /* Free all Tx buffers */
6855 free_tx_buffers(sp);
6856 spin_unlock_irqrestore(&sp->tx_lock, flags);
6858 /* Free all Rx buffers */
6859 spin_lock_irqsave(&sp->rx_lock, flags);
6860 free_rx_buffers(sp);
6861 spin_unlock_irqrestore(&sp->rx_lock, flags);
6863 clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
/* Full card shutdown including register I/O (do_io = 1). */
6866 static void s2io_card_down(struct s2io_nic * sp)
6868 do_s2io_card_down(sp, 1);
/*
 * s2io_card_up - bring the adapter to operational state: init HW
 * registers, fill all Rx rings, restore the receive mode, start the NIC,
 * register the ISR, arm the alarm timer/tasklet and enable interrupts.
 * NOTE(review): error-path returns, s2io_reset calls and closing braces
 * are elided in this listing.
 */
6871 static int s2io_card_up(struct s2io_nic * sp)
6874 struct mac_info *mac_control;
6875 struct config_param *config;
6876 struct net_device *dev = (struct net_device *) sp->dev;
6879 /* Initialize the H/W I/O registers */
6880 if (init_nic(sp) != 0) {
6881 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
6888 * Initializing the Rx buffers. For now we are considering only 1
6889 * Rx ring and initializing buffers into 30 Rx blocks
6891 mac_control = &sp->mac_control;
6892 config = &sp->config;
6894 for (i = 0; i < config->rx_ring_num; i++) {
6895 if ((ret = fill_rx_buffers(sp, i))) {
6896 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
6899 free_rx_buffers(sp);
6902 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
6903 atomic_read(&sp->rx_bufs_left[i]));
6905 /* Maintain the state prior to the open */
6906 if (sp->promisc_flg)
6907 sp->promisc_flg = 0;
6908 if (sp->m_cast_flg) {
6910 sp->all_multi_pos= 0;
6913 /* Setting its receive mode */
6914 s2io_set_multicast(dev);
6917 /* Initialize max aggregatable pkts per session based on MTU */
6918 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
6919 /* Check if we can use(if specified) user provided value */
6920 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
6921 sp->lro_max_aggr_per_sess = lro_max_pkts;
6924 /* Enable Rx Traffic and interrupts on the NIC */
6925 if (start_nic(sp)) {
6926 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
6928 free_rx_buffers(sp);
6932 /* Add interrupt service routine */
6933 if (s2io_add_isr(sp) != 0) {
6934 if (sp->config.intr_type == MSI_X)
6937 free_rx_buffers(sp);
6941 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
6943 /* Enable tasklet for the device */
6944 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
6946 /* Enable select interrupts */
6947 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
6948 if (sp->config.intr_type != INTA)
6949 en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
6951 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
6952 interruptible |= TX_PIC_INTR;
6953 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
6956 set_bit(__S2IO_STATE_CARD_UP, &sp->state);
6961 * s2io_restart_nic - Resets the NIC.
6962 * @data : long pointer to the device private structure
6964 * This function is scheduled to be run by the s2io_tx_watchdog
6965 * function after 0.5 secs to reset the NIC. The idea is to reduce
6966 * the run time of the watch dog routine which is run holding a
/*
 * Work-queue handler scheduled by the Tx watchdog; brings the card back
 * up and wakes the Tx queue. NOTE(review): the rtnl locking, card-down
 * call and return lines are elided in this listing.
 */
6970 static void s2io_restart_nic(struct work_struct *work)
6972 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6973 struct net_device *dev = sp->dev;
6977 if (!netif_running(dev))
6981 if (s2io_card_up(sp)) {
6982 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6985 netif_wake_queue(dev);
6986 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6993 * s2io_tx_watchdog - Watchdog for transmit side.
6994 * @dev : Pointer to net device structure
6996 * This function is triggered if the Tx Queue is stopped
6997 * for a pre-defined amount of time when the Interface is still up.
6998 * If the Interface is jammed in such a situation, the hardware is
6999 * reset (by s2io_close) and restarted again (by s2io_open) to
7000 * overcome any problem that might have been caused in the hardware.
7005 static void s2io_tx_watchdog(struct net_device *dev)
7007 struct s2io_nic *sp = dev->priv;
/* Only act while the carrier is up: count the event and defer the actual
 * reset to the rst_timer_task work item (s2io_restart_nic). */
7009 if (netif_carrier_ok(dev)) {
7010 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
7011 schedule_work(&sp->rst_timer_task);
7012 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
7017 * rx_osm_handler - To perform some OS related operations on SKB.
7018 * @sp: private member of the device structure,pointer to s2io_nic structure.
7019 * @skb : the socket buffer pointer.
7020 * @len : length of the packet
7021 * @cksum : FCS checksum of the frame.
7022 * @ring_no : the ring from which this RxD was extracted.
7024 * This function is called by the Rx interrupt service routine to perform
7025 * some OS related operations on the SKB before passing it to the upper
7026 * layers. It mainly checks if the checksum is OK, if so adds it to the
7027 * SKBs cksum variable, increments the Rx packet count and passes the SKB
7028 * to the upper layer. If the checksum is wrong, it increments the Rx
7029 * packet error count, frees the SKB and returns error.
7031 * SUCCESS on success and -1 on failure.
7033 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7035 struct s2io_nic *sp = ring_data->nic;
7036 struct net_device *dev = (struct net_device *) sp->dev;
7037 struct sk_buff *skb = (struct sk_buff *)
7038 ((unsigned long) rxdp->Host_Control);
7039 int ring_no = ring_data->ring_no;
7040 u16 l3_csum, l4_csum;
7041 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7048 /* Check for parity error */
7050 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
7052 err_mask = err >> 48;
7055 sp->mac_control.stats_info->sw_stat.
7056 rx_parity_err_cnt++;
7060 sp->mac_control.stats_info->sw_stat.
7065 sp->mac_control.stats_info->sw_stat.
7066 rx_parity_abort_cnt++;
7070 sp->mac_control.stats_info->sw_stat.
7075 sp->mac_control.stats_info->sw_stat.
7080 sp->mac_control.stats_info->sw_stat.
7085 sp->mac_control.stats_info->sw_stat.
7086 rx_buf_size_err_cnt++;
7090 sp->mac_control.stats_info->sw_stat.
7091 rx_rxd_corrupt_cnt++;
7095 sp->mac_control.stats_info->sw_stat.
7100 * Drop the packet if bad transfer code. Exception being
7101 * 0x5, which could be due to unsupported IPv6 extension header.
7102 * In this case, we let stack handle the packet.
7103 * Note that in this case, since checksum will be incorrect,
7104 * stack will validate the same.
7106 if (err_mask != 0x5) {
7107 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7108 dev->name, err_mask);
7109 sp->stats.rx_crc_errors++;
7110 sp->mac_control.stats_info->sw_stat.mem_freed
7113 atomic_dec(&sp->rx_bufs_left[ring_no]);
7114 rxdp->Host_Control = 0;
7119 /* Updating statistics */
7120 sp->stats.rx_packets++;
7121 rxdp->Host_Control = 0;
7122 if (sp->rxd_mode == RXD_MODE_1) {
7123 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7125 sp->stats.rx_bytes += len;
7128 } else if (sp->rxd_mode == RXD_MODE_3B) {
7129 int get_block = ring_data->rx_curr_get_info.block_index;
7130 int get_off = ring_data->rx_curr_get_info.offset;
7131 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7132 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7133 unsigned char *buff = skb_push(skb, buf0_len);
7135 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7136 sp->stats.rx_bytes += buf0_len + buf2_len;
7137 memcpy(buff, ba->ba_0, buf0_len);
7138 skb_put(skb, buf2_len);
7141 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
7142 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7144 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7145 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7146 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7148 * NIC verifies if the Checksum of the received
7149 * frame is Ok or not and accordingly returns
7150 * a flag in the RxD.
7152 skb->ip_summed = CHECKSUM_UNNECESSARY;
7158 ret = s2io_club_tcp_session(skb->data, &tcp,
7159 &tcp_len, &lro, rxdp, sp);
7161 case 3: /* Begin anew */
7164 case 1: /* Aggregate */
7166 lro_append_pkt(sp, lro,
7170 case 4: /* Flush session */
7172 lro_append_pkt(sp, lro,
7174 queue_rx_frame(lro->parent);
7175 clear_lro_session(lro);
7176 sp->mac_control.stats_info->
7177 sw_stat.flush_max_pkts++;
7180 case 2: /* Flush both */
7181 lro->parent->data_len =
7183 sp->mac_control.stats_info->
7184 sw_stat.sending_both++;
7185 queue_rx_frame(lro->parent);
7186 clear_lro_session(lro);
7188 case 0: /* sessions exceeded */
7189 case -1: /* non-TCP or not
7193 * First pkt in session not
7194 * L3/L4 aggregatable
7199 "%s: Samadhana!!\n",
7206 * Packet with erroneous checksum, let the
7207 * upper layers deal with it.
7209 skb->ip_summed = CHECKSUM_NONE;
7212 skb->ip_summed = CHECKSUM_NONE;
7214 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
7216 skb->protocol = eth_type_trans(skb, dev);
7217 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
7219 /* Queueing the vlan frame to the upper layer */
7221 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
7222 RXD_GET_VLAN_TAG(rxdp->Control_2));
7224 vlan_hwaccel_rx(skb, sp->vlgrp,
7225 RXD_GET_VLAN_TAG(rxdp->Control_2));
7228 netif_receive_skb(skb);
7234 queue_rx_frame(skb);
7236 dev->last_rx = jiffies;
7238 atomic_dec(&sp->rx_bufs_left[ring_no]);
7243 * s2io_link - stops/starts the Tx queue.
7244 * @sp : private member of the device structure, which is a pointer to the
7245 * s2io_nic structure.
7246 * @link : indicates whether link is UP/DOWN.
7248 * This function stops/starts the Tx queue depending on whether the link
7249 * status of the NIC is down or up. This is called by the Alarm
7250 * interrupt handler whenever a link change interrupt comes up.
7255 static void s2io_link(struct s2io_nic * sp, int link)
7257 struct net_device *dev = (struct net_device *) sp->dev;
/* Act only on actual transitions; repeated notifications of the same
 * state are ignored.  On each transition the time spent in the previous
 * state (jiffies - start_time) is recorded in the sw stats. */
7259 if (link != sp->last_link_state) {
7260 if (link == LINK_DOWN) {
7261 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7262 netif_carrier_off(dev);
7263 if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7264 sp->mac_control.stats_info->sw_stat.link_up_time =
7265 jiffies - sp->start_time;
7266 sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7268 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7269 if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7270 sp->mac_control.stats_info->sw_stat.link_down_time =
7271 jiffies - sp->start_time;
7272 sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7273 netif_carrier_on(dev);
7276 sp->last_link_state = link;
7277 sp->start_time = jiffies;
7281 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7282 * @sp : private member of the device structure, which is a pointer to the
7283 * s2io_nic structure.
7285 * This function initializes a few of the PCI and PCI-X configuration registers
7286 * with recommended values.
7291 static void s2io_init_pci(struct s2io_nic * sp)
7293 u16 pci_cmd = 0, pcix_cmd = 0;
7295 /* Enable Data Parity Error Recovery in PCI-X command register. */
7296 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7298 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/* Read back after write — config writes are posted, the readback
 * flushes them. */
7300 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7303 /* Set the PErr Response bit in PCI command register. */
7304 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7305 pci_write_config_word(sp->pdev, PCI_COMMAND,
7306 (pci_cmd | PCI_COMMAND_PARITY));
7307 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
/*
 * s2io_verify_parm - Sanity-check module load parameters.
 * @pdev: the PCI device, used to detect Xframe I vs Xframe II (Herc).
 * @dev_intr_type: in/out interrupt type; forced to INTA when the
 *                 requested type is invalid or unsupported by the card.
 *
 * Out-of-range fifo/ring counts and ring modes are clamped to defaults
 * with a warning rather than failing the load.
 */
7310 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
7312 if ( tx_fifo_num > 8) {
7313 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
7315 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
7318 if ( rx_ring_num > 8) {
7319 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
7321 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
7324 if (*dev_intr_type != INTA)
7327 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7328 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7329 "Defaulting to INTA\n");
7330 *dev_intr_type = INTA;
/* MSI-X is only available on Herc (Xframe II) devices. */
7333 if ((*dev_intr_type == MSI_X) &&
7334 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7335 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7336 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7337 "Defaulting to INTA\n");
7338 *dev_intr_type = INTA;
7341 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7342 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7343 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7350 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7351 * or Traffic class respectively.
7352 * @nic: device private variable
7353 * Description: The function configures the receive steering to
7354 * desired receive ring.
7355 * Return Value: SUCCESS on success and
7356 * '-1' on failure (endian settings incorrect).
7358 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7360 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7361 register u64 val64 = 0;
/* DSCP codepoints are 6 bits wide, so anything above 63 is invalid. */
7363 if (ds_codepoint > 63)
7366 val64 = RTS_DS_MEM_DATA(ring);
7367 writeq(val64, &bar0->rts_ds_mem_data);
7369 val64 = RTS_DS_MEM_CTRL_WE |
7370 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7371 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7373 writeq(val64, &bar0->rts_ds_mem_ctrl);
/* Poll until the adapter acknowledges the steering-table write. */
7375 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7376 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7381 * s2io_init_nic - Initialization of the adapter .
7382 * @pdev : structure containing the PCI related information of the device.
7383 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7385 * The function initializes an adapter identified by the pci_dev structure.
7386 * All OS related initialization including memory and device structure and
7387 * initialization of the device private variable is done. Also the swapper
7388 * control register is initialized to enable read and write into the I/O
7389 * registers of the device.
7391 * returns 0 on success and negative on failure.
7394 static int __devinit
7395 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7397 struct s2io_nic *sp;
7398 struct net_device *dev;
7400 int dma_flag = FALSE;
7401 u32 mac_up, mac_down;
7402 u64 val64 = 0, tmp64 = 0;
7403 struct XENA_dev_config __iomem *bar0 = NULL;
7405 struct mac_info *mac_control;
7406 struct config_param *config;
7408 u8 dev_intr_type = intr_type;
7409 DECLARE_MAC_BUF(mac);
/* Validate module parameters, then enable the device and negotiate a
 * DMA mask (prefer 64-bit, fall back to 32-bit, else bail out). */
7411 if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
7414 if ((ret = pci_enable_device(pdev))) {
7416 "s2io_init_nic: pci_enable_device failed\n");
7420 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7421 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7423 if (pci_set_consistent_dma_mask
7424 (pdev, DMA_64BIT_MASK)) {
7426 "Unable to obtain 64bit DMA for \
7427 consistent allocations\n");
7428 pci_disable_device(pdev);
7431 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7432 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7434 pci_disable_device(pdev);
7437 if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7438 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
7439 pci_disable_device(pdev);
7443 dev = alloc_etherdev(sizeof(struct s2io_nic));
7445 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7446 pci_disable_device(pdev);
7447 pci_release_regions(pdev);
7451 pci_set_master(pdev);
7452 pci_set_drvdata(pdev, dev);
7453 SET_NETDEV_DEV(dev, &pdev->dev);
7455 /* Private member variable initialized to s2io NIC structure */
7457 memset(sp, 0, sizeof(struct s2io_nic));
7460 sp->high_dma_flag = dma_flag;
7461 sp->device_enabled_once = FALSE;
7462 if (rx_ring_mode == 1)
7463 sp->rxd_mode = RXD_MODE_1;
7464 if (rx_ring_mode == 2)
7465 sp->rxd_mode = RXD_MODE_3B;
7467 sp->config.intr_type = dev_intr_type;
/* Herc (Xframe II) is identified purely by PCI device id. */
7469 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7470 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7471 sp->device_type = XFRAME_II_DEVICE;
7473 sp->device_type = XFRAME_I_DEVICE;
7477 /* Initialize some PCI/PCI-X fields of the NIC. */
7481 * Setting the device configuration parameters.
7482 * Most of these parameters can be specified by the user during
7483 * module insertion as they are module loadable parameters. If
7484 * these parameters are not specified during load time, they
7485 * are initialized with default values.
7487 mac_control = &sp->mac_control;
7488 config = &sp->config;
7490 config->napi = napi;
7492 /* Tx side parameters. */
7493 config->tx_fifo_num = tx_fifo_num;
7494 for (i = 0; i < MAX_TX_FIFOS; i++) {
7495 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7496 config->tx_cfg[i].fifo_priority = i;
7499 /* mapping the QoS priority to the configured fifos */
7500 for (i = 0; i < MAX_TX_FIFOS; i++)
7501 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
7503 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7504 for (i = 0; i < config->tx_fifo_num; i++) {
7505 config->tx_cfg[i].f_no_snoop =
7506 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
/* Short fifos (<65 descriptors) use per-list interrupts instead of
 * utilization-based ones. */
7507 if (config->tx_cfg[i].fifo_len < 65) {
7508 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7512 /* + 2 because one Txd for skb->data and one Txd for UFO */
7513 config->max_txds = MAX_SKB_FRAGS + 2;
7515 /* Rx side parameters. */
7516 config->rx_ring_num = rx_ring_num;
7517 for (i = 0; i < MAX_RX_RINGS; i++) {
7518 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7519 (rxd_count[sp->rxd_mode] + 1);
7520 config->rx_cfg[i].ring_priority = i;
7523 for (i = 0; i < rx_ring_num; i++) {
7524 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7525 config->rx_cfg[i].f_no_snoop =
7526 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7529 /* Setting Mac Control parameters */
7530 mac_control->rmac_pause_time = rmac_pause_time;
7531 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7532 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7535 /* Initialize Ring buffer parameters. */
7536 for (i = 0; i < config->rx_ring_num; i++)
7537 atomic_set(&sp->rx_bufs_left[i], 0);
7539 /* initialize the shared memory used by the NIC and the host */
7540 if (init_shared_mem(sp)) {
7541 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7544 goto mem_alloc_failed;
/* Map BAR0 (registers) and BAR2 (Tx FIFO doorbells). */
7547 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7548 pci_resource_len(pdev, 0));
7550 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7553 goto bar0_remap_failed;
7556 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7557 pci_resource_len(pdev, 2));
7559 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7562 goto bar1_remap_failed;
7565 dev->irq = pdev->irq;
7566 dev->base_addr = (unsigned long) sp->bar0;
7568 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7569 for (j = 0; j < MAX_TX_FIFOS; j++) {
7570 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7571 (sp->bar1 + (j * 0x00020000));
7574 /* Driver entry points */
7575 dev->open = &s2io_open;
7576 dev->stop = &s2io_close;
7577 dev->hard_start_xmit = &s2io_xmit;
7578 dev->get_stats = &s2io_get_stats;
7579 dev->set_multicast_list = &s2io_set_multicast;
7580 dev->do_ioctl = &s2io_ioctl;
7581 dev->change_mtu = &s2io_change_mtu;
7582 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7583 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7584 dev->vlan_rx_register = s2io_vlan_rx_register;
7587 * will use eth_mac_addr() for dev->set_mac_address
7588 * mac address will be set every time dev->open() is called
7590 netif_napi_add(dev, &sp->napi, s2io_poll, 32);
7592 #ifdef CONFIG_NET_POLL_CONTROLLER
7593 dev->poll_controller = s2io_netpoll;
7596 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7597 if (sp->high_dma_flag == TRUE)
7598 dev->features |= NETIF_F_HIGHDMA;
7599 dev->features |= NETIF_F_TSO;
7600 dev->features |= NETIF_F_TSO6;
/* UFO is only offered on Xframe II and only when the module parameter
 * asked for it. */
7601 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
7602 dev->features |= NETIF_F_UFO;
7603 dev->features |= NETIF_F_HW_CSUM;
7606 dev->tx_timeout = &s2io_tx_watchdog;
7607 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7608 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7609 INIT_WORK(&sp->set_link_task, s2io_set_link);
7611 pci_save_state(sp->pdev);
7613 /* Setting swapper control on the NIC, for proper reset operation */
7614 if (s2io_set_swapper(sp)) {
7615 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7618 goto set_swap_failed;
7621 /* Verify if the Herc works on the slot its placed into */
7622 if (sp->device_type & XFRAME_II_DEVICE) {
7623 mode = s2io_verify_pci_mode(sp);
7625 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7626 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7628 goto set_swap_failed;
7632 /* Not needed for Herc */
7633 if (sp->device_type & XFRAME_I_DEVICE) {
7635 * Fix for all "FFs" MAC address problems observed on
7638 fix_mac_address(sp);
7643 * MAC address initialization.
7644 * For now only one mac address will be read and used.
7647 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7648 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7649 writeq(val64, &bar0->rmac_addr_cmd_mem);
7650 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7651 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
7652 tmp64 = readq(&bar0->rmac_addr_data0_mem);
7653 mac_down = (u32) tmp64;
7654 mac_up = (u32) (tmp64 >> 32);
/* Unpack the 48-bit MAC from the two 32-bit register halves. */
7656 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7657 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7658 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7659 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7660 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7661 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7663 /* Set the factory defined MAC address initially */
7664 dev->addr_len = ETH_ALEN;
7665 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7667 /* Store the values of the MSIX table in the s2io_nic structure */
7668 store_xmsi_data(sp);
7669 /* reset Nic and bring it to known state */
7673 * Initialize the tasklet status and link state flags
7674 * and the card state parameter
7676 sp->tasklet_status = 0;
7679 /* Initialize spinlocks */
7680 spin_lock_init(&sp->tx_lock);
7683 spin_lock_init(&sp->put_lock);
7684 spin_lock_init(&sp->rx_lock);
7687 * SXE-002: Configure link and activity LED to init state
7690 subid = sp->pdev->subsystem_device;
7691 if ((subid & 0xFF) >= 0x07) {
7692 val64 = readq(&bar0->gpio_control);
7693 val64 |= 0x0000800000000000ULL;
7694 writeq(val64, &bar0->gpio_control);
7695 val64 = 0x0411040400000000ULL;
7696 writeq(val64, (void __iomem *) bar0 + 0x2700);
7697 val64 = readq(&bar0->gpio_control);
7700 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
7702 if (register_netdev(dev)) {
7703 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7705 goto register_failed;
7708 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
7709 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7710 sp->product_name, pdev->revision);
7711 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7712 s2io_driver_version);
7713 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
7714 dev->name, print_mac(mac, dev->dev_addr));
7715 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
7716 if (sp->device_type & XFRAME_II_DEVICE) {
7717 mode = s2io_print_pci_mode(sp);
7719 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7721 unregister_netdev(dev);
7722 goto set_swap_failed;
7725 switch(sp->rxd_mode) {
7727 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7731 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7737 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7738 switch(sp->config.intr_type) {
7740 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7743 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7747 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7750 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7751 " enabled\n", dev->name);
7752 /* Initialize device name */
7753 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7755 /* Initialize bimodal Interrupts */
7756 sp->config.bimodal = bimodal;
7757 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
7758 sp->config.bimodal = 0;
7759 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
7764 * Make Link state as off at this point, when the Link change
7765 * interrupt comes the state will be automatically changed to
7768 netif_carrier_off(dev);
/* Error unwind labels: release everything acquired above in reverse
 * order. */
7779 free_shared_mem(sp);
7780 pci_disable_device(pdev);
7781 pci_release_regions(pdev);
7782 pci_set_drvdata(pdev, NULL);
7789 * s2io_rem_nic - Free the PCI device
7790 * @pdev: structure containing the PCI related information of the device.
7791 * Description: This function is called by the Pci subsystem to release a
7792 * PCI device and free up all resource held up by the device. This could
7793 * be in response to a Hot plug event or when the driver is to be removed
7797 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7799 struct net_device *dev =
7800 (struct net_device *) pci_get_drvdata(pdev);
7801 struct s2io_nic *sp;
7804 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
/* Make sure any reset/link work items queued by this device have run
 * before the netdev goes away. */
7808 flush_scheduled_work();
7811 unregister_netdev(dev);
7813 free_shared_mem(sp);
7816 pci_release_regions(pdev);
7817 pci_set_drvdata(pdev, NULL);
7819 pci_disable_device(pdev);
7823 * s2io_starter - Entry point for the driver
7824 * Description: This function is the entry point for the driver. It verifies
7825 * the module loadable parameters and initializes PCI configuration space.
7828 int __init s2io_starter(void)
7830 return pci_register_driver(&s2io_driver);
7834 * s2io_closer - Cleanup routine for the driver
7835 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
7838 static __exit void s2io_closer(void)
7840 pci_unregister_driver(&s2io_driver);
7841 DBG_PRINT(INIT_DBG, "cleanup done\n");
/* Register the PCI driver on module load and unregister it on unload. */
7844 module_init(s2io_starter);
7845 module_exit(s2io_closer);
/*
 * check_L2_lro_capable - Decide whether a frame's L2 framing allows LRO.
 * @buffer: start of the received frame.
 * @ip/@tcp: out-parameters set to the located IP and TCP headers.
 * @rxdp: the RxD, consulted for the hardware frame-type bits.
 *
 * Only TCP frames with DIX (Ethernet II) framing are considered
 * mergeable; LLC/SNAP framing is rejected.
 */
7847 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7848 struct tcphdr **tcp, struct RxD_t *rxdp)
/* Bits 37-39 of Control_1 encode the L2 frame type. */
7851 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7853 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7854 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7860 * By default the VLAN field in the MAC is stripped by the card, if this
7861 * feature is turned off in rx_pa_cfg register, then the ip_off field
7862 * has to be shifted by a further 2 bytes
7865 case 0: /* DIX type */
7866 case 4: /* DIX type with VLAN */
7867 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7869 /* LLC, SNAP etc are considered non-mergeable */
7874 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
/* NOTE(review): ihl is in 32-bit words — expect a <<= 2 scaling nearby
 * before it is used as a byte offset; confirm against full source. */
7875 ip_len = (u8)((*ip)->ihl);
7877 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
/*
 * check_for_socket_match - Compare an LRO session's 4-tuple with a packet.
 *
 * Matches source/destination IP addresses and TCP ports against the
 * session's stored headers; non-zero means "no match" (caller continues
 * scanning the session table).
 */
7882 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7885 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7886 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7887 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7892 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7894 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
/*
 * initiate_new_session - Seed a fresh LRO session from its first packet.
 * @lro: the free session slot being initialised.
 * @l2h: start of the L2 header (the packet becomes the session parent).
 * @ip/@tcp: parsed headers of the first packet.
 * @tcp_pyld_len: TCP payload length of the first packet.
 */
7897 static void initiate_new_session(struct lro *lro, u8 *l2h,
7898 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7900 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
/* Next expected sequence number = this packet's seq + its payload. */
7904 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7905 lro->tcp_ack = ntohl(tcp->ack_seq);
7907 lro->total_len = ntohs(ip->tot_len);
7910 * check if we saw TCP timestamp. Other consistency checks have
7911 * already been done.
7913 if (tcp->doff == 8) {
/* doff == 8 means exactly one timestamp option: tsval at word 1,
 * tsecr at word 2 past the fixed header. */
7915 ptr = (u32 *)(tcp+1);
7917 lro->cur_tsval = *(ptr+1);
7918 lro->cur_tsecr = *(ptr+2);
/*
 * update_L3L4_header - Rewrite the parent packet's headers before flush.
 *
 * Patches the aggregated total length (and recomputed IP checksum), the
 * latest ACK/window, and the timestamp echo reply into the session's
 * parent headers, then updates the aggregation statistics.
 */
7923 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7925 struct iphdr *ip = lro->iph;
7926 struct tcphdr *tcp = lro->tcph;
7928 struct stat_block *statinfo = sp->mac_control.stats_info;
7929 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7931 /* Update L3 header */
7932 ip->tot_len = htons(lro->total_len);
7934 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7937 /* Update L4 header */
7938 tcp->ack_seq = lro->tcp_ack;
7939 tcp->window = lro->window;
7941 /* Update tsecr field if this session has timestamps enabled */
7943 u32 *ptr = (u32 *)(tcp + 1);
7944 *(ptr+2) = lro->cur_tsecr;
7947 /* Update counters required for calculation of
7948 * average no. of packets aggregated.
7950 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7951 statinfo->sw_stat.num_aggregations++;
/*
 * aggregate_new_rx - Fold an in-order packet into an existing LRO session.
 *
 * Extends the session's length accounting and expected sequence number,
 * and refreshes ACK/window/timestamp state from the new packet.
 */
7954 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7955 struct tcphdr *tcp, u32 l4_pyld)
7957 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7958 lro->total_len += l4_pyld;
7959 lro->frags_len += l4_pyld;
7960 lro->tcp_next_seq += l4_pyld;
7963 /* Update ack seq no. and window advertisement (from this pkt) in LRO object */
7964 lro->tcp_ack = tcp->ack_seq;
7965 lro->window = tcp->window;
7969 /* Update tsecr and tsval from this packet */
7970 ptr = (u32 *) (tcp + 1);
7971 lro->cur_tsval = *(ptr + 1);
7972 lro->cur_tsecr = *(ptr + 2);
/*
 * verify_l3_l4_lro_capable - L3/L4 checks for whether a packet may be merged.
 * @l_lro: existing session for monotonic-timestamp checks, or NULL when
 *         evaluating the first packet of a would-be new session.
 *
 * Rejects pure ACKs/runts, IP options, ECN CE marking, any TCP control
 * flag other than ACK, and anything but a single well-formed timestamp
 * option.  Non-zero return means "do not aggregate".
 */
7976 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7977 struct tcphdr *tcp, u32 tcp_pyld_len)
7981 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7983 if (!tcp_pyld_len) {
7984 /* Runt frame or a pure ack */
7988 if (ip->ihl != 5) /* IP has options */
7991 /* If we see CE codepoint in IP header, packet is not mergeable */
7992 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7995 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7996 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
7997 tcp->ece || tcp->cwr || !tcp->ack) {
7999 * Currently recognize only the ack control word and
8000 * any other control field being set would result in
8001 * flushing the LRO session
8007 * Allow only one TCP timestamp option. Don't aggregate if
8008 * any other options are detected.
8010 if (tcp->doff != 5 && tcp->doff != 8)
8013 if (tcp->doff == 8) {
8014 ptr = (u8 *)(tcp + 1);
8015 while (*ptr == TCPOPT_NOP)
8017 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8020 /* Ensure timestamp value increases monotonically */
8022 if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
8025 /* timestamp echo reply should be non-zero */
8026 if (*((u32 *)(ptr+6)) == 0)
/*
 * s2io_club_tcp_session - Classify a TCP packet against the LRO sessions.
 *
 * Return codes consumed by rx_osm_handler():
 *   3  begin a new session with this packet,
 *   1  aggregate into the matched session,
 *   4  aggregate then flush (session reached lro_max_aggr_per_sess),
 *   2  flush the existing session and send this packet separately
 *      (out-of-order or not L3/L4 mergeable),
 *   0  all session slots in use,
 *  -1  frame not TCP / not aggregatable at all.
 */
8034 s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
8035 struct RxD_t *rxdp, struct s2io_nic *sp)
8038 struct tcphdr *tcph;
8041 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8043 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
8044 ip->saddr, ip->daddr);
8049 tcph = (struct tcphdr *)*tcp;
8050 *tcp_len = get_l4_pyld_length(ip, tcph);
/* First pass: look for an in-use session with a matching 4-tuple. */
8051 for (i=0; i<MAX_LRO_SESSIONS; i++) {
8052 struct lro *l_lro = &sp->lro0_n[i];
8053 if (l_lro->in_use) {
8054 if (check_for_socket_match(l_lro, ip, tcph))
8056 /* Sock pair matched */
8059 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8060 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
8061 "0x%x, actual 0x%x\n", __FUNCTION__,
8062 (*lro)->tcp_next_seq,
8065 sp->mac_control.stats_info->
8066 sw_stat.outof_sequence_pkts++;
8071 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
8072 ret = 1; /* Aggregate */
8074 ret = 2; /* Flush both */
8080 /* Before searching for available LRO objects,
8081 * check if the pkt is L3/L4 aggregatable. If not
8082 * don't create new LRO session. Just send this
8085 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
/* Second pass: claim the first free session slot for a new session. */
8089 for (i=0; i<MAX_LRO_SESSIONS; i++) {
8090 struct lro *l_lro = &sp->lro0_n[i];
8091 if (!(l_lro->in_use)) {
8093 ret = 3; /* Begin anew */
8099 if (ret == 0) { /* sessions exceeded */
8100 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
8108 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
8111 update_L3L4_header(sp, *lro);
8114 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
/* Upgrade "aggregate" to "flush" once the per-session cap is hit. */
8115 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8116 update_L3L4_header(sp, *lro);
8117 ret = 4; /* Flush the LRO */
8121 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
8129 static void clear_lro_session(struct lro *lro)
8131 static u16 lro_struct_size = sizeof(struct lro);
8133 memset(lro, 0, lro_struct_size);
/*
 * queue_rx_frame - Hand a completed skb to the network stack.
 *
 * Sets the protocol from the Ethernet header and delivers the frame
 * upward (netif_receive_skb on the visible path).
 */
8136 static void queue_rx_frame(struct sk_buff *skb)
8138 struct net_device *dev = skb->dev;
8140 skb->protocol = eth_type_trans(skb, dev);
8142 netif_receive_skb(skb);
/*
 * lro_append_pkt - Chain an aggregated packet onto the session parent skb.
 *
 * Strips everything but the TCP payload from @skb (skb_pull), then links
 * it onto the parent's frag_list (first fragment) or after the previous
 * fragment, keeping len/data_len/truesize accounting on the parent.
 */
8147 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8148 struct sk_buff *skb,
8151 struct sk_buff *first = lro->parent;
8153 first->len += tcp_len;
8154 first->data_len = lro->frags_len;
/* Drop L2/L3/L4 headers so only the TCP payload is appended. */
8155 skb_pull(skb, (skb->len - tcp_len));
8156 if (skb_shinfo(first)->frag_list)
8157 lro->last_frag->next = skb;
8159 skb_shinfo(first)->frag_list = skb;
8160 first->truesize += skb->truesize;
8161 lro->last_frag = skb;
8162 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8167 * s2io_io_error_detected - called when PCI error is detected
8168 * @pdev: Pointer to PCI device
8169 * @state: The current pci connection state
8171 * This function is called after a PCI bus error affecting
8172 * this device has been detected.
8174 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8175 pci_channel_state_t state)
8177 struct net_device *netdev = pci_get_drvdata(pdev);
8178 struct s2io_nic *sp = netdev->priv;
8180 netif_device_detach(netdev);
8182 if (netif_running(netdev)) {
8183 /* Bring down the card, while avoiding PCI I/O */
8184 do_s2io_card_down(sp, 0);
8186 pci_disable_device(pdev);
/* Ask the PCI error-recovery core to reset the slot; recovery then
 * continues in s2io_io_slot_reset()/s2io_io_resume(). */
8188 return PCI_ERS_RESULT_NEED_RESET;
8192 * s2io_io_slot_reset - called after the pci bus has been reset.
8193 * @pdev: Pointer to PCI device
8195 * Restart the card from scratch, as if from a cold-boot.
8196 * At this point, the card has experienced a hard reset,
8197 * followed by fixups by BIOS, and has its config space
8198 * set up identically to what it was at cold boot.
8200 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8202 struct net_device *netdev = pci_get_drvdata(pdev);
8203 struct s2io_nic *sp = netdev->priv;
8205 if (pci_enable_device(pdev)) {
8206 printk(KERN_ERR "s2io: "
8207 "Cannot re-enable PCI device after reset.\n");
8208 return PCI_ERS_RESULT_DISCONNECT;
8211 pci_set_master(pdev);
8214 return PCI_ERS_RESULT_RECOVERED;
8218 * s2io_io_resume - called when traffic can start flowing again.
8219 * @pdev: Pointer to PCI device
8221 * This callback is called when the error recovery driver tells
8222 * us that its OK to resume normal operation.
8224 static void s2io_io_resume(struct pci_dev *pdev)
8226 struct net_device *netdev = pci_get_drvdata(pdev);
8227 struct s2io_nic *sp = netdev->priv;
8229 if (netif_running(netdev)) {
8230 if (s2io_card_up(sp)) {
8231 printk(KERN_ERR "s2io: "
8232 "Can't bring device back up after reset.\n");
8236 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8238 printk(KERN_ERR "s2io: "
8239 "Can't resetore mac addr after reset.\n");
8244 netif_device_attach(netdev);
8245 netif_wake_queue(netdev);