Pull bugzilla-9535 into release branch
[pandora-kernel.git] / drivers / net / s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2007 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
 *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
41  * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  * napi: This parameter used to enable/disable NAPI (polling Rx)
46  *     Possible values '1' for enable and '0' for disable. Default is '1'
47  * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48  *      Possible values '1' for enable and '0' for disable. Default is '0'
49  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50  *                 Possible values '1' for enable , '0' for disable.
51  *                 Default is '2' - which means disable in promisc mode
52  *                 and enable in non-promiscuous mode.
53  ************************************************************************/
54
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
73 #include <linux/ip.h>
74 #include <linux/tcp.h>
75 #include <net/tcp.h>
76
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
79 #include <asm/io.h>
80 #include <asm/div64.h>
81 #include <asm/irq.h>
82
83 /* local include */
84 #include "s2io.h"
85 #include "s2io-regs.h"
86
#define DRV_VERSION "2.0.26.10"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

/* RxD geometry per rxd_mode (index 0 = 1-buffer mode, 1 = 3B mode):
 * descriptor size in bytes and usable descriptors per Rx block.
 * NOTE(review): sizes/counts come from the Xena RxD layouts — confirm
 * against struct RxD1/RxD3 in s2io.h. */
static int rxd_size[2] = {32,48};
static int rxd_count[2] = {127,85};
95
96 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
97 {
98         int ret;
99
100         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
102
103         return ret;
104 }
105
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
        (dev_type == XFRAME_I_DEVICE) ?                 \
                ((((subid >= 0x600B) && (subid <= 0x600D)) || \
                 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0

/* Link is up when neither the remote nor the local RMAC fault bit is
 * set in the adapter-status register value. */
#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
                                      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically claim the tasklet; non-zero means it was already in use. */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/* Rx buffer replenish urgency levels returned by rx_buffer_level(). */
#define PANIC   1
#define LOW     2
121 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
122 {
123         struct mac_info *mac_control;
124
125         mac_control = &sp->mac_control;
126         if (rxb_size <= rxd_count[sp->rxd_mode])
127                 return PANIC;
128         else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
129                 return  LOW;
130         return 0;
131 }
132
/* Returns non-zero when the adapter has been brought up (CARD_UP state bit). */
static inline int is_s2io_card_up(const struct s2io_nic * sp)
{
        return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}
137
/* Ethtool related variables and Macros. */
/* Labels for the ethtool self-test stages; order defines test numbering. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
        "Register test\t(offline)",
        "Eeprom test\t(offline)",
        "Link test\t(online)",
        "RLDRAM test\t(offline)",
        "BIST Test\t(offline)"
};
146
/* ethtool statistics names common to Xframe I and II. Entry order must
 * match the order values are copied out in the get_ethtool_stats path —
 * do not reorder. */
static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
        {"tmac_frms"},
        {"tmac_data_octets"},
        {"tmac_drop_frms"},
        {"tmac_mcst_frms"},
        {"tmac_bcst_frms"},
        {"tmac_pause_ctrl_frms"},
        {"tmac_ttl_octets"},
        {"tmac_ucst_frms"},
        {"tmac_nucst_frms"},
        {"tmac_any_err_frms"},
        {"tmac_ttl_less_fb_octets"},
        {"tmac_vld_ip_octets"},
        {"tmac_vld_ip"},
        {"tmac_drop_ip"},
        {"tmac_icmp"},
        {"tmac_rst_tcp"},
        {"tmac_tcp"},
        {"tmac_udp"},
        {"rmac_vld_frms"},
        {"rmac_data_octets"},
        {"rmac_fcs_err_frms"},
        {"rmac_drop_frms"},
        {"rmac_vld_mcst_frms"},
        {"rmac_vld_bcst_frms"},
        {"rmac_in_rng_len_err_frms"},
        {"rmac_out_rng_len_err_frms"},
        {"rmac_long_frms"},
        {"rmac_pause_ctrl_frms"},
        {"rmac_unsup_ctrl_frms"},
        {"rmac_ttl_octets"},
        {"rmac_accepted_ucst_frms"},
        {"rmac_accepted_nucst_frms"},
        {"rmac_discarded_frms"},
        {"rmac_drop_events"},
        {"rmac_ttl_less_fb_octets"},
        {"rmac_ttl_frms"},
        {"rmac_usized_frms"},
        {"rmac_osized_frms"},
        {"rmac_frag_frms"},
        {"rmac_jabber_frms"},
        {"rmac_ttl_64_frms"},
        {"rmac_ttl_65_127_frms"},
        {"rmac_ttl_128_255_frms"},
        {"rmac_ttl_256_511_frms"},
        {"rmac_ttl_512_1023_frms"},
        {"rmac_ttl_1024_1518_frms"},
        {"rmac_ip"},
        {"rmac_ip_octets"},
        {"rmac_hdr_err_ip"},
        {"rmac_drop_ip"},
        {"rmac_icmp"},
        {"rmac_tcp"},
        {"rmac_udp"},
        {"rmac_err_drp_udp"},
        {"rmac_xgmii_err_sym"},
        {"rmac_frms_q0"},
        {"rmac_frms_q1"},
        {"rmac_frms_q2"},
        {"rmac_frms_q3"},
        {"rmac_frms_q4"},
        {"rmac_frms_q5"},
        {"rmac_frms_q6"},
        {"rmac_frms_q7"},
        {"rmac_full_q0"},
        {"rmac_full_q1"},
        {"rmac_full_q2"},
        {"rmac_full_q3"},
        {"rmac_full_q4"},
        {"rmac_full_q5"},
        {"rmac_full_q6"},
        {"rmac_full_q7"},
        {"rmac_pause_cnt"},
        {"rmac_xgmii_data_err_cnt"},
        {"rmac_xgmii_ctrl_err_cnt"},
        {"rmac_accepted_ip"},
        {"rmac_err_tcp"},
        {"rd_req_cnt"},
        {"new_rd_req_cnt"},
        {"new_rd_req_rtry_cnt"},
        {"rd_rtry_cnt"},
        {"wr_rtry_rd_ack_cnt"},
        {"wr_req_cnt"},
        {"new_wr_req_cnt"},
        {"new_wr_req_rtry_cnt"},
        {"wr_rtry_cnt"},
        {"wr_disc_cnt"},
        {"rd_rtry_wr_ack_cnt"},
        {"txp_wr_cnt"},
        {"txd_rd_cnt"},
        {"txd_wr_cnt"},
        {"rxd_rd_cnt"},
        {"rxd_wr_cnt"},
        {"txf_rd_cnt"},
        {"rxf_wr_cnt"}
};
243
/* Additional ethtool statistics available only on Xframe II (enhanced
 * hardware stats block). Order is significant — do not reorder. */
static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
        {"rmac_ttl_1519_4095_frms"},
        {"rmac_ttl_4096_8191_frms"},
        {"rmac_ttl_8192_max_frms"},
        {"rmac_ttl_gt_max_frms"},
        {"rmac_osized_alt_frms"},
        {"rmac_jabber_alt_frms"},
        {"rmac_gt_max_alt_frms"},
        {"rmac_vlan_frms"},
        {"rmac_len_discard"},
        {"rmac_fcs_discard"},
        {"rmac_pf_discard"},
        {"rmac_da_discard"},
        {"rmac_red_discard"},
        {"rmac_rts_discard"},
        {"rmac_ingm_full_discard"},
        {"link_fault_cnt"}
};
262
/* Software (driver-maintained) statistics exported through ethtool.
 * Order is significant — it must match the copy-out order in the
 * get_ethtool_stats path. */
static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
        {"\n DRIVER STATISTICS"},
        {"single_bit_ecc_errs"},
        {"double_bit_ecc_errs"},
        {"parity_err_cnt"},
        {"serious_err_cnt"},
        {"soft_reset_cnt"},
        {"fifo_full_cnt"},
        {"ring_0_full_cnt"},
        {"ring_1_full_cnt"},
        {"ring_2_full_cnt"},
        {"ring_3_full_cnt"},
        {"ring_4_full_cnt"},
        {"ring_5_full_cnt"},
        {"ring_6_full_cnt"},
        {"ring_7_full_cnt"},
        {"alarm_transceiver_temp_high"},
        {"alarm_transceiver_temp_low"},
        {"alarm_laser_bias_current_high"},
        {"alarm_laser_bias_current_low"},
        {"alarm_laser_output_power_high"},
        {"alarm_laser_output_power_low"},
        {"warn_transceiver_temp_high"},
        {"warn_transceiver_temp_low"},
        {"warn_laser_bias_current_high"},
        {"warn_laser_bias_current_low"},
        {"warn_laser_output_power_high"},
        {"warn_laser_output_power_low"},
        {"lro_aggregated_pkts"},
        {"lro_flush_both_count"},
        {"lro_out_of_sequence_pkts"},
        {"lro_flush_due_to_max_pkts"},
        {"lro_avg_aggr_pkts"},
        {"mem_alloc_fail_cnt"},
        {"pci_map_fail_cnt"},
        {"watchdog_timer_cnt"},
        {"mem_allocated"},
        {"mem_freed"},
        {"link_up_cnt"},
        {"link_down_cnt"},
        {"link_up_time"},
        {"link_down_time"},
        {"tx_tcode_buf_abort_cnt"},
        {"tx_tcode_desc_abort_cnt"},
        {"tx_tcode_parity_err_cnt"},
        {"tx_tcode_link_loss_cnt"},
        {"tx_tcode_list_proc_err_cnt"},
        {"rx_tcode_parity_err_cnt"},
        {"rx_tcode_abort_cnt"},
        {"rx_tcode_parity_abort_cnt"},
        {"rx_tcode_rda_fail_cnt"},
        {"rx_tcode_unkn_prot_cnt"},
        {"rx_tcode_fcs_err_cnt"},
        {"rx_tcode_buf_size_err_cnt"},
        {"rx_tcode_rxd_corrupt_cnt"},
        {"rx_tcode_unkn_err_cnt"},
        {"tda_err_cnt"},
        {"pfc_err_cnt"},
        {"pcc_err_cnt"},
        {"tti_err_cnt"},
        {"tpa_err_cnt"},
        {"sm_err_cnt"},
        {"lso_err_cnt"},
        {"mac_tmac_err_cnt"},
        {"mac_rmac_err_cnt"},
        {"xgxs_txgxs_err_cnt"},
        {"xgxs_rxgxs_err_cnt"},
        {"rc_err_cnt"},
        {"prc_pcix_err_cnt"},
        {"rpa_err_cnt"},
        {"rda_err_cnt"},
        {"rti_err_cnt"},
        {"mc_err_cnt"}
};
337
/* Entry counts of the ethtool key tables above (parenthesized so they
 * compose safely in larger expressions). */
#define S2IO_XENA_STAT_LEN (sizeof(ethtool_xena_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_ENHANCED_STAT_LEN (sizeof(ethtool_enhanced_stats_keys) / \
                                        ETH_GSTRING_LEN)
#define S2IO_DRIVER_STAT_LEN (sizeof(ethtool_driver_stats_keys) / \
                                        ETH_GSTRING_LEN)

/* Xframe I exports hw + driver stats; Xframe II adds the enhanced set. */
#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN   (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN        (S2IO_TEST_LEN * ETH_GSTRING_LEN)

/*
 * Arm @timer with handler @handle and data @arg, firing @exp jiffies
 * from now. Wrapped in do { } while (0) so the multi-statement body
 * expands safely as a single statement (e.g. in un-braced if bodies).
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)                \
        do {                                                    \
                init_timer(&timer);                             \
                timer.function = handle;                        \
                timer.data = (unsigned long) arg;               \
                mod_timer(&timer, (jiffies + exp));             \
        } while (0)
358 /* copy mac addr to def_mac_addr array */
359 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
360 {
361         sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
362         sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
363         sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
364         sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
365         sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
366         sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
367 }
368 /* Add the vlan */
369 static void s2io_vlan_rx_register(struct net_device *dev,
370                                         struct vlan_group *grp)
371 {
372         struct s2io_nic *nic = dev->priv;
373         unsigned long flags;
374
375         spin_lock_irqsave(&nic->tx_lock, flags);
376         nic->vlgrp = grp;
377         spin_unlock_irqrestore(&nic->tx_lock, flags);
378 }
379
/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
static int vlan_strip_flag;

/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

/* Terminator for the DTX command streams below. */
#define END_SIGN        0x0
/* XAUI DTX command stream (address/data pairs) for Xframe II (Herc). */
static const u64 herc_act_dtx_cfg[] = {
        /* Set address */
        0x8000051536750000ULL, 0x80000515367500E0ULL,
        /* Write data */
        0x8000051536750004ULL, 0x80000515367500E4ULL,
        /* Set address */
        0x80010515003F0000ULL, 0x80010515003F00E0ULL,
        /* Write data */
        0x80010515003F0004ULL, 0x80010515003F00E4ULL,
        /* Set address */
        0x801205150D440000ULL, 0x801205150D4400E0ULL,
        /* Write data */
        0x801205150D440004ULL, 0x801205150D4400E4ULL,
        /* Set address */
        0x80020515F2100000ULL, 0x80020515F21000E0ULL,
        /* Write data */
        0x80020515F2100004ULL, 0x80020515F21000E4ULL,
        /* Done */
        END_SIGN
};

/* XAUI DTX command stream for the original Xframe I (Xena). */
static const u64 xena_dtx_cfg[] = {
        /* Set address */
        0x8000051500000000ULL, 0x80000515000000E0ULL,
        /* Write data */
        0x80000515D9350004ULL, 0x80000515D93500E4ULL,
        /* Set address */
        0x8001051500000000ULL, 0x80010515000000E0ULL,
        /* Write data */
        0x80010515001E0004ULL, 0x80010515001E00E4ULL,
        /* Set address */
        0x8002051500000000ULL, 0x80020515000000E0ULL,
        /* Write data */
        0x80020515F2100004ULL, 0x80020515F21000E4ULL,
        END_SIGN
};

/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.
 */
static const u64 fix_mac[] = {
        0x0060000000000000ULL, 0x0060600000000000ULL,
        0x0040600000000000ULL, 0x0000600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0000600000000000ULL,
        0x0040600000000000ULL, 0x0060600000000000ULL,
        END_SIGN
};
447
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, 1);
S2IO_PARM_INT(rx_ring_num, 1);


S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */
static unsigned int lro_enable;
module_param_named(lro, lro_enable, uint, 0);

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-FIFO Tx descriptor counts; FIFO 0 gets a larger default. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
/* Per-ring Rx block counts, all defaulting to SMALL_BLK_CNT. */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
/* Per-ring frame lengths; 0 means use the device default
 * (NOTE(review): confirm semantics against the rts_frm_len users). */
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
494
/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
         PCI_ANY_ID, PCI_ANY_ID},
        {0,}            /* terminating entry */
};
510
MODULE_DEVICE_TABLE(pci, s2io_tbl);

/* PCI error-recovery callbacks; handlers are defined later in this file. */
static struct pci_error_handlers s2io_err_handler = {
        .error_detected = s2io_io_error_detected,
        .slot_reset = s2io_io_slot_reset,
        .resume = s2io_io_resume,
};

/* PCI driver glue: binds the device table to probe/remove handlers. */
static struct pci_driver s2io_driver = {
      .name = "S2IO",
      .id_table = s2io_tbl,
      .probe = s2io_init_nic,
      .remove = __devexit_p(s2io_rem_nic),
      .err_handler = &s2io_err_handler,
};
526
/* A simplifier macro used both by init and free shared_mem Fns().
 * Pages needed to hold @len TxD lists at @per_each lists per page,
 * i.e. DIV_ROUND_UP(len, per_each). Arguments are fully parenthesized
 * so expression arguments (e.g. per_each == 2 * 2) expand correctly. */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
529
530 /**
531  * init_shared_mem - Allocation and Initialization of Memory
532  * @nic: Device private variable.
533  * Description: The function allocates all the memory areas shared
534  * between the NIC and the driver. This includes Tx descriptors,
535  * Rx descriptors and the statistics block.
536  */
537
538 static int init_shared_mem(struct s2io_nic *nic)
539 {
540         u32 size;
541         void *tmp_v_addr, *tmp_v_addr_next;
542         dma_addr_t tmp_p_addr, tmp_p_addr_next;
543         struct RxD_block *pre_rxd_blk = NULL;
544         int i, j, blk_cnt;
545         int lst_size, lst_per_page;
546         struct net_device *dev = nic->dev;
547         unsigned long tmp;
548         struct buffAdd *ba;
549
550         struct mac_info *mac_control;
551         struct config_param *config;
552         unsigned long long mem_allocated = 0;
553
554         mac_control = &nic->mac_control;
555         config = &nic->config;
556
557
558         /* Allocation and initialization of TXDLs in FIOFs */
559         size = 0;
560         for (i = 0; i < config->tx_fifo_num; i++) {
561                 size += config->tx_cfg[i].fifo_len;
562         }
563         if (size > MAX_AVAILABLE_TXDS) {
564                 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
565                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
566                 return -EINVAL;
567         }
568
569         lst_size = (sizeof(struct TxD) * config->max_txds);
570         lst_per_page = PAGE_SIZE / lst_size;
571
572         for (i = 0; i < config->tx_fifo_num; i++) {
573                 int fifo_len = config->tx_cfg[i].fifo_len;
574                 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
575                 mac_control->fifos[i].list_info = kzalloc(list_holder_size,
576                                                           GFP_KERNEL);
577                 if (!mac_control->fifos[i].list_info) {
578                         DBG_PRINT(INFO_DBG,
579                                   "Malloc failed for list_info\n");
580                         return -ENOMEM;
581                 }
582                 mem_allocated += list_holder_size;
583         }
584         for (i = 0; i < config->tx_fifo_num; i++) {
585                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
586                                                 lst_per_page);
587                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
588                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
589                     config->tx_cfg[i].fifo_len - 1;
590                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
591                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
592                     config->tx_cfg[i].fifo_len - 1;
593                 mac_control->fifos[i].fifo_no = i;
594                 mac_control->fifos[i].nic = nic;
595                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
596
597                 for (j = 0; j < page_num; j++) {
598                         int k = 0;
599                         dma_addr_t tmp_p;
600                         void *tmp_v;
601                         tmp_v = pci_alloc_consistent(nic->pdev,
602                                                      PAGE_SIZE, &tmp_p);
603                         if (!tmp_v) {
604                                 DBG_PRINT(INFO_DBG,
605                                           "pci_alloc_consistent ");
606                                 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
607                                 return -ENOMEM;
608                         }
609                         /* If we got a zero DMA address(can happen on
610                          * certain platforms like PPC), reallocate.
611                          * Store virtual address of page we don't want,
612                          * to be freed later.
613                          */
614                         if (!tmp_p) {
615                                 mac_control->zerodma_virt_addr = tmp_v;
616                                 DBG_PRINT(INIT_DBG,
617                                 "%s: Zero DMA address for TxDL. ", dev->name);
618                                 DBG_PRINT(INIT_DBG,
619                                 "Virtual address %p\n", tmp_v);
620                                 tmp_v = pci_alloc_consistent(nic->pdev,
621                                                      PAGE_SIZE, &tmp_p);
622                                 if (!tmp_v) {
623                                         DBG_PRINT(INFO_DBG,
624                                           "pci_alloc_consistent ");
625                                         DBG_PRINT(INFO_DBG, "failed for TxDL\n");
626                                         return -ENOMEM;
627                                 }
628                                 mem_allocated += PAGE_SIZE;
629                         }
630                         while (k < lst_per_page) {
631                                 int l = (j * lst_per_page) + k;
632                                 if (l == config->tx_cfg[i].fifo_len)
633                                         break;
634                                 mac_control->fifos[i].list_info[l].list_virt_addr =
635                                     tmp_v + (k * lst_size);
636                                 mac_control->fifos[i].list_info[l].list_phy_addr =
637                                     tmp_p + (k * lst_size);
638                                 k++;
639                         }
640                 }
641         }
642
643         nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
644         if (!nic->ufo_in_band_v)
645                 return -ENOMEM;
646          mem_allocated += (size * sizeof(u64));
647
648         /* Allocation and initialization of RXDs in Rings */
649         size = 0;
650         for (i = 0; i < config->rx_ring_num; i++) {
651                 if (config->rx_cfg[i].num_rxd %
652                     (rxd_count[nic->rxd_mode] + 1)) {
653                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
654                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
655                                   i);
656                         DBG_PRINT(ERR_DBG, "RxDs per Block");
657                         return FAILURE;
658                 }
659                 size += config->rx_cfg[i].num_rxd;
660                 mac_control->rings[i].block_count =
661                         config->rx_cfg[i].num_rxd /
662                         (rxd_count[nic->rxd_mode] + 1 );
663                 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
664                         mac_control->rings[i].block_count;
665         }
666         if (nic->rxd_mode == RXD_MODE_1)
667                 size = (size * (sizeof(struct RxD1)));
668         else
669                 size = (size * (sizeof(struct RxD3)));
670
671         for (i = 0; i < config->rx_ring_num; i++) {
672                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
673                 mac_control->rings[i].rx_curr_get_info.offset = 0;
674                 mac_control->rings[i].rx_curr_get_info.ring_len =
675                     config->rx_cfg[i].num_rxd - 1;
676                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
677                 mac_control->rings[i].rx_curr_put_info.offset = 0;
678                 mac_control->rings[i].rx_curr_put_info.ring_len =
679                     config->rx_cfg[i].num_rxd - 1;
680                 mac_control->rings[i].nic = nic;
681                 mac_control->rings[i].ring_no = i;
682
683                 blk_cnt = config->rx_cfg[i].num_rxd /
684                                 (rxd_count[nic->rxd_mode] + 1);
685                 /*  Allocating all the Rx blocks */
686                 for (j = 0; j < blk_cnt; j++) {
687                         struct rx_block_info *rx_blocks;
688                         int l;
689
690                         rx_blocks = &mac_control->rings[i].rx_blocks[j];
691                         size = SIZE_OF_BLOCK; //size is always page size
692                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
693                                                           &tmp_p_addr);
694                         if (tmp_v_addr == NULL) {
695                                 /*
696                                  * In case of failure, free_shared_mem()
697                                  * is called, which should free any
698                                  * memory that was alloced till the
699                                  * failure happened.
700                                  */
701                                 rx_blocks->block_virt_addr = tmp_v_addr;
702                                 return -ENOMEM;
703                         }
704                         mem_allocated += size;
705                         memset(tmp_v_addr, 0, size);
706                         rx_blocks->block_virt_addr = tmp_v_addr;
707                         rx_blocks->block_dma_addr = tmp_p_addr;
708                         rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
709                                                   rxd_count[nic->rxd_mode],
710                                                   GFP_KERNEL);
711                         if (!rx_blocks->rxds)
712                                 return -ENOMEM;
713                         mem_allocated +=
714                         (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
715                         for (l=0; l<rxd_count[nic->rxd_mode];l++) {
716                                 rx_blocks->rxds[l].virt_addr =
717                                         rx_blocks->block_virt_addr +
718                                         (rxd_size[nic->rxd_mode] * l);
719                                 rx_blocks->rxds[l].dma_addr =
720                                         rx_blocks->block_dma_addr +
721                                         (rxd_size[nic->rxd_mode] * l);
722                         }
723                 }
724                 /* Interlinking all Rx Blocks */
725                 for (j = 0; j < blk_cnt; j++) {
726                         tmp_v_addr =
727                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
728                         tmp_v_addr_next =
729                                 mac_control->rings[i].rx_blocks[(j + 1) %
730                                               blk_cnt].block_virt_addr;
731                         tmp_p_addr =
732                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
733                         tmp_p_addr_next =
734                                 mac_control->rings[i].rx_blocks[(j + 1) %
735                                               blk_cnt].block_dma_addr;
736
737                         pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
738                         pre_rxd_blk->reserved_2_pNext_RxD_block =
739                             (unsigned long) tmp_v_addr_next;
740                         pre_rxd_blk->pNext_RxD_Blk_physical =
741                             (u64) tmp_p_addr_next;
742                 }
743         }
744         if (nic->rxd_mode == RXD_MODE_3B) {
745                 /*
746                  * Allocation of Storages for buffer addresses in 2BUFF mode
747                  * and the buffers as well.
748                  */
749                 for (i = 0; i < config->rx_ring_num; i++) {
750                         blk_cnt = config->rx_cfg[i].num_rxd /
751                            (rxd_count[nic->rxd_mode]+ 1);
752                         mac_control->rings[i].ba =
753                                 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
754                                      GFP_KERNEL);
755                         if (!mac_control->rings[i].ba)
756                                 return -ENOMEM;
757                         mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
758                         for (j = 0; j < blk_cnt; j++) {
759                                 int k = 0;
760                                 mac_control->rings[i].ba[j] =
761                                         kmalloc((sizeof(struct buffAdd) *
762                                                 (rxd_count[nic->rxd_mode] + 1)),
763                                                 GFP_KERNEL);
764                                 if (!mac_control->rings[i].ba[j])
765                                         return -ENOMEM;
766                                 mem_allocated += (sizeof(struct buffAdd) *  \
767                                         (rxd_count[nic->rxd_mode] + 1));
768                                 while (k != rxd_count[nic->rxd_mode]) {
769                                         ba = &mac_control->rings[i].ba[j][k];
770
771                                         ba->ba_0_org = (void *) kmalloc
772                                             (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
773                                         if (!ba->ba_0_org)
774                                                 return -ENOMEM;
775                                         mem_allocated +=
776                                                 (BUF0_LEN + ALIGN_SIZE);
777                                         tmp = (unsigned long)ba->ba_0_org;
778                                         tmp += ALIGN_SIZE;
779                                         tmp &= ~((unsigned long) ALIGN_SIZE);
780                                         ba->ba_0 = (void *) tmp;
781
782                                         ba->ba_1_org = (void *) kmalloc
783                                             (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
784                                         if (!ba->ba_1_org)
785                                                 return -ENOMEM;
786                                         mem_allocated
787                                                 += (BUF1_LEN + ALIGN_SIZE);
788                                         tmp = (unsigned long) ba->ba_1_org;
789                                         tmp += ALIGN_SIZE;
790                                         tmp &= ~((unsigned long) ALIGN_SIZE);
791                                         ba->ba_1 = (void *) tmp;
792                                         k++;
793                                 }
794                         }
795                 }
796         }
797
798         /* Allocation and initialization of Statistics block */
799         size = sizeof(struct stat_block);
800         mac_control->stats_mem = pci_alloc_consistent
801             (nic->pdev, size, &mac_control->stats_mem_phy);
802
803         if (!mac_control->stats_mem) {
804                 /*
805                  * In case of failure, free_shared_mem() is called, which
806                  * should free any memory that was alloced till the
807                  * failure happened.
808                  */
809                 return -ENOMEM;
810         }
811         mem_allocated += size;
812         mac_control->stats_mem_sz = size;
813
814         tmp_v_addr = mac_control->stats_mem;
815         mac_control->stats_info = (struct stat_block *) tmp_v_addr;
816         memset(tmp_v_addr, 0, size);
817         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
818                   (unsigned long long) tmp_p_addr);
819         mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
820         return SUCCESS;
821 }
822
823 /**
824  * free_shared_mem - Free the allocated Memory
825  * @nic:  Device private variable.
826  * Description: This function is to free all memory locations allocated by
827  * the init_shared_mem() function and return it to the kernel.
828  */
829
830 static void free_shared_mem(struct s2io_nic *nic)
831 {
832         int i, j, blk_cnt, size;
833         u32 ufo_size = 0;
834         void *tmp_v_addr;
835         dma_addr_t tmp_p_addr;
836         struct mac_info *mac_control;
837         struct config_param *config;
838         int lst_size, lst_per_page;
839         struct net_device *dev;
840         int page_num = 0;
841
842         if (!nic)
843                 return;
844
845         dev = nic->dev;
846
847         mac_control = &nic->mac_control;
848         config = &nic->config;
849
850         lst_size = (sizeof(struct TxD) * config->max_txds);
851         lst_per_page = PAGE_SIZE / lst_size;
852
853         for (i = 0; i < config->tx_fifo_num; i++) {
854                 ufo_size += config->tx_cfg[i].fifo_len;
855                 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
856                                                         lst_per_page);
857                 for (j = 0; j < page_num; j++) {
858                         int mem_blks = (j * lst_per_page);
859                         if (!mac_control->fifos[i].list_info)
860                                 return;
861                         if (!mac_control->fifos[i].list_info[mem_blks].
862                                  list_virt_addr)
863                                 break;
864                         pci_free_consistent(nic->pdev, PAGE_SIZE,
865                                             mac_control->fifos[i].
866                                             list_info[mem_blks].
867                                             list_virt_addr,
868                                             mac_control->fifos[i].
869                                             list_info[mem_blks].
870                                             list_phy_addr);
871                         nic->mac_control.stats_info->sw_stat.mem_freed
872                                                 += PAGE_SIZE;
873                 }
874                 /* If we got a zero DMA address during allocation,
875                  * free the page now
876                  */
877                 if (mac_control->zerodma_virt_addr) {
878                         pci_free_consistent(nic->pdev, PAGE_SIZE,
879                                             mac_control->zerodma_virt_addr,
880                                             (dma_addr_t)0);
881                         DBG_PRINT(INIT_DBG,
882                                 "%s: Freeing TxDL with zero DMA addr. ",
883                                 dev->name);
884                         DBG_PRINT(INIT_DBG, "Virtual address %p\n",
885                                 mac_control->zerodma_virt_addr);
886                         nic->mac_control.stats_info->sw_stat.mem_freed
887                                                 += PAGE_SIZE;
888                 }
889                 kfree(mac_control->fifos[i].list_info);
890                 nic->mac_control.stats_info->sw_stat.mem_freed +=
891                 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
892         }
893
894         size = SIZE_OF_BLOCK;
895         for (i = 0; i < config->rx_ring_num; i++) {
896                 blk_cnt = mac_control->rings[i].block_count;
897                 for (j = 0; j < blk_cnt; j++) {
898                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
899                                 block_virt_addr;
900                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
901                                 block_dma_addr;
902                         if (tmp_v_addr == NULL)
903                                 break;
904                         pci_free_consistent(nic->pdev, size,
905                                             tmp_v_addr, tmp_p_addr);
906                         nic->mac_control.stats_info->sw_stat.mem_freed += size;
907                         kfree(mac_control->rings[i].rx_blocks[j].rxds);
908                         nic->mac_control.stats_info->sw_stat.mem_freed +=
909                         ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
910                 }
911         }
912
913         if (nic->rxd_mode == RXD_MODE_3B) {
914                 /* Freeing buffer storage addresses in 2BUFF mode. */
915                 for (i = 0; i < config->rx_ring_num; i++) {
916                         blk_cnt = config->rx_cfg[i].num_rxd /
917                             (rxd_count[nic->rxd_mode] + 1);
918                         for (j = 0; j < blk_cnt; j++) {
919                                 int k = 0;
920                                 if (!mac_control->rings[i].ba[j])
921                                         continue;
922                                 while (k != rxd_count[nic->rxd_mode]) {
923                                         struct buffAdd *ba =
924                                                 &mac_control->rings[i].ba[j][k];
925                                         kfree(ba->ba_0_org);
926                                         nic->mac_control.stats_info->sw_stat.\
927                                         mem_freed += (BUF0_LEN + ALIGN_SIZE);
928                                         kfree(ba->ba_1_org);
929                                         nic->mac_control.stats_info->sw_stat.\
930                                         mem_freed += (BUF1_LEN + ALIGN_SIZE);
931                                         k++;
932                                 }
933                                 kfree(mac_control->rings[i].ba[j]);
934                                 nic->mac_control.stats_info->sw_stat.mem_freed +=
935                                         (sizeof(struct buffAdd) *
936                                         (rxd_count[nic->rxd_mode] + 1));
937                         }
938                         kfree(mac_control->rings[i].ba);
939                         nic->mac_control.stats_info->sw_stat.mem_freed +=
940                         (sizeof(struct buffAdd *) * blk_cnt);
941                 }
942         }
943
944         if (mac_control->stats_mem) {
945                 pci_free_consistent(nic->pdev,
946                                     mac_control->stats_mem_sz,
947                                     mac_control->stats_mem,
948                                     mac_control->stats_mem_phy);
949                 nic->mac_control.stats_info->sw_stat.mem_freed +=
950                         mac_control->stats_mem_sz;
951         }
952         if (nic->ufo_in_band_v) {
953                 kfree(nic->ufo_in_band_v);
954                 nic->mac_control.stats_info->sw_stat.mem_freed
955                         += (ufo_size * sizeof(u64));
956         }
957 }
958
959 /**
960  * s2io_verify_pci_mode -
961  */
962
963 static int s2io_verify_pci_mode(struct s2io_nic *nic)
964 {
965         struct XENA_dev_config __iomem *bar0 = nic->bar0;
966         register u64 val64 = 0;
967         int     mode;
968
969         val64 = readq(&bar0->pci_mode);
970         mode = (u8)GET_PCI_MODE(val64);
971
972         if ( val64 & PCI_MODE_UNKNOWN_MODE)
973                 return -1;      /* Unknown PCI mode */
974         return mode;
975 }
976
977 #define NEC_VENID   0x1033
978 #define NEC_DEVID   0x0125
979 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
980 {
981         struct pci_dev *tdev = NULL;
982         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
983                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
984                         if (tdev->bus == s2io_pdev->bus->parent)
985                                 pci_dev_put(tdev);
986                                 return 1;
987                 }
988         }
989         return 0;
990 }
991
992 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
993 /**
994  * s2io_print_pci_mode -
995  */
996 static int s2io_print_pci_mode(struct s2io_nic *nic)
997 {
998         struct XENA_dev_config __iomem *bar0 = nic->bar0;
999         register u64 val64 = 0;
1000         int     mode;
1001         struct config_param *config = &nic->config;
1002
1003         val64 = readq(&bar0->pci_mode);
1004         mode = (u8)GET_PCI_MODE(val64);
1005
1006         if ( val64 & PCI_MODE_UNKNOWN_MODE)
1007                 return -1;      /* Unknown PCI mode */
1008
1009         config->bus_speed = bus_speed[mode];
1010
1011         if (s2io_on_nec_bridge(nic->pdev)) {
1012                 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1013                                                         nic->dev->name);
1014                 return mode;
1015         }
1016
1017         if (val64 & PCI_MODE_32_BITS) {
1018                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1019         } else {
1020                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1021         }
1022
1023         switch(mode) {
1024                 case PCI_MODE_PCI_33:
1025                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1026                         break;
1027                 case PCI_MODE_PCI_66:
1028                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1029                         break;
1030                 case PCI_MODE_PCIX_M1_66:
1031                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1032                         break;
1033                 case PCI_MODE_PCIX_M1_100:
1034                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1035                         break;
1036                 case PCI_MODE_PCIX_M1_133:
1037                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1038                         break;
1039                 case PCI_MODE_PCIX_M2_66:
1040                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1041                         break;
1042                 case PCI_MODE_PCIX_M2_100:
1043                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1044                         break;
1045                 case PCI_MODE_PCIX_M2_133:
1046                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1047                         break;
1048                 default:
1049                         return -1;      /* Unsupported bus speed */
1050         }
1051
1052         return mode;
1053 }
1054
1055 /**
1056  *  init_nic - Initialization of hardware
1057  *  @nic: device peivate variable
1058  *  Description: The function sequentially configures every block
1059  *  of the H/W from their reset values.
1060  *  Return Value:  SUCCESS on success and
1061  *  '-1' on failure (endian settings incorrect).
1062  */
1063
1064 static int init_nic(struct s2io_nic *nic)
1065 {
1066         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1067         struct net_device *dev = nic->dev;
1068         register u64 val64 = 0;
1069         void __iomem *add;
1070         u32 time;
1071         int i, j;
1072         struct mac_info *mac_control;
1073         struct config_param *config;
1074         int dtx_cnt = 0;
1075         unsigned long long mem_share;
1076         int mem_size;
1077
1078         mac_control = &nic->mac_control;
1079         config = &nic->config;
1080
1081         /* to set the swapper controle on the card */
1082         if(s2io_set_swapper(nic)) {
1083                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1084                 return -EIO;
1085         }
1086
1087         /*
1088          * Herc requires EOI to be removed from reset before XGXS, so..
1089          */
1090         if (nic->device_type & XFRAME_II_DEVICE) {
1091                 val64 = 0xA500000000ULL;
1092                 writeq(val64, &bar0->sw_reset);
1093                 msleep(500);
1094                 val64 = readq(&bar0->sw_reset);
1095         }
1096
1097         /* Remove XGXS from reset state */
1098         val64 = 0;
1099         writeq(val64, &bar0->sw_reset);
1100         msleep(500);
1101         val64 = readq(&bar0->sw_reset);
1102
1103         /* Ensure that it's safe to access registers by checking
1104          * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1105          */
1106         if (nic->device_type == XFRAME_II_DEVICE) {
1107                 for (i = 0; i < 50; i++) {
1108                         val64 = readq(&bar0->adapter_status);
1109                         if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1110                                 break;
1111                         msleep(10);
1112                 }
1113                 if (i == 50)
1114                         return -ENODEV;
1115         }
1116
1117         /*  Enable Receiving broadcasts */
1118         add = &bar0->mac_cfg;
1119         val64 = readq(&bar0->mac_cfg);
1120         val64 |= MAC_RMAC_BCAST_ENABLE;
1121         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1122         writel((u32) val64, add);
1123         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1124         writel((u32) (val64 >> 32), (add + 4));
1125
1126         /* Read registers in all blocks */
1127         val64 = readq(&bar0->mac_int_mask);
1128         val64 = readq(&bar0->mc_int_mask);
1129         val64 = readq(&bar0->xgxs_int_mask);
1130
1131         /*  Set MTU */
1132         val64 = dev->mtu;
1133         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1134
1135         if (nic->device_type & XFRAME_II_DEVICE) {
1136                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1137                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1138                                           &bar0->dtx_control, UF);
1139                         if (dtx_cnt & 0x1)
1140                                 msleep(1); /* Necessary!! */
1141                         dtx_cnt++;
1142                 }
1143         } else {
1144                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1145                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1146                                           &bar0->dtx_control, UF);
1147                         val64 = readq(&bar0->dtx_control);
1148                         dtx_cnt++;
1149                 }
1150         }
1151
1152         /*  Tx DMA Initialization */
1153         val64 = 0;
1154         writeq(val64, &bar0->tx_fifo_partition_0);
1155         writeq(val64, &bar0->tx_fifo_partition_1);
1156         writeq(val64, &bar0->tx_fifo_partition_2);
1157         writeq(val64, &bar0->tx_fifo_partition_3);
1158
1159
1160         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1161                 val64 |=
1162                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1163                          13) | vBIT(config->tx_cfg[i].fifo_priority,
1164                                     ((i * 32) + 5), 3);
1165
1166                 if (i == (config->tx_fifo_num - 1)) {
1167                         if (i % 2 == 0)
1168                                 i++;
1169                 }
1170
1171                 switch (i) {
1172                 case 1:
1173                         writeq(val64, &bar0->tx_fifo_partition_0);
1174                         val64 = 0;
1175                         break;
1176                 case 3:
1177                         writeq(val64, &bar0->tx_fifo_partition_1);
1178                         val64 = 0;
1179                         break;
1180                 case 5:
1181                         writeq(val64, &bar0->tx_fifo_partition_2);
1182                         val64 = 0;
1183                         break;
1184                 case 7:
1185                         writeq(val64, &bar0->tx_fifo_partition_3);
1186                         break;
1187                 }
1188         }
1189
1190         /*
1191          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1192          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1193          */
1194         if ((nic->device_type == XFRAME_I_DEVICE) &&
1195                 (nic->pdev->revision < 4))
1196                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1197
1198         val64 = readq(&bar0->tx_fifo_partition_0);
1199         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1200                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1201
1202         /*
1203          * Initialization of Tx_PA_CONFIG register to ignore packet
1204          * integrity checking.
1205          */
1206         val64 = readq(&bar0->tx_pa_cfg);
1207         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1208             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1209         writeq(val64, &bar0->tx_pa_cfg);
1210
1211         /* Rx DMA intialization. */
1212         val64 = 0;
1213         for (i = 0; i < config->rx_ring_num; i++) {
1214                 val64 |=
1215                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1216                          3);
1217         }
1218         writeq(val64, &bar0->rx_queue_priority);
1219
1220         /*
1221          * Allocating equal share of memory to all the
1222          * configured Rings.
1223          */
1224         val64 = 0;
1225         if (nic->device_type & XFRAME_II_DEVICE)
1226                 mem_size = 32;
1227         else
1228                 mem_size = 64;
1229
1230         for (i = 0; i < config->rx_ring_num; i++) {
1231                 switch (i) {
1232                 case 0:
1233                         mem_share = (mem_size / config->rx_ring_num +
1234                                      mem_size % config->rx_ring_num);
1235                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1236                         continue;
1237                 case 1:
1238                         mem_share = (mem_size / config->rx_ring_num);
1239                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1240                         continue;
1241                 case 2:
1242                         mem_share = (mem_size / config->rx_ring_num);
1243                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1244                         continue;
1245                 case 3:
1246                         mem_share = (mem_size / config->rx_ring_num);
1247                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1248                         continue;
1249                 case 4:
1250                         mem_share = (mem_size / config->rx_ring_num);
1251                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1252                         continue;
1253                 case 5:
1254                         mem_share = (mem_size / config->rx_ring_num);
1255                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1256                         continue;
1257                 case 6:
1258                         mem_share = (mem_size / config->rx_ring_num);
1259                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1260                         continue;
1261                 case 7:
1262                         mem_share = (mem_size / config->rx_ring_num);
1263                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1264                         continue;
1265                 }
1266         }
1267         writeq(val64, &bar0->rx_queue_cfg);
1268
1269         /*
1270          * Filling Tx round robin registers
1271          * as per the number of FIFOs
1272          */
1273         switch (config->tx_fifo_num) {
1274         case 1:
1275                 val64 = 0x0000000000000000ULL;
1276                 writeq(val64, &bar0->tx_w_round_robin_0);
1277                 writeq(val64, &bar0->tx_w_round_robin_1);
1278                 writeq(val64, &bar0->tx_w_round_robin_2);
1279                 writeq(val64, &bar0->tx_w_round_robin_3);
1280                 writeq(val64, &bar0->tx_w_round_robin_4);
1281                 break;
1282         case 2:
1283                 val64 = 0x0000010000010000ULL;
1284                 writeq(val64, &bar0->tx_w_round_robin_0);
1285                 val64 = 0x0100000100000100ULL;
1286                 writeq(val64, &bar0->tx_w_round_robin_1);
1287                 val64 = 0x0001000001000001ULL;
1288                 writeq(val64, &bar0->tx_w_round_robin_2);
1289                 val64 = 0x0000010000010000ULL;
1290                 writeq(val64, &bar0->tx_w_round_robin_3);
1291                 val64 = 0x0100000000000000ULL;
1292                 writeq(val64, &bar0->tx_w_round_robin_4);
1293                 break;
1294         case 3:
1295                 val64 = 0x0001000102000001ULL;
1296                 writeq(val64, &bar0->tx_w_round_robin_0);
1297                 val64 = 0x0001020000010001ULL;
1298                 writeq(val64, &bar0->tx_w_round_robin_1);
1299                 val64 = 0x0200000100010200ULL;
1300                 writeq(val64, &bar0->tx_w_round_robin_2);
1301                 val64 = 0x0001000102000001ULL;
1302                 writeq(val64, &bar0->tx_w_round_robin_3);
1303                 val64 = 0x0001020000000000ULL;
1304                 writeq(val64, &bar0->tx_w_round_robin_4);
1305                 break;
1306         case 4:
1307                 val64 = 0x0001020300010200ULL;
1308                 writeq(val64, &bar0->tx_w_round_robin_0);
1309                 val64 = 0x0100000102030001ULL;
1310                 writeq(val64, &bar0->tx_w_round_robin_1);
1311                 val64 = 0x0200010000010203ULL;
1312                 writeq(val64, &bar0->tx_w_round_robin_2);
1313                 val64 = 0x0001020001000001ULL;
1314                 writeq(val64, &bar0->tx_w_round_robin_3);
1315                 val64 = 0x0203000100000000ULL;
1316                 writeq(val64, &bar0->tx_w_round_robin_4);
1317                 break;
1318         case 5:
1319                 val64 = 0x0001000203000102ULL;
1320                 writeq(val64, &bar0->tx_w_round_robin_0);
1321                 val64 = 0x0001020001030004ULL;
1322                 writeq(val64, &bar0->tx_w_round_robin_1);
1323                 val64 = 0x0001000203000102ULL;
1324                 writeq(val64, &bar0->tx_w_round_robin_2);
1325                 val64 = 0x0001020001030004ULL;
1326                 writeq(val64, &bar0->tx_w_round_robin_3);
1327                 val64 = 0x0001000000000000ULL;
1328                 writeq(val64, &bar0->tx_w_round_robin_4);
1329                 break;
1330         case 6:
1331                 val64 = 0x0001020304000102ULL;
1332                 writeq(val64, &bar0->tx_w_round_robin_0);
1333                 val64 = 0x0304050001020001ULL;
1334                 writeq(val64, &bar0->tx_w_round_robin_1);
1335                 val64 = 0x0203000100000102ULL;
1336                 writeq(val64, &bar0->tx_w_round_robin_2);
1337                 val64 = 0x0304000102030405ULL;
1338                 writeq(val64, &bar0->tx_w_round_robin_3);
1339                 val64 = 0x0001000200000000ULL;
1340                 writeq(val64, &bar0->tx_w_round_robin_4);
1341                 break;
1342         case 7:
1343                 val64 = 0x0001020001020300ULL;
1344                 writeq(val64, &bar0->tx_w_round_robin_0);
1345                 val64 = 0x0102030400010203ULL;
1346                 writeq(val64, &bar0->tx_w_round_robin_1);
1347                 val64 = 0x0405060001020001ULL;
1348                 writeq(val64, &bar0->tx_w_round_robin_2);
1349                 val64 = 0x0304050000010200ULL;
1350                 writeq(val64, &bar0->tx_w_round_robin_3);
1351                 val64 = 0x0102030000000000ULL;
1352                 writeq(val64, &bar0->tx_w_round_robin_4);
1353                 break;
1354         case 8:
1355                 val64 = 0x0001020300040105ULL;
1356                 writeq(val64, &bar0->tx_w_round_robin_0);
1357                 val64 = 0x0200030106000204ULL;
1358                 writeq(val64, &bar0->tx_w_round_robin_1);
1359                 val64 = 0x0103000502010007ULL;
1360                 writeq(val64, &bar0->tx_w_round_robin_2);
1361                 val64 = 0x0304010002060500ULL;
1362                 writeq(val64, &bar0->tx_w_round_robin_3);
1363                 val64 = 0x0103020400000000ULL;
1364                 writeq(val64, &bar0->tx_w_round_robin_4);
1365                 break;
1366         }
1367
1368         /* Enable all configured Tx FIFO partitions */
1369         val64 = readq(&bar0->tx_fifo_partition_0);
1370         val64 |= (TX_FIFO_PARTITION_EN);
1371         writeq(val64, &bar0->tx_fifo_partition_0);
1372
1373         /* Filling the Rx round robin registers as per the
1374          * number of Rings and steering based on QoS.
1375          */
1376         switch (config->rx_ring_num) {
1377         case 1:
1378                 val64 = 0x8080808080808080ULL;
1379                 writeq(val64, &bar0->rts_qos_steering);
1380                 break;
1381         case 2:
1382                 val64 = 0x0000010000010000ULL;
1383                 writeq(val64, &bar0->rx_w_round_robin_0);
1384                 val64 = 0x0100000100000100ULL;
1385                 writeq(val64, &bar0->rx_w_round_robin_1);
1386                 val64 = 0x0001000001000001ULL;
1387                 writeq(val64, &bar0->rx_w_round_robin_2);
1388                 val64 = 0x0000010000010000ULL;
1389                 writeq(val64, &bar0->rx_w_round_robin_3);
1390                 val64 = 0x0100000000000000ULL;
1391                 writeq(val64, &bar0->rx_w_round_robin_4);
1392
1393                 val64 = 0x8080808040404040ULL;
1394                 writeq(val64, &bar0->rts_qos_steering);
1395                 break;
1396         case 3:
1397                 val64 = 0x0001000102000001ULL;
1398                 writeq(val64, &bar0->rx_w_round_robin_0);
1399                 val64 = 0x0001020000010001ULL;
1400                 writeq(val64, &bar0->rx_w_round_robin_1);
1401                 val64 = 0x0200000100010200ULL;
1402                 writeq(val64, &bar0->rx_w_round_robin_2);
1403                 val64 = 0x0001000102000001ULL;
1404                 writeq(val64, &bar0->rx_w_round_robin_3);
1405                 val64 = 0x0001020000000000ULL;
1406                 writeq(val64, &bar0->rx_w_round_robin_4);
1407
1408                 val64 = 0x8080804040402020ULL;
1409                 writeq(val64, &bar0->rts_qos_steering);
1410                 break;
1411         case 4:
1412                 val64 = 0x0001020300010200ULL;
1413                 writeq(val64, &bar0->rx_w_round_robin_0);
1414                 val64 = 0x0100000102030001ULL;
1415                 writeq(val64, &bar0->rx_w_round_robin_1);
1416                 val64 = 0x0200010000010203ULL;
1417                 writeq(val64, &bar0->rx_w_round_robin_2);
1418                 val64 = 0x0001020001000001ULL;
1419                 writeq(val64, &bar0->rx_w_round_robin_3);
1420                 val64 = 0x0203000100000000ULL;
1421                 writeq(val64, &bar0->rx_w_round_robin_4);
1422
1423                 val64 = 0x8080404020201010ULL;
1424                 writeq(val64, &bar0->rts_qos_steering);
1425                 break;
1426         case 5:
1427                 val64 = 0x0001000203000102ULL;
1428                 writeq(val64, &bar0->rx_w_round_robin_0);
1429                 val64 = 0x0001020001030004ULL;
1430                 writeq(val64, &bar0->rx_w_round_robin_1);
1431                 val64 = 0x0001000203000102ULL;
1432                 writeq(val64, &bar0->rx_w_round_robin_2);
1433                 val64 = 0x0001020001030004ULL;
1434                 writeq(val64, &bar0->rx_w_round_robin_3);
1435                 val64 = 0x0001000000000000ULL;
1436                 writeq(val64, &bar0->rx_w_round_robin_4);
1437
1438                 val64 = 0x8080404020201008ULL;
1439                 writeq(val64, &bar0->rts_qos_steering);
1440                 break;
1441         case 6:
1442                 val64 = 0x0001020304000102ULL;
1443                 writeq(val64, &bar0->rx_w_round_robin_0);
1444                 val64 = 0x0304050001020001ULL;
1445                 writeq(val64, &bar0->rx_w_round_robin_1);
1446                 val64 = 0x0203000100000102ULL;
1447                 writeq(val64, &bar0->rx_w_round_robin_2);
1448                 val64 = 0x0304000102030405ULL;
1449                 writeq(val64, &bar0->rx_w_round_robin_3);
1450                 val64 = 0x0001000200000000ULL;
1451                 writeq(val64, &bar0->rx_w_round_robin_4);
1452
1453                 val64 = 0x8080404020100804ULL;
1454                 writeq(val64, &bar0->rts_qos_steering);
1455                 break;
1456         case 7:
1457                 val64 = 0x0001020001020300ULL;
1458                 writeq(val64, &bar0->rx_w_round_robin_0);
1459                 val64 = 0x0102030400010203ULL;
1460                 writeq(val64, &bar0->rx_w_round_robin_1);
1461                 val64 = 0x0405060001020001ULL;
1462                 writeq(val64, &bar0->rx_w_round_robin_2);
1463                 val64 = 0x0304050000010200ULL;
1464                 writeq(val64, &bar0->rx_w_round_robin_3);
1465                 val64 = 0x0102030000000000ULL;
1466                 writeq(val64, &bar0->rx_w_round_robin_4);
1467
1468                 val64 = 0x8080402010080402ULL;
1469                 writeq(val64, &bar0->rts_qos_steering);
1470                 break;
1471         case 8:
1472                 val64 = 0x0001020300040105ULL;
1473                 writeq(val64, &bar0->rx_w_round_robin_0);
1474                 val64 = 0x0200030106000204ULL;
1475                 writeq(val64, &bar0->rx_w_round_robin_1);
1476                 val64 = 0x0103000502010007ULL;
1477                 writeq(val64, &bar0->rx_w_round_robin_2);
1478                 val64 = 0x0304010002060500ULL;
1479                 writeq(val64, &bar0->rx_w_round_robin_3);
1480                 val64 = 0x0103020400000000ULL;
1481                 writeq(val64, &bar0->rx_w_round_robin_4);
1482
1483                 val64 = 0x8040201008040201ULL;
1484                 writeq(val64, &bar0->rts_qos_steering);
1485                 break;
1486         }
1487
1488         /* UDP Fix */
1489         val64 = 0;
1490         for (i = 0; i < 8; i++)
1491                 writeq(val64, &bar0->rts_frm_len_n[i]);
1492
1493         /* Set the default rts frame length for the rings configured */
1494         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1495         for (i = 0 ; i < config->rx_ring_num ; i++)
1496                 writeq(val64, &bar0->rts_frm_len_n[i]);
1497
1498         /* Set the frame length for the configured rings
1499          * desired by the user
1500          */
1501         for (i = 0; i < config->rx_ring_num; i++) {
1502                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1503                  * specified frame length steering.
1504                  * If the user provides the frame length then program
1505                  * the rts_frm_len register for those values or else
1506                  * leave it as it is.
1507                  */
1508                 if (rts_frm_len[i] != 0) {
1509                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1510                                 &bar0->rts_frm_len_n[i]);
1511                 }
1512         }
1513
1514         /* Disable differentiated services steering logic */
1515         for (i = 0; i < 64; i++) {
1516                 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1517                         DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1518                                 dev->name);
1519                         DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1520                         return -ENODEV;
1521                 }
1522         }
1523
1524         /* Program statistics memory */
1525         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1526
1527         if (nic->device_type == XFRAME_II_DEVICE) {
1528                 val64 = STAT_BC(0x320);
1529                 writeq(val64, &bar0->stat_byte_cnt);
1530         }
1531
1532         /*
1533          * Initializing the sampling rate for the device to calculate the
1534          * bandwidth utilization.
1535          */
1536         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1537             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1538         writeq(val64, &bar0->mac_link_util);
1539
1540
1541         /*
1542          * Initializing the Transmit and Receive Traffic Interrupt
1543          * Scheme.
1544          */
1545         /*
1546          * TTI Initialization. Default Tx timer gets us about
1547          * 250 interrupts per sec. Continuous interrupts are enabled
1548          * by default.
1549          */
1550         if (nic->device_type == XFRAME_II_DEVICE) {
1551                 int count = (nic->config.bus_speed * 125)/2;
1552                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1553         } else {
1554
1555                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1556         }
1557         val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1558             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1559             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1560                 if (use_continuous_tx_intrs)
1561                         val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1562         writeq(val64, &bar0->tti_data1_mem);
1563
1564         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1565             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1566             TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1567         writeq(val64, &bar0->tti_data2_mem);
1568
1569         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1570         writeq(val64, &bar0->tti_command_mem);
1571
1572         /*
1573          * Once the operation completes, the Strobe bit of the command
1574          * register will be reset. We poll for this particular condition
1575          * We wait for a maximum of 500ms for the operation to complete,
1576          * if it's not complete by then we return error.
1577          */
1578         time = 0;
1579         while (TRUE) {
1580                 val64 = readq(&bar0->tti_command_mem);
1581                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1582                         break;
1583                 }
1584                 if (time > 10) {
1585                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1586                                   dev->name);
1587                         return -ENODEV;
1588                 }
1589                 msleep(50);
1590                 time++;
1591         }
1592
1593         /* RTI Initialization */
1594         if (nic->device_type == XFRAME_II_DEVICE) {
1595                 /*
1596                  * Programmed to generate Apprx 500 Intrs per
1597                  * second
1598                  */
1599                 int count = (nic->config.bus_speed * 125)/4;
1600                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1601         } else
1602                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1603         val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1604                  RTI_DATA1_MEM_RX_URNG_B(0x10) |
1605                  RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1606
1607         writeq(val64, &bar0->rti_data1_mem);
1608
1609         val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1610                 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1611         if (nic->config.intr_type == MSI_X)
1612             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1613                         RTI_DATA2_MEM_RX_UFC_D(0x40));
1614         else
1615             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1616                         RTI_DATA2_MEM_RX_UFC_D(0x80));
1617         writeq(val64, &bar0->rti_data2_mem);
1618
1619         for (i = 0; i < config->rx_ring_num; i++) {
1620                 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1621                                 | RTI_CMD_MEM_OFFSET(i);
1622                 writeq(val64, &bar0->rti_command_mem);
1623
1624                 /*
1625                  * Once the operation completes, the Strobe bit of the
1626                  * command register will be reset. We poll for this
1627                  * particular condition. We wait for a maximum of 500ms
1628                  * for the operation to complete, if it's not complete
1629                  * by then we return error.
1630                  */
1631                 time = 0;
1632                 while (TRUE) {
1633                         val64 = readq(&bar0->rti_command_mem);
1634                         if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1635                                 break;
1636
1637                         if (time > 10) {
1638                                 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1639                                           dev->name);
1640                                 return -ENODEV;
1641                         }
1642                         time++;
1643                         msleep(50);
1644                 }
1645         }
1646
1647         /*
1648          * Initializing proper values as Pause threshold into all
1649          * the 8 Queues on Rx side.
1650          */
1651         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1652         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1653
1654         /* Disable RMAC PAD STRIPPING */
1655         add = &bar0->mac_cfg;
1656         val64 = readq(&bar0->mac_cfg);
1657         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1658         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1659         writel((u32) (val64), add);
1660         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1661         writel((u32) (val64 >> 32), (add + 4));
1662         val64 = readq(&bar0->mac_cfg);
1663
1664         /* Enable FCS stripping by adapter */
1665         add = &bar0->mac_cfg;
1666         val64 = readq(&bar0->mac_cfg);
1667         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1668         if (nic->device_type == XFRAME_II_DEVICE)
1669                 writeq(val64, &bar0->mac_cfg);
1670         else {
1671                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1672                 writel((u32) (val64), add);
1673                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1674                 writel((u32) (val64 >> 32), (add + 4));
1675         }
1676
1677         /*
1678          * Set the time value to be inserted in the pause frame
1679          * generated by xena.
1680          */
1681         val64 = readq(&bar0->rmac_pause_cfg);
1682         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1683         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1684         writeq(val64, &bar0->rmac_pause_cfg);
1685
1686         /*
1687          * Set the Threshold Limit for Generating the pause frame
1688          * If the amount of data in any Queue exceeds ratio of
1689          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1690          * pause frame is generated
1691          */
1692         val64 = 0;
1693         for (i = 0; i < 4; i++) {
1694                 val64 |=
1695                     (((u64) 0xFF00 | nic->mac_control.
1696                       mc_pause_threshold_q0q3)
1697                      << (i * 2 * 8));
1698         }
1699         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1700
1701         val64 = 0;
1702         for (i = 0; i < 4; i++) {
1703                 val64 |=
1704                     (((u64) 0xFF00 | nic->mac_control.
1705                       mc_pause_threshold_q4q7)
1706                      << (i * 2 * 8));
1707         }
1708         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1709
1710         /*
1711          * TxDMA will stop Read request if the number of read split has
1712          * exceeded the limit pointed by shared_splits
1713          */
1714         val64 = readq(&bar0->pic_control);
1715         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1716         writeq(val64, &bar0->pic_control);
1717
1718         if (nic->config.bus_speed == 266) {
1719                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1720                 writeq(0x0, &bar0->read_retry_delay);
1721                 writeq(0x0, &bar0->write_retry_delay);
1722         }
1723
1724         /*
1725          * Programming the Herc to split every write transaction
1726          * that does not start on an ADB to reduce disconnects.
1727          */
1728         if (nic->device_type == XFRAME_II_DEVICE) {
1729                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1730                         MISC_LINK_STABILITY_PRD(3);
1731                 writeq(val64, &bar0->misc_control);
1732                 val64 = readq(&bar0->pic_control2);
1733                 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1734                 writeq(val64, &bar0->pic_control2);
1735         }
1736         if (strstr(nic->product_name, "CX4")) {
1737                 val64 = TMAC_AVG_IPG(0x17);
1738                 writeq(val64, &bar0->tmac_avg_ipg);
1739         }
1740
1741         return SUCCESS;
1742 }
1743 #define LINK_UP_DOWN_INTERRUPT          1
1744 #define MAC_RMAC_ERR_TIMER              2
1745
1746 static int s2io_link_fault_indication(struct s2io_nic *nic)
1747 {
1748         if (nic->config.intr_type != INTA)
1749                 return MAC_RMAC_ERR_TIMER;
1750         if (nic->device_type == XFRAME_II_DEVICE)
1751                 return LINK_UP_DOWN_INTERRUPT;
1752         else
1753                 return MAC_RMAC_ERR_TIMER;
1754 }
1755
1756 /**
1757  *  do_s2io_write_bits -  update alarm bits in alarm register
1758  *  @value: alarm bits
1759  *  @flag: interrupt status
1760  *  @addr: address value
1761  *  Description: update alarm bits in alarm register
1762  *  Return Value:
1763  *  NONE.
1764  */
1765 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1766 {
1767         u64 temp64;
1768
1769         temp64 = readq(addr);
1770
1771         if(flag == ENABLE_INTRS)
1772                 temp64 &= ~((u64) value);
1773         else
1774                 temp64 |= ((u64) value);
1775         writeq(temp64, addr);
1776 }
1777
/**
 *  en_dis_err_alarms - enable/disable alarm (error) interrupt sources
 *  @nic: device private structure.
 *  @mask: bitmask selecting which alarm blocks (TX_DMA_INTR, TX_MAC_INTR,
 *         TX_XGXS_INTR, RX_DMA_INTR, RX_MAC_INTR, RX_XGXS_INTR, MC_INTR)
 *         to modify.
 *  @flag: ENABLE_INTRS or DISABLE_INTRS, passed straight through to
 *         do_s2io_write_bits() for every per-block mask register.
 *  Description: for each selected block this programs the block's error
 *  mask registers and accumulates the block's top-level bit into
 *  nic->general_int_mask (which the caller later folds into the
 *  general_int_mask register).
 *  Return Value: NONE.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;

	/* Tx DMA path alarms: TDA, PFC, PCC, TTI, LSO, TPA and SM blocks */
	if (mask & TX_DMA_INTR) {

		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				TXDMA_PCC_INT | TXDMA_TTI_INT |
				TXDMA_LSO_INT | TXDMA_TPA_INT |
				TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				&bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				PCC_N_SERR | PCC_6_COF_OV_ERR |
				PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);

	}

	/* Tx MAC (TMAC) alarms: buffer overrun, state machine and ECC errors */
	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				&bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				flag, &bar0->mac_tmac_err_mask);
	}

	/* Tx XGXS (transmit serdes) alarms */
	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				&bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				flag, &bar0->xgxs_txgxs_err_mask);
	}

	/* Rx DMA path alarms: RC, PRC, RPA, RDA and RTI blocks */
	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				&bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				&bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
				flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				flag, &bar0->rti_err_mask);
	}

	/* Rx MAC (RMAC) alarms, including the link state change interrupt */
	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				&bar0->mac_int_mask);
		do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				RMAC_DOUBLE_ECC_ERR |
				RMAC_LINK_STATE_CHANGE_INT,
				flag, &bar0->mac_rmac_err_mask);
	}

	/* Rx XGXS (receive serdes) alarms */
	if (mask & RX_XGXS_INTR)
	{
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				&bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				&bar0->xgxs_rxgxs_err_mask);
	}

	/* Memory controller (RLDRAM) alarms: ECC and PLL lock */
	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				&bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}
1900 /**
1901  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1902  *  @nic: device private variable,
1903  *  @mask: A mask indicating which Intr block must be modified and,
1904  *  @flag: A flag indicating whether to enable or disable the Intrs.
1905  *  Description: This function will either disable or enable the interrupts
1906  *  depending on the flag argument. The mask argument can be used to
1907  *  enable/disable any Intr block.
1908  *  Return Value: NONE.
1909  */
1910
1911 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1912 {
1913         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1914         register u64 temp64 = 0, intr_mask = 0;
1915
1916         intr_mask = nic->general_int_mask;
1917
1918         /*  Top level interrupt classification */
1919         /*  PIC Interrupts */
1920         if (mask & TX_PIC_INTR) {
1921                 /*  Enable PIC Intrs in the general intr mask register */
1922                 intr_mask |= TXPIC_INT_M;
1923                 if (flag == ENABLE_INTRS) {
1924                         /*
1925                          * If Hercules adapter enable GPIO otherwise
1926                          * disable all PCIX, Flash, MDIO, IIC and GPIO
1927                          * interrupts for now.
1928                          * TODO
1929                          */
1930                         if (s2io_link_fault_indication(nic) ==
1931                                         LINK_UP_DOWN_INTERRUPT ) {
1932                                 do_s2io_write_bits(PIC_INT_GPIO, flag,
1933                                                 &bar0->pic_int_mask);
1934                                 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
1935                                                 &bar0->gpio_int_mask);
1936                         } else
1937                                 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1938                 } else if (flag == DISABLE_INTRS) {
1939                         /*
1940                          * Disable PIC Intrs in the general
1941                          * intr mask register
1942                          */
1943                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1944                 }
1945         }
1946
1947         /*  Tx traffic interrupts */
1948         if (mask & TX_TRAFFIC_INTR) {
1949                 intr_mask |= TXTRAFFIC_INT_M;
1950                 if (flag == ENABLE_INTRS) {
1951                         /*
1952                          * Enable all the Tx side interrupts
1953                          * writing 0 Enables all 64 TX interrupt levels
1954                          */
1955                         writeq(0x0, &bar0->tx_traffic_mask);
1956                 } else if (flag == DISABLE_INTRS) {
1957                         /*
1958                          * Disable Tx Traffic Intrs in the general intr mask
1959                          * register.
1960                          */
1961                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1962                 }
1963         }
1964
1965         /*  Rx traffic interrupts */
1966         if (mask & RX_TRAFFIC_INTR) {
1967                 intr_mask |= RXTRAFFIC_INT_M;
1968                 if (flag == ENABLE_INTRS) {
1969                         /* writing 0 Enables all 8 RX interrupt levels */
1970                         writeq(0x0, &bar0->rx_traffic_mask);
1971                 } else if (flag == DISABLE_INTRS) {
1972                         /*
1973                          * Disable Rx Traffic Intrs in the general intr mask
1974                          * register.
1975                          */
1976                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1977                 }
1978         }
1979
1980         temp64 = readq(&bar0->general_int_mask);
1981         if (flag == ENABLE_INTRS)
1982                 temp64 &= ~((u64) intr_mask);
1983         else
1984                 temp64 = DISABLE_ALL_INTRS;
1985         writeq(temp64, &bar0->general_int_mask);
1986
1987         nic->general_int_mask = readq(&bar0->general_int_mask);
1988 }
1989
1990 /**
1991  *  verify_pcc_quiescent- Checks for PCC quiescent state
1992  *  Return: 1 If PCC is quiescence
1993  *          0 If PCC is not quiescence
1994  */
1995 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1996 {
1997         int ret = 0, herc;
1998         struct XENA_dev_config __iomem *bar0 = sp->bar0;
1999         u64 val64 = readq(&bar0->adapter_status);
2000
2001         herc = (sp->device_type == XFRAME_II_DEVICE);
2002
2003         if (flag == FALSE) {
2004                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2005                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2006                                 ret = 1;
2007                 } else {
2008                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2009                                 ret = 1;
2010                 }
2011         } else {
2012                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2013                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2014                              ADAPTER_STATUS_RMAC_PCC_IDLE))
2015                                 ret = 1;
2016                 } else {
2017                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2018                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2019                                 ret = 1;
2020                 }
2021         }
2022
2023         return ret;
2024 }
2025 /**
2026  *  verify_xena_quiescence - Checks whether the H/W is ready
2027  *  Description: Returns whether the H/W is ready to go or not. Depending
2028  *  on whether adapter enable bit was written or not the comparison
2029  *  differs and the calling function passes the input argument flag to
2030  *  indicate this.
2031  *  Return: 1 If xena is quiescence
2032  *          0 If Xena is not quiescence
2033  */
2034
2035 static int verify_xena_quiescence(struct s2io_nic *sp)
2036 {
2037         int  mode;
2038         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2039         u64 val64 = readq(&bar0->adapter_status);
2040         mode = s2io_verify_pci_mode(sp);
2041
2042         if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2043                 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2044                 return 0;
2045         }
2046         if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2047         DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2048                 return 0;
2049         }
2050         if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2051                 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2052                 return 0;
2053         }
2054         if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2055                 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2056                 return 0;
2057         }
2058         if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2059                 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2060                 return 0;
2061         }
2062         if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2063                 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2064                 return 0;
2065         }
2066         if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2067                 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2068                 return 0;
2069         }
2070         if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2071                 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2072                 return 0;
2073         }
2074
2075         /*
2076          * In PCI 33 mode, the P_PLL is not used, and therefore,
2077          * the the P_PLL_LOCK bit in the adapter_status register will
2078          * not be asserted.
2079          */
2080         if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2081                 sp->device_type == XFRAME_II_DEVICE && mode !=
2082                 PCI_MODE_PCI_33) {
2083                 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2084                 return 0;
2085         }
2086         if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2087                         ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2088                 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2089                 return 0;
2090         }
2091         return 1;
2092 }
2093
2094 /**
2095  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2096  * @sp: Pointer to device specifc structure
2097  * Description :
2098  * New procedure to clear mac address reading  problems on Alpha platforms
2099  *
2100  */
2101
2102 static void fix_mac_address(struct s2io_nic * sp)
2103 {
2104         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2105         u64 val64;
2106         int i = 0;
2107
2108         while (fix_mac[i] != END_SIGN) {
2109                 writeq(fix_mac[i++], &bar0->gpio_control);
2110                 udelay(10);
2111                 val64 = readq(&bar0->gpio_control);
2112         }
2113 }
2114
2115 /**
2116  *  start_nic - Turns the device on
2117  *  @nic : device private variable.
2118  *  Description:
2119  *  This function actually turns the device on. Before this  function is
2120  *  called,all Registers are configured from their reset states
2121  *  and shared memory is allocated but the NIC is still quiescent. On
2122  *  calling this function, the device interrupts are cleared and the NIC is
2123  *  literally switched on by writing into the adapter control register.
2124  *  Return Value:
2125  *  SUCCESS on success and -1 on failure.
2126  */
2127
2128 static int start_nic(struct s2io_nic *nic)
2129 {
2130         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2131         struct net_device *dev = nic->dev;
2132         register u64 val64 = 0;
2133         u16 subid, i;
2134         struct mac_info *mac_control;
2135         struct config_param *config;
2136
2137         mac_control = &nic->mac_control;
2138         config = &nic->config;
2139
2140         /*  PRC Initialization and configuration */
2141         for (i = 0; i < config->rx_ring_num; i++) {
2142                 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
2143                        &bar0->prc_rxd0_n[i]);
2144
2145                 val64 = readq(&bar0->prc_ctrl_n[i]);
2146                 if (nic->rxd_mode == RXD_MODE_1)
2147                         val64 |= PRC_CTRL_RC_ENABLED;
2148                 else
2149                         val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2150                 if (nic->device_type == XFRAME_II_DEVICE)
2151                         val64 |= PRC_CTRL_GROUP_READS;
2152                 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2153                 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2154                 writeq(val64, &bar0->prc_ctrl_n[i]);
2155         }
2156
2157         if (nic->rxd_mode == RXD_MODE_3B) {
2158                 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2159                 val64 = readq(&bar0->rx_pa_cfg);
2160                 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2161                 writeq(val64, &bar0->rx_pa_cfg);
2162         }
2163
2164         if (vlan_tag_strip == 0) {
2165                 val64 = readq(&bar0->rx_pa_cfg);
2166                 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2167                 writeq(val64, &bar0->rx_pa_cfg);
2168                 vlan_strip_flag = 0;
2169         }
2170
2171         /*
2172          * Enabling MC-RLDRAM. After enabling the device, we timeout
2173          * for around 100ms, which is approximately the time required
2174          * for the device to be ready for operation.
2175          */
2176         val64 = readq(&bar0->mc_rldram_mrs);
2177         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2178         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2179         val64 = readq(&bar0->mc_rldram_mrs);
2180
2181         msleep(100);    /* Delay by around 100 ms. */
2182
2183         /* Enabling ECC Protection. */
2184         val64 = readq(&bar0->adapter_control);
2185         val64 &= ~ADAPTER_ECC_EN;
2186         writeq(val64, &bar0->adapter_control);
2187
2188         /*
2189          * Verify if the device is ready to be enabled, if so enable
2190          * it.
2191          */
2192         val64 = readq(&bar0->adapter_status);
2193         if (!verify_xena_quiescence(nic)) {
2194                 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2195                 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2196                           (unsigned long long) val64);
2197                 return FAILURE;
2198         }
2199
2200         /*
2201          * With some switches, link might be already up at this point.
2202          * Because of this weird behavior, when we enable laser,
2203          * we may not get link. We need to handle this. We cannot
2204          * figure out which switch is misbehaving. So we are forced to
2205          * make a global change.
2206          */
2207
2208         /* Enabling Laser. */
2209         val64 = readq(&bar0->adapter_control);
2210         val64 |= ADAPTER_EOI_TX_ON;
2211         writeq(val64, &bar0->adapter_control);
2212
2213         if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2214                 /*
2215                  * Dont see link state interrupts initally on some switches,
2216                  * so directly scheduling the link state task here.
2217                  */
2218                 schedule_work(&nic->set_link_task);
2219         }
2220         /* SXE-002: Initialize link and activity LED */
2221         subid = nic->pdev->subsystem_device;
2222         if (((subid & 0xFF) >= 0x07) &&
2223             (nic->device_type == XFRAME_I_DEVICE)) {
2224                 val64 = readq(&bar0->gpio_control);
2225                 val64 |= 0x0000800000000000ULL;
2226                 writeq(val64, &bar0->gpio_control);
2227                 val64 = 0x0411040400000000ULL;
2228                 writeq(val64, (void __iomem *)bar0 + 0x2700);
2229         }
2230
2231         return SUCCESS;
2232 }
2233 /**
2234  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2235  */
2236 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2237                                         TxD *txdlp, int get_off)
2238 {
2239         struct s2io_nic *nic = fifo_data->nic;
2240         struct sk_buff *skb;
2241         struct TxD *txds;
2242         u16 j, frg_cnt;
2243
2244         txds = txdlp;
2245         if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2246                 pci_unmap_single(nic->pdev, (dma_addr_t)
2247                         txds->Buffer_Pointer, sizeof(u64),
2248                         PCI_DMA_TODEVICE);
2249                 txds++;
2250         }
2251
2252         skb = (struct sk_buff *) ((unsigned long)
2253                         txds->Host_Control);
2254         if (!skb) {
2255                 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2256                 return NULL;
2257         }
2258         pci_unmap_single(nic->pdev, (dma_addr_t)
2259                          txds->Buffer_Pointer,
2260                          skb->len - skb->data_len,
2261                          PCI_DMA_TODEVICE);
2262         frg_cnt = skb_shinfo(skb)->nr_frags;
2263         if (frg_cnt) {
2264                 txds++;
2265                 for (j = 0; j < frg_cnt; j++, txds++) {
2266                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2267                         if (!txds->Buffer_Pointer)
2268                                 break;
2269                         pci_unmap_page(nic->pdev, (dma_addr_t)
2270                                         txds->Buffer_Pointer,
2271                                        frag->size, PCI_DMA_TODEVICE);
2272                 }
2273         }
2274         memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2275         return(skb);
2276 }
2277
2278 /**
2279  *  free_tx_buffers - Free all queued Tx buffers
2280  *  @nic : device private variable.
2281  *  Description:
2282  *  Free all queued Tx buffers.
2283  *  Return Value: void
2284 */
2285
2286 static void free_tx_buffers(struct s2io_nic *nic)
2287 {
2288         struct net_device *dev = nic->dev;
2289         struct sk_buff *skb;
2290         struct TxD *txdp;
2291         int i, j;
2292         struct mac_info *mac_control;
2293         struct config_param *config;
2294         int cnt = 0;
2295
2296         mac_control = &nic->mac_control;
2297         config = &nic->config;
2298
2299         for (i = 0; i < config->tx_fifo_num; i++) {
2300                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2301                         txdp = (struct TxD *) \
2302                         mac_control->fifos[i].list_info[j].list_virt_addr;
2303                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2304                         if (skb) {
2305                                 nic->mac_control.stats_info->sw_stat.mem_freed
2306                                         += skb->truesize;
2307                                 dev_kfree_skb(skb);
2308                                 cnt++;
2309                         }
2310                 }
2311                 DBG_PRINT(INTR_DBG,
2312                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2313                           dev->name, cnt, i);
2314                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2315                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2316         }
2317 }
2318
2319 /**
2320  *   stop_nic -  To stop the nic
2321  *   @nic ; device private variable.
2322  *   Description:
2323  *   This function does exactly the opposite of what the start_nic()
2324  *   function does. This function is called to stop the device.
2325  *   Return Value:
2326  *   void.
2327  */
2328
2329 static void stop_nic(struct s2io_nic *nic)
2330 {
2331         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2332         register u64 val64 = 0;
2333         u16 interruptible;
2334         struct mac_info *mac_control;
2335         struct config_param *config;
2336
2337         mac_control = &nic->mac_control;
2338         config = &nic->config;
2339
2340         /*  Disable all interrupts */
2341         en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2342         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2343         interruptible |= TX_PIC_INTR;
2344         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2345
2346         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2347         val64 = readq(&bar0->adapter_control);
2348         val64 &= ~(ADAPTER_CNTL_EN);
2349         writeq(val64, &bar0->adapter_control);
2350 }
2351
2352 /**
2353  *  fill_rx_buffers - Allocates the Rx side skbs
2354  *  @nic:  device private variable
2355  *  @ring_no: ring number
2356  *  Description:
2357  *  The function allocates Rx side skbs and puts the physical
2358  *  address of these buffers into the RxD buffer pointers, so that the NIC
2359  *  can DMA the received frame into these locations.
2360  *  The NIC supports 3 receive modes, viz
2361  *  1. single buffer,
2362  *  2. three buffer and
2363  *  3. Five buffer modes.
2364  *  Each mode defines how many fragments the received frame will be split
2365  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2366  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2367  *  is split into 3 fragments. As of now only single buffer mode is
2368  *  supported.
2369  *   Return Value:
2370  *  SUCCESS on success or an appropriate -ve value on failure.
2371  */
2372
2373 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2374 {
2375         struct net_device *dev = nic->dev;
2376         struct sk_buff *skb;
2377         struct RxD_t *rxdp;
2378         int off, off1, size, block_no, block_no1;
2379         u32 alloc_tab = 0;
2380         u32 alloc_cnt;
2381         struct mac_info *mac_control;
2382         struct config_param *config;
2383         u64 tmp;
2384         struct buffAdd *ba;
2385         unsigned long flags;
2386         struct RxD_t *first_rxdp = NULL;
2387         u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2388         struct RxD1 *rxdp1;
2389         struct RxD3 *rxdp3;
2390         struct swStat *stats = &nic->mac_control.stats_info->sw_stat;
2391
2392         mac_control = &nic->mac_control;
2393         config = &nic->config;
2394         alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2395             atomic_read(&nic->rx_bufs_left[ring_no]);
2396
2397         block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
2398         off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2399         while (alloc_tab < alloc_cnt) {
2400                 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2401                     block_index;
2402                 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2403
2404                 rxdp = mac_control->rings[ring_no].
2405                                 rx_blocks[block_no].rxds[off].virt_addr;
2406
2407                 if ((block_no == block_no1) && (off == off1) &&
2408                                         (rxdp->Host_Control)) {
2409                         DBG_PRINT(INTR_DBG, "%s: Get and Put",
2410                                   dev->name);
2411                         DBG_PRINT(INTR_DBG, " info equated\n");
2412                         goto end;
2413                 }
2414                 if (off && (off == rxd_count[nic->rxd_mode])) {
2415                         mac_control->rings[ring_no].rx_curr_put_info.
2416                             block_index++;
2417                         if (mac_control->rings[ring_no].rx_curr_put_info.
2418                             block_index == mac_control->rings[ring_no].
2419                                         block_count)
2420                                 mac_control->rings[ring_no].rx_curr_put_info.
2421                                         block_index = 0;
2422                         block_no = mac_control->rings[ring_no].
2423                                         rx_curr_put_info.block_index;
2424                         if (off == rxd_count[nic->rxd_mode])
2425                                 off = 0;
2426                         mac_control->rings[ring_no].rx_curr_put_info.
2427                                 offset = off;
2428                         rxdp = mac_control->rings[ring_no].
2429                                 rx_blocks[block_no].block_virt_addr;
2430                         DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2431                                   dev->name, rxdp);
2432                 }
2433                 if(!napi) {
2434                         spin_lock_irqsave(&nic->put_lock, flags);
2435                         mac_control->rings[ring_no].put_pos =
2436                         (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2437                         spin_unlock_irqrestore(&nic->put_lock, flags);
2438                 } else {
2439                         mac_control->rings[ring_no].put_pos =
2440                         (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2441                 }
2442                 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2443                         ((nic->rxd_mode == RXD_MODE_3B) &&
2444                                 (rxdp->Control_2 & s2BIT(0)))) {
2445                         mac_control->rings[ring_no].rx_curr_put_info.
2446                                         offset = off;
2447                         goto end;
2448                 }
2449                 /* calculate size of skb based on ring mode */
2450                 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2451                                 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2452                 if (nic->rxd_mode == RXD_MODE_1)
2453                         size += NET_IP_ALIGN;
2454                 else
2455                         size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2456
2457                 /* allocate skb */
2458                 skb = dev_alloc_skb(size);
2459                 if(!skb) {
2460                         DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
2461                         DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
2462                         if (first_rxdp) {
2463                                 wmb();
2464                                 first_rxdp->Control_1 |= RXD_OWN_XENA;
2465                         }
2466                         nic->mac_control.stats_info->sw_stat. \
2467                                 mem_alloc_fail_cnt++;
2468                         return -ENOMEM ;
2469                 }
2470                 nic->mac_control.stats_info->sw_stat.mem_allocated
2471                         += skb->truesize;
2472                 if (nic->rxd_mode == RXD_MODE_1) {
2473                         /* 1 buffer mode - normal operation mode */
2474                         rxdp1 = (struct RxD1*)rxdp;
2475                         memset(rxdp, 0, sizeof(struct RxD1));
2476                         skb_reserve(skb, NET_IP_ALIGN);
2477                         rxdp1->Buffer0_ptr = pci_map_single
2478                             (nic->pdev, skb->data, size - NET_IP_ALIGN,
2479                                 PCI_DMA_FROMDEVICE);
2480                         if( (rxdp1->Buffer0_ptr == 0) ||
2481                                 (rxdp1->Buffer0_ptr ==
2482                                 DMA_ERROR_CODE))
2483                                 goto pci_map_failed;
2484
2485                         rxdp->Control_2 =
2486                                 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2487
2488                 } else if (nic->rxd_mode == RXD_MODE_3B) {
2489                         /*
2490                          * 2 buffer mode -
2491                          * 2 buffer mode provides 128
2492                          * byte aligned receive buffers.
2493                          */
2494
2495                         rxdp3 = (struct RxD3*)rxdp;
2496                         /* save buffer pointers to avoid frequent dma mapping */
2497                         Buffer0_ptr = rxdp3->Buffer0_ptr;
2498                         Buffer1_ptr = rxdp3->Buffer1_ptr;
2499                         memset(rxdp, 0, sizeof(struct RxD3));
2500                         /* restore the buffer pointers for dma sync*/
2501                         rxdp3->Buffer0_ptr = Buffer0_ptr;
2502                         rxdp3->Buffer1_ptr = Buffer1_ptr;
2503
2504                         ba = &mac_control->rings[ring_no].ba[block_no][off];
2505                         skb_reserve(skb, BUF0_LEN);
2506                         tmp = (u64)(unsigned long) skb->data;
2507                         tmp += ALIGN_SIZE;
2508                         tmp &= ~ALIGN_SIZE;
2509                         skb->data = (void *) (unsigned long)tmp;
2510                         skb_reset_tail_pointer(skb);
2511
2512                         if (!(rxdp3->Buffer0_ptr))
2513                                 rxdp3->Buffer0_ptr =
2514                                    pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2515                                            PCI_DMA_FROMDEVICE);
2516                         else
2517                                 pci_dma_sync_single_for_device(nic->pdev,
2518                                 (dma_addr_t) rxdp3->Buffer0_ptr,
2519                                     BUF0_LEN, PCI_DMA_FROMDEVICE);
2520                         if( (rxdp3->Buffer0_ptr == 0) ||
2521                                 (rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
2522                                 goto pci_map_failed;
2523
2524                         rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2525                         if (nic->rxd_mode == RXD_MODE_3B) {
2526                                 /* Two buffer mode */
2527
2528                                 /*
2529                                  * Buffer2 will have L3/L4 header plus
2530                                  * L4 payload
2531                                  */
2532                                 rxdp3->Buffer2_ptr = pci_map_single
2533                                 (nic->pdev, skb->data, dev->mtu + 4,
2534                                                 PCI_DMA_FROMDEVICE);
2535
2536                                 if( (rxdp3->Buffer2_ptr == 0) ||
2537                                         (rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
2538                                         goto pci_map_failed;
2539
2540                                 rxdp3->Buffer1_ptr =
2541                                                 pci_map_single(nic->pdev,
2542                                                 ba->ba_1, BUF1_LEN,
2543                                                 PCI_DMA_FROMDEVICE);
2544                                 if( (rxdp3->Buffer1_ptr == 0) ||
2545                                         (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
2546                                         pci_unmap_single
2547                                                 (nic->pdev,
2548                                                 (dma_addr_t)rxdp3->Buffer2_ptr,
2549                                                 dev->mtu + 4,
2550                                                 PCI_DMA_FROMDEVICE);
2551                                         goto pci_map_failed;
2552                                 }
2553                                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2554                                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2555                                                                 (dev->mtu + 4);
2556                         }
2557                         rxdp->Control_2 |= s2BIT(0);
2558                 }
2559                 rxdp->Host_Control = (unsigned long) (skb);
2560                 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2561                         rxdp->Control_1 |= RXD_OWN_XENA;
2562                 off++;
2563                 if (off == (rxd_count[nic->rxd_mode] + 1))
2564                         off = 0;
2565                 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2566
2567                 rxdp->Control_2 |= SET_RXD_MARKER;
2568                 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2569                         if (first_rxdp) {
2570                                 wmb();
2571                                 first_rxdp->Control_1 |= RXD_OWN_XENA;
2572                         }
2573                         first_rxdp = rxdp;
2574                 }
2575                 atomic_inc(&nic->rx_bufs_left[ring_no]);
2576                 alloc_tab++;
2577         }
2578
2579       end:
2580         /* Transfer ownership of first descriptor to adapter just before
2581          * exiting. Before that, use memory barrier so that ownership
2582          * and other fields are seen by adapter correctly.
2583          */
2584         if (first_rxdp) {
2585                 wmb();
2586                 first_rxdp->Control_1 |= RXD_OWN_XENA;
2587         }
2588
2589         return SUCCESS;
2590 pci_map_failed:
2591         stats->pci_map_fail_cnt++;
2592         stats->mem_freed += skb->truesize;
2593         dev_kfree_skb_irq(skb);
2594         return -ENOMEM;
2595 }
2596
2597 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2598 {
2599         struct net_device *dev = sp->dev;
2600         int j;
2601         struct sk_buff *skb;
2602         struct RxD_t *rxdp;
2603         struct mac_info *mac_control;
2604         struct buffAdd *ba;
2605         struct RxD1 *rxdp1;
2606         struct RxD3 *rxdp3;
2607
2608         mac_control = &sp->mac_control;
2609         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2610                 rxdp = mac_control->rings[ring_no].
2611                                 rx_blocks[blk].rxds[j].virt_addr;
2612                 skb = (struct sk_buff *)
2613                         ((unsigned long) rxdp->Host_Control);
2614                 if (!skb) {
2615                         continue;
2616                 }
2617                 if (sp->rxd_mode == RXD_MODE_1) {
2618                         rxdp1 = (struct RxD1*)rxdp;
2619                         pci_unmap_single(sp->pdev, (dma_addr_t)
2620                                 rxdp1->Buffer0_ptr,
2621                                 dev->mtu +
2622                                 HEADER_ETHERNET_II_802_3_SIZE
2623                                 + HEADER_802_2_SIZE +
2624                                 HEADER_SNAP_SIZE,
2625                                 PCI_DMA_FROMDEVICE);
2626                         memset(rxdp, 0, sizeof(struct RxD1));
2627                 } else if(sp->rxd_mode == RXD_MODE_3B) {
2628                         rxdp3 = (struct RxD3*)rxdp;
2629                         ba = &mac_control->rings[ring_no].
2630                                 ba[blk][j];
2631                         pci_unmap_single(sp->pdev, (dma_addr_t)
2632                                 rxdp3->Buffer0_ptr,
2633                                 BUF0_LEN,
2634                                 PCI_DMA_FROMDEVICE);
2635                         pci_unmap_single(sp->pdev, (dma_addr_t)
2636                                 rxdp3->Buffer1_ptr,
2637                                 BUF1_LEN,
2638                                 PCI_DMA_FROMDEVICE);
2639                         pci_unmap_single(sp->pdev, (dma_addr_t)
2640                                 rxdp3->Buffer2_ptr,
2641                                 dev->mtu + 4,
2642                                 PCI_DMA_FROMDEVICE);
2643                         memset(rxdp, 0, sizeof(struct RxD3));
2644                 }
2645                 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2646                 dev_kfree_skb(skb);
2647                 atomic_dec(&sp->rx_bufs_left[ring_no]);
2648         }
2649 }
2650
2651 /**
2652  *  free_rx_buffers - Frees all Rx buffers
2653  *  @sp: device private variable.
2654  *  Description:
2655  *  This function will free all Rx buffers allocated by host.
2656  *  Return Value:
2657  *  NONE.
2658  */
2659
2660 static void free_rx_buffers(struct s2io_nic *sp)
2661 {
2662         struct net_device *dev = sp->dev;
2663         int i, blk = 0, buf_cnt = 0;
2664         struct mac_info *mac_control;
2665         struct config_param *config;
2666
2667         mac_control = &sp->mac_control;
2668         config = &sp->config;
2669
2670         for (i = 0; i < config->rx_ring_num; i++) {
2671                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2672                         free_rxd_blk(sp,i,blk);
2673
2674                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2675                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2676                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2677                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2678                 atomic_set(&sp->rx_bufs_left[i], 0);
2679                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2680                           dev->name, buf_cnt, i);
2681         }
2682 }
2683
2684 /**
2685  * s2io_poll - Rx interrupt handler for NAPI support
2686  * @napi : pointer to the napi structure.
2687  * @budget : The number of packets that were budgeted to be processed
2688  * during  one pass through the 'Poll" function.
2689  * Description:
2690  * Comes into picture only if NAPI support has been incorporated. It does
2691  * the same thing that rx_intr_handler does, but not in a interrupt context
2692  * also It will process only a given number of packets.
2693  * Return value:
2694  * 0 on success and 1 if there are No Rx packets to be processed.
2695  */
2696
2697 static int s2io_poll(struct napi_struct *napi, int budget)
2698 {
2699         struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2700         struct net_device *dev = nic->dev;
2701         int pkt_cnt = 0, org_pkts_to_process;
2702         struct mac_info *mac_control;
2703         struct config_param *config;
2704         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2705         int i;
2706
2707         mac_control = &nic->mac_control;
2708         config = &nic->config;
2709
2710         nic->pkts_to_process = budget;
2711         org_pkts_to_process = nic->pkts_to_process;
2712
2713         writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2714         readl(&bar0->rx_traffic_int);
2715
2716         for (i = 0; i < config->rx_ring_num; i++) {
2717                 rx_intr_handler(&mac_control->rings[i]);
2718                 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2719                 if (!nic->pkts_to_process) {
2720                         /* Quota for the current iteration has been met */
2721                         goto no_rx;
2722                 }
2723         }
2724
2725         netif_rx_complete(dev, napi);
2726
2727         for (i = 0; i < config->rx_ring_num; i++) {
2728                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2729                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2730                         DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2731                         break;
2732                 }
2733         }
2734         /* Re enable the Rx interrupts. */
2735         writeq(0x0, &bar0->rx_traffic_mask);
2736         readl(&bar0->rx_traffic_mask);
2737         return pkt_cnt;
2738
2739 no_rx:
2740         for (i = 0; i < config->rx_ring_num; i++) {
2741                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2742                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2743                         DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2744                         break;
2745                 }
2746         }
2747         return pkt_cnt;
2748 }
2749
2750 #ifdef CONFIG_NET_POLL_CONTROLLER
2751 /**
2752  * s2io_netpoll - netpoll event handler entry point
2753  * @dev : pointer to the device structure.
2754  * Description:
2755  *      This function will be called by upper layer to check for events on the
2756  * interface in situations where interrupts are disabled. It is used for
2757  * specific in-kernel networking tasks, such as remote consoles and kernel
2758  * debugging over the network (example netdump in RedHat).
2759  */
2760 static void s2io_netpoll(struct net_device *dev)
2761 {
2762         struct s2io_nic *nic = dev->priv;
2763         struct mac_info *mac_control;
2764         struct config_param *config;
2765         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2766         u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2767         int i;
2768
2769         if (pci_channel_offline(nic->pdev))
2770                 return;
2771
2772         disable_irq(dev->irq);
2773
2774         mac_control = &nic->mac_control;
2775         config = &nic->config;
2776
2777         writeq(val64, &bar0->rx_traffic_int);
2778         writeq(val64, &bar0->tx_traffic_int);
2779
2780         /* we need to free up the transmitted skbufs or else netpoll will
2781          * run out of skbs and will fail and eventually netpoll application such
2782          * as netdump will fail.
2783          */
2784         for (i = 0; i < config->tx_fifo_num; i++)
2785                 tx_intr_handler(&mac_control->fifos[i]);
2786
2787         /* check for received packet and indicate up to network */
2788         for (i = 0; i < config->rx_ring_num; i++)
2789                 rx_intr_handler(&mac_control->rings[i]);
2790
2791         for (i = 0; i < config->rx_ring_num; i++) {
2792                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2793                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2794                         DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2795                         break;
2796                 }
2797         }
2798         enable_irq(dev->irq);
2799         return;
2800 }
2801 #endif
2802
2803 /**
2804  *  rx_intr_handler - Rx interrupt handler
2805  *  @nic: device private variable.
2806  *  Description:
2807  *  If the interrupt is because of a received frame or if the
2808  *  receive ring contains fresh as yet un-processed frames,this function is
2809  *  called. It picks out the RxD at which place the last Rx processing had
2810  *  stopped and sends the skb to the OSM's Rx handler and then increments
2811  *  the offset.
2812  *  Return Value:
2813  *  NONE.
2814  */
2815 static void rx_intr_handler(struct ring_info *ring_data)
2816 {
2817         struct s2io_nic *nic = ring_data->nic;
2818         struct net_device *dev = (struct net_device *) nic->dev;
2819         int get_block, put_block, put_offset;
2820         struct rx_curr_get_info get_info, put_info;
2821         struct RxD_t *rxdp;
2822         struct sk_buff *skb;
2823         int pkt_cnt = 0;
2824         int i;
2825         struct RxD1* rxdp1;
2826         struct RxD3* rxdp3;
2827
2828         spin_lock(&nic->rx_lock);
2829
2830         get_info = ring_data->rx_curr_get_info;
2831         get_block = get_info.block_index;
2832         memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2833         put_block = put_info.block_index;
2834         rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2835         if (!napi) {
2836                 spin_lock(&nic->put_lock);
2837                 put_offset = ring_data->put_pos;
2838                 spin_unlock(&nic->put_lock);
2839         } else
2840                 put_offset = ring_data->put_pos;
2841
2842         while (RXD_IS_UP2DT(rxdp)) {
2843                 /*
2844                  * If your are next to put index then it's
2845                  * FIFO full condition
2846                  */
2847                 if ((get_block == put_block) &&
2848                     (get_info.offset + 1) == put_info.offset) {
2849                         DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2850                         break;
2851                 }
2852                 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2853                 if (skb == NULL) {
2854                         DBG_PRINT(ERR_DBG, "%s: The skb is ",
2855                                   dev->name);
2856                         DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2857                         spin_unlock(&nic->rx_lock);
2858                         return;
2859                 }
2860                 if (nic->rxd_mode == RXD_MODE_1) {
2861                         rxdp1 = (struct RxD1*)rxdp;
2862                         pci_unmap_single(nic->pdev, (dma_addr_t)
2863                                 rxdp1->Buffer0_ptr,
2864                                 dev->mtu +
2865                                 HEADER_ETHERNET_II_802_3_SIZE +
2866                                 HEADER_802_2_SIZE +
2867                                 HEADER_SNAP_SIZE,
2868                                 PCI_DMA_FROMDEVICE);
2869                 } else if (nic->rxd_mode == RXD_MODE_3B) {
2870                         rxdp3 = (struct RxD3*)rxdp;
2871                         pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2872                                 rxdp3->Buffer0_ptr,
2873                                 BUF0_LEN, PCI_DMA_FROMDEVICE);
2874                         pci_unmap_single(nic->pdev, (dma_addr_t)
2875                                 rxdp3->Buffer2_ptr,
2876                                 dev->mtu + 4,
2877                                 PCI_DMA_FROMDEVICE);
2878                 }
2879                 prefetch(skb->data);
2880                 rx_osm_handler(ring_data, rxdp);
2881                 get_info.offset++;
2882                 ring_data->rx_curr_get_info.offset = get_info.offset;
2883                 rxdp = ring_data->rx_blocks[get_block].
2884                                 rxds[get_info.offset].virt_addr;
2885                 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2886                         get_info.offset = 0;
2887                         ring_data->rx_curr_get_info.offset = get_info.offset;
2888                         get_block++;
2889                         if (get_block == ring_data->block_count)
2890                                 get_block = 0;
2891                         ring_data->rx_curr_get_info.block_index = get_block;
2892                         rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2893                 }
2894
2895                 nic->pkts_to_process -= 1;
2896                 if ((napi) && (!nic->pkts_to_process))
2897                         break;
2898                 pkt_cnt++;
2899                 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2900                         break;
2901         }
2902         if (nic->lro) {
2903                 /* Clear all LRO sessions before exiting */
2904                 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2905                         struct lro *lro = &nic->lro0_n[i];
2906                         if (lro->in_use) {
2907                                 update_L3L4_header(nic, lro);
2908                                 queue_rx_frame(lro->parent);
2909                                 clear_lro_session(lro);
2910                         }
2911                 }
2912         }
2913
2914         spin_unlock(&nic->rx_lock);
2915 }
2916
2917 /**
2918  *  tx_intr_handler - Transmit interrupt handler
2919  *  @nic : device private variable
2920  *  Description:
2921  *  If an interrupt was raised to indicate DMA complete of the
2922  *  Tx packet, this function is called. It identifies the last TxD
2923  *  whose buffer was freed and frees all skbs whose data have already
2924  *  DMA'ed into the NICs internal memory.
2925  *  Return Value:
2926  *  NONE
2927  */
2928
static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb;
	struct TxD *txdlp;
	u8 err_mask;

	/* Snapshot consumer (get) and producer (put) positions for this fifo. */
	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
	    list_virt_addr;
	/*
	 * Reclaim every TxD list the NIC has handed back (ownership bit
	 * cleared and an skb attached) until we catch up with the producer.
	 */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				nic->mac_control.stats_info->sw_stat.
						parity_err_cnt++;
			}

			/* update t_code statistics */
			err_mask = err >> 48;
			switch(err_mask) {
				case 2:
					nic->mac_control.stats_info->sw_stat.
							tx_buf_abort_cnt++;
				break;

				case 3:
					nic->mac_control.stats_info->sw_stat.
							tx_desc_abort_cnt++;
				break;

				case 7:
					nic->mac_control.stats_info->sw_stat.
							tx_parity_err_cnt++;
				break;

				case 10:
					nic->mac_control.stats_info->sw_stat.
							tx_link_loss_cnt++;
				break;

				case 15:
					nic->mac_control.stats_info->sw_stat.
							tx_list_proc_err_cnt++;
				break;
			}
		}

		/* Unmap the DMA buffers and recover the skb for this TxD list. */
		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: Null skb ",
			__FUNCTION__);
			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
			return;
		}

		/* Updating the statistics block */
		nic->stats.tx_bytes += skb->len;
		nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
		dev_kfree_skb_irq(skb);

		/* Advance the consumer offset, wrapping at fifo_len + 1. */
		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = (struct TxD *) fifo_data->list_info
		    [get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset =
		    get_info.offset;
	}

	/* Descriptors were freed above, so the Tx queue may be restarted. */
	spin_lock(&nic->tx_lock);
	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);
	spin_unlock(&nic->tx_lock);
}
3011
3012 /**
3013  *  s2io_mdio_write - Function to write in to MDIO registers
3014  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3015  *  @addr     : address value
3016  *  @value    : data value
3017  *  @dev      : pointer to net_device structure
3018  *  Description:
3019  *  This function is used to write values to the MDIO registers
3020  *  NONE
3021  */
static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
{
	u64 val64 = 0x0;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	//address transaction: latch the target MMD register address
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
			| MDIO_MMD_DEV_ADDR(mmd_type)
			| MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	//Data transaction: write the 16-bit value to the latched address
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
			| MDIO_MMD_DEV_ADDR(mmd_type)
			| MDIO_MMS_PRT_ADDR(0x0)
			| MDIO_MDIO_DATA(value)
			| MDIO_OP(MDIO_OP_WRITE_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	//Read transaction after the write; presumably confirms/flushes the
	//write on the MDIO bus -- result is discarded
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
	| MDIO_MMD_DEV_ADDR(mmd_type)
	| MDIO_MMS_PRT_ADDR(0x0)
	| MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

}
3060
3061 /**
3062  *  s2io_mdio_read - Function to write in to MDIO registers
3063  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3064  *  @addr     : address value
3065  *  @dev      : pointer to net_device structure
3066  *  Description:
3067  *  This function is used to read values to the MDIO registers
3068  *  NONE
3069  */
static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
{
	u64 val64 = 0x0;
	u64 rval64 = 0x0;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction: latch the target MMD register address */
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
			| MDIO_MMD_DEV_ADDR(mmd_type)
			| MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction: issue the read from the latched address */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
			| MDIO_MMD_DEV_ADDR(mmd_type)
			| MDIO_MMS_PRT_ADDR(0x0)
			| MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Read the value from regs: data lives in bits 16..31 of mdio_control */
	rval64 = readq(&bar0->mdio_control);
	rval64 = rval64 & 0xFFFF0000;
	rval64 = rval64 >> 16;
	return rval64;
}
3103 /**
3104  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3105  *  @counter      : couter value to be updated
3106  *  @flag         : flag to indicate the status
3107  *  @type         : counter type
3108  *  Description:
3109  *  This function is to check the status of the xpak counters value
3110  *  NONE
3111  */
3112
3113 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3114 {
3115         u64 mask = 0x3;
3116         u64 val64;
3117         int i;
3118         for(i = 0; i <index; i++)
3119                 mask = mask << 0x2;
3120
3121         if(flag > 0)
3122         {
3123                 *counter = *counter + 1;
3124                 val64 = *regs_stat & mask;
3125                 val64 = val64 >> (index * 0x2);
3126                 val64 = val64 + 1;
3127                 if(val64 == 3)
3128                 {
3129                         switch(type)
3130                         {
3131                         case 1:
3132                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3133                                           "service. Excessive temperatures may "
3134                                           "result in premature transceiver "
3135                                           "failure \n");
3136                         break;
3137                         case 2:
3138                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3139                                           "service Excessive bias currents may "
3140                                           "indicate imminent laser diode "
3141                                           "failure \n");
3142                         break;
3143                         case 3:
3144                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3145                                           "service Excessive laser output "
3146                                           "power may saturate far-end "
3147                                           "receiver\n");
3148                         break;
3149                         default:
3150                                 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3151                                           "type \n");
3152                         }
3153                         val64 = 0x0;
3154                 }
3155                 val64 = val64 << (index * 0x2);
3156                 *regs_stat = (*regs_stat & (~mask)) | (val64);
3157
3158         } else {
3159                 *regs_stat = *regs_stat & (~mask);
3160         }
3161 }
3162
3163 /**
3164  *  s2io_updt_xpak_counter - Function to update the xpak counters
3165  *  @dev         : pointer to net_device struct
3166  *  Description:
3167  *  This function is to upate the status of the xpak counters value
3168  *  NONE
3169  */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag  = 0x0;
	u16 type  = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr  = 0x0;

	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Check the communication with the MDIO slave */
	addr = 0x0000;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
	/* All-ones / all-zeros reads indicate a dead or absent MDIO slave. */
	if((val64 == 0xFFFF) || (val64 == 0x0000))
	{
		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
			  "Returned %llx\n", (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of 2040 at PMA address 0x0000 */
	if(val64 != 0x2040)
	{
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
		DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
			  (unsigned long long)val64);
		return;
	}

	/* Loading the DOM register to MDIO register */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Bit 7: transceiver-temperature-high alarm (saturating counter). */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x0, flag, type);

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.alarm_transceiver_temp_low++;

	/* Bit 3: laser-bias-current-high alarm. */
	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x2, flag, type);

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.alarm_laser_bias_current_low++;

	/* Bit 1: laser-output-power-high alarm. */
	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x4, flag, type);

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.alarm_laser_output_power_low++;

	/* Reading the Warning flags */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	if(CHECKBIT(val64, 0x7))
		stat_info->xpak_stat.warn_transceiver_temp_high++;

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.warn_transceiver_temp_low++;

	if(CHECKBIT(val64, 0x3))
		stat_info->xpak_stat.warn_laser_bias_current_high++;

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.warn_laser_bias_current_low++;

	if(CHECKBIT(val64, 0x1))
		stat_info->xpak_stat.warn_laser_output_power_high++;

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.warn_laser_output_power_low++;
}
3261
3262 /**
3263  *  wait_for_cmd_complete - waits for a command to complete.
3264  *  @sp : private member of the device structure, which is a pointer to the
3265  *  s2io_nic structure.
3266  *  Description: Function that waits for a command to Write into RMAC
3267  *  ADDR DATA registers to be completed and returns either success or
3268  *  error depending on whether the command was complete or not.
3269  *  Return value:
3270  *   SUCCESS on success and FAILURE on failure.
3271  */
3272
3273 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3274                                 int bit_state)
3275 {
3276         int ret = FAILURE, cnt = 0, delay = 1;
3277         u64 val64;
3278
3279         if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3280                 return FAILURE;
3281
3282         do {
3283                 val64 = readq(addr);
3284                 if (bit_state == S2IO_BIT_RESET) {
3285                         if (!(val64 & busy_bit)) {
3286                                 ret = SUCCESS;
3287                                 break;
3288                         }
3289                 } else {
3290                         if (!(val64 & busy_bit)) {
3291                                 ret = SUCCESS;
3292                                 break;
3293                         }
3294                 }
3295
3296                 if(in_interrupt())
3297                         mdelay(delay);
3298                 else
3299                         msleep(delay);
3300
3301                 if (++cnt >= 10)
3302                         delay = 50;
3303         } while (cnt < 20);
3304         return ret;
3305 }
3306 /*
3307  * check_pci_device_id - Checks if the device id is supported
3308  * @id : device id
3309  * Description: Function to check if the pci device id is supported by driver.
3310  * Return value: Actual device id if supported else PCI_ANY_ID
3311  */
3312 static u16 check_pci_device_id(u16 id)
3313 {
3314         switch (id) {
3315         case PCI_DEVICE_ID_HERC_WIN:
3316         case PCI_DEVICE_ID_HERC_UNI:
3317                 return XFRAME_II_DEVICE;
3318         case PCI_DEVICE_ID_S2IO_UNI:
3319         case PCI_DEVICE_ID_S2IO_WIN:
3320                 return XFRAME_I_DEVICE;
3321         default:
3322                 return PCI_ANY_ID;
3323         }
3324 }
3325
3326 /**
3327  *  s2io_reset - Resets the card.
3328  *  @sp : private member of the device structure.
3329  *  Description: Function to Reset the card. This function then also
3330  *  restores the previously saved PCI configuration space registers as
3331  *  the card reset also resets the configuration space.
3332  *  Return value:
3333  *  void.
3334  */
3335
static void s2io_reset(struct s2io_nic * sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;

	DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
			__FUNCTION__, sp->dev->name);

	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	/* Issue the global software reset. */
	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	/* CX4 boards need extra settle time after reset. */
	if (strstr(sp->product_name, "CX4")) {
		msleep(750);
	}
	msleep(250);
	/*
	 * Poll until config space comes back: a recognizable device id at
	 * config offset 0x2 means the card has finished resetting.
	 */
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
		DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
	}

	/* Put back the PCI-X command register saved above. */
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(s2BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof (struct net_device_stats));

	up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
	down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
	up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
	down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
	reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
	mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
	mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
	watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
	sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
	sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
	sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
	sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
	sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
	sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
	sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occurred on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	/* restore the previously assigned mac address */
	do_s2io_prog_unicast(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);

	sp->device_enabled_once = FALSE;
}
3442
3443 /**
3444  *  s2io_set_swapper - to set the swapper controle on the card
3445  *  @sp : private member of the device structure,
3446  *  pointer to the s2io_nic structure.
3447  *  Description: Function to set the swapper control on the card
3448  *  correctly depending on the 'endianness' of the system.
3449  *  Return value:
3450  *  SUCCESS on success and FAILURE on failure.
3451  */
3452
static int s2io_set_swapper(struct s2io_nic * sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		int i = 0;
		u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
				0x8100008181000081ULL,  /* FE=1, SE=0 */
				0x4200004242000042ULL,  /* FE=0, SE=1 */
				0};                     /* FE=0, SE=0 */

		/* Try each read-path swap setting until feedback matches. */
		while(i<4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
				dev->name);
			DBG_PRINT(ERR_DBG, "feedback read %llx\n",
				(unsigned long long) val64);
			return FAILURE;
		}
		valr = value[i];
	} else {
		valr = readq(&bar0->swapper_ctrl);
	}

	/* Verify the write path with a known pattern via xmsi_address. */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if(val64 != valt) {
		int i = 0;
		u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
				0x0081810000818100ULL,  /* FE=1, SE=0 */
				0x0042420000424200ULL,  /* FE=0, SE=1 */
				0};                     /* FE=0, SE=0 */

		/* Combine each write-path candidate with the working read bits. */
		while(i<4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if(val64 == valt)
				break;
			i++;
		}
		if(i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
			DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Keep only the high control bits; per-path enables are rebuilt below. */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef  __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_R_SE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXD_W_SE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_R_SE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXD_W_SE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "feedback read %llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	return SUCCESS;
}
3580
3581 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3582 {
3583         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3584         u64 val64;
3585         int ret = 0, cnt = 0;
3586
3587         do {
3588                 val64 = readq(&bar0->xmsi_access);
3589                 if (!(val64 & s2BIT(15)))
3590                         break;
3591                 mdelay(1);
3592                 cnt++;
3593         } while(cnt < 5);
3594         if (cnt == 5) {
3595                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3596                 ret = 1;
3597         }
3598
3599         return ret;
3600 }
3601
static void restore_xmsi_data(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64;
	int i;

	/* Re-program every MSI-X vector from the locally saved copies. */
	for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
		/* s2BIT(7) presumably selects a write op, s2BIT(15) strobes
		 * the access, vBIT(i,26,6) selects the vector -- TODO confirm */
		val64 = (s2BIT(7) | s2BIT(15) | vBIT(i, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, i)) {
			DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
			continue;
		}
	}
}
3619
3620 static void store_xmsi_data(struct s2io_nic *nic)
3621 {
3622         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3623         u64 val64, addr, data;
3624         int i;
3625
3626         /* Store and display */
3627         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3628                 val64 = (s2BIT(15) | vBIT(i, 26, 6));
3629                 writeq(val64, &bar0->xmsi_access);
3630                 if (wait_for_msix_trans(nic, i)) {
3631                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3632                         continue;
3633                 }
3634                 addr = readq(&bar0->xmsi_address);
3635                 data = readq(&bar0->xmsi_data);
3636                 if (addr && data) {
3637                         nic->msix_info[i].addr = addr;
3638                         nic->msix_info[i].data = data;
3639                 }
3640         }
3641 }
3642
3643 static int s2io_enable_msi_x(struct s2io_nic *nic)
3644 {
3645         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3646         u64 tx_mat, rx_mat;
3647         u16 msi_control; /* Temp variable */
3648         int ret, i, j, msix_indx = 1;
3649
3650         nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry),
3651                                GFP_KERNEL);
3652         if (!nic->entries) {
3653                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3654                         __FUNCTION__);
3655                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3656                 return -ENOMEM;
3657         }
3658         nic->mac_control.stats_info->sw_stat.mem_allocated
3659                 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3660
3661         nic->s2io_entries =
3662                 kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry),
3663                                    GFP_KERNEL);
3664         if (!nic->s2io_entries) {
3665                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3666                         __FUNCTION__);
3667                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3668                 kfree(nic->entries);
3669                 nic->mac_control.stats_info->sw_stat.mem_freed
3670                         += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3671                 return -ENOMEM;
3672         }
3673          nic->mac_control.stats_info->sw_stat.mem_allocated
3674                 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3675
3676         for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3677                 nic->entries[i].entry = i;
3678                 nic->s2io_entries[i].entry = i;
3679                 nic->s2io_entries[i].arg = NULL;
3680                 nic->s2io_entries[i].in_use = 0;
3681         }
3682
3683         tx_mat = readq(&bar0->tx_mat0_n[0]);
3684         for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3685                 tx_mat |= TX_MAT_SET(i, msix_indx);
3686                 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3687                 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3688                 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3689         }
3690         writeq(tx_mat, &bar0->tx_mat0_n[0]);
3691
3692         rx_mat = readq(&bar0->rx_mat);
3693         for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
3694                 rx_mat |= RX_MAT_SET(j, msix_indx);
3695                 nic->s2io_entries[msix_indx].arg
3696                         = &nic->mac_control.rings[j];
3697                 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3698                 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3699         }
3700         writeq(rx_mat, &bar0->rx_mat);
3701
3702         nic->avail_msix_vectors = 0;
3703         ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3704         /* We fail init if error or we get less vectors than min required */
3705         if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3706                 nic->avail_msix_vectors = ret;
3707                 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3708         }
3709         if (ret) {
3710                 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3711                 kfree(nic->entries);
3712                 nic->mac_control.stats_info->sw_stat.mem_freed
3713                         += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3714                 kfree(nic->s2io_entries);
3715                 nic->mac_control.stats_info->sw_stat.mem_freed
3716                 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3717                 nic->entries = NULL;
3718                 nic->s2io_entries = NULL;
3719                 nic->avail_msix_vectors = 0;
3720                 return -ENOMEM;
3721         }
3722         if (!nic->avail_msix_vectors)
3723                 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3724
3725         /*
3726          * To enable MSI-X, MSI also needs to be enabled, due to a bug
3727          * in the herc NIC. (Temp change, needs to be removed later)
3728          */
3729         pci_read_config_word(nic->pdev, 0x42, &msi_control);
3730         msi_control |= 0x1; /* Enable MSI */
3731         pci_write_config_word(nic->pdev, 0x42, msi_control);
3732
3733         return 0;
3734 }
3735
3736 /* Handle software interrupt used during MSI(X) test */
3737 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3738 {
3739         struct s2io_nic *sp = dev_id;
3740
3741         sp->msi_detected = 1;
3742         wake_up(&sp->msi_wait);
3743
3744         return IRQ_HANDLED;
3745 }
3746
3747 /* Test interrupt path by forcing a a software IRQ */
3748 static int s2io_test_msi(struct s2io_nic *sp)
3749 {
3750         struct pci_dev *pdev = sp->pdev;
3751         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3752         int err;
3753         u64 val64, saved64;
3754
3755         err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3756                         sp->name, sp);
3757         if (err) {
3758                 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3759                        sp->dev->name, pci_name(pdev), pdev->irq);
3760                 return err;
3761         }
3762
3763         init_waitqueue_head (&sp->msi_wait);
3764         sp->msi_detected = 0;
3765
3766         saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3767         val64 |= SCHED_INT_CTRL_ONE_SHOT;
3768         val64 |= SCHED_INT_CTRL_TIMER_EN;
3769         val64 |= SCHED_INT_CTRL_INT2MSI(1);
3770         writeq(val64, &bar0->scheduled_int_ctrl);
3771
3772         wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3773
3774         if (!sp->msi_detected) {
3775                 /* MSI(X) test failed, go back to INTx mode */
3776                 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated"
3777                         "using MSI(X) during test\n", sp->dev->name,
3778                         pci_name(pdev));
3779
3780                 err = -EOPNOTSUPP;
3781         }
3782
3783         free_irq(sp->entries[1].vector, sp);
3784
3785         writeq(saved64, &bar0->scheduled_int_ctrl);
3786
3787         return err;
3788 }
3789
3790 static void remove_msix_isr(struct s2io_nic *sp)
3791 {
3792         int i;
3793         u16 msi_control;
3794
3795         for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3796                 if (sp->s2io_entries[i].in_use ==
3797                         MSIX_REGISTERED_SUCCESS) {
3798                         int vector = sp->entries[i].vector;
3799                         void *arg = sp->s2io_entries[i].arg;
3800                         free_irq(vector, arg);
3801                 }
3802         }
3803
3804         kfree(sp->entries);
3805         kfree(sp->s2io_entries);
3806         sp->entries = NULL;
3807         sp->s2io_entries = NULL;
3808
3809         pci_read_config_word(sp->pdev, 0x42, &msi_control);
3810         msi_control &= 0xFFFE; /* Disable MSI */
3811         pci_write_config_word(sp->pdev, 0x42, msi_control);
3812
3813         pci_disable_msix(sp->pdev);
3814 }
3815
3816 static void remove_inta_isr(struct s2io_nic *sp)
3817 {
3818         struct net_device *dev = sp->dev;
3819
3820         free_irq(sp->pdev->irq, dev);
3821 }
3822
3823 /* ********************************************************* *
3824  * Functions defined below concern the OS part of the driver *
3825  * ********************************************************* */
3826
3827 /**
3828  *  s2io_open - open entry point of the driver
3829  *  @dev : pointer to the device structure.
3830  *  Description:
3831  *  This function is the open entry point of the driver. It mainly calls a
3832  *  function to allocate Rx buffers and inserts them into the buffer
3833  *  descriptors and then enables the Rx part of the NIC.
3834  *  Return value:
3835  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3836  *   file on failure.
3837  */
3838
3839 static int s2io_open(struct net_device *dev)
3840 {
3841         struct s2io_nic *sp = dev->priv;
3842         int err = 0;
3843
3844         /*
3845          * Make sure you have link off by default every time
3846          * Nic is initialized
3847          */
3848         netif_carrier_off(dev);
3849         sp->last_link_state = 0;
3850
3851         napi_enable(&sp->napi);
3852
3853         if (sp->config.intr_type == MSI_X) {
3854                 int ret = s2io_enable_msi_x(sp);
3855
3856                 if (!ret) {
3857                         ret = s2io_test_msi(sp);
3858                         /* rollback MSI-X, will re-enable during add_isr() */
3859                         remove_msix_isr(sp);
3860                 }
3861                 if (ret) {
3862
3863                         DBG_PRINT(ERR_DBG,
3864                           "%s: MSI-X requested but failed to enable\n",
3865                           dev->name);
3866                         sp->config.intr_type = INTA;
3867                 }
3868         }
3869
3870         /* NAPI doesn't work well with MSI(X) */
3871          if (sp->config.intr_type != INTA) {
3872                 if(sp->config.napi)
3873                         sp->config.napi = 0;
3874         }
3875
3876         /* Initialize H/W and enable interrupts */
3877         err = s2io_card_up(sp);
3878         if (err) {
3879                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3880                           dev->name);
3881                 goto hw_init_failed;
3882         }
3883
3884         if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3885                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3886                 s2io_card_down(sp);
3887                 err = -ENODEV;
3888                 goto hw_init_failed;
3889         }
3890
3891         netif_start_queue(dev);
3892         return 0;
3893
3894 hw_init_failed:
3895         napi_disable(&sp->napi);
3896         if (sp->config.intr_type == MSI_X) {
3897                 if (sp->entries) {
3898                         kfree(sp->entries);
3899                         sp->mac_control.stats_info->sw_stat.mem_freed
3900                         += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3901                 }
3902                 if (sp->s2io_entries) {
3903                         kfree(sp->s2io_entries);
3904                         sp->mac_control.stats_info->sw_stat.mem_freed
3905                         += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3906                 }
3907         }
3908         return err;
3909 }
3910
3911 /**
3912  *  s2io_close -close entry point of the driver
3913  *  @dev : device pointer.
3914  *  Description:
3915  *  This is the stop entry point of the driver. It needs to undo exactly
3916  *  whatever was done by the open entry point,thus it's usually referred to
3917  *  as the close function.Among other things this function mainly stops the
3918  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3919  *  Return value:
3920  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3921  *  file on failure.
3922  */
3923
3924 static int s2io_close(struct net_device *dev)
3925 {
3926         struct s2io_nic *sp = dev->priv;
3927
3928         /* Return if the device is already closed               *
3929         *  Can happen when s2io_card_up failed in change_mtu    *
3930         */
3931         if (!is_s2io_card_up(sp))
3932                 return 0;
3933
3934         netif_stop_queue(dev);
3935         napi_disable(&sp->napi);
3936         /* Reset card, kill tasklet and free Tx and Rx buffers. */
3937         s2io_card_down(sp);
3938
3939         return 0;
3940 }
3941
/**
 *  s2io_xmit - Tx entry point of the driver
 *  @skb : the socket buffer containing the Tx data.
 *  @dev : device pointer.
 *  Description :
 *  This function is the Tx entry point of the driver. S2IO NIC supports
 *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
 *  NOTE: when device cant queue the pkt, just the trans_start variable will
 *  not be updated.
 *  Return value:
 *  0 on success & 1 on failure.
 */

static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags;
	u16 vlan_tag = 0;
	int vlan_priority = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	int offload_type;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	mac_control = &sp->mac_control;
	config = &sp->config;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

	/* Drop empty skbs outright */
	if (unlikely(skb->len <= 0)) {
		DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
		dev_kfree_skb_any(skb);
		return 0;
}

	spin_lock_irqsave(&sp->tx_lock, flags);
	/* Card is being reset or brought down: drop the packet */
	if (!is_s2io_card_up(sp)) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		dev_kfree_skb(skb);
		return 0;
	}

	queue = 0;
	/* Get Fifo number to Transmit based on vlan priority */
	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_priority = vlan_tag >> 13; /* 802.1p priority bits */
		queue = config->fifo_mapping[vlan_priority];
	}

	/* Locate the next free descriptor in the chosen FIFO's ring */
	put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
	get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
	txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
		list_virt_addr;

	queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control ||
		   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		netif_stop_queue(dev);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		return 0;
	}

	/* Enable TCP segmentation offload when requested by the stack */
	offload_type = s2io_offload_type(skb);
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	/* Hardware checksum offload for IPv4/TCP/UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |=
		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
		     TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= config->tx_intr_type;

	/* Ask the NIC to insert the VLAN tag on the wire */
	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	frg_len = skb->len - skb->data_len;
	if (offload_type == SKB_GSO_UDP) {
		int ufo_size;

		/* UFO: descriptor 0 carries an 8-byte in-band header
		 * (the IPv6 fragmentation id) mapped for DMA below. */
		ufo_size = s2io_udp_mss(skb);
		ufo_size &= ~7;
		txdp->Control_1 |= TXD_UFO_EN;
		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
		sp->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id;
#else
		sp->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
		txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
					sp->ufo_in_band_v,
					sizeof(u64), PCI_DMA_TODEVICE);
		if((txdp->Buffer_Pointer == 0) ||
			(txdp->Buffer_Pointer == DMA_ERROR_CODE))
			goto pci_map_failed;
		txdp++;
	}

	/* Map the linear part of the skb into the (next) descriptor */
	txdp->Buffer_Pointer = pci_map_single
	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
	if((txdp->Buffer_Pointer == 0) ||
		(txdp->Buffer_Pointer == DMA_ERROR_CODE))
		goto pci_map_failed;

	/* Host_Control holds the skb so tx_intr_handler can free it */
	txdp->Host_Control = (unsigned long) skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
	if (offload_type == SKB_GSO_UDP)
		txdp->Control_1 |= TXD_UFO_EN;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!frag->size)
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64) pci_map_page
		    (sp->pdev, frag->page, frag->page_offset,
		     frag->size, PCI_DMA_TODEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
		if (offload_type == SKB_GSO_UDP)
			txdp->Control_1 |= TXD_UFO_EN;
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	if (offload_type == SKB_GSO_UDP)
		frg_cnt++; /* as Txd0 was used for inband header */

	/* Hand the descriptor list to the FIFO and ring the doorbell */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	mmiowb();

	/* Advance the put pointer, wrapping at the end of the ring */
	put_off++;
	if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	mac_control->fifos[queue].tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		netif_stop_queue(dev);
	}
	mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return 0;
pci_map_failed:
	/* NOTE(review): DMA mappings made earlier in this call are not
	 * unmapped here - confirm whether that leak is handled elsewhere. */
	stats->pci_map_fail_cnt++;
	netif_stop_queue(dev);
	stats->mem_freed += skb->truesize;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&sp->tx_lock, flags);
	return 0;
}
4129
4130 static void
4131 s2io_alarm_handle(unsigned long data)
4132 {
4133         struct s2io_nic *sp = (struct s2io_nic *)data;
4134         struct net_device *dev = sp->dev;
4135
4136         s2io_handle_errors(dev);
4137         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4138 }
4139
/*
 * Replenish Rx buffers for ring @rng_n after an Rx interrupt.
 *
 * Without LRO, buffers are refilled synchronously only when the ring
 * has drained to PANIC level (and the refill tasklet is not already
 * running); at LOW level the tasklet is scheduled instead.  With LRO
 * the ring is refilled unconditionally.
 * Returns 0 normally, -1 if a synchronous refill ran out of memory.
 */
static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
{
	int rxb_size, level;

	if (!sp->lro) {
		rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
		level = rx_buffer_level(sp, rxb_size, rng_n);

		/* NOTE(review): TASKLET_IN_USE presumably does a
		 * test_and_set_bit on tasklet_status, which is why bit 0
		 * is cleared on every exit path below - confirm against
		 * the macro definition. */
		if ((level == PANIC) && (!TASKLET_IN_USE)) {
			int ret;
			DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
			DBG_PRINT(INTR_DBG, "PANIC levels\n");
			if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
				DBG_PRINT(INFO_DBG, "Out of memory in %s",
					  __FUNCTION__);
				clear_bit(0, (&sp->tasklet_status));
				return -1;
			}
			clear_bit(0, (&sp->tasklet_status));
		} else if (level == LOW)
			tasklet_schedule(&sp->task);

	} else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
	}
	return 0;
}
4168
4169 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4170 {
4171         struct ring_info *ring = (struct ring_info *)dev_id;
4172         struct s2io_nic *sp = ring->nic;
4173
4174         if (!is_s2io_card_up(sp))
4175                 return IRQ_HANDLED;
4176
4177         rx_intr_handler(ring);
4178         s2io_chk_rx_buffers(sp, ring->ring_no);
4179
4180         return IRQ_HANDLED;
4181 }
4182
4183 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4184 {
4185         struct fifo_info *fifo = (struct fifo_info *)dev_id;
4186         struct s2io_nic *sp = fifo->nic;
4187
4188         if (!is_s2io_card_up(sp))
4189                 return IRQ_HANDLED;
4190
4191         tx_intr_handler(fifo);
4192         return IRQ_HANDLED;
4193 }
/*
 * s2io_txpic_intr_handle - service GPIO-sourced PIC interrupts.
 *
 * Handles link up/down indications delivered via the GPIO interrupt
 * register: enables the adapter and LED on link-up, reports the new
 * link state, and re-programs the GPIO interrupt mask so that only
 * the opposite transition stays unmasked.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |=  GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		}
		else if (val64 & GPIO_INT_REG_LINK_UP) {
			/* NOTE(review): result of this status read is
			 * discarded - presumably it just latches/acks the
			 * register; confirm before removing. */
			val64 = readq(&bar0->adapter_status);
				/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		}else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 &(~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* NOTE(review): final mask read looks like a flush of the posted
	 * MMIO writes above - confirm before removing. */
	val64 = readq(&bar0->gpio_int_mask);
}
4254
4255 /**
4256  *  do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4257  *  @value: alarm bits
4258  *  @addr: address value
4259  *  @cnt: counter variable
4260  *  Description: Check for alarm and increment the counter
4261  *  Return Value:
4262  *  1 - if alarm bit set
4263  *  0 - if alarm bit is not set
4264  */
4265 static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4266                           unsigned long long *cnt)
4267 {
4268         u64 val64;
4269         val64 = readq(addr);
4270         if ( val64 & value ) {
4271                 writeq(val64, addr);
4272                 (*cnt)++;
4273                 return 1;
4274         }
4275         return 0;
4276
4277 }
4278
4279 /**
4280  *  s2io_handle_errors - Xframe error indication handler
4281  *  @nic: device private variable
4282  *  Description: Handle alarms such as loss of link, single or
4283  *  double ECC errors, critical and serious errors.
4284  *  Return Value:
4285  *  NONE
4286  */
4287 static void s2io_handle_errors(void * dev_id)
4288 {
4289         struct net_device *dev = (struct net_device *) dev_id;
4290         struct s2io_nic *sp = dev->priv;
4291         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4292         u64 temp64 = 0,val64=0;
4293         int i = 0;
4294
4295         struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4296         struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4297
4298         if (!is_s2io_card_up(sp))
4299                 return;
4300
4301         if (pci_channel_offline(sp->pdev))
4302                 return;
4303
4304         memset(&sw_stat->ring_full_cnt, 0,
4305                 sizeof(sw_stat->ring_full_cnt));
4306
4307         /* Handling the XPAK counters update */
4308         if(stats->xpak_timer_count < 72000) {
4309                 /* waiting for an hour */
4310                 stats->xpak_timer_count++;
4311         } else {
4312                 s2io_updt_xpak_counter(dev);
4313                 /* reset the count to zero */
4314                 stats->xpak_timer_count = 0;
4315         }
4316
4317         /* Handling link status change error Intr */
4318         if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4319                 val64 = readq(&bar0->mac_rmac_err_reg);
4320                 writeq(val64, &bar0->mac_rmac_err_reg);
4321                 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4322                         schedule_work(&sp->set_link_task);
4323         }
4324
4325         /* In case of a serious error, the device will be Reset. */
4326         if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4327                                 &sw_stat->serious_err_cnt))
4328                 goto reset;
4329
4330         /* Check for data parity error */
4331         if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4332                                 &sw_stat->parity_err_cnt))
4333                 goto reset;
4334
4335         /* Check for ring full counter */
4336         if (sp->device_type == XFRAME_II_DEVICE) {
4337                 val64 = readq(&bar0->ring_bump_counter1);
4338                 for (i=0; i<4; i++) {
4339                         temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4340                         temp64 >>= 64 - ((i+1)*16);
4341                         sw_stat->ring_full_cnt[i] += temp64;
4342                 }
4343
4344                 val64 = readq(&bar0->ring_bump_counter2);
4345                 for (i=0; i<4; i++) {
4346                         temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4347                         temp64 >>= 64 - ((i+1)*16);
4348                          sw_stat->ring_full_cnt[i+4] += temp64;
4349                 }
4350         }
4351
4352         val64 = readq(&bar0->txdma_int_status);
4353         /*check for pfc_err*/
4354         if (val64 & TXDMA_PFC_INT) {
4355                 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4356                                 PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4357                                 PFC_PCIX_ERR, &bar0->pfc_err_reg,
4358                                 &sw_stat->pfc_err_cnt))
4359                         goto reset;
4360                 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4361                                 &sw_stat->pfc_err_cnt);
4362         }
4363
4364         /*check for tda_err*/
4365         if (val64 & TXDMA_TDA_INT) {
4366                 if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4367                                 TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4368                                 &sw_stat->tda_err_cnt))
4369                         goto reset;
4370                 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4371                                 &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4372         }
4373         /*check for pcc_err*/
4374         if (val64 & TXDMA_PCC_INT) {
4375                 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4376                                 | PCC_N_SERR | PCC_6_COF_OV_ERR
4377                                 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4378                                 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4379                                 | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4380                                 &sw_stat->pcc_err_cnt))
4381                         goto reset;
4382                 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4383                                 &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4384         }
4385
4386         /*check for tti_err*/
4387         if (val64 & TXDMA_TTI_INT) {
4388                 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4389                                 &sw_stat->tti_err_cnt))
4390                         goto reset;
4391                 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4392                                 &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4393         }
4394
4395         /*check for lso_err*/
4396         if (val64 & TXDMA_LSO_INT) {
4397                 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4398                                 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4399                                 &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4400                         goto reset;
4401                 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4402                                 &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4403         }
4404
4405         /*check for tpa_err*/
4406         if (val64 & TXDMA_TPA_INT) {
4407                 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4408                         &sw_stat->tpa_err_cnt))
4409                         goto reset;
4410                 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4411                         &sw_stat->tpa_err_cnt);
4412         }
4413
4414         /*check for sm_err*/
4415         if (val64 & TXDMA_SM_INT) {
4416                 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4417                         &sw_stat->sm_err_cnt))
4418                         goto reset;
4419         }
4420
4421         val64 = readq(&bar0->mac_int_status);
4422         if (val64 & MAC_INT_STATUS_TMAC_INT) {
4423                 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4424                                 &bar0->mac_tmac_err_reg,
4425                                 &sw_stat->mac_tmac_err_cnt))
4426                         goto reset;
4427                 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4428                                 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4429                                 &bar0->mac_tmac_err_reg,
4430                                 &sw_stat->mac_tmac_err_cnt);
4431         }
4432
4433         val64 = readq(&bar0->xgxs_int_status);
4434         if (val64 & XGXS_INT_STATUS_TXGXS) {
4435                 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4436                                 &bar0->xgxs_txgxs_err_reg,
4437                                 &sw_stat->xgxs_txgxs_err_cnt))
4438                         goto reset;
4439                 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4440                                 &bar0->xgxs_txgxs_err_reg,
4441                                 &sw_stat->xgxs_txgxs_err_cnt);
4442         }
4443
4444         val64 = readq(&bar0->rxdma_int_status);
4445         if (val64 & RXDMA_INT_RC_INT_M) {
4446                 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4447                                 | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4448                                 &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4449                         goto reset;
4450                 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4451                                 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4452                                 &sw_stat->rc_err_cnt);
4453                 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4454                                 | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4455                                 &sw_stat->prc_pcix_err_cnt))
4456                         goto reset;
4457                 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4458                                 | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4459                                 &sw_stat->prc_pcix_err_cnt);
4460         }
4461
4462         if (val64 & RXDMA_INT_RPA_INT_M) {
4463                 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4464                                 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4465                         goto reset;
4466                 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4467                                 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4468         }
4469
4470         if (val64 & RXDMA_INT_RDA_INT_M) {
4471                 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4472                                 | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4473                                 | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4474                                 &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4475                         goto reset;
4476                 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4477                                 | RDA_MISC_ERR | RDA_PCIX_ERR,
4478                                 &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4479         }
4480
4481         if (val64 & RXDMA_INT_RTI_INT_M) {
4482                 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4483                                 &sw_stat->rti_err_cnt))
4484                         goto reset;
4485                 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4486                                 &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
4487         }
4488
4489         val64 = readq(&bar0->mac_int_status);
4490         if (val64 & MAC_INT_STATUS_RMAC_INT) {
4491                 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4492                                 &bar0->mac_rmac_err_reg,
4493                                 &sw_stat->mac_rmac_err_cnt))
4494                         goto reset;
4495                 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4496                                 RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4497                                 &sw_stat->mac_rmac_err_cnt);
4498         }
4499
4500         val64 = readq(&bar0->xgxs_int_status);
4501         if (val64 & XGXS_INT_STATUS_RXGXS) {
4502                 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4503                                 &bar0->xgxs_rxgxs_err_reg,
4504                                 &sw_stat->xgxs_rxgxs_err_cnt))
4505                         goto reset;
4506         }
4507
4508         val64 = readq(&bar0->mc_int_status);
4509         if(val64 & MC_INT_STATUS_MC_INT) {
4510                 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4511                                 &sw_stat->mc_err_cnt))
4512                         goto reset;
4513
4514                 /* Handling Ecc errors */
4515                 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4516                         writeq(val64, &bar0->mc_err_reg);
4517                         if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4518                                 sw_stat->double_ecc_errs++;
4519                                 if (sp->device_type != XFRAME_II_DEVICE) {
4520                                         /*
4521                                          * Reset XframeI only if critical error
4522                                          */
4523                                         if (val64 &
4524                                                 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4525                                                 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4526                                                                 goto reset;
4527                                         }
4528                         } else
4529                                 sw_stat->single_ecc_errs++;
4530                 }
4531         }
4532         return;
4533
4534 reset:
4535         netif_stop_queue(dev);
4536         schedule_work(&sp->rst_timer_task);
4537         sw_stat->soft_reset_cnt++;
4538         return;
4539 }
4540
4541 /**
4542  *  s2io_isr - ISR handler of the device .
4543  *  @irq: the irq of the device.
4544  *  @dev_id: a void pointer to the dev structure of the NIC.
4545  *  Description:  This function is the ISR handler of the device. It
4546  *  identifies the reason for the interrupt and calls the relevant
 *  service routines. As a contingency measure, this ISR allocates the
4548  *  recv buffers, if their numbers are below the panic value which is
4549  *  presently set to 25% of the original number of rcv buffers allocated.
4550  *  Return value:
4551  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4552  *   IRQ_NONE: will be returned if interrupt is not from our device
4553  */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        int i;
        u64 reason = 0;
        struct mac_info *mac_control;
        struct config_param *config;

        /* Pretend we handled any irq's from a disconnected card */
        if (pci_channel_offline(sp->pdev))
                return IRQ_NONE;

        /* Card is down (closing/resetting): the interrupt cannot be ours. */
        if (!is_s2io_card_up(sp))
                return IRQ_NONE;

        mac_control = &sp->mac_control;
        config = &sp->config;

        /*
         * Identify the cause for interrupt and call the appropriate
         * interrupt handler. Causes for the interrupt could be;
         * 1. Rx of packet.
         * 2. Tx complete.
         * 3. Link down.
         */
        reason = readq(&bar0->general_int_status);

        if (unlikely(reason == S2IO_MINUS_ONE) ) {
                /* Nothing much can be done. Get out */
                /*
                 * All-ones usually means the read came back from a dead or
                 * removed device; claim the irq so it is not reraised.
                 */
                return IRQ_HANDLED;
        }

        if (reason & (GEN_INTR_RXTRAFFIC |
                GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
        {
                /* Mask all interrupts while we service this one. */
                writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

                if (config->napi) {
                        if (reason & GEN_INTR_RXTRAFFIC) {
                                /*
                                 * Hand Rx processing to the NAPI poll loop;
                                 * keep Rx masked until the poll completes.
                                 * If scheduling fails (already scheduled),
                                 * just ack the Rx interrupt instead.
                                 */
                                if (likely(netif_rx_schedule_prep(dev,
                                                        &sp->napi))) {
                                        __netif_rx_schedule(dev, &sp->napi);
                                        writeq(S2IO_MINUS_ONE,
                                               &bar0->rx_traffic_mask);
                                } else
                                        writeq(S2IO_MINUS_ONE,
                                               &bar0->rx_traffic_int);
                        }
                } else {
                        /*
                         * rx_traffic_int reg is an R1 register, writing all 1's
                         * will ensure that the actual interrupt causing bit
                         * get's cleared and hence a read can be avoided.
                         */
                        if (reason & GEN_INTR_RXTRAFFIC)
                                writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

                        /* Non-NAPI: drain every Rx ring inline. */
                        for (i = 0; i < config->rx_ring_num; i++)
                                rx_intr_handler(&mac_control->rings[i]);
                }

                /*
                 * tx_traffic_int reg is an R1 register, writing all 1's
                 * will ensure that the actual interrupt causing bit get's
                 * cleared and hence a read can be avoided.
                 */
                if (reason & GEN_INTR_TXTRAFFIC)
                        writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

                /* Reap completed Tx descriptors on every fifo. */
                for (i = 0; i < config->tx_fifo_num; i++)
                        tx_intr_handler(&mac_control->fifos[i]);

                if (reason & GEN_INTR_TXPIC)
                        s2io_txpic_intr_handle(sp);

                /*
                 * Reallocate the buffers from the interrupt handler itself.
                 */
                if (!config->napi) {
                        for (i = 0; i < config->rx_ring_num; i++)
                                s2io_chk_rx_buffers(sp, i);
                }
                /*
                 * Restore the interrupt mask; the readl flushes the posted
                 * write so interrupts are truly re-enabled before we return.
                 */
                writeq(sp->general_int_mask, &bar0->general_int_mask);
                readl(&bar0->general_int_status);

                return IRQ_HANDLED;

        }
        else if (!reason) {
                /* The interrupt was not raised by us */
                return IRQ_NONE;
        }

        return IRQ_HANDLED;
}
4651
4652 /**
4653  * s2io_updt_stats -
4654  */
static void s2io_updt_stats(struct s2io_nic *sp)
{
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64;
        int cnt = 0;

        /* Only poke the hardware while the card is up. */
        if (is_s2io_card_up(sp)) {
                /* Apprx 30us on a 133 MHz bus */
                /* Request a one-shot DMA of the statistics block. */
                val64 = SET_UPDT_CLICKS(10) |
                        STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
                writeq(val64, &bar0->stat_cfg);
                /*
                 * Poll until the hardware clears bit 0 of stat_cfg
                 * (update done), giving up after 5 * 100us.
                 */
                do {
                        udelay(100);
                        val64 = readq(&bar0->stat_cfg);
                        if (!(val64 & s2BIT(0)))
                                break;
                        cnt++;
                        if (cnt == 5)
                                break; /* Updt failed */
                } while(1);
        }
}
4677
4678 /**
4679  *  s2io_get_stats - Updates the device statistics structure.
4680  *  @dev : pointer to the device structure.
4681  *  Description:
4682  *  This function updates the device statistics structure in the s2io_nic
4683  *  structure and returns a pointer to the same.
4684  *  Return value:
4685  *  pointer to the updated net_device_stats structure.
4686  */
4687
4688 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4689 {
4690         struct s2io_nic *sp = dev->priv;
4691         struct mac_info *mac_control;
4692         struct config_param *config;
4693
4694
4695         mac_control = &sp->mac_control;
4696         config = &sp->config;
4697
4698         /* Configure Stats for immediate updt */
4699         s2io_updt_stats(sp);
4700
4701         sp->stats.tx_packets =
4702                 le32_to_cpu(mac_control->stats_info->tmac_frms);
4703         sp->stats.tx_errors =
4704                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4705         sp->stats.rx_errors =
4706                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4707         sp->stats.multicast =
4708                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4709         sp->stats.rx_length_errors =
4710                 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4711
4712         return (&sp->stats);
4713 }
4714
4715 /**
4716  *  s2io_set_multicast - entry point for multicast address enable/disable.
4717  *  @dev : pointer to the device structure
4718  *  Description:
4719  *  This function is a driver entry point which gets called by the kernel
4720  *  whenever multicast addresses must be enabled/disabled. This also gets
 *  called to set/reset promiscuous mode. Depending on the device flag, we
4722  *  determine, if multicast address must be enabled or if promiscuous mode
4723  *  is to be disabled etc.
4724  *  Return value:
4725  *  void.
4726  */
4727
static void s2io_set_multicast(struct net_device *dev)
{
        int i, j, prev_cnt;
        struct dev_mc_list *mclist;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        /* multi_mac/mask select the all-multicast CAM match entry. */
        u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
            0xfeffffffffffULL;
        /* dis_addr (all-ones) marks a CAM slot as disabled. */
        u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
        void __iomem *add;

        if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
                /*  Enable all Multicast addresses */
                /* CAM programming: load data registers, then issue the
                 * write-enable command for the all-multi offset. */
                writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
                       &bar0->rmac_addr_data0_mem);
                writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
                       &bar0->rmac_addr_data1_mem);
                val64 = RMAC_ADDR_CMD_MEM_WE |
                    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                    RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
                writeq(val64, &bar0->rmac_addr_cmd_mem);
                /* Wait till command completes */
                wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET);

                /* Remember that all-multi is on and where it was stored. */
                sp->m_cast_flg = 1;
                sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
        } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
                /*  Disable all Multicast addresses */
                writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
                       &bar0->rmac_addr_data0_mem);
                writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
                       &bar0->rmac_addr_data1_mem);
                val64 = RMAC_ADDR_CMD_MEM_WE |
                    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                    RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
                writeq(val64, &bar0->rmac_addr_cmd_mem);
                /* Wait till command completes */
                wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET);

                sp->m_cast_flg = 0;
                sp->all_multi_pos = 0;
        }

        if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
                /*  Put the NIC into promiscuous mode */
                add = &bar0->mac_cfg;
                val64 = readq(&bar0->mac_cfg);
                val64 |= MAC_CFG_RMAC_PROM_ENABLE;

                /* mac_cfg is key-protected: unlock with rmac_cfg_key
                 * before each 32-bit half-write. */
                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) val64, add);
                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) (val64 >> 32), (add + 4));

                /* Promiscuous mode needs untouched frames: stop VLAN
                 * tag stripping unless the user forced it on. */
                if (vlan_tag_strip != 1) {
                        val64 = readq(&bar0->rx_pa_cfg);
                        val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
                        writeq(val64, &bar0->rx_pa_cfg);
                        vlan_strip_flag = 0;
                }

                /* Read-back flushes the posted config writes. */
                val64 = readq(&bar0->mac_cfg);
                sp->promisc_flg = 1;
                DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
                          dev->name);
        } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
                /*  Remove the NIC from promiscuous mode */
                add = &bar0->mac_cfg;
                val64 = readq(&bar0->mac_cfg);
                val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) val64, add);
                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) (val64 >> 32), (add + 4));

                /* Re-enable VLAN stripping unless the user forced it off. */
                if (vlan_tag_strip != 0) {
                        val64 = readq(&bar0->rx_pa_cfg);
                        val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
                        writeq(val64, &bar0->rx_pa_cfg);
                        vlan_strip_flag = 1;
                }

                val64 = readq(&bar0->mac_cfg);
                sp->promisc_flg = 0;
                DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
                          dev->name);
        }

        /*  Update individual M_CAST address list */
        if ((!sp->m_cast_flg) && dev->mc_count) {
                /* Refuse lists larger than the CAM can hold. */
                if (dev->mc_count >
                    (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
                        DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
                                  dev->name);
                        DBG_PRINT(ERR_DBG, "can be added, please enable ");
                        DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
                        return;
                }

                prev_cnt = sp->mc_addr_count;
                sp->mc_addr_count = dev->mc_count;

                /* Clear out the previous list of Mc in the H/W. */
                for (i = 0; i < prev_cnt; i++) {
                        writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
                               &bar0->rmac_addr_data0_mem);
                        writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
                                &bar0->rmac_addr_data1_mem);
                        val64 = RMAC_ADDR_CMD_MEM_WE |
                            RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                            RMAC_ADDR_CMD_MEM_OFFSET
                            (MAC_MC_ADDR_START_OFFSET + i);
                        writeq(val64, &bar0->rmac_addr_cmd_mem);

                        /* Wait for command completes */
                        if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET)) {
                                DBG_PRINT(ERR_DBG, "%s: Adding ",
                                          dev->name);
                                DBG_PRINT(ERR_DBG, "Multicasts failed\n");
                                return;
                        }
                }

                /* Create the new Rx filter list and update the same in H/W. */
                for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
                     i++, mclist = mclist->next) {
                        /* Keep a software copy of the programmed address. */
                        memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
                               ETH_ALEN);
                        mac_addr = 0;
                        /* Fold the 6 address bytes into a 48-bit value,
                         * first byte in the most significant position. */
                        for (j = 0; j < ETH_ALEN; j++) {
                                mac_addr |= mclist->dmi_addr[j];
                                mac_addr <<= 8;
                        }
                        mac_addr >>= 8;
                        writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
                               &bar0->rmac_addr_data0_mem);
                        writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
                                &bar0->rmac_addr_data1_mem);
                        val64 = RMAC_ADDR_CMD_MEM_WE |
                            RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                            RMAC_ADDR_CMD_MEM_OFFSET
                            (i + MAC_MC_ADDR_START_OFFSET);
                        writeq(val64, &bar0->rmac_addr_cmd_mem);

                        /* Wait for command completes */
                        if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET)) {
                                DBG_PRINT(ERR_DBG, "%s: Adding ",
                                          dev->name);
                                DBG_PRINT(ERR_DBG, "Multicasts failed\n");
                                return;
                        }
                }
        }
}
4891
4892 /* add unicast MAC address to CAM */
static int do_s2io_add_unicast(struct s2io_nic *sp, u64 addr, int off)
{
        u64 val64;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;

        /* Load the address into the CAM data register first ... */
        writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
                &bar0->rmac_addr_data0_mem);

        /* ... then issue the write-enable command for CAM slot 'off'. */
        val64 =
                RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                RMAC_ADDR_CMD_MEM_OFFSET(off);
        writeq(val64, &bar0->rmac_addr_cmd_mem);

        /* Wait till command completes */
        if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                S2IO_BIT_RESET)) {
                DBG_PRINT(INFO_DBG, "add_mac_addr failed\n");
                return FAILURE;
        }
        return SUCCESS;
}
4915
4916 /**
4917  * s2io_set_mac_addr driver entry point
4918  */
4919 static int s2io_set_mac_addr(struct net_device *dev, void *p)
4920 {
4921         struct sockaddr *addr = p;
4922
4923         if (!is_valid_ether_addr(addr->sa_data))
4924                 return -EINVAL;
4925
4926         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4927
4928         /* store the MAC address in CAM */
4929         return (do_s2io_prog_unicast(dev, dev->dev_addr));
4930 }
4931
4932 /**
4933  *  do_s2io_prog_unicast - Programs the Xframe mac address
4934  *  @dev : pointer to the device structure.
4935  *  @addr: a uchar pointer to the new mac address which is to be set.
4936  *  Description : This procedure will program the Xframe to receive
4937  *  frames with new Mac Address
4938  *  Return value: SUCCESS on success and an appropriate (-)ve integer
4939  *  as defined in errno.h file on failure.
4940  */
4941 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
4942 {
4943         struct s2io_nic *sp = dev->priv;
4944         register u64 mac_addr = 0, perm_addr = 0;
4945         int i;
4946
4947         /*
4948         * Set the new MAC address as the new unicast filter and reflect this
4949         * change on the device address registered with the OS. It will be
4950         * at offset 0.
4951         */
4952         for (i = 0; i < ETH_ALEN; i++) {
4953                 mac_addr <<= 8;
4954                 mac_addr |= addr[i];
4955                 perm_addr <<= 8;
4956                 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
4957         }
4958
4959         /* check if the dev_addr is different than perm_addr */
4960         if (mac_addr == perm_addr)
4961                 return SUCCESS;
4962
4963         /* Update the internal structure with this new mac address */
4964         do_s2io_copy_mac_addr(sp, 0, mac_addr);
4965         return (do_s2io_add_unicast(sp, mac_addr, 0));
4966 }
4967
4968 /**
4969  * s2io_ethtool_sset - Sets different link parameters.
 * @sp : private member of the device structure, which is a pointer to
 * the s2io_nic structure.
4971  * @info: pointer to the structure with parameters given by ethtool to set
4972  * link information.
4973  * Description:
4974  * The function sets different link parameters provided by the user onto
4975  * the NIC.
4976  * Return value:
4977  * 0 on success.
4978 */
4979
4980 static int s2io_ethtool_sset(struct net_device *dev,
4981                              struct ethtool_cmd *info)
4982 {
4983         struct s2io_nic *sp = dev->priv;
4984         if ((info->autoneg == AUTONEG_ENABLE) ||
4985             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4986                 return -EINVAL;
4987         else {
4988                 s2io_close(sp->dev);
4989                 s2io_open(sp->dev);
4990         }
4991
4992         return 0;
4993 }
4994
4995 /**
 * s2io_ethtool_gset - Return link specific information.
4997  * @sp : private member of the device structure, pointer to the
4998  *      s2io_nic structure.
4999  * @info : pointer to the structure with parameters given by ethtool
5000  * to return link information.
5001  * Description:
5002  * Returns link specific information like speed, duplex etc.. to ethtool.
5003  * Return value :
5004  * return 0 on success.
5005  */
5006
5007 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5008 {
5009         struct s2io_nic *sp = dev->priv;
5010         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5011         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5012         info->port = PORT_FIBRE;
5013
5014         /* info->transceiver */
5015         info->transceiver = XCVR_EXTERNAL;
5016
5017         if (netif_carrier_ok(sp->dev)) {
5018                 info->speed = 10000;
5019                 info->duplex = DUPLEX_FULL;
5020         } else {
5021                 info->speed = -1;
5022                 info->duplex = -1;
5023         }
5024
5025         info->autoneg = AUTONEG_DISABLE;
5026         return 0;
5027 }
5028
5029 /**
5030  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5031  * @sp : private member of the device structure, which is a pointer to the
5032  * s2io_nic structure.
5033  * @info : pointer to the structure with parameters given by ethtool to
5034  * return driver information.
5035  * Description:
 * Returns driver specific information like name, version etc. to ethtool.
5037  * Return value:
5038  *  void
5039  */
5040
5041 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5042                                   struct ethtool_drvinfo *info)
5043 {
5044         struct s2io_nic *sp = dev->priv;
5045
5046         strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5047         strncpy(info->version, s2io_driver_version, sizeof(info->version));
5048         strncpy(info->fw_version, "", sizeof(info->fw_version));
5049         strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5050         info->regdump_len = XENA_REG_SPACE;
5051         info->eedump_len = XENA_EEPROM_SPACE;
5052 }
5053
5054 /**
 *  s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
5056  *  @sp: private member of the device structure, which is a pointer to the
5057  *  s2io_nic structure.
5058  *  @regs : pointer to the structure with parameters given by ethtool for
5059  *  dumping the registers.
 *  @reg_space: The input argument into which all the registers are dumped.
5061  *  Description:
5062  *  Dumps the entire register space of xFrame NIC into the user given
5063  *  buffer area.
5064  * Return value :
5065  * void .
5066 */
5067
5068 static void s2io_ethtool_gregs(struct net_device *dev,
5069                                struct ethtool_regs *regs, void *space)
5070 {
5071         int i;
5072         u64 reg;
5073         u8 *reg_space = (u8 *) space;
5074         struct s2io_nic *sp = dev->priv;
5075
5076         regs->len = XENA_REG_SPACE;
5077         regs->version = sp->pdev->subsystem_device;
5078
5079         for (i = 0; i < regs->len; i += 8) {
5080                 reg = readq(sp->bar0 + i);
5081                 memcpy((reg_space + i), &reg, 8);
5082         }
5083 }
5084
5085 /**
5086  *  s2io_phy_id  - timer function that alternates adapter LED.
5087  *  @data : address of the private member of the device structure, which
5088  *  is a pointer to the s2io_nic structure, provided as an u32.
5089  * Description: This is actually the timer function that alternates the
5090  * adapter LED bit of the adapter control bit to set/reset every time on
 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
5092  *  once every second.
5093 */
5094 static void s2io_phy_id(unsigned long data)
5095 {
5096         struct s2io_nic *sp = (struct s2io_nic *) data;
5097         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5098         u64 val64 = 0;
5099         u16 subid;
5100
5101         subid = sp->pdev->subsystem_device;
5102         if ((sp->device_type == XFRAME_II_DEVICE) ||
5103                    ((subid & 0xFF) >= 0x07)) {
5104                 val64 = readq(&bar0->gpio_control);
5105                 val64 ^= GPIO_CTRL_GPIO_0;
5106                 writeq(val64, &bar0->gpio_control);
5107         } else {
5108                 val64 = readq(&bar0->adapter_control);
5109                 val64 ^= ADAPTER_LED_ON;
5110                 writeq(val64, &bar0->adapter_control);
5111         }
5112
5113         mod_timer(&sp->id_timer, jiffies + HZ / 2);
5114 }
5115
5116 /**
5117  * s2io_ethtool_idnic - To physically identify the nic on the system.
5118  * @sp : private member of the device structure, which is a pointer to the
5119  * s2io_nic structure.
5120  * @id : pointer to the structure with identification parameters given by
5121  * ethtool.
5122  * Description: Used to physically identify the NIC on the system.
5123  * The Link LED will blink for a time specified by the user for
5124  * identification.
5125  * NOTE: The Link has to be Up to be able to blink the LED. Hence
5126  * identification is possible only if it's link is up.
5127  * Return value:
5128  * int , returns 0 on success
5129  */
5130
5131 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5132 {
5133         u64 val64 = 0, last_gpio_ctrl_val;
5134         struct s2io_nic *sp = dev->priv;
5135         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5136         u16 subid;
5137
5138         subid = sp->pdev->subsystem_device;
5139         last_gpio_ctrl_val = readq(&bar0->gpio_control);
5140         if ((sp->device_type == XFRAME_I_DEVICE) &&
5141                 ((subid & 0xFF) < 0x07)) {
5142                 val64 = readq(&bar0->adapter_control);
5143                 if (!(val64 & ADAPTER_CNTL_EN)) {
5144                         printk(KERN_ERR
5145                                "Adapter Link down, cannot blink LED\n");
5146                         return -EFAULT;
5147                 }
5148         }
5149         if (sp->id_timer.function == NULL) {
5150                 init_timer(&sp->id_timer);
5151                 sp->id_timer.function = s2io_phy_id;
5152                 sp->id_timer.data = (unsigned long) sp;
5153         }
5154         mod_timer(&sp->id_timer, jiffies);
5155         if (data)
5156                 msleep_interruptible(data * HZ);
5157         else
5158                 msleep_interruptible(MAX_FLICKER_TIME);
5159         del_timer_sync(&sp->id_timer);
5160
5161         if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
5162                 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5163                 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5164         }
5165
5166         return 0;
5167 }
5168
5169 static void s2io_ethtool_gringparam(struct net_device *dev,
5170                                     struct ethtool_ringparam *ering)
5171 {
5172         struct s2io_nic *sp = dev->priv;
5173         int i,tx_desc_count=0,rx_desc_count=0;
5174
5175         if (sp->rxd_mode == RXD_MODE_1)
5176                 ering->rx_max_pending = MAX_RX_DESC_1;
5177         else if (sp->rxd_mode == RXD_MODE_3B)
5178                 ering->rx_max_pending = MAX_RX_DESC_2;
5179
5180         ering->tx_max_pending = MAX_TX_DESC;
5181         for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5182                 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5183
5184         DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5185         ering->tx_pending = tx_desc_count;
5186         rx_desc_count = 0;
5187         for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5188                 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5189
5190         ering->rx_pending = rx_desc_count;
5191
5192         ering->rx_mini_max_pending = 0;
5193         ering->rx_mini_pending = 0;
5194         if(sp->rxd_mode == RXD_MODE_1)
5195                 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5196         else if (sp->rxd_mode == RXD_MODE_3B)
5197                 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5198         ering->rx_jumbo_pending = rx_desc_count;
5199 }
5200
5201 /**
5202  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5203  * @sp : private member of the device structure, which is a pointer to the
5204  *      s2io_nic structure.
5205  * @ep : pointer to the structure with pause parameters given by ethtool.
5206  * Description:
5207  * Returns the Pause frame generation and reception capability of the NIC.
5208  * Return value:
5209  *  void
5210  */
5211 static void s2io_ethtool_getpause_data(struct net_device *dev,
5212                                        struct ethtool_pauseparam *ep)
5213 {
5214         u64 val64;
5215         struct s2io_nic *sp = dev->priv;
5216         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5217
5218         val64 = readq(&bar0->rmac_pause_cfg);
5219         if (val64 & RMAC_PAUSE_GEN_ENABLE)
5220                 ep->tx_pause = TRUE;
5221         if (val64 & RMAC_PAUSE_RX_ENABLE)
5222                 ep->rx_pause = TRUE;
5223         ep->autoneg = FALSE;
5224 }
5225
5226 /**
5227  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5228  * @sp : private member of the device structure, which is a pointer to the
5229  *      s2io_nic structure.
5230  * @ep : pointer to the structure with pause parameters given by ethtool.
5231  * Description:
5232  * It can be used to set or reset Pause frame generation or reception
5233  * support of the NIC.
5234  * Return value:
5235  * int, returns 0 on Success
5236  */
5237
5238 static int s2io_ethtool_setpause_data(struct net_device *dev,
5239                                struct ethtool_pauseparam *ep)
5240 {
5241         u64 val64;
5242         struct s2io_nic *sp = dev->priv;
5243         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5244
5245         val64 = readq(&bar0->rmac_pause_cfg);
5246         if (ep->tx_pause)
5247                 val64 |= RMAC_PAUSE_GEN_ENABLE;
5248         else
5249                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5250         if (ep->rx_pause)
5251                 val64 |= RMAC_PAUSE_RX_ENABLE;
5252         else
5253                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5254         writeq(val64, &bar0->rmac_pause_cfg);
5255         return 0;
5256 }
5257
5258 /**
5259  * read_eeprom - reads 4 bytes of data from user given offset.
5260  * @sp : private member of the device structure, which is a pointer to the
5261  *      s2io_nic structure.
5262  * @off : offset at which the data must be written
5263  * @data : Its an output parameter where the data read at the given
5264  *      offset is stored.
5265  * Description:
5266  * Will read 4 bytes of data from the user given offset and return the
5267  * read data.
5268  * NOTE: Will allow to read only part of the EEPROM visible through the
5269  *   I2C bus.
5270  * Return value:
5271  *  -1 on failure and 0 on success.
5272  */
5273
#define S2IO_DEV_ID             5
static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Xframe I: the EEPROM is reached through the I2C
		 * controller.  Start a read transaction at offset 'off'.
		 */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll for completion, up to 5 x 50 ms. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Xframe II: the EEPROM sits behind the SPI controller.
		 * Program the command first, then set the REQ bit in a
		 * second write to start the transaction.
		 */
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Poll for NACK (ret = 1) or DONE, up to 5 x 50 ms. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff;	/* keep the low 3 bytes */
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5324
5325 /**
5326  *  write_eeprom - actually writes the relevant part of the data value.
5327  *  @sp : private member of the device structure, which is a pointer to the
5328  *       s2io_nic structure.
5329  *  @off : offset at which the data must be written
5330  *  @data : The data that is to be written
5331  *  @cnt : Number of bytes of the data that are actually to be written into
5332  *  the Eeprom. (max of 3)
5333  * Description:
5334  *  Actually writes the relevant part of the data value into the Eeprom
5335  *  through the I2C bus.
5336  * Return value:
5337  *  0 on success, -1 on failure.
5338  */
5339
static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Xframe I: write 'cnt' bytes of 'data' through the I2C
		 * controller in a single transaction.
		 */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll for completion, up to 5 x 50 ms.  A NACK from
		 * the device leaves ret at -1 (failure).
		 */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* NOTE(review): a count of 8 is encoded as 0 in the
		 * SPI_CONTROL_BYTECNT field -- presumably the field is
		 * 3 bits wide; confirm against the Xframe II register
		 * specification.
		 */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		/* Stage the data in spi_data before issuing the command. */
		writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Setting REQ in a second write starts the transaction. */
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Poll for NACK (ret = 1) or DONE, up to 5 x 50 ms. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5389 static void s2io_vpd_read(struct s2io_nic *nic)
5390 {
5391         u8 *vpd_data;
5392         u8 data;
5393         int i=0, cnt, fail = 0;
5394         int vpd_addr = 0x80;
5395
5396         if (nic->device_type == XFRAME_II_DEVICE) {
5397                 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5398                 vpd_addr = 0x80;
5399         }
5400         else {
5401                 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5402                 vpd_addr = 0x50;
5403         }
5404         strcpy(nic->serial_num, "NOT AVAILABLE");
5405
5406         vpd_data = kmalloc(256, GFP_KERNEL);
5407         if (!vpd_data) {
5408                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5409                 return;
5410         }
5411         nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5412
5413         for (i = 0; i < 256; i +=4 ) {
5414                 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5415                 pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5416                 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5417                 for (cnt = 0; cnt <5; cnt++) {
5418                         msleep(2);
5419                         pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5420                         if (data == 0x80)
5421                                 break;
5422                 }
5423                 if (cnt >= 5) {
5424                         DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5425                         fail = 1;
5426                         break;
5427                 }
5428                 pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5429                                       (u32 *)&vpd_data[i]);
5430         }
5431
5432         if(!fail) {
5433                 /* read serial number of adapter */
5434                 for (cnt = 0; cnt < 256; cnt++) {
5435                 if ((vpd_data[cnt] == 'S') &&
5436                         (vpd_data[cnt+1] == 'N') &&
5437                         (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5438                                 memset(nic->serial_num, 0, VPD_STRING_LEN);
5439                                 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5440                                         vpd_data[cnt+2]);
5441                                 break;
5442                         }
5443                 }
5444         }
5445
5446         if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5447                 memset(nic->product_name, 0, vpd_data[1]);
5448                 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5449         }
5450         kfree(vpd_data);
5451         nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5452 }
5453
5454 /**
5455  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
5457  *  @eeprom : pointer to the user level structure provided by ethtool,
5458  *  containing all relevant information.
5459  *  @data_buf : user defined value to be written into Eeprom.
5460  *  Description: Reads the values stored in the Eeprom at given offset
5461  *  for a given length. Stores these values int the input argument data
5462  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5463  *  Return value:
5464  *  int  0 on success
5465  */
5466
5467 static int s2io_ethtool_geeprom(struct net_device *dev,
5468                          struct ethtool_eeprom *eeprom, u8 * data_buf)
5469 {
5470         u32 i, valid;
5471         u64 data;
5472         struct s2io_nic *sp = dev->priv;
5473
5474         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5475
5476         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5477                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5478
5479         for (i = 0; i < eeprom->len; i += 4) {
5480                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5481                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5482                         return -EFAULT;
5483                 }
5484                 valid = INV(data);
5485                 memcpy((data_buf + i), &valid, 4);
5486         }
5487         return 0;
5488 }
5489
5490 /**
5491  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5492  *  @sp : private member of the device structure, which is a pointer to the
5493  *  s2io_nic structure.
5494  *  @eeprom : pointer to the user level structure provided by ethtool,
5495  *  containing all relevant information.
5496  *  @data_buf ; user defined value to be written into Eeprom.
5497  *  Description:
5498  *  Tries to write the user provided value in the Eeprom, at the offset
5499  *  given by the user.
5500  *  Return value:
5501  *  0 on success, -EFAULT on failure.
5502  */
5503
static int s2io_ethtool_seeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom,
				u8 * data_buf)
{
	int len = eeprom->len, cnt = 0;
	u64 valid = 0, data;
	struct s2io_nic *sp = dev->priv;

	/* The caller must present the same magic cookie that geeprom
	 * reported (vendor/device id) before writes are allowed.
	 */
	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
		DBG_PRINT(ERR_DBG,
			  "ETHTOOL_WRITE_EEPROM Err: Magic value ");
		DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
			  eeprom->magic);
		return -EFAULT;
	}

	/* Write the user buffer out one byte at a time. */
	while (len) {
		data = (u32) data_buf[cnt] & 0x000000FF;
		/* NOTE(review): non-zero bytes are shifted into bits
		 * 31:24 before being handed to write_eeprom() while zero
		 * bytes are passed unshifted -- presumably matching the
		 * data lane write_eeprom() programs; confirm against the
		 * I2C/SPI data register layout.
		 */
		if (data) {
			valid = (u32) (data << 24);
		} else
			valid = data;

		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
			DBG_PRINT(ERR_DBG,
				  "ETHTOOL_WRITE_EEPROM Err: Cannot ");
			DBG_PRINT(ERR_DBG,
				  "write into the specified offset\n");
			return -EFAULT;
		}
		cnt++;
		len--;
	}

	return 0;
}
5540
5541 /**
5542  * s2io_register_test - reads and writes into all clock domains.
5543  * @sp : private member of the device structure, which is a pointer to the
5544  * s2io_nic structure.
5545  * @data : variable that returns the result of each of the test conducted b
5546  * by the driver.
5547  * Description:
5548  * Read and write into all clock domains. The NIC has 3 clock domains,
5549  * see that registers in all the three regions are accessible.
5550  * Return value:
5551  * 0 on success.
5552  */
5553
5554 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5555 {
5556         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5557         u64 val64 = 0, exp_val;
5558         int fail = 0;
5559
5560         val64 = readq(&bar0->pif_rd_swapper_fb);
5561         if (val64 != 0x123456789abcdefULL) {
5562                 fail = 1;
5563                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5564         }
5565
5566         val64 = readq(&bar0->rmac_pause_cfg);
5567         if (val64 != 0xc000ffff00000000ULL) {
5568                 fail = 1;
5569                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5570         }
5571
5572         val64 = readq(&bar0->rx_queue_cfg);
5573         if (sp->device_type == XFRAME_II_DEVICE)
5574                 exp_val = 0x0404040404040404ULL;
5575         else
5576                 exp_val = 0x0808080808080808ULL;
5577         if (val64 != exp_val) {
5578                 fail = 1;
5579                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5580         }
5581
5582         val64 = readq(&bar0->xgxs_efifo_cfg);
5583         if (val64 != 0x000000001923141EULL) {
5584                 fail = 1;
5585                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5586         }
5587
5588         val64 = 0x5A5A5A5A5A5A5A5AULL;
5589         writeq(val64, &bar0->xmsi_data);
5590         val64 = readq(&bar0->xmsi_data);
5591         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5592                 fail = 1;
5593                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5594         }
5595
5596         val64 = 0xA5A5A5A5A5A5A5A5ULL;
5597         writeq(val64, &bar0->xmsi_data);
5598         val64 = readq(&bar0->xmsi_data);
5599         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5600                 fail = 1;
5601                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5602         }
5603
5604         *data = fail;
5605         return fail;
5606 }
5607
5608 /**
5609  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5610  * @sp : private member of the device structure, which is a pointer to the
5611  * s2io_nic structure.
5612  * @data:variable that returns the result of each of the test conducted by
5613  * the driver.
5614  * Description:
5615  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5616  * register.
5617  * Return value:
5618  * 0 on success.
5619  */
5620
5621 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5622 {
5623         int fail = 0;
5624         u64 ret_data, org_4F0, org_7F0;
5625         u8 saved_4F0 = 0, saved_7F0 = 0;
5626         struct net_device *dev = sp->dev;
5627
5628         /* Test Write Error at offset 0 */
5629         /* Note that SPI interface allows write access to all areas
5630          * of EEPROM. Hence doing all negative testing only for Xframe I.
5631          */
5632         if (sp->device_type == XFRAME_I_DEVICE)
5633                 if (!write_eeprom(sp, 0, 0, 3))
5634                         fail = 1;
5635
5636         /* Save current values at offsets 0x4F0 and 0x7F0 */
5637         if (!read_eeprom(sp, 0x4F0, &org_4F0))
5638                 saved_4F0 = 1;
5639         if (!read_eeprom(sp, 0x7F0, &org_7F0))
5640                 saved_7F0 = 1;
5641
5642         /* Test Write at offset 4f0 */
5643         if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5644                 fail = 1;
5645         if (read_eeprom(sp, 0x4F0, &ret_data))
5646                 fail = 1;
5647
5648         if (ret_data != 0x012345) {
5649                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5650                         "Data written %llx Data read %llx\n",
5651                         dev->name, (unsigned long long)0x12345,
5652                         (unsigned long long)ret_data);
5653                 fail = 1;
5654         }
5655
5656         /* Reset the EEPROM data go FFFF */
5657         write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5658
5659         /* Test Write Request Error at offset 0x7c */
5660         if (sp->device_type == XFRAME_I_DEVICE)
5661                 if (!write_eeprom(sp, 0x07C, 0, 3))
5662                         fail = 1;
5663
5664         /* Test Write Request at offset 0x7f0 */
5665         if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5666                 fail = 1;
5667         if (read_eeprom(sp, 0x7F0, &ret_data))
5668                 fail = 1;
5669
5670         if (ret_data != 0x012345) {
5671                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5672                         "Data written %llx Data read %llx\n",
5673                         dev->name, (unsigned long long)0x12345,
5674                         (unsigned long long)ret_data);
5675                 fail = 1;
5676         }
5677
5678         /* Reset the EEPROM data go FFFF */
5679         write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5680
5681         if (sp->device_type == XFRAME_I_DEVICE) {
5682                 /* Test Write Error at offset 0x80 */
5683                 if (!write_eeprom(sp, 0x080, 0, 3))
5684                         fail = 1;
5685
5686                 /* Test Write Error at offset 0xfc */
5687                 if (!write_eeprom(sp, 0x0FC, 0, 3))
5688                         fail = 1;
5689
5690                 /* Test Write Error at offset 0x100 */
5691                 if (!write_eeprom(sp, 0x100, 0, 3))
5692                         fail = 1;
5693
5694                 /* Test Write Error at offset 4ec */
5695                 if (!write_eeprom(sp, 0x4EC, 0, 3))
5696                         fail = 1;
5697         }
5698
5699         /* Restore values at offsets 0x4F0 and 0x7F0 */
5700         if (saved_4F0)
5701                 write_eeprom(sp, 0x4F0, org_4F0, 3);
5702         if (saved_7F0)
5703                 write_eeprom(sp, 0x7F0, org_7F0, 3);
5704
5705         *data = fail;
5706         return fail;
5707 }
5708
5709 /**
5710  * s2io_bist_test - invokes the MemBist test of the card .
5711  * @sp : private member of the device structure, which is a pointer to the
5712  * s2io_nic structure.
5713  * @data:variable that returns the result of each of the test conducted by
5714  * the driver.
5715  * Description:
5716  * This invokes the MemBist test of the card. We give around
5717  * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
5719  * Return value:
5720  * 0 on success and -1 on failure.
5721  */
5722
5723 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5724 {
5725         u8 bist = 0;
5726         int cnt = 0, ret = -1;
5727
5728         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5729         bist |= PCI_BIST_START;
5730         pci_write_config_word(sp->pdev, PCI_BIST, bist);
5731
5732         while (cnt < 20) {
5733                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5734                 if (!(bist & PCI_BIST_START)) {
5735                         *data = (bist & PCI_BIST_CODE_MASK);
5736                         ret = 0;
5737                         break;
5738                 }
5739                 msleep(100);
5740                 cnt++;
5741         }
5742
5743         return ret;
5744 }
5745
5746 /**
 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
5749  * s2io_nic structure.
5750  * @data: variable that returns the result of each of the test conducted by
5751  * the driver.
5752  * Description:
5753  * The function verifies the link state of the NIC and updates the input
5754  * argument 'data' appropriately.
5755  * Return value:
5756  * 0 on success.
5757  */
5758
5759 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5760 {
5761         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5762         u64 val64;
5763
5764         val64 = readq(&bar0->adapter_status);
5765         if(!(LINK_IS_UP(val64)))
5766                 *data = 1;
5767         else
5768                 *data = 0;
5769
5770         return *data;
5771 }
5772
5773 /**
5774  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5775  * @sp - private member of the device structure, which is a pointer to the
5776  * s2io_nic structure.
5777  * @data - variable that returns the result of each of the test
5778  * conducted by the driver.
5779  * Description:
5780  *  This is one of the offline test that tests the read and write
5781  *  access to the RldRam chip on the NIC.
5782  * Return value:
5783  *  0 on success.
5784  */
5785
static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC while the test patterns are written. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the memory controller into RLDRAM test mode. */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: the second pass inverts the upper 48 bits of
	 * each data pattern.
	 */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d2);

		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Write pass: launch and poll (max 5 x 200 ms). */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		/* Timed out waiting for the write to finish. */
		if (cnt == 5)
			break;

		/* Read-back pass: launch and poll (max 5 x 500 ms). */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		if (cnt == 5)
			break;

		/* The hardware compares read-back data itself and sets
		 * the PASS bit on success.
		 */
		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
5870
5871 /**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of card.
5873  *  @sp : private member of the device structure, which is a pointer to the
5874  *  s2io_nic structure.
5875  *  @ethtest : pointer to a ethtool command specific structure that will be
5876  *  returned to the user.
5877  *  @data : variable that returns the result of each of the test
5878  * conducted by the driver.
5879  * Description:
5880  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
5881  *  the health of the card.
5882  * Return value:
5883  *  void
5884  */
5885
5886 static void s2io_ethtool_test(struct net_device *dev,
5887                               struct ethtool_test *ethtest,
5888                               uint64_t * data)
5889 {
5890         struct s2io_nic *sp = dev->priv;
5891         int orig_state = netif_running(sp->dev);
5892
5893         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5894                 /* Offline Tests. */
5895                 if (orig_state)
5896                         s2io_close(sp->dev);
5897
5898                 if (s2io_register_test(sp, &data[0]))
5899                         ethtest->flags |= ETH_TEST_FL_FAILED;
5900
5901                 s2io_reset(sp);
5902
5903                 if (s2io_rldram_test(sp, &data[3]))
5904                         ethtest->flags |= ETH_TEST_FL_FAILED;
5905
5906                 s2io_reset(sp);
5907
5908                 if (s2io_eeprom_test(sp, &data[1]))
5909                         ethtest->flags |= ETH_TEST_FL_FAILED;
5910
5911                 if (s2io_bist_test(sp, &data[4]))
5912                         ethtest->flags |= ETH_TEST_FL_FAILED;
5913
5914                 if (orig_state)
5915                         s2io_open(sp->dev);
5916
5917                 data[2] = 0;
5918         } else {
5919                 /* Online Tests. */
5920                 if (!orig_state) {
5921                         DBG_PRINT(ERR_DBG,
5922                                   "%s: is not up, cannot run test\n",
5923                                   dev->name);
5924                         data[0] = -1;
5925                         data[1] = -1;
5926                         data[2] = -1;
5927                         data[3] = -1;
5928                         data[4] = -1;
5929                 }
5930
5931                 if (s2io_link_test(sp, &data[2]))
5932                         ethtest->flags |= ETH_TEST_FL_FAILED;
5933
5934                 data[0] = 0;
5935                 data[1] = 0;
5936                 data[3] = 0;
5937                 data[4] = 0;
5938         }
5939 }
5940
/*
 * s2io_get_ethtool_stats - serialise every driver statistic into tmp_stats[].
 *
 * The order of the entries below MUST match the string tables
 * (ethtool_xena_stats_keys, ethtool_enhanced_stats_keys,
 * ethtool_driver_stats_keys) used by s2io_ethtool_get_strings(), and the
 * total count must agree with s2io_get_sset_count(); do not reorder.
 *
 * Several hardware counters are split into a 32-bit value plus a 32-bit
 * overflow half; those pairs are recombined into a single u64 here.
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats,
				   u64 * tmp_stats)
{
	int i = 0, k;
	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Latch the latest hardware counters into stat_info first. */
	s2io_updt_stats(sp);
	/* Tx MAC (TMAC) counters. */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
		le32_to_cpu(stat_info->tmac_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_bcst_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_any_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_vld_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_icmp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_rst_tcp);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_udp);
	/* Rx MAC (RMAC) counters. */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_bcst_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
		<< 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
		 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_discarded_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
		 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_usized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_osized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_frag_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_jabber_frms);
	/* Rx frame-size histogram. */
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_ip);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_icmp);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_udp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_err_drp_udp);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
	/* Per-queue Rx frame / queue-full counters. */
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_pause_cnt);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_accepted_ip);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
	/* PCI/PCI-X bus transaction counters. */
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);

	/* Enhanced statistics exist only for Hercules */
	if(sp->device_type == XFRAME_II_DEVICE) {
		tmp_stats[i++] =
				le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
		tmp_stats[i++] =
				le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
		tmp_stats[i++] =
				le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
	}

	/* Driver-maintained software statistics. */
	tmp_stats[i++] = 0;	/* placeholder slot kept for string-table alignment */
	tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
	tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
	tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
	tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
	for (k = 0; k < MAX_RX_RINGS; k++)
		tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
	/* XPAK transceiver alarm/warning indicators. */
	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
	tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
	tmp_stats[i++] = stat_info->sw_stat.sending_both;
	tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
	tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
	if (stat_info->sw_stat.num_aggregations) {
		u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
		int count = 0;
		/*
		 * Since 64-bit divide does not work on all platforms,
		 * do repeated subtraction.
		 */
		while (tmp >= stat_info->sw_stat.num_aggregations) {
			tmp -= stat_info->sw_stat.num_aggregations;
			count++;
		}
		/* count == average packets per LRO aggregation (integer part) */
		tmp_stats[i++] = count;
	}
	else
		tmp_stats[i++] = 0;
	tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
	tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
	tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
	tmp_stats[i++] = stat_info->sw_stat.mem_freed;
	tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
	tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
	tmp_stats[i++] = stat_info->sw_stat.link_up_time;
	tmp_stats[i++] = stat_info->sw_stat.link_down_time;

	/* Tx-side error counters. */
	tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;

	/* Rx-side and per-hardware-block error counters. */
	tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
}
6216
6217 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6218 {
6219         return (XENA_REG_SPACE);
6220 }
6221
6222
6223 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6224 {
6225         struct s2io_nic *sp = dev->priv;
6226
6227         return (sp->rx_csum);
6228 }
6229
6230 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6231 {
6232         struct s2io_nic *sp = dev->priv;
6233
6234         if (data)
6235                 sp->rx_csum = 1;
6236         else
6237                 sp->rx_csum = 0;
6238
6239         return 0;
6240 }
6241
6242 static int s2io_get_eeprom_len(struct net_device *dev)
6243 {
6244         return (XENA_EEPROM_SPACE);
6245 }
6246
6247 static int s2io_get_sset_count(struct net_device *dev, int sset)
6248 {
6249         struct s2io_nic *sp = dev->priv;
6250
6251         switch (sset) {
6252         case ETH_SS_TEST:
6253                 return S2IO_TEST_LEN;
6254         case ETH_SS_STATS:
6255                 switch(sp->device_type) {
6256                 case XFRAME_I_DEVICE:
6257                         return XFRAME_I_STAT_LEN;
6258                 case XFRAME_II_DEVICE:
6259                         return XFRAME_II_STAT_LEN;
6260                 default:
6261                         return 0;
6262                 }
6263         default:
6264                 return -EOPNOTSUPP;
6265         }
6266 }
6267
6268 static void s2io_ethtool_get_strings(struct net_device *dev,
6269                                      u32 stringset, u8 * data)
6270 {
6271         int stat_size = 0;
6272         struct s2io_nic *sp = dev->priv;
6273
6274         switch (stringset) {
6275         case ETH_SS_TEST:
6276                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6277                 break;
6278         case ETH_SS_STATS:
6279                 stat_size = sizeof(ethtool_xena_stats_keys);
6280                 memcpy(data, &ethtool_xena_stats_keys,stat_size);
6281                 if(sp->device_type == XFRAME_II_DEVICE) {
6282                         memcpy(data + stat_size,
6283                                 &ethtool_enhanced_stats_keys,
6284                                 sizeof(ethtool_enhanced_stats_keys));
6285                         stat_size += sizeof(ethtool_enhanced_stats_keys);
6286                 }
6287
6288                 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6289                         sizeof(ethtool_driver_stats_keys));
6290         }
6291 }
6292
6293 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6294 {
6295         if (data)
6296                 dev->features |= NETIF_F_IP_CSUM;
6297         else
6298                 dev->features &= ~NETIF_F_IP_CSUM;
6299
6300         return 0;
6301 }
6302
6303 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6304 {
6305         return (dev->features & NETIF_F_TSO) != 0;
6306 }
6307 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6308 {
6309         if (data)
6310                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6311         else
6312                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6313
6314         return 0;
6315 }
6316
/*
 * ethtool operations table for the s2io driver, installed on the netdev
 * at probe time. Handlers not implemented here (e.g. nway_reset) fall
 * back to the ethtool core defaults.
 */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_tso = s2io_ethtool_op_get_tso,
	.set_tso = s2io_ethtool_op_set_tso,
	.set_ufo = ethtool_op_set_ufo,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.phys_id = s2io_ethtool_idnic,
	.get_ethtool_stats = s2io_get_ethtool_stats,
	.get_sset_count = s2io_get_sset_count,
};
6343
6344 /**
6345  *  s2io_ioctl - Entry point for the Ioctl
6346  *  @dev :  Device pointer.
6347  *  @ifr :  An IOCTL specefic structure, that can contain a pointer to
6348  *  a proprietary structure used to pass information to the driver.
6349  *  @cmd :  This is used to distinguish between the different commands that
6350  *  can be passed to the IOCTL functions.
6351  *  Description:
6352  *  Currently there are no special functionality supported in IOCTL, hence
6353  *  function always return EOPNOTSUPPORTED
6354  */
6355
6356 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6357 {
6358         return -EOPNOTSUPP;
6359 }
6360
6361 /**
6362  *  s2io_change_mtu - entry point to change MTU size for the device.
6363  *   @dev : device pointer.
6364  *   @new_mtu : the new MTU size for the device.
6365  *   Description: A driver entry point to change MTU size for the device.
6366  *   Before changing the MTU the device must be stopped.
6367  *  Return value:
6368  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6369  *   file on failure.
6370  */
6371
6372 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6373 {
6374         struct s2io_nic *sp = dev->priv;
6375         int ret = 0;
6376
6377         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6378                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6379                           dev->name);
6380                 return -EPERM;
6381         }
6382
6383         dev->mtu = new_mtu;
6384         if (netif_running(dev)) {
6385                 s2io_card_down(sp);
6386                 netif_stop_queue(dev);
6387                 ret = s2io_card_up(sp);
6388                 if (ret) {
6389                         DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6390                                   __FUNCTION__);
6391                         return ret;
6392                 }
6393                 if (netif_queue_stopped(dev))
6394                         netif_wake_queue(dev);
6395         } else { /* Device is down */
6396                 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6397                 u64 val64 = new_mtu;
6398
6399                 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6400         }
6401
6402         return ret;
6403 }
6404
6405 /**
6406  *  s2io_tasklet - Bottom half of the ISR.
6407  *  @dev_adr : address of the device structure in dma_addr_t format.
6408  *  Description:
6409  *  This is the tasklet or the bottom half of the ISR. This is
6410  *  an extension of the ISR which is scheduled by the scheduler to be run
6411  *  when the load on the CPU is low. All low priority tasks of the ISR can
6412  *  be pushed into the tasklet. For now the tasklet is used only to
6413  *  replenish the Rx buffers in the Rx buffer descriptors.
6414  *  Return value:
6415  *  void.
6416  */
6417
6418 static void s2io_tasklet(unsigned long dev_addr)
6419 {
6420         struct net_device *dev = (struct net_device *) dev_addr;
6421         struct s2io_nic *sp = dev->priv;
6422         int i, ret;
6423         struct mac_info *mac_control;
6424         struct config_param *config;
6425
6426         mac_control = &sp->mac_control;
6427         config = &sp->config;
6428
6429         if (!TASKLET_IN_USE) {
6430                 for (i = 0; i < config->rx_ring_num; i++) {
6431                         ret = fill_rx_buffers(sp, i);
6432                         if (ret == -ENOMEM) {
6433                                 DBG_PRINT(INFO_DBG, "%s: Out of ",
6434                                           dev->name);
6435                                 DBG_PRINT(INFO_DBG, "memory in tasklet\n");
6436                                 break;
6437                         } else if (ret == -EFILL) {
6438                                 DBG_PRINT(INFO_DBG,
6439                                           "%s: Rx Ring %d is full\n",
6440                                           dev->name, i);
6441                                 break;
6442                         }
6443                 }
6444                 clear_bit(0, (&sp->tasklet_status));
6445         }
6446 }
6447
6448 /**
6449  * s2io_set_link - Set the LInk status
6450  * @data: long pointer to device private structue
6451  * Description: Sets the link status for the adapter
6452  */
6453
6454 static void s2io_set_link(struct work_struct *work)
6455 {
6456         struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
6457         struct net_device *dev = nic->dev;
6458         struct XENA_dev_config __iomem *bar0 = nic->bar0;
6459         register u64 val64;
6460         u16 subid;
6461
6462         rtnl_lock();
6463
6464         if (!netif_running(dev))
6465                 goto out_unlock;
6466
6467         if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6468                 /* The card is being reset, no point doing anything */
6469                 goto out_unlock;
6470         }
6471
6472         subid = nic->pdev->subsystem_device;
6473         if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6474                 /*
6475                  * Allow a small delay for the NICs self initiated
6476                  * cleanup to complete.
6477                  */
6478                 msleep(100);
6479         }
6480
6481         val64 = readq(&bar0->adapter_status);
6482         if (LINK_IS_UP(val64)) {
6483                 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6484                         if (verify_xena_quiescence(nic)) {
6485                                 val64 = readq(&bar0->adapter_control);
6486                                 val64 |= ADAPTER_CNTL_EN;
6487                                 writeq(val64, &bar0->adapter_control);
6488                                 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6489                                         nic->device_type, subid)) {
6490                                         val64 = readq(&bar0->gpio_control);
6491                                         val64 |= GPIO_CTRL_GPIO_0;
6492                                         writeq(val64, &bar0->gpio_control);
6493                                         val64 = readq(&bar0->gpio_control);
6494                                 } else {
6495                                         val64 |= ADAPTER_LED_ON;
6496                                         writeq(val64, &bar0->adapter_control);
6497                                 }
6498                                 nic->device_enabled_once = TRUE;
6499                         } else {
6500                                 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6501                                 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6502                                 netif_stop_queue(dev);
6503                         }
6504                 }
6505                 val64 = readq(&bar0->adapter_control);
6506                 val64 |= ADAPTER_LED_ON;
6507                 writeq(val64, &bar0->adapter_control);
6508                 s2io_link(nic, LINK_UP);
6509         } else {
6510                 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6511                                                       subid)) {
6512                         val64 = readq(&bar0->gpio_control);
6513                         val64 &= ~GPIO_CTRL_GPIO_0;
6514                         writeq(val64, &bar0->gpio_control);
6515                         val64 = readq(&bar0->gpio_control);
6516                 }
6517                 /* turn off LED */
6518                 val64 = readq(&bar0->adapter_control);
6519                 val64 = val64 &(~ADAPTER_LED_ON);
6520                 writeq(val64, &bar0->adapter_control);
6521                 s2io_link(nic, LINK_DOWN);
6522         }
6523         clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6524
6525 out_unlock:
6526         rtnl_unlock();
6527 }
6528
6529 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6530                                 struct buffAdd *ba,
6531                                 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6532                                 u64 *temp2, int size)
6533 {
6534         struct net_device *dev = sp->dev;
6535         struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6536
6537         if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6538                 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6539                 /* allocate skb */
6540                 if (*skb) {
6541                         DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6542                         /*
6543                          * As Rx frame are not going to be processed,
6544                          * using same mapped address for the Rxd
6545                          * buffer pointer
6546                          */
6547                         rxdp1->Buffer0_ptr = *temp0;
6548                 } else {
6549                         *skb = dev_alloc_skb(size);
6550                         if (!(*skb)) {
6551                                 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6552                                 DBG_PRINT(INFO_DBG, "memory to allocate ");
6553                                 DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
6554                                 sp->mac_control.stats_info->sw_stat. \
6555                                         mem_alloc_fail_cnt++;
6556                                 return -ENOMEM ;
6557                         }
6558                         sp->mac_control.stats_info->sw_stat.mem_allocated
6559                                 += (*skb)->truesize;
6560                         /* storing the mapped addr in a temp variable
6561                          * such it will be used for next rxd whose
6562                          * Host Control is NULL
6563                          */
6564                         rxdp1->Buffer0_ptr = *temp0 =
6565                                 pci_map_single( sp->pdev, (*skb)->data,
6566                                         size - NET_IP_ALIGN,
6567                                         PCI_DMA_FROMDEVICE);
6568                         if( (rxdp1->Buffer0_ptr == 0) ||
6569                                 (rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
6570                                 goto memalloc_failed;
6571                         }
6572                         rxdp->Host_Control = (unsigned long) (*skb);
6573                 }
6574         } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6575                 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6576                 /* Two buffer Mode */
6577                 if (*skb) {
6578                         rxdp3->Buffer2_ptr = *temp2;
6579                         rxdp3->Buffer0_ptr = *temp0;
6580                         rxdp3->Buffer1_ptr = *temp1;
6581                 } else {
6582                         *skb = dev_alloc_skb(size);
6583                         if (!(*skb)) {
6584                                 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6585                                 DBG_PRINT(INFO_DBG, "memory to allocate ");
6586                                 DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
6587                                 sp->mac_control.stats_info->sw_stat. \
6588                                         mem_alloc_fail_cnt++;
6589                                 return -ENOMEM;
6590                         }
6591                         sp->mac_control.stats_info->sw_stat.mem_allocated
6592                                 += (*skb)->truesize;
6593                         rxdp3->Buffer2_ptr = *temp2 =
6594                                 pci_map_single(sp->pdev, (*skb)->data,
6595                                                dev->mtu + 4,
6596                                                PCI_DMA_FROMDEVICE);
6597                         if( (rxdp3->Buffer2_ptr == 0) ||
6598                                 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
6599                                 goto memalloc_failed;
6600                         }
6601                         rxdp3->Buffer0_ptr = *temp0 =
6602                                 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6603                                                 PCI_DMA_FROMDEVICE);
6604                         if( (rxdp3->Buffer0_ptr == 0) ||
6605                                 (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
6606                                 pci_unmap_single (sp->pdev,
6607                                         (dma_addr_t)rxdp3->Buffer2_ptr,
6608                                         dev->mtu + 4, PCI_DMA_FROMDEVICE);
6609                                 goto memalloc_failed;
6610                         }
6611                         rxdp->Host_Control = (unsigned long) (*skb);
6612
6613                         /* Buffer-1 will be dummy buffer not used */
6614                         rxdp3->Buffer1_ptr = *temp1 =
6615                                 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6616                                                 PCI_DMA_FROMDEVICE);
6617                         if( (rxdp3->Buffer1_ptr == 0) ||
6618                                 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
6619                                 pci_unmap_single (sp->pdev,
6620                                         (dma_addr_t)rxdp3->Buffer0_ptr,
6621                                         BUF0_LEN, PCI_DMA_FROMDEVICE);
6622                                 pci_unmap_single (sp->pdev,
6623                                         (dma_addr_t)rxdp3->Buffer2_ptr,
6624                                         dev->mtu + 4, PCI_DMA_FROMDEVICE);
6625                                 goto memalloc_failed;
6626                         }
6627                 }
6628         }
6629         return 0;
6630         memalloc_failed:
6631                 stats->pci_map_fail_cnt++;
6632                 stats->mem_freed += (*skb)->truesize;
6633                 dev_kfree_skb(*skb);
6634                 return -ENOMEM;
6635 }
6636
6637 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6638                                 int size)
6639 {
6640         struct net_device *dev = sp->dev;
6641         if (sp->rxd_mode == RXD_MODE_1) {
6642                 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6643         } else if (sp->rxd_mode == RXD_MODE_3B) {
6644                 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6645                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6646                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6647         }
6648 }
6649
6650 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6651 {
6652         int i, j, k, blk_cnt = 0, size;
6653         struct mac_info * mac_control = &sp->mac_control;
6654         struct config_param *config = &sp->config;
6655         struct net_device *dev = sp->dev;
6656         struct RxD_t *rxdp = NULL;
6657         struct sk_buff *skb = NULL;
6658         struct buffAdd *ba = NULL;
6659         u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6660
6661         /* Calculate the size based on ring mode */
6662         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6663                 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6664         if (sp->rxd_mode == RXD_MODE_1)
6665                 size += NET_IP_ALIGN;
6666         else if (sp->rxd_mode == RXD_MODE_3B)
6667                 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6668
6669         for (i = 0; i < config->rx_ring_num; i++) {
6670                 blk_cnt = config->rx_cfg[i].num_rxd /
6671                         (rxd_count[sp->rxd_mode] +1);
6672
6673                 for (j = 0; j < blk_cnt; j++) {
6674                         for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6675                                 rxdp = mac_control->rings[i].
6676                                         rx_blocks[j].rxds[k].virt_addr;
6677                                 if(sp->rxd_mode == RXD_MODE_3B)
6678                                         ba = &mac_control->rings[i].ba[j][k];
6679                                 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6680                                                        &skb,(u64 *)&temp0_64,
6681                                                        (u64 *)&temp1_64,
6682                                                        (u64 *)&temp2_64,
6683                                                         size) == ENOMEM) {
6684                                         return 0;
6685                                 }
6686
6687                                 set_rxd_buffer_size(sp, rxdp, size);
6688                                 wmb();
6689                                 /* flip the Ownership bit to Hardware */
6690                                 rxdp->Control_1 |= RXD_OWN_XENA;
6691                         }
6692                 }
6693         }
6694         return 0;
6695
6696 }
6697
/*
 * s2io_add_isr - register the driver's interrupt handler(s).
 * @sp: device private structure.
 *
 * Tries MSI-X first when configured; any failure (vector enable or a
 * per-vector request_irq) tears down MSI-X and falls back to legacy
 * INTA.  Returns 0 on success, -1 if even the INTA handler could not
 * be registered.
 */
static int s2io_add_isr(struct s2io_nic * sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		/* MSI-X could not be enabled; degrade to legacy interrupts. */
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/* Store the values of the MSIX table in the struct s2io_nic structure */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_tx_cnt=0,msix_rx_cnt=0;

		/* Data vectors start at index 1 (0 is the alarm vector);
		 * the table is terminated by an entry whose in_use is not
		 * MSIX_FLG.
		 */
		for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
			if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
				sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_fifo_handle, 0, sp->desc[i],
						  sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_tx_cnt++;
				}
			} else {
				/* Rx-ring vector. */
				sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_ring_handle, 0, sp->desc[i],
						  sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_rx_cnt++;
				}
			}
			if (err) {
				/* Any single registration failure aborts
				 * MSI-X entirely and falls back to INTA.
				 */
				remove_msix_isr(sp);
				DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
					  "failed\n", dev->name, i);
				DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n",
						 dev->name);
				sp->config.intr_type = INTA;
				break;
			}
			sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
		}
		if (!err) {
			printk(KERN_INFO "MSI-X-TX %d entries enabled\n",
				msix_tx_cnt);
			printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
				msix_rx_cnt);
		}
	}
	/* Reached either directly or via the MSI-X fallback above. */
	if (sp->config.intr_type == INTA) {
		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
				sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
6785 static void s2io_rem_isr(struct s2io_nic * sp)
6786 {
6787         if (sp->config.intr_type == MSI_X)
6788                 remove_msix_isr(sp);
6789         else
6790                 remove_inta_isr(sp);
6791 }
6792
/*
 * do_s2io_card_down - bring the adapter down.
 * @sp: device private structure.
 * @do_io: nonzero when register I/O is still safe; zero on paths (e.g.
 *	after a PCI error) where the device must not be touched.
 *
 * Stops the alarm timer, waits for any in-flight link-state task,
 * disables Tx/Rx, releases interrupts, waits for the hardware to
 * quiesce, resets it, and finally frees all Tx and Rx buffers.
 */
static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	unsigned long flags;
	register u64 val64 = 0;

	if (!is_s2io_card_up(sp))
		return;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
		msleep(50);
	}
	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* Kill tasklet. */
	tasklet_kill(&sp->task);

	/* Check if the device is Quiescent and then Reset the NIC */
	while(do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffers to avoid a ring bump. Since there is
		 * no intention of processing the Rx frames at this point,
		 * we just set the ownership bit of the rxds in each Rx
		 * ring back to HW and program the appropriate buffer size
		 * based on the ring mode.
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if(verify_pcc_quiescent(sp, sp->device_enabled_once))
			break;
		}

		msleep(50);
		cnt++;
		/* Give up after ~500 ms; the reset below happens anyway. */
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG,
				  "s2io_close:Device not Quiescent ");
			DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
				  (unsigned long long) val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	spin_lock_irqsave(&sp->tx_lock, flags);
	/* Free all Tx buffers */
	free_tx_buffers(sp);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	/* Free all Rx buffers */
	spin_lock_irqsave(&sp->rx_lock, flags);
	free_rx_buffers(sp);
	spin_unlock_irqrestore(&sp->rx_lock, flags);

	/* Allow a pending s2io_set_link task to run again. */
	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
6861
/* Normal shutdown path: bring the card down with register I/O allowed. */
static void s2io_card_down(struct s2io_nic * sp)
{
	do_s2io_card_down(sp, /* do_io = */ 1);
}
6866
/*
 * s2io_card_up - bring the adapter to an operational state.
 * @sp: device private structure.
 *
 * Programs the H/W registers, fills the Rx rings, restores the receive
 * mode, starts the NIC, registers the ISR(s), arms the alarm timer and
 * tasklet, then enables interrupts.  Returns 0 on success or a negative
 * errno; on failure the device is reset and Rx buffers are released.
 */
static int s2io_card_up(struct s2io_nic * sp)
{
	int i, ret = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	ret = init_nic(sp);
	if (ret != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		/* -EIO: adapter is gone, don't touch it again. */
		if (ret != -EIO)
			s2io_reset(sp);
		return ret;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		if ((ret = fill_rx_buffers(sp, i))) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  atomic_read(&sp->rx_bufs_left[i]));
	}
	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos= 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (sp->lro) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use(if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		/* NOTE(review): s2io_add_isr() switches intr_type to INTA on
		 * any MSI-X failure, so this branch looks unreachable —
		 * confirm before relying on it.
		 */
		if (sp->config.intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	/* Enable tasklet for the device */
	tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);

	/*  Enable select interrupts */
	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
	if (sp->config.intr_type != INTA)
		en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
	else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}

	/* Mark the card up last, once everything above has succeeded. */
	set_bit(__S2IO_STATE_CARD_UP, &sp->state);
	return 0;
}
6957
6958 /**
6959  * s2io_restart_nic - Resets the NIC.
6960  * @data : long pointer to the device private structure
6961  * Description:
6962  * This function is scheduled to be run by the s2io_tx_watchdog
6963  * function after 0.5 secs to reset the NIC. The idea is to reduce
6964  * the run time of the watch dog routine which is run holding a
6965  * spin lock.
6966  */
6967
6968 static void s2io_restart_nic(struct work_struct *work)
6969 {
6970         struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6971         struct net_device *dev = sp->dev;
6972
6973         rtnl_lock();
6974
6975         if (!netif_running(dev))
6976                 goto out_unlock;
6977
6978         s2io_card_down(sp);
6979         if (s2io_card_up(sp)) {
6980                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6981                           dev->name);
6982         }
6983         netif_wake_queue(dev);
6984         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6985                   dev->name);
6986 out_unlock:
6987         rtnl_unlock();
6988 }
6989
6990 /**
6991  *  s2io_tx_watchdog - Watchdog for transmit side.
6992  *  @dev : Pointer to net device structure
6993  *  Description:
6994  *  This function is triggered if the Tx Queue is stopped
6995  *  for a pre-defined amount of time when the Interface is still up.
6996  *  If the Interface is jammed in such a situation, the hardware is
6997  *  reset (by s2io_close) and restarted again (by s2io_open) to
6998  *  overcome any problem that might have been caused in the hardware.
6999  *  Return value:
7000  *  void
7001  */
7002
7003 static void s2io_tx_watchdog(struct net_device *dev)
7004 {
7005         struct s2io_nic *sp = dev->priv;
7006
7007         if (netif_carrier_ok(dev)) {
7008                 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
7009                 schedule_work(&sp->rst_timer_task);
7010                 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
7011         }
7012 }
7013
/**
 *   rx_osm_handler - To perform some OS related operations on SKB.
 *   @ring_data : per-ring private data; identifies the ring from which
 *   this RxD was extracted.
 *   @rxdp : the Rx descriptor whose completed frame is to be handed up.
 *   Description:
 *   This function is called by the Rx interrupt service routine to perform
 *   some OS related operations on the SKB before passing it to the upper
 *   layers. It mainly checks if the checksum is OK, if so adds it to the
 *   SKBs cksum variable, increments the Rx packet count and passes the SKB
 *   to the upper layer. If the checksum is wrong, it increments the Rx
 *   packet error count, frees the SKB and returns error.
 *   Return value:
 *   SUCCESS on success and -1 on failure.
 */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
	struct s2io_nic *sp = ring_data->nic;
	struct net_device *dev = (struct net_device *) sp->dev;
	/* The skb for this descriptor was stashed in Host_Control at fill time. */
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long) rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
	struct lro *lro;
	u8 err_mask;

	skb->dev = dev;

	if (err) {
		/* Check for parity error */
		if (err & 0x1) {
			sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
		}
		/* Transfer code occupies the top bits of Control_1; it
		 * selects which per-error counter to bump below. */
		err_mask = err >> 48;
		switch(err_mask) {
			case 1:
				sp->mac_control.stats_info->sw_stat.
				rx_parity_err_cnt++;
			break;

			case 2:
				sp->mac_control.stats_info->sw_stat.
				rx_abort_cnt++;
			break;

			case 3:
				sp->mac_control.stats_info->sw_stat.
				rx_parity_abort_cnt++;
			break;

			case 4:
				sp->mac_control.stats_info->sw_stat.
				rx_rda_fail_cnt++;
			break;

			case 5:
				sp->mac_control.stats_info->sw_stat.
				rx_unkn_prot_cnt++;
			break;

			case 6:
				sp->mac_control.stats_info->sw_stat.
				rx_fcs_err_cnt++;
			break;

			case 7:
				sp->mac_control.stats_info->sw_stat.
				rx_buf_size_err_cnt++;
			break;

			case 8:
				sp->mac_control.stats_info->sw_stat.
				rx_rxd_corrupt_cnt++;
			break;

			case 15:
				sp->mac_control.stats_info->sw_stat.
				rx_unkn_err_cnt++;
			break;
		}
		/*
		* Drop the packet if bad transfer code. Exception being
		* 0x5, which could be due to unsupported IPv6 extension header.
		* In this case, we let stack handle the packet.
		* Note that in this case, since checksum will be incorrect,
		* stack will validate the same.
		*/
		if (err_mask != 0x5) {
			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
				dev->name, err_mask);
			sp->stats.rx_crc_errors++;
			sp->mac_control.stats_info->sw_stat.mem_freed
				+= skb->truesize;
			dev_kfree_skb(skb);
			atomic_dec(&sp->rx_bufs_left[ring_no]);
			rxdp->Host_Control = 0;
			return 0;
		}
	}

	/* Updating statistics */
	sp->stats.rx_packets++;
	rxdp->Host_Control = 0;
	if (sp->rxd_mode == RXD_MODE_1) {
		/* One-buffer mode: the whole frame is already in the skb. */
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		sp->stats.rx_bytes += len;
		skb_put(skb, len);

	} else if (sp->rxd_mode == RXD_MODE_3B) {
		/* Multi-buffer mode: copy the header from buffer 0 in front
		 * of the payload (buffer 2) that DMA'd into the skb. */
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
		unsigned char *buff = skb_push(skb, buf0_len);

		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
		sp->stats.rx_bytes += buf0_len + buf2_len;
		memcpy(buff, ba->ba_0, buf0_len);
		skb_put(skb, buf2_len);
	}

	/* Hardware checksum is trusted only for non-fragmented TCP/UDP
	 * frames, and only when rx_csum offload is enabled. */
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
	    (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
	    (sp->rx_csum)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (sp->lro) {
				/* Large receive offload: try to merge this
				 * segment into an existing TCP session. */
				u32 tcp_len;
				u8 *tcp;
				int ret = 0;

				ret = s2io_club_tcp_session(skb->data, &tcp,
							    &tcp_len, &lro,
							    rxdp, sp);
				switch (ret) {
					case 3: /* Begin anew */
						lro->parent = skb;
						goto aggregate;
					case 1: /* Aggregate */
					{
						lro_append_pkt(sp, lro,
							skb, tcp_len);
						goto aggregate;
					}
					case 4: /* Flush session */
					{
						/* Session hit its packet
						 * limit: append, flush, and
						 * clear it. */
						lro_append_pkt(sp, lro,
							skb, tcp_len);
						queue_rx_frame(lro->parent);
						clear_lro_session(lro);
						sp->mac_control.stats_info->
						    sw_stat.flush_max_pkts++;
						goto aggregate;
					}
					case 2: /* Flush both */
						/* Flush the aggregated parent
						 * and send this skb up on its
						 * own via send_up. */
						lro->parent->data_len =
							lro->frags_len;
						sp->mac_control.stats_info->
						     sw_stat.sending_both++;
						queue_rx_frame(lro->parent);
						clear_lro_session(lro);
						goto send_up;
					case 0: /* sessions exceeded */
					case -1: /* non-TCP or not
						  * L2 aggregatable
						  */
					case 5: /*
						 * First pkt in session not
						 * L3/L4 aggregatable
						 */
						break;
					default:
						DBG_PRINT(ERR_DBG,
							"%s: Samadhana!!\n",
							 __FUNCTION__);
						BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}
	sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
	if (!sp->lro) {
		skb->protocol = eth_type_trans(skb, dev);
		if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
			vlan_strip_flag)) {
			/* Queueing the vlan frame to the upper layer */
			if (napi)
				vlan_hwaccel_receive_skb(skb, sp->vlgrp,
					RXD_GET_VLAN_TAG(rxdp->Control_2));
			else
				vlan_hwaccel_rx(skb, sp->vlgrp,
					RXD_GET_VLAN_TAG(rxdp->Control_2));
		} else {
			if (napi)
				netif_receive_skb(skb);
			else
				netif_rx(skb);
		}
	} else {
send_up:
		queue_rx_frame(skb);
	}
	dev->last_rx = jiffies;
aggregate:
	atomic_dec(&sp->rx_bufs_left[ring_no]);
	return SUCCESS;
}
7240
/**
 *  s2io_link - stops/starts the Tx queue.
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
 *  @link : indicates whether link is UP/DOWN.
 *  Description:
 *  This function stops/starts the Tx queue depending on whether the link
 *  status of the NIC is down or up. This is called by the Alarm
 *  interrupt handler whenever a link change interrupt comes up.
 *  Return value:
 *  void.
 */
7253
7254 static void s2io_link(struct s2io_nic * sp, int link)
7255 {
7256         struct net_device *dev = (struct net_device *) sp->dev;
7257
7258         if (link != sp->last_link_state) {
7259                 if (link == LINK_DOWN) {
7260                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7261                         netif_carrier_off(dev);
7262                         if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7263                         sp->mac_control.stats_info->sw_stat.link_up_time =
7264                                 jiffies - sp->start_time;
7265                         sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7266                 } else {
7267                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7268                         if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7269                         sp->mac_control.stats_info->sw_stat.link_down_time =
7270                                 jiffies - sp->start_time;
7271                         sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7272                         netif_carrier_on(dev);
7273                 }
7274         }
7275         sp->last_link_state = link;
7276         sp->start_time = jiffies;
7277 }
7278
7279 /**
7280  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7281  *  @sp : private member of the device structure, which is a pointer to the
7282  *  s2io_nic structure.
7283  *  Description:
7284  *  This function initializes a few of the PCI and PCI-X configuration registers
7285  *  with recommended values.
7286  *  Return value:
7287  *  void
7288  */
7289
static void s2io_init_pci(struct s2io_nic * sp)
{
	u16 pci_cmd = 0, pcix_cmd = 0;

	/* Enable Data Parity Error Recovery in PCI-X command register. */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			     &(pcix_cmd));
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			      (pcix_cmd | 1));
	/* Read back (value unused) — presumably to post the write before
	 * continuing; TODO confirm. */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			     &(pcix_cmd));

	/* Set the PErr Response bit in PCI command register. */
	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(sp->pdev, PCI_COMMAND,
			      (pci_cmd | PCI_COMMAND_PARITY));
	/* Read back (value unused) — same posting pattern as above. */
	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
}
7308
7309 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
7310 {
7311         if ( tx_fifo_num > 8) {
7312                 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
7313                          "supported\n");
7314                 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
7315                 tx_fifo_num = 8;
7316         }
7317         if ( rx_ring_num > 8) {
7318                 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
7319                          "supported\n");
7320                 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
7321                 rx_ring_num = 8;
7322         }
7323         if (*dev_intr_type != INTA)
7324                 napi = 0;
7325
7326         if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7327                 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7328                           "Defaulting to INTA\n");
7329                 *dev_intr_type = INTA;
7330         }
7331
7332         if ((*dev_intr_type == MSI_X) &&
7333                         ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7334                         (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7335                 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7336                                         "Defaulting to INTA\n");
7337                 *dev_intr_type = INTA;
7338         }
7339
7340         if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7341                 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7342                 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7343                 rx_ring_mode = 1;
7344         }
7345         return SUCCESS;
7346 }
7347
/**
 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
 * or Traffic class respectively.
 * @nic: device private variable
 * Description: The function configures the receive steering to
 * the desired receive ring.
 * Return Value:  SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */
static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;

	/* The DS codepoint is a 6-bit field, so 63 is the largest valid value */
	if (ds_codepoint > 63)
		return FAILURE;

	/* Data register: the ring this codepoint should steer to */
	val64 = RTS_DS_MEM_DATA(ring);
	writeq(val64, &bar0->rts_ds_mem_data);

	/* Control register: write-enable + strobe, indexed by the codepoint */
	val64 = RTS_DS_MEM_CTRL_WE |
		RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
		RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);

	writeq(val64, &bar0->rts_ds_mem_ctrl);

	/* Poll until the adapter has consumed the command (or time out) */
	return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
				RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
				S2IO_BIT_RESET);
}
7378
/**
 *  s2io_init_nic - Initialization of the adapter .
 *  @pdev : structure containing the PCI related information of the device.
 *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
 *  Description:
 *  The function initializes an adapter identified by the pci_dev structure.
 *  All OS related initialization including memory and device structure and
 *  initialization of the device private variable is done. Also the swapper
 *  control register is initialized to enable read and write into the I/O
 *  registers of the device.
 *  Return value:
 *  returns 0 on success and negative on failure.
 */
7392
static int __devinit
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
	struct s2io_nic *sp;
	struct net_device *dev;
	int i, j, ret;
	int dma_flag = FALSE;
	u32 mac_up, mac_down;
	u64 val64 = 0, tmp64 = 0;
	struct XENA_dev_config __iomem *bar0 = NULL;
	u16 subid;
	struct mac_info *mac_control;
	struct config_param *config;
	int mode;
	u8 dev_intr_type = intr_type;
	DECLARE_MAC_BUF(mac);

	/* Sanitize module parameters (fifo/ring counts, intr type, ...) */
	if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
		return ret;

	if ((ret = pci_enable_device(pdev))) {
		DBG_PRINT(ERR_DBG,
			  "s2io_init_nic: pci_enable_device failed\n");
		return ret;
	}

	/* Prefer 64-bit DMA; fall back to 32-bit, otherwise bail out */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
		dma_flag = TRUE;
		if (pci_set_consistent_dma_mask
		    (pdev, DMA_64BIT_MASK)) {
			DBG_PRINT(ERR_DBG,
				  "Unable to obtain 64bit DMA for \
					consistent allocations\n");
			pci_disable_device(pdev);
			return -ENOMEM;
		}
	} else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
	} else {
		pci_disable_device(pdev);
		return -ENOMEM;
	}
	/* NOTE(review): 'ret' from pci_request_regions is logged but the
	 * function returns -ENODEV instead of it — consider propagating. */
	if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
		pci_disable_device(pdev);
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct s2io_nic));
	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Device allocation failed\n");
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		return -ENODEV;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/*  Private member variable initialized to s2io NIC structure */
	sp = dev->priv;
	memset(sp, 0, sizeof(struct s2io_nic));
	sp->dev = dev;
	sp->pdev = pdev;
	sp->high_dma_flag = dma_flag;
	sp->device_enabled_once = FALSE;
	if (rx_ring_mode == 1)
		sp->rxd_mode = RXD_MODE_1;
	if (rx_ring_mode == 2)
		sp->rxd_mode = RXD_MODE_3B;

	sp->config.intr_type = dev_intr_type;

	/* Herc (Xframe II) is identified by its own PCI device IDs */
	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
		(pdev->device == PCI_DEVICE_ID_HERC_UNI))
		sp->device_type = XFRAME_II_DEVICE;
	else
		sp->device_type = XFRAME_I_DEVICE;

	sp->lro = lro_enable;

	/* Initialize some PCI/PCI-X fields of the NIC. */
	s2io_init_pci(sp);

	/*
	 * Setting the device configuration parameters.
	 * Most of these parameters can be specified by the user during
	 * module insertion as they are module loadable parameters. If
	 * these parameters are not specified during load time, they
	 * are initialized with default values.
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	config->napi = napi;

	/* Tx side parameters. */
	config->tx_fifo_num = tx_fifo_num;
	for (i = 0; i < MAX_TX_FIFOS; i++) {
		config->tx_cfg[i].fifo_len = tx_fifo_len[i];
		config->tx_cfg[i].fifo_priority = i;
	}

	/* mapping the QoS priority to the configured fifos */
	for (i = 0; i < MAX_TX_FIFOS; i++)
		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];

	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
	for (i = 0; i < config->tx_fifo_num; i++) {
		config->tx_cfg[i].f_no_snoop =
		    (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
		if (config->tx_cfg[i].fifo_len < 65) {
			/* Small fifos use per-list Tx interrupts instead of
			 * utilization-based ones */
			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
			break;
		}
	}
	/* + 2 because one Txd for skb->data and one Txd for UFO */
	config->max_txds = MAX_SKB_FRAGS + 2;

	/* Rx side parameters. */
	config->rx_ring_num = rx_ring_num;
	for (i = 0; i < MAX_RX_RINGS; i++) {
		config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
		    (rxd_count[sp->rxd_mode] + 1);
		config->rx_cfg[i].ring_priority = i;
	}

	for (i = 0; i < rx_ring_num; i++) {
		config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
		config->rx_cfg[i].f_no_snoop =
		    (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
	}

	/*  Setting Mac Control parameters */
	mac_control->rmac_pause_time = rmac_pause_time;
	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;


	/* Initialize Ring buffer parameters. */
	for (i = 0; i < config->rx_ring_num; i++)
		atomic_set(&sp->rx_bufs_left[i], 0);

	/*  initialize the shared memory used by the NIC and the host */
	if (init_shared_mem(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
			  dev->name);
		ret = -ENOMEM;
		goto mem_alloc_failed;
	}

	/* BAR0: device configuration registers */
	sp->bar0 = ioremap(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!sp->bar0) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar0_remap_failed;
	}

	/* BAR2: Tx FIFO doorbell region */
	sp->bar1 = ioremap(pci_resource_start(pdev, 2),
				     pci_resource_len(pdev, 2));
	if (!sp->bar1) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar1_remap_failed;
	}

	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) sp->bar0;

	/* Initializing the BAR1 address as the start of the FIFO pointer. */
	for (j = 0; j < MAX_TX_FIFOS; j++) {
		mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
		    (sp->bar1 + (j * 0x00020000));
	}

	/*  Driver entry points */
	dev->open = &s2io_open;
	dev->stop = &s2io_close;
	dev->hard_start_xmit = &s2io_xmit;
	dev->get_stats = &s2io_get_stats;
	dev->set_multicast_list = &s2io_set_multicast;
	dev->do_ioctl = &s2io_ioctl;
	dev->set_mac_address = &s2io_set_mac_addr;
	dev->change_mtu = &s2io_change_mtu;
	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = s2io_vlan_rx_register;

	/*
	 * will use eth_mac_addr() for  dev->set_mac_address
	 * mac address will be set every time dev->open() is called
	 */
	netif_napi_add(dev, &sp->napi, s2io_poll, 32);

#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = s2io_netpoll;
#endif

	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	if (sp->high_dma_flag == TRUE)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_TSO;
	dev->features |= NETIF_F_TSO6;
	/* UFO is only offered on Xframe II and when the module asked for it */
	if ((sp->device_type & XFRAME_II_DEVICE) && (ufo))  {
		dev->features |= NETIF_F_UFO;
		dev->features |= NETIF_F_HW_CSUM;
	}

	dev->tx_timeout = &s2io_tx_watchdog;
	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
	INIT_WORK(&sp->set_link_task, s2io_set_link);

	pci_save_state(sp->pdev);

	/* Setting swapper control on the NIC, for proper reset operation */
	if (s2io_set_swapper(sp)) {
		DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
			  dev->name);
		ret = -EAGAIN;
		goto set_swap_failed;
	}

	/* Verify if the Herc works on the slot its placed into */
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_verify_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
			ret = -EBADSLT;
			goto set_swap_failed;
		}
	}

	/* Not needed for Herc */
	if (sp->device_type & XFRAME_I_DEVICE) {
		/*
		 * Fix for all "FFs" MAC address problems observed on
		 * Alpha platforms
		 */
		fix_mac_address(sp);
		s2io_reset(sp);
	}

	/*
	 * MAC address initialization.
	 * For now only one mac address will be read and used.
	 */
	bar0 = sp->bar0;
	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
	writeq(val64, &bar0->rmac_addr_cmd_mem);
	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
		      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
	tmp64 = readq(&bar0->rmac_addr_data0_mem);
	mac_down = (u32) tmp64;
	mac_up = (u32) (tmp64 >> 32);

	/* Unpack the 48-bit MAC from the two 32-bit register halves */
	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

	/*  Set the factory defined MAC address initially   */
	dev->addr_len = ETH_ALEN;
	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
	memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);

	 /* Store the values of the MSIX table in the s2io_nic structure */
	store_xmsi_data(sp);
	/* reset Nic and bring it to known state */
	s2io_reset(sp);

	/*
	 * Initialize the tasklet status and link state flags
	 * and the card state parameter
	 */
	sp->tasklet_status = 0;
	sp->state = 0;

	/* Initialize spinlocks */
	spin_lock_init(&sp->tx_lock);

	if (!napi)
		spin_lock_init(&sp->put_lock);
	spin_lock_init(&sp->rx_lock);

	/*
	 * SXE-002: Configure link and activity LED to init state
	 * on driver load.
	 */
	subid = sp->pdev->subsystem_device;
	if ((subid & 0xFF) >= 0x07) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *) bar0 + 0x2700);
		val64 = readq(&bar0->gpio_control);
	}

	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */

	if (register_netdev(dev)) {
		DBG_PRINT(ERR_DBG, "Device registration failed\n");
		ret = -ENODEV;
		goto register_failed;
	}
	s2io_vpd_read(sp);
	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
		  sp->product_name, pdev->revision);
	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
		  s2io_driver_version);
	DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
		  dev->name, print_mac(mac, dev->dev_addr));
	DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_print_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
			ret = -EBADSLT;
			unregister_netdev(dev);
			goto set_swap_failed;
		}
	}
	switch(sp->rxd_mode) {
		case RXD_MODE_1:
		    DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
						dev->name);
		    break;
		case RXD_MODE_3B:
		    DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
						dev->name);
		    break;
	}

	if (napi)
		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
	switch(sp->config.intr_type) {
		case INTA:
		    DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
		    break;
		case MSI_X:
		    DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
		    break;
	}
	if (sp->lro)
		DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
			  dev->name);
	if (ufo)
		DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
					" enabled\n", dev->name);
	/* Initialize device name */
	sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);

	/*
	 * Make Link state as off at this point, when the Link change
	 * interrupt comes the state will be automatically changed to
	 * the right state.
	 */
	netif_carrier_off(dev);

	return 0;

	/* Error unwind: labels fall through in reverse acquisition order */
      register_failed:
      set_swap_failed:
	iounmap(sp->bar1);
      bar1_remap_failed:
	iounmap(sp->bar0);
      bar0_remap_failed:
      mem_alloc_failed:
	/* free_shared_mem() is assumed safe on partial/failed allocation —
	 * TODO confirm against its implementation */
	free_shared_mem(sp);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);

	return ret;
}
7780
/**
 * s2io_rem_nic - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device. This could
 * be in response to a Hot plug event or when the driver is to be removed
 * from memory.
 */
7789
static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
	struct net_device *dev =
	    (struct net_device *) pci_get_drvdata(pdev);
	struct s2io_nic *sp;

	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
		return;
	}

	/* Ensure no queued restart/link work items run after teardown */
	flush_scheduled_work();

	sp = dev->priv;
	unregister_netdev(dev);

	/* Release resources in reverse order of acquisition in probe */
	free_shared_mem(sp);
	iounmap(sp->bar0);
	iounmap(sp->bar1);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	pci_disable_device(pdev);
}
7814
7815 /**
7816  * s2io_starter - Entry point for the driver
7817  * Description: This function is the entry point for the driver. It verifies
7818  * the module loadable parameters and initializes PCI configuration space.
7819  */
7820
7821 static int __init s2io_starter(void)
7822 {
7823         return pci_register_driver(&s2io_driver);
7824 }
7825
/**
 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
 */
7830
static __exit void s2io_closer(void)
{
	/* Detach from the PCI core; s2io_rem_nic() runs for each device */
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}
7836
7837 module_init(s2io_starter);
7838 module_exit(s2io_closer);
7839
7840 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7841                 struct tcphdr **tcp, struct RxD_t *rxdp)
7842 {
7843         int ip_off;
7844         u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7845
7846         if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7847                 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7848                           __FUNCTION__);
7849                 return -1;
7850         }
7851
7852         /* TODO:
7853          * By default the VLAN field in the MAC is stripped by the card, if this
7854          * feature is turned off in rx_pa_cfg register, then the ip_off field
7855          * has to be shifted by a further 2 bytes
7856          */
7857         switch (l2_type) {
7858                 case 0: /* DIX type */
7859                 case 4: /* DIX type with VLAN */
7860                         ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7861                         break;
7862                 /* LLC, SNAP etc are considered non-mergeable */
7863                 default:
7864                         return -1;
7865         }
7866
7867         *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7868         ip_len = (u8)((*ip)->ihl);
7869         ip_len <<= 2;
7870         *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7871
7872         return 0;
7873 }
7874
7875 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7876                                   struct tcphdr *tcp)
7877 {
7878         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7879         if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7880            (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7881                 return -1;
7882         return 0;
7883 }
7884
7885 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7886 {
7887         return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
7888 }
7889
/* Seed a fresh LRO session from the first packet's headers. */
static void initiate_new_session(struct lro *lro, u8 *l2h,
		     struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
{
	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
	lro->l2h = l2h;
	lro->iph = ip;
	lro->tcph = tcp;
	/* Next in-order sequence number this session expects */
	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
	lro->tcp_ack = ntohl(tcp->ack_seq);
	lro->sg_num = 1;
	lro->total_len = ntohs(ip->tot_len);
	lro->frags_len = 0;
	/*
	 * check if we saw TCP timestamp. Other consistency checks have
	 * already been done.
	 */
	if (tcp->doff == 8) {
		u32 *ptr;
		ptr = (u32 *)(tcp+1);
		lro->saw_ts = 1;
		/* NOTE(review): tsval/tsecr are stored here in raw network
		 * byte order, yet tcp_ack above is ntohl()ed — confirm the
		 * consumers (verify_l3_l4_lro_capable, update_L3L4_header)
		 * expect this mix of byte orders. */
		lro->cur_tsval = *(ptr+1);
		lro->cur_tsecr = *(ptr+2);
	}
	lro->in_use = 1;
}
7915
/*
 * Rewrite the aggregated super-packet's IP and TCP headers before it is
 * handed to the stack: new total length, recomputed IP checksum, and the
 * latest ack/window/tsecr seen for the session.  Also updates the
 * aggregation statistics.
 */
static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
{
	struct iphdr *ip = lro->iph;
	struct tcphdr *tcp = lro->tcph;
	__sum16 nchk;
	struct stat_block *statinfo = sp->mac_control.stats_info;
	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);

	/* Update L3 header */
	ip->tot_len = htons(lro->total_len);
	ip->check = 0;
	/* Recompute the IP header checksum over the updated length */
	nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
	ip->check = nchk;

	/* Update L4 header */
	/* NOTE(review): lro->tcp_ack is ntohl()ed at session start but
	 * copied raw from later packets in aggregate_new_rx(); confirm the
	 * value written back here is consistently network byte order. */
	tcp->ack_seq = lro->tcp_ack;
	tcp->window = lro->window;

	/* Update tsecr field if this session has timestamps enabled */
	if (lro->saw_ts) {
		u32 *ptr = (u32 *)(tcp + 1);
		*(ptr+2) = lro->cur_tsecr;
	}

	/* Update counters required for calculation of
	 * average no. of packets aggregated.
	 */
	statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
	statinfo->sw_stat.num_aggregations++;
}
7946
/* Fold one more in-order TCP segment into an existing LRO session. */
static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
		struct tcphdr *tcp, u32 l4_pyld)
{
	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
	/* Grow the session by this packet's TCP payload */
	lro->total_len += l4_pyld;
	lro->frags_len += l4_pyld;
	lro->tcp_next_seq += l4_pyld;
	lro->sg_num++;

	/* Update ack seq no. and window ad(from this pkt) in LRO object */
	/* NOTE(review): stored raw (network byte order) here, but
	 * initiate_new_session() stores an ntohl()ed tcp_ack — confirm
	 * intended; see also update_L3L4_header(). */
	lro->tcp_ack = tcp->ack_seq;
	lro->window = tcp->window;

	if (lro->saw_ts) {
		u32 *ptr;
		/* Update tsecr and tsval from this packet */
		ptr = (u32 *) (tcp + 1);
		lro->cur_tsval = *(ptr + 1);
		lro->cur_tsecr = *(ptr + 2);
	}
}
7968
/*
 * Decide whether this TCP segment may be merged into an LRO session.
 * Returns 0 when mergeable, -1 for pure acks/runts, IP options, ECN CE,
 * any TCP control flag other than ACK, unexpected TCP options, or a
 * non-monotonic / zero timestamp.
 * @l_lro may be NULL when no session exists yet.
 */
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
				    struct tcphdr *tcp, u32 tcp_pyld_len)
{
	u8 *ptr;

	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);

	if (!tcp_pyld_len) {
		/* Runt frame or a pure ack */
		return -1;
	}

	if (ip->ihl != 5) /* IP has options */
		return -1;

	/* If we see CE codepoint in IP header, packet is not mergeable */
	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
		return -1;

	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
	if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
				    tcp->ece || tcp->cwr || !tcp->ack) {
		/*
		 * Currently recognize only the ack control word and
		 * any other control field being set would result in
		 * flushing the LRO session
		 */
		return -1;
	}

	/*
	 * Allow only one TCP timestamp option. Don't aggregate if
	 * any other options are detected.
	 */
	if (tcp->doff != 5 && tcp->doff != 8)
		return -1;

	if (tcp->doff == 8) {
		ptr = (u8 *)(tcp + 1);
		/* Skip NOP padding preceding the timestamp option.
		 * NOTE(review): this walk is not bounded by the option
		 * area length — confirm a malformed header cannot make it
		 * read past the TCP header. */
		while (*ptr == TCPOPT_NOP)
			ptr++;
		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
			return -1;

		/* Ensure timestamp value increases monotonically */
		/* NOTE(review): this compares what appear to be raw
		 * network-byte-order words (initiate_new_session() stores
		 * cur_tsval unconverted); a numeric '>' on big-endian data
		 * looks wrong on little-endian hosts — confirm whether an
		 * ntohl() conversion is needed on both sides. */
		if (l_lro)
			if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
				return -1;

		/* timestamp echo reply should be non-zero */
		if (*((u32 *)(ptr+6)) == 0)
			return -1;
	}

	return 0;
}
8025
/*
 * s2io_club_tcp_session - classify a received frame against the per-NIC
 * LRO session table and tell the rx path how to treat it.
 * @buffer: start of the received frame (L2 header)
 * @tcp: out - set to the TCP header within @buffer
 * @tcp_len: out - length of the L4 (TCP) payload
 * @lro: out - matched or newly claimed LRO session, or NULL
 * @rxdp: receive descriptor for this frame
 * @sp: per-adapter private structure
 *
 * Return codes consumed by the caller:
 *   0 - all LRO sessions in use; send the packet up unaggregated
 *   1 - packet aggregated into an existing session
 *   2 - out-of-order or non-mergeable packet; flush the session
 *   3 - a new LRO session was begun for this packet
 *   4 - aggregated and session reached max aggregation; flush it
 *   5 - frame not L3/L4 aggregatable; send up, no session touched
 * (non-zero returns from check_L2_lro_capable are passed through as-is)
 */
static int
s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
		      struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;

	/* Locate the IP/TCP headers; bail out if the frame fails L2 checks. */
	if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
					 rxdp))) {
		DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
			  ip->saddr, ip->daddr);
	} else {
		return ret;
	}

	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* First pass: look for an in-use session matching this socket pair. */
	for (i=0; i<MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &sp->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			/* A sequence gap means we cannot merge; flush. */
			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
					  "0x%x, actual 0x%x\n", __FUNCTION__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				sp->mac_control.stats_info->
				   sw_stat.outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
			return 5;
		}

		/* Second pass: claim the first free session slot. */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &sp->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
			  __FUNCTION__);
		*lro = NULL;
		return ret;
	}

	/* Act on the classification decided above. */
	switch (ret) {
		case 3:
			initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
			break;
		case 2:
			update_L3L4_header(sp, *lro);
			break;
		case 1:
			aggregate_new_rx(*lro, ip, tcph, *tcp_len);
			/* Session is full: finalize headers and flush it. */
			if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
				update_L3L4_header(sp, *lro);
				ret = 4; /* Flush the LRO */
			}
			break;
		default:
			DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
				__FUNCTION__);
			break;
	}

	return ret;
}
8121
8122 static void clear_lro_session(struct lro *lro)
8123 {
8124         static u16 lro_struct_size = sizeof(struct lro);
8125
8126         memset(lro, 0, lro_struct_size);
8127 }
8128
8129 static void queue_rx_frame(struct sk_buff *skb)
8130 {
8131         struct net_device *dev = skb->dev;
8132
8133         skb->protocol = eth_type_trans(skb, dev);
8134         if (napi)
8135                 netif_receive_skb(skb);
8136         else
8137                 netif_rx(skb);
8138 }
8139
8140 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8141                            struct sk_buff *skb,
8142                            u32 tcp_len)
8143 {
8144         struct sk_buff *first = lro->parent;
8145
8146         first->len += tcp_len;
8147         first->data_len = lro->frags_len;
8148         skb_pull(skb, (skb->len - tcp_len));
8149         if (skb_shinfo(first)->frag_list)
8150                 lro->last_frag->next = skb;
8151         else
8152                 skb_shinfo(first)->frag_list = skb;
8153         first->truesize += skb->truesize;
8154         lro->last_frag = skb;
8155         sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8156         return;
8157 }
8158
8159 /**
8160  * s2io_io_error_detected - called when PCI error is detected
8161  * @pdev: Pointer to PCI device
8162  * @state: The current pci connection state
8163  *
8164  * This function is called after a PCI bus error affecting
8165  * this device has been detected.
8166  */
8167 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8168                                                pci_channel_state_t state)
8169 {
8170         struct net_device *netdev = pci_get_drvdata(pdev);
8171         struct s2io_nic *sp = netdev->priv;
8172
8173         netif_device_detach(netdev);
8174
8175         if (netif_running(netdev)) {
8176                 /* Bring down the card, while avoiding PCI I/O */
8177                 do_s2io_card_down(sp, 0);
8178         }
8179         pci_disable_device(pdev);
8180
8181         return PCI_ERS_RESULT_NEED_RESET;
8182 }
8183
8184 /**
8185  * s2io_io_slot_reset - called after the pci bus has been reset.
8186  * @pdev: Pointer to PCI device
8187  *
8188  * Restart the card from scratch, as if from a cold-boot.
8189  * At this point, the card has exprienced a hard reset,
8190  * followed by fixups by BIOS, and has its config space
8191  * set up identically to what it was at cold boot.
8192  */
8193 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8194 {
8195         struct net_device *netdev = pci_get_drvdata(pdev);
8196         struct s2io_nic *sp = netdev->priv;
8197
8198         if (pci_enable_device(pdev)) {
8199                 printk(KERN_ERR "s2io: "
8200                        "Cannot re-enable PCI device after reset.\n");
8201                 return PCI_ERS_RESULT_DISCONNECT;
8202         }
8203
8204         pci_set_master(pdev);
8205         s2io_reset(sp);
8206
8207         return PCI_ERS_RESULT_RECOVERED;
8208 }
8209
8210 /**
8211  * s2io_io_resume - called when traffic can start flowing again.
8212  * @pdev: Pointer to PCI device
8213  *
8214  * This callback is called when the error recovery driver tells
8215  * us that its OK to resume normal operation.
8216  */
8217 static void s2io_io_resume(struct pci_dev *pdev)
8218 {
8219         struct net_device *netdev = pci_get_drvdata(pdev);
8220         struct s2io_nic *sp = netdev->priv;
8221
8222         if (netif_running(netdev)) {
8223                 if (s2io_card_up(sp)) {
8224                         printk(KERN_ERR "s2io: "
8225                                "Can't bring device back up after reset.\n");
8226                         return;
8227                 }
8228
8229                 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8230                         s2io_card_down(sp);
8231                         printk(KERN_ERR "s2io: "
8232                                "Can't resetore mac addr after reset.\n");
8233                         return;
8234                 }
8235         }
8236
8237         netif_device_attach(netdev);
8238         netif_wake_queue(netdev);
8239 }