/* drivers/net/s2io.c — pandora-kernel.git ("Pull battery-2.6.24 into release branch") */
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2007 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
 *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
41  * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  * napi: This parameter used to enable/disable NAPI (polling Rx)
46  *     Possible values '1' for enable and '0' for disable. Default is '1'
47  * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48  *      Possible values '1' for enable and '0' for disable. Default is '0'
49  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50  *                 Possible values '1' for enable , '0' for disable.
51  *                 Default is '2' - which means disable in promisc mode
52  *                 and enable in non-promiscuous mode.
53  ************************************************************************/
54
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
73 #include <linux/ip.h>
74 #include <linux/tcp.h>
75 #include <net/tcp.h>
76
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
79 #include <asm/io.h>
80 #include <asm/div64.h>
81 #include <asm/irq.h>
82
83 /* local include */
84 #include "s2io.h"
85 #include "s2io-regs.h"
86
87 #define DRV_VERSION "2.0.26.10"
88
89 /* S2io Driver name & version. */
90 static char s2io_driver_name[] = "Neterion";
91 static char s2io_driver_version[] = DRV_VERSION;
92
93 static int rxd_size[2] = {32,48};
94 static int rxd_count[2] = {127,85};
95
96 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
97 {
98         int ret;
99
100         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
102
103         return ret;
104 }
105
106 /*
107  * Cards with following subsystem_id have a link state indication
108  * problem, 600B, 600C, 600D, 640B, 640C and 640D.
109  * macro below identifies these cards given the subsystem_id.
110  */
111 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
112         (dev_type == XFRAME_I_DEVICE) ?                 \
113                 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
114                  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
115
116 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
117                                       ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
118 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
119 #define PANIC   1
120 #define LOW     2
121 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
122 {
123         struct mac_info *mac_control;
124
125         mac_control = &sp->mac_control;
126         if (rxb_size <= rxd_count[sp->rxd_mode])
127                 return PANIC;
128         else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
129                 return  LOW;
130         return 0;
131 }
132
133 static inline int is_s2io_card_up(const struct s2io_nic * sp)
134 {
135         return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
136 }
137
138 /* Ethtool related variables and Macros. */
139 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
140         "Register test\t(offline)",
141         "Eeprom test\t(offline)",
142         "Link test\t(online)",
143         "RLDRAM test\t(offline)",
144         "BIST Test\t(offline)"
145 };
146
147 static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
148         {"tmac_frms"},
149         {"tmac_data_octets"},
150         {"tmac_drop_frms"},
151         {"tmac_mcst_frms"},
152         {"tmac_bcst_frms"},
153         {"tmac_pause_ctrl_frms"},
154         {"tmac_ttl_octets"},
155         {"tmac_ucst_frms"},
156         {"tmac_nucst_frms"},
157         {"tmac_any_err_frms"},
158         {"tmac_ttl_less_fb_octets"},
159         {"tmac_vld_ip_octets"},
160         {"tmac_vld_ip"},
161         {"tmac_drop_ip"},
162         {"tmac_icmp"},
163         {"tmac_rst_tcp"},
164         {"tmac_tcp"},
165         {"tmac_udp"},
166         {"rmac_vld_frms"},
167         {"rmac_data_octets"},
168         {"rmac_fcs_err_frms"},
169         {"rmac_drop_frms"},
170         {"rmac_vld_mcst_frms"},
171         {"rmac_vld_bcst_frms"},
172         {"rmac_in_rng_len_err_frms"},
173         {"rmac_out_rng_len_err_frms"},
174         {"rmac_long_frms"},
175         {"rmac_pause_ctrl_frms"},
176         {"rmac_unsup_ctrl_frms"},
177         {"rmac_ttl_octets"},
178         {"rmac_accepted_ucst_frms"},
179         {"rmac_accepted_nucst_frms"},
180         {"rmac_discarded_frms"},
181         {"rmac_drop_events"},
182         {"rmac_ttl_less_fb_octets"},
183         {"rmac_ttl_frms"},
184         {"rmac_usized_frms"},
185         {"rmac_osized_frms"},
186         {"rmac_frag_frms"},
187         {"rmac_jabber_frms"},
188         {"rmac_ttl_64_frms"},
189         {"rmac_ttl_65_127_frms"},
190         {"rmac_ttl_128_255_frms"},
191         {"rmac_ttl_256_511_frms"},
192         {"rmac_ttl_512_1023_frms"},
193         {"rmac_ttl_1024_1518_frms"},
194         {"rmac_ip"},
195         {"rmac_ip_octets"},
196         {"rmac_hdr_err_ip"},
197         {"rmac_drop_ip"},
198         {"rmac_icmp"},
199         {"rmac_tcp"},
200         {"rmac_udp"},
201         {"rmac_err_drp_udp"},
202         {"rmac_xgmii_err_sym"},
203         {"rmac_frms_q0"},
204         {"rmac_frms_q1"},
205         {"rmac_frms_q2"},
206         {"rmac_frms_q3"},
207         {"rmac_frms_q4"},
208         {"rmac_frms_q5"},
209         {"rmac_frms_q6"},
210         {"rmac_frms_q7"},
211         {"rmac_full_q0"},
212         {"rmac_full_q1"},
213         {"rmac_full_q2"},
214         {"rmac_full_q3"},
215         {"rmac_full_q4"},
216         {"rmac_full_q5"},
217         {"rmac_full_q6"},
218         {"rmac_full_q7"},
219         {"rmac_pause_cnt"},
220         {"rmac_xgmii_data_err_cnt"},
221         {"rmac_xgmii_ctrl_err_cnt"},
222         {"rmac_accepted_ip"},
223         {"rmac_err_tcp"},
224         {"rd_req_cnt"},
225         {"new_rd_req_cnt"},
226         {"new_rd_req_rtry_cnt"},
227         {"rd_rtry_cnt"},
228         {"wr_rtry_rd_ack_cnt"},
229         {"wr_req_cnt"},
230         {"new_wr_req_cnt"},
231         {"new_wr_req_rtry_cnt"},
232         {"wr_rtry_cnt"},
233         {"wr_disc_cnt"},
234         {"rd_rtry_wr_ack_cnt"},
235         {"txp_wr_cnt"},
236         {"txd_rd_cnt"},
237         {"txd_wr_cnt"},
238         {"rxd_rd_cnt"},
239         {"rxd_wr_cnt"},
240         {"txf_rd_cnt"},
241         {"rxf_wr_cnt"}
242 };
243
244 static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
245         {"rmac_ttl_1519_4095_frms"},
246         {"rmac_ttl_4096_8191_frms"},
247         {"rmac_ttl_8192_max_frms"},
248         {"rmac_ttl_gt_max_frms"},
249         {"rmac_osized_alt_frms"},
250         {"rmac_jabber_alt_frms"},
251         {"rmac_gt_max_alt_frms"},
252         {"rmac_vlan_frms"},
253         {"rmac_len_discard"},
254         {"rmac_fcs_discard"},
255         {"rmac_pf_discard"},
256         {"rmac_da_discard"},
257         {"rmac_red_discard"},
258         {"rmac_rts_discard"},
259         {"rmac_ingm_full_discard"},
260         {"link_fault_cnt"}
261 };
262
263 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
264         {"\n DRIVER STATISTICS"},
265         {"single_bit_ecc_errs"},
266         {"double_bit_ecc_errs"},
267         {"parity_err_cnt"},
268         {"serious_err_cnt"},
269         {"soft_reset_cnt"},
270         {"fifo_full_cnt"},
271         {"ring_0_full_cnt"},
272         {"ring_1_full_cnt"},
273         {"ring_2_full_cnt"},
274         {"ring_3_full_cnt"},
275         {"ring_4_full_cnt"},
276         {"ring_5_full_cnt"},
277         {"ring_6_full_cnt"},
278         {"ring_7_full_cnt"},
279         {"alarm_transceiver_temp_high"},
280         {"alarm_transceiver_temp_low"},
281         {"alarm_laser_bias_current_high"},
282         {"alarm_laser_bias_current_low"},
283         {"alarm_laser_output_power_high"},
284         {"alarm_laser_output_power_low"},
285         {"warn_transceiver_temp_high"},
286         {"warn_transceiver_temp_low"},
287         {"warn_laser_bias_current_high"},
288         {"warn_laser_bias_current_low"},
289         {"warn_laser_output_power_high"},
290         {"warn_laser_output_power_low"},
291         {"lro_aggregated_pkts"},
292         {"lro_flush_both_count"},
293         {"lro_out_of_sequence_pkts"},
294         {"lro_flush_due_to_max_pkts"},
295         {"lro_avg_aggr_pkts"},
296         {"mem_alloc_fail_cnt"},
297         {"pci_map_fail_cnt"},
298         {"watchdog_timer_cnt"},
299         {"mem_allocated"},
300         {"mem_freed"},
301         {"link_up_cnt"},
302         {"link_down_cnt"},
303         {"link_up_time"},
304         {"link_down_time"},
305         {"tx_tcode_buf_abort_cnt"},
306         {"tx_tcode_desc_abort_cnt"},
307         {"tx_tcode_parity_err_cnt"},
308         {"tx_tcode_link_loss_cnt"},
309         {"tx_tcode_list_proc_err_cnt"},
310         {"rx_tcode_parity_err_cnt"},
311         {"rx_tcode_abort_cnt"},
312         {"rx_tcode_parity_abort_cnt"},
313         {"rx_tcode_rda_fail_cnt"},
314         {"rx_tcode_unkn_prot_cnt"},
315         {"rx_tcode_fcs_err_cnt"},
316         {"rx_tcode_buf_size_err_cnt"},
317         {"rx_tcode_rxd_corrupt_cnt"},
318         {"rx_tcode_unkn_err_cnt"},
319         {"tda_err_cnt"},
320         {"pfc_err_cnt"},
321         {"pcc_err_cnt"},
322         {"tti_err_cnt"},
323         {"tpa_err_cnt"},
324         {"sm_err_cnt"},
325         {"lso_err_cnt"},
326         {"mac_tmac_err_cnt"},
327         {"mac_rmac_err_cnt"},
328         {"xgxs_txgxs_err_cnt"},
329         {"xgxs_rxgxs_err_cnt"},
330         {"rc_err_cnt"},
331         {"prc_pcix_err_cnt"},
332         {"rpa_err_cnt"},
333         {"rda_err_cnt"},
334         {"rti_err_cnt"},
335         {"mc_err_cnt"}
336 };
337
338 #define S2IO_XENA_STAT_LEN sizeof(ethtool_xena_stats_keys)/ ETH_GSTRING_LEN
339 #define S2IO_ENHANCED_STAT_LEN sizeof(ethtool_enhanced_stats_keys)/ \
340                                         ETH_GSTRING_LEN
341 #define S2IO_DRIVER_STAT_LEN sizeof(ethtool_driver_stats_keys)/ ETH_GSTRING_LEN
342
343 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
344 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
345
346 #define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
347 #define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
348
349 #define S2IO_TEST_LEN   sizeof(s2io_gstrings) / ETH_GSTRING_LEN
350 #define S2IO_STRINGS_LEN        S2IO_TEST_LEN * ETH_GSTRING_LEN
351
352 #define S2IO_TIMER_CONF(timer, handle, arg, exp)                \
353                         init_timer(&timer);                     \
354                         timer.function = handle;                \
355                         timer.data = (unsigned long) arg;       \
356                         mod_timer(&timer, (jiffies + exp))      \
357
358 /* copy mac addr to def_mac_addr array */
359 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
360 {
361         sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
362         sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
363         sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
364         sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
365         sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
366         sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
367 }
368 /* Add the vlan */
369 static void s2io_vlan_rx_register(struct net_device *dev,
370                                         struct vlan_group *grp)
371 {
372         struct s2io_nic *nic = dev->priv;
373         unsigned long flags;
374
375         spin_lock_irqsave(&nic->tx_lock, flags);
376         nic->vlgrp = grp;
377         spin_unlock_irqrestore(&nic->tx_lock, flags);
378 }
379
380 /* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
381 static int vlan_strip_flag;
382
383 /*
384  * Constants to be programmed into the Xena's registers, to configure
385  * the XAUI.
386  */
387
388 #define END_SIGN        0x0
389 static const u64 herc_act_dtx_cfg[] = {
390         /* Set address */
391         0x8000051536750000ULL, 0x80000515367500E0ULL,
392         /* Write data */
393         0x8000051536750004ULL, 0x80000515367500E4ULL,
394         /* Set address */
395         0x80010515003F0000ULL, 0x80010515003F00E0ULL,
396         /* Write data */
397         0x80010515003F0004ULL, 0x80010515003F00E4ULL,
398         /* Set address */
399         0x801205150D440000ULL, 0x801205150D4400E0ULL,
400         /* Write data */
401         0x801205150D440004ULL, 0x801205150D4400E4ULL,
402         /* Set address */
403         0x80020515F2100000ULL, 0x80020515F21000E0ULL,
404         /* Write data */
405         0x80020515F2100004ULL, 0x80020515F21000E4ULL,
406         /* Done */
407         END_SIGN
408 };
409
410 static const u64 xena_dtx_cfg[] = {
411         /* Set address */
412         0x8000051500000000ULL, 0x80000515000000E0ULL,
413         /* Write data */
414         0x80000515D9350004ULL, 0x80000515D93500E4ULL,
415         /* Set address */
416         0x8001051500000000ULL, 0x80010515000000E0ULL,
417         /* Write data */
418         0x80010515001E0004ULL, 0x80010515001E00E4ULL,
419         /* Set address */
420         0x8002051500000000ULL, 0x80020515000000E0ULL,
421         /* Write data */
422         0x80020515F2100004ULL, 0x80020515F21000E4ULL,
423         END_SIGN
424 };
425
426 /*
427  * Constants for Fixing the MacAddress problem seen mostly on
428  * Alpha machines.
429  */
430 static const u64 fix_mac[] = {
431         0x0060000000000000ULL, 0x0060600000000000ULL,
432         0x0040600000000000ULL, 0x0000600000000000ULL,
433         0x0020600000000000ULL, 0x0060600000000000ULL,
434         0x0020600000000000ULL, 0x0060600000000000ULL,
435         0x0020600000000000ULL, 0x0060600000000000ULL,
436         0x0020600000000000ULL, 0x0060600000000000ULL,
437         0x0020600000000000ULL, 0x0060600000000000ULL,
438         0x0020600000000000ULL, 0x0060600000000000ULL,
439         0x0020600000000000ULL, 0x0060600000000000ULL,
440         0x0020600000000000ULL, 0x0060600000000000ULL,
441         0x0020600000000000ULL, 0x0060600000000000ULL,
442         0x0020600000000000ULL, 0x0060600000000000ULL,
443         0x0020600000000000ULL, 0x0000600000000000ULL,
444         0x0040600000000000ULL, 0x0060600000000000ULL,
445         END_SIGN
446 };
447
448 MODULE_LICENSE("GPL");
449 MODULE_VERSION(DRV_VERSION);
450
451
452 /* Module Loadable parameters. */
453 S2IO_PARM_INT(tx_fifo_num, 1);
454 S2IO_PARM_INT(rx_ring_num, 1);
455
456
457 S2IO_PARM_INT(rx_ring_mode, 1);
458 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
459 S2IO_PARM_INT(rmac_pause_time, 0x100);
460 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
461 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
462 S2IO_PARM_INT(shared_splits, 0);
463 S2IO_PARM_INT(tmac_util_period, 5);
464 S2IO_PARM_INT(rmac_util_period, 5);
465 S2IO_PARM_INT(l3l4hdr_size, 128);
466 /* Frequency of Rx desc syncs expressed as power of 2 */
467 S2IO_PARM_INT(rxsync_frequency, 3);
468 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
469 S2IO_PARM_INT(intr_type, 2);
470 /* Large receive offload feature */
471 static unsigned int lro_enable;
472 module_param_named(lro, lro_enable, uint, 0);
473
474 /* Max pkts to be aggregated by LRO at one time. If not specified,
475  * aggregation happens until we hit max IP pkt size(64K)
476  */
477 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
478 S2IO_PARM_INT(indicate_max_pkts, 0);
479
480 S2IO_PARM_INT(napi, 1);
481 S2IO_PARM_INT(ufo, 0);
482 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
483
484 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
485     {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
486 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
487     {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
488 static unsigned int rts_frm_len[MAX_RX_RINGS] =
489     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
490
491 module_param_array(tx_fifo_len, uint, NULL, 0);
492 module_param_array(rx_ring_sz, uint, NULL, 0);
493 module_param_array(rts_frm_len, uint, NULL, 0);
494
495 /*
496  * S2IO device table.
497  * This table lists all the devices that this driver supports.
498  */
499 static struct pci_device_id s2io_tbl[] __devinitdata = {
500         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
501          PCI_ANY_ID, PCI_ANY_ID},
502         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
503          PCI_ANY_ID, PCI_ANY_ID},
504         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
505          PCI_ANY_ID, PCI_ANY_ID},
506         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
507          PCI_ANY_ID, PCI_ANY_ID},
508         {0,}
509 };
510
511 MODULE_DEVICE_TABLE(pci, s2io_tbl);
512
513 static struct pci_error_handlers s2io_err_handler = {
514         .error_detected = s2io_io_error_detected,
515         .slot_reset = s2io_io_slot_reset,
516         .resume = s2io_io_resume,
517 };
518
519 static struct pci_driver s2io_driver = {
520       .name = "S2IO",
521       .id_table = s2io_tbl,
522       .probe = s2io_init_nic,
523       .remove = __devexit_p(s2io_rem_nic),
524       .err_handler = &s2io_err_handler,
525 };
526
527 /* A simplifier macro used both by init and free shared_mem Fns(). */
528 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
529
530 /**
531  * init_shared_mem - Allocation and Initialization of Memory
532  * @nic: Device private variable.
533  * Description: The function allocates all the memory areas shared
534  * between the NIC and the driver. This includes Tx descriptors,
535  * Rx descriptors and the statistics block.
536  */
537
538 static int init_shared_mem(struct s2io_nic *nic)
539 {
540         u32 size;
541         void *tmp_v_addr, *tmp_v_addr_next;
542         dma_addr_t tmp_p_addr, tmp_p_addr_next;
543         struct RxD_block *pre_rxd_blk = NULL;
544         int i, j, blk_cnt;
545         int lst_size, lst_per_page;
546         struct net_device *dev = nic->dev;
547         unsigned long tmp;
548         struct buffAdd *ba;
549
550         struct mac_info *mac_control;
551         struct config_param *config;
552         unsigned long long mem_allocated = 0;
553
554         mac_control = &nic->mac_control;
555         config = &nic->config;
556
557
558         /* Allocation and initialization of TXDLs in FIOFs */
559         size = 0;
560         for (i = 0; i < config->tx_fifo_num; i++) {
561                 size += config->tx_cfg[i].fifo_len;
562         }
563         if (size > MAX_AVAILABLE_TXDS) {
564                 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
565                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
566                 return -EINVAL;
567         }
568
569         lst_size = (sizeof(struct TxD) * config->max_txds);
570         lst_per_page = PAGE_SIZE / lst_size;
571
572         for (i = 0; i < config->tx_fifo_num; i++) {
573                 int fifo_len = config->tx_cfg[i].fifo_len;
574                 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
575                 mac_control->fifos[i].list_info = kzalloc(list_holder_size,
576                                                           GFP_KERNEL);
577                 if (!mac_control->fifos[i].list_info) {
578                         DBG_PRINT(INFO_DBG,
579                                   "Malloc failed for list_info\n");
580                         return -ENOMEM;
581                 }
582                 mem_allocated += list_holder_size;
583         }
584         for (i = 0; i < config->tx_fifo_num; i++) {
585                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
586                                                 lst_per_page);
587                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
588                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
589                     config->tx_cfg[i].fifo_len - 1;
590                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
591                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
592                     config->tx_cfg[i].fifo_len - 1;
593                 mac_control->fifos[i].fifo_no = i;
594                 mac_control->fifos[i].nic = nic;
595                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
596
597                 for (j = 0; j < page_num; j++) {
598                         int k = 0;
599                         dma_addr_t tmp_p;
600                         void *tmp_v;
601                         tmp_v = pci_alloc_consistent(nic->pdev,
602                                                      PAGE_SIZE, &tmp_p);
603                         if (!tmp_v) {
604                                 DBG_PRINT(INFO_DBG,
605                                           "pci_alloc_consistent ");
606                                 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
607                                 return -ENOMEM;
608                         }
609                         /* If we got a zero DMA address(can happen on
610                          * certain platforms like PPC), reallocate.
611                          * Store virtual address of page we don't want,
612                          * to be freed later.
613                          */
614                         if (!tmp_p) {
615                                 mac_control->zerodma_virt_addr = tmp_v;
616                                 DBG_PRINT(INIT_DBG,
617                                 "%s: Zero DMA address for TxDL. ", dev->name);
618                                 DBG_PRINT(INIT_DBG,
619                                 "Virtual address %p\n", tmp_v);
620                                 tmp_v = pci_alloc_consistent(nic->pdev,
621                                                      PAGE_SIZE, &tmp_p);
622                                 if (!tmp_v) {
623                                         DBG_PRINT(INFO_DBG,
624                                           "pci_alloc_consistent ");
625                                         DBG_PRINT(INFO_DBG, "failed for TxDL\n");
626                                         return -ENOMEM;
627                                 }
628                                 mem_allocated += PAGE_SIZE;
629                         }
630                         while (k < lst_per_page) {
631                                 int l = (j * lst_per_page) + k;
632                                 if (l == config->tx_cfg[i].fifo_len)
633                                         break;
634                                 mac_control->fifos[i].list_info[l].list_virt_addr =
635                                     tmp_v + (k * lst_size);
636                                 mac_control->fifos[i].list_info[l].list_phy_addr =
637                                     tmp_p + (k * lst_size);
638                                 k++;
639                         }
640                 }
641         }
642
643         nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
644         if (!nic->ufo_in_band_v)
645                 return -ENOMEM;
646          mem_allocated += (size * sizeof(u64));
647
648         /* Allocation and initialization of RXDs in Rings */
649         size = 0;
650         for (i = 0; i < config->rx_ring_num; i++) {
651                 if (config->rx_cfg[i].num_rxd %
652                     (rxd_count[nic->rxd_mode] + 1)) {
653                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
654                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
655                                   i);
656                         DBG_PRINT(ERR_DBG, "RxDs per Block");
657                         return FAILURE;
658                 }
659                 size += config->rx_cfg[i].num_rxd;
660                 mac_control->rings[i].block_count =
661                         config->rx_cfg[i].num_rxd /
662                         (rxd_count[nic->rxd_mode] + 1 );
663                 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
664                         mac_control->rings[i].block_count;
665         }
666         if (nic->rxd_mode == RXD_MODE_1)
667                 size = (size * (sizeof(struct RxD1)));
668         else
669                 size = (size * (sizeof(struct RxD3)));
670
671         for (i = 0; i < config->rx_ring_num; i++) {
672                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
673                 mac_control->rings[i].rx_curr_get_info.offset = 0;
674                 mac_control->rings[i].rx_curr_get_info.ring_len =
675                     config->rx_cfg[i].num_rxd - 1;
676                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
677                 mac_control->rings[i].rx_curr_put_info.offset = 0;
678                 mac_control->rings[i].rx_curr_put_info.ring_len =
679                     config->rx_cfg[i].num_rxd - 1;
680                 mac_control->rings[i].nic = nic;
681                 mac_control->rings[i].ring_no = i;
682
683                 blk_cnt = config->rx_cfg[i].num_rxd /
684                                 (rxd_count[nic->rxd_mode] + 1);
685                 /*  Allocating all the Rx blocks */
686                 for (j = 0; j < blk_cnt; j++) {
687                         struct rx_block_info *rx_blocks;
688                         int l;
689
690                         rx_blocks = &mac_control->rings[i].rx_blocks[j];
691                         size = SIZE_OF_BLOCK; //size is always page size
692                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
693                                                           &tmp_p_addr);
694                         if (tmp_v_addr == NULL) {
695                                 /*
696                                  * In case of failure, free_shared_mem()
697                                  * is called, which should free any
698                                  * memory that was alloced till the
699                                  * failure happened.
700                                  */
701                                 rx_blocks->block_virt_addr = tmp_v_addr;
702                                 return -ENOMEM;
703                         }
704                         mem_allocated += size;
705                         memset(tmp_v_addr, 0, size);
706                         rx_blocks->block_virt_addr = tmp_v_addr;
707                         rx_blocks->block_dma_addr = tmp_p_addr;
708                         rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
709                                                   rxd_count[nic->rxd_mode],
710                                                   GFP_KERNEL);
711                         if (!rx_blocks->rxds)
712                                 return -ENOMEM;
713                         mem_allocated +=
714                         (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
715                         for (l=0; l<rxd_count[nic->rxd_mode];l++) {
716                                 rx_blocks->rxds[l].virt_addr =
717                                         rx_blocks->block_virt_addr +
718                                         (rxd_size[nic->rxd_mode] * l);
719                                 rx_blocks->rxds[l].dma_addr =
720                                         rx_blocks->block_dma_addr +
721                                         (rxd_size[nic->rxd_mode] * l);
722                         }
723                 }
724                 /* Interlinking all Rx Blocks */
725                 for (j = 0; j < blk_cnt; j++) {
726                         tmp_v_addr =
727                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
728                         tmp_v_addr_next =
729                                 mac_control->rings[i].rx_blocks[(j + 1) %
730                                               blk_cnt].block_virt_addr;
731                         tmp_p_addr =
732                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
733                         tmp_p_addr_next =
734                                 mac_control->rings[i].rx_blocks[(j + 1) %
735                                               blk_cnt].block_dma_addr;
736
737                         pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
738                         pre_rxd_blk->reserved_2_pNext_RxD_block =
739                             (unsigned long) tmp_v_addr_next;
740                         pre_rxd_blk->pNext_RxD_Blk_physical =
741                             (u64) tmp_p_addr_next;
742                 }
743         }
744         if (nic->rxd_mode == RXD_MODE_3B) {
745                 /*
746                  * Allocation of Storages for buffer addresses in 2BUFF mode
747                  * and the buffers as well.
748                  */
749                 for (i = 0; i < config->rx_ring_num; i++) {
750                         blk_cnt = config->rx_cfg[i].num_rxd /
751                            (rxd_count[nic->rxd_mode]+ 1);
752                         mac_control->rings[i].ba =
753                                 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
754                                      GFP_KERNEL);
755                         if (!mac_control->rings[i].ba)
756                                 return -ENOMEM;
757                         mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
758                         for (j = 0; j < blk_cnt; j++) {
759                                 int k = 0;
760                                 mac_control->rings[i].ba[j] =
761                                         kmalloc((sizeof(struct buffAdd) *
762                                                 (rxd_count[nic->rxd_mode] + 1)),
763                                                 GFP_KERNEL);
764                                 if (!mac_control->rings[i].ba[j])
765                                         return -ENOMEM;
766                                 mem_allocated += (sizeof(struct buffAdd) *  \
767                                         (rxd_count[nic->rxd_mode] + 1));
768                                 while (k != rxd_count[nic->rxd_mode]) {
769                                         ba = &mac_control->rings[i].ba[j][k];
770
771                                         ba->ba_0_org = (void *) kmalloc
772                                             (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
773                                         if (!ba->ba_0_org)
774                                                 return -ENOMEM;
775                                         mem_allocated +=
776                                                 (BUF0_LEN + ALIGN_SIZE);
777                                         tmp = (unsigned long)ba->ba_0_org;
778                                         tmp += ALIGN_SIZE;
779                                         tmp &= ~((unsigned long) ALIGN_SIZE);
780                                         ba->ba_0 = (void *) tmp;
781
782                                         ba->ba_1_org = (void *) kmalloc
783                                             (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
784                                         if (!ba->ba_1_org)
785                                                 return -ENOMEM;
786                                         mem_allocated
787                                                 += (BUF1_LEN + ALIGN_SIZE);
788                                         tmp = (unsigned long) ba->ba_1_org;
789                                         tmp += ALIGN_SIZE;
790                                         tmp &= ~((unsigned long) ALIGN_SIZE);
791                                         ba->ba_1 = (void *) tmp;
792                                         k++;
793                                 }
794                         }
795                 }
796         }
797
798         /* Allocation and initialization of Statistics block */
799         size = sizeof(struct stat_block);
800         mac_control->stats_mem = pci_alloc_consistent
801             (nic->pdev, size, &mac_control->stats_mem_phy);
802
803         if (!mac_control->stats_mem) {
804                 /*
805                  * In case of failure, free_shared_mem() is called, which
806                  * should free any memory that was alloced till the
807                  * failure happened.
808                  */
809                 return -ENOMEM;
810         }
811         mem_allocated += size;
812         mac_control->stats_mem_sz = size;
813
814         tmp_v_addr = mac_control->stats_mem;
815         mac_control->stats_info = (struct stat_block *) tmp_v_addr;
816         memset(tmp_v_addr, 0, size);
817         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
818                   (unsigned long long) tmp_p_addr);
819         mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
820         return SUCCESS;
821 }
822
823 /**
824  * free_shared_mem - Free the allocated Memory
825  * @nic:  Device private variable.
826  * Description: This function is to free all memory locations allocated by
827  * the init_shared_mem() function and return it to the kernel.
828  */
829
830 static void free_shared_mem(struct s2io_nic *nic)
831 {
832         int i, j, blk_cnt, size;
833         u32 ufo_size = 0;
834         void *tmp_v_addr;
835         dma_addr_t tmp_p_addr;
836         struct mac_info *mac_control;
837         struct config_param *config;
838         int lst_size, lst_per_page;
839         struct net_device *dev;
840         int page_num = 0;
841
842         if (!nic)
843                 return;
844
845         dev = nic->dev;
846
847         mac_control = &nic->mac_control;
848         config = &nic->config;
849
850         lst_size = (sizeof(struct TxD) * config->max_txds);
851         lst_per_page = PAGE_SIZE / lst_size;
852
853         for (i = 0; i < config->tx_fifo_num; i++) {
854                 ufo_size += config->tx_cfg[i].fifo_len;
855                 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
856                                                         lst_per_page);
857                 for (j = 0; j < page_num; j++) {
858                         int mem_blks = (j * lst_per_page);
859                         if (!mac_control->fifos[i].list_info)
860                                 return;
861                         if (!mac_control->fifos[i].list_info[mem_blks].
862                                  list_virt_addr)
863                                 break;
864                         pci_free_consistent(nic->pdev, PAGE_SIZE,
865                                             mac_control->fifos[i].
866                                             list_info[mem_blks].
867                                             list_virt_addr,
868                                             mac_control->fifos[i].
869                                             list_info[mem_blks].
870                                             list_phy_addr);
871                         nic->mac_control.stats_info->sw_stat.mem_freed
872                                                 += PAGE_SIZE;
873                 }
874                 /* If we got a zero DMA address during allocation,
875                  * free the page now
876                  */
877                 if (mac_control->zerodma_virt_addr) {
878                         pci_free_consistent(nic->pdev, PAGE_SIZE,
879                                             mac_control->zerodma_virt_addr,
880                                             (dma_addr_t)0);
881                         DBG_PRINT(INIT_DBG,
882                                 "%s: Freeing TxDL with zero DMA addr. ",
883                                 dev->name);
884                         DBG_PRINT(INIT_DBG, "Virtual address %p\n",
885                                 mac_control->zerodma_virt_addr);
886                         nic->mac_control.stats_info->sw_stat.mem_freed
887                                                 += PAGE_SIZE;
888                 }
889                 kfree(mac_control->fifos[i].list_info);
890                 nic->mac_control.stats_info->sw_stat.mem_freed +=
891                 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
892         }
893
894         size = SIZE_OF_BLOCK;
895         for (i = 0; i < config->rx_ring_num; i++) {
896                 blk_cnt = mac_control->rings[i].block_count;
897                 for (j = 0; j < blk_cnt; j++) {
898                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
899                                 block_virt_addr;
900                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
901                                 block_dma_addr;
902                         if (tmp_v_addr == NULL)
903                                 break;
904                         pci_free_consistent(nic->pdev, size,
905                                             tmp_v_addr, tmp_p_addr);
906                         nic->mac_control.stats_info->sw_stat.mem_freed += size;
907                         kfree(mac_control->rings[i].rx_blocks[j].rxds);
908                         nic->mac_control.stats_info->sw_stat.mem_freed +=
909                         ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
910                 }
911         }
912
913         if (nic->rxd_mode == RXD_MODE_3B) {
914                 /* Freeing buffer storage addresses in 2BUFF mode. */
915                 for (i = 0; i < config->rx_ring_num; i++) {
916                         blk_cnt = config->rx_cfg[i].num_rxd /
917                             (rxd_count[nic->rxd_mode] + 1);
918                         for (j = 0; j < blk_cnt; j++) {
919                                 int k = 0;
920                                 if (!mac_control->rings[i].ba[j])
921                                         continue;
922                                 while (k != rxd_count[nic->rxd_mode]) {
923                                         struct buffAdd *ba =
924                                                 &mac_control->rings[i].ba[j][k];
925                                         kfree(ba->ba_0_org);
926                                         nic->mac_control.stats_info->sw_stat.\
927                                         mem_freed += (BUF0_LEN + ALIGN_SIZE);
928                                         kfree(ba->ba_1_org);
929                                         nic->mac_control.stats_info->sw_stat.\
930                                         mem_freed += (BUF1_LEN + ALIGN_SIZE);
931                                         k++;
932                                 }
933                                 kfree(mac_control->rings[i].ba[j]);
934                                 nic->mac_control.stats_info->sw_stat.mem_freed +=
935                                         (sizeof(struct buffAdd) *
936                                         (rxd_count[nic->rxd_mode] + 1));
937                         }
938                         kfree(mac_control->rings[i].ba);
939                         nic->mac_control.stats_info->sw_stat.mem_freed +=
940                         (sizeof(struct buffAdd *) * blk_cnt);
941                 }
942         }
943
944         if (mac_control->stats_mem) {
945                 pci_free_consistent(nic->pdev,
946                                     mac_control->stats_mem_sz,
947                                     mac_control->stats_mem,
948                                     mac_control->stats_mem_phy);
949                 nic->mac_control.stats_info->sw_stat.mem_freed +=
950                         mac_control->stats_mem_sz;
951         }
952         if (nic->ufo_in_band_v) {
953                 kfree(nic->ufo_in_band_v);
954                 nic->mac_control.stats_info->sw_stat.mem_freed
955                         += (ufo_size * sizeof(u64));
956         }
957 }
958
959 /**
960  * s2io_verify_pci_mode -
961  */
962
963 static int s2io_verify_pci_mode(struct s2io_nic *nic)
964 {
965         struct XENA_dev_config __iomem *bar0 = nic->bar0;
966         register u64 val64 = 0;
967         int     mode;
968
969         val64 = readq(&bar0->pci_mode);
970         mode = (u8)GET_PCI_MODE(val64);
971
972         if ( val64 & PCI_MODE_UNKNOWN_MODE)
973                 return -1;      /* Unknown PCI mode */
974         return mode;
975 }
976
977 #define NEC_VENID   0x1033
978 #define NEC_DEVID   0x0125
979 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
980 {
981         struct pci_dev *tdev = NULL;
982         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
983                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
984                         if (tdev->bus == s2io_pdev->bus->parent)
985                                 pci_dev_put(tdev);
986                                 return 1;
987                 }
988         }
989         return 0;
990 }
991
/* Bus speed in MHz, indexed by the mode value returned by GET_PCI_MODE(). */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
993 /**
994  * s2io_print_pci_mode -
995  */
996 static int s2io_print_pci_mode(struct s2io_nic *nic)
997 {
998         struct XENA_dev_config __iomem *bar0 = nic->bar0;
999         register u64 val64 = 0;
1000         int     mode;
1001         struct config_param *config = &nic->config;
1002
1003         val64 = readq(&bar0->pci_mode);
1004         mode = (u8)GET_PCI_MODE(val64);
1005
1006         if ( val64 & PCI_MODE_UNKNOWN_MODE)
1007                 return -1;      /* Unknown PCI mode */
1008
1009         config->bus_speed = bus_speed[mode];
1010
1011         if (s2io_on_nec_bridge(nic->pdev)) {
1012                 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1013                                                         nic->dev->name);
1014                 return mode;
1015         }
1016
1017         if (val64 & PCI_MODE_32_BITS) {
1018                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1019         } else {
1020                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1021         }
1022
1023         switch(mode) {
1024                 case PCI_MODE_PCI_33:
1025                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1026                         break;
1027                 case PCI_MODE_PCI_66:
1028                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1029                         break;
1030                 case PCI_MODE_PCIX_M1_66:
1031                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1032                         break;
1033                 case PCI_MODE_PCIX_M1_100:
1034                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1035                         break;
1036                 case PCI_MODE_PCIX_M1_133:
1037                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1038                         break;
1039                 case PCI_MODE_PCIX_M2_66:
1040                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1041                         break;
1042                 case PCI_MODE_PCIX_M2_100:
1043                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1044                         break;
1045                 case PCI_MODE_PCIX_M2_133:
1046                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1047                         break;
1048                 default:
1049                         return -1;      /* Unsupported bus speed */
1050         }
1051
1052         return mode;
1053 }
1054
1055 /**
1056  *  init_nic - Initialization of hardware
1057  *  @nic: device peivate variable
1058  *  Description: The function sequentially configures every block
1059  *  of the H/W from their reset values.
1060  *  Return Value:  SUCCESS on success and
1061  *  '-1' on failure (endian settings incorrect).
1062  */
1063
1064 static int init_nic(struct s2io_nic *nic)
1065 {
1066         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1067         struct net_device *dev = nic->dev;
1068         register u64 val64 = 0;
1069         void __iomem *add;
1070         u32 time;
1071         int i, j;
1072         struct mac_info *mac_control;
1073         struct config_param *config;
1074         int dtx_cnt = 0;
1075         unsigned long long mem_share;
1076         int mem_size;
1077
1078         mac_control = &nic->mac_control;
1079         config = &nic->config;
1080
1081         /* to set the swapper controle on the card */
1082         if(s2io_set_swapper(nic)) {
1083                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1084                 return -EIO;
1085         }
1086
1087         /*
1088          * Herc requires EOI to be removed from reset before XGXS, so..
1089          */
1090         if (nic->device_type & XFRAME_II_DEVICE) {
1091                 val64 = 0xA500000000ULL;
1092                 writeq(val64, &bar0->sw_reset);
1093                 msleep(500);
1094                 val64 = readq(&bar0->sw_reset);
1095         }
1096
1097         /* Remove XGXS from reset state */
1098         val64 = 0;
1099         writeq(val64, &bar0->sw_reset);
1100         msleep(500);
1101         val64 = readq(&bar0->sw_reset);
1102
1103         /* Ensure that it's safe to access registers by checking
1104          * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1105          */
1106         if (nic->device_type == XFRAME_II_DEVICE) {
1107                 for (i = 0; i < 50; i++) {
1108                         val64 = readq(&bar0->adapter_status);
1109                         if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1110                                 break;
1111                         msleep(10);
1112                 }
1113                 if (i == 50)
1114                         return -ENODEV;
1115         }
1116
1117         /*  Enable Receiving broadcasts */
1118         add = &bar0->mac_cfg;
1119         val64 = readq(&bar0->mac_cfg);
1120         val64 |= MAC_RMAC_BCAST_ENABLE;
1121         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1122         writel((u32) val64, add);
1123         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1124         writel((u32) (val64 >> 32), (add + 4));
1125
1126         /* Read registers in all blocks */
1127         val64 = readq(&bar0->mac_int_mask);
1128         val64 = readq(&bar0->mc_int_mask);
1129         val64 = readq(&bar0->xgxs_int_mask);
1130
1131         /*  Set MTU */
1132         val64 = dev->mtu;
1133         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1134
1135         if (nic->device_type & XFRAME_II_DEVICE) {
1136                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1137                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1138                                           &bar0->dtx_control, UF);
1139                         if (dtx_cnt & 0x1)
1140                                 msleep(1); /* Necessary!! */
1141                         dtx_cnt++;
1142                 }
1143         } else {
1144                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1145                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1146                                           &bar0->dtx_control, UF);
1147                         val64 = readq(&bar0->dtx_control);
1148                         dtx_cnt++;
1149                 }
1150         }
1151
1152         /*  Tx DMA Initialization */
1153         val64 = 0;
1154         writeq(val64, &bar0->tx_fifo_partition_0);
1155         writeq(val64, &bar0->tx_fifo_partition_1);
1156         writeq(val64, &bar0->tx_fifo_partition_2);
1157         writeq(val64, &bar0->tx_fifo_partition_3);
1158
1159
1160         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1161                 val64 |=
1162                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1163                          13) | vBIT(config->tx_cfg[i].fifo_priority,
1164                                     ((i * 32) + 5), 3);
1165
1166                 if (i == (config->tx_fifo_num - 1)) {
1167                         if (i % 2 == 0)
1168                                 i++;
1169                 }
1170
1171                 switch (i) {
1172                 case 1:
1173                         writeq(val64, &bar0->tx_fifo_partition_0);
1174                         val64 = 0;
1175                         break;
1176                 case 3:
1177                         writeq(val64, &bar0->tx_fifo_partition_1);
1178                         val64 = 0;
1179                         break;
1180                 case 5:
1181                         writeq(val64, &bar0->tx_fifo_partition_2);
1182                         val64 = 0;
1183                         break;
1184                 case 7:
1185                         writeq(val64, &bar0->tx_fifo_partition_3);
1186                         break;
1187                 }
1188         }
1189
1190         /*
1191          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1192          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1193          */
1194         if ((nic->device_type == XFRAME_I_DEVICE) &&
1195                 (nic->pdev->revision < 4))
1196                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1197
1198         val64 = readq(&bar0->tx_fifo_partition_0);
1199         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1200                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1201
1202         /*
1203          * Initialization of Tx_PA_CONFIG register to ignore packet
1204          * integrity checking.
1205          */
1206         val64 = readq(&bar0->tx_pa_cfg);
1207         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1208             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1209         writeq(val64, &bar0->tx_pa_cfg);
1210
1211         /* Rx DMA intialization. */
1212         val64 = 0;
1213         for (i = 0; i < config->rx_ring_num; i++) {
1214                 val64 |=
1215                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1216                          3);
1217         }
1218         writeq(val64, &bar0->rx_queue_priority);
1219
1220         /*
1221          * Allocating equal share of memory to all the
1222          * configured Rings.
1223          */
1224         val64 = 0;
1225         if (nic->device_type & XFRAME_II_DEVICE)
1226                 mem_size = 32;
1227         else
1228                 mem_size = 64;
1229
1230         for (i = 0; i < config->rx_ring_num; i++) {
1231                 switch (i) {
1232                 case 0:
1233                         mem_share = (mem_size / config->rx_ring_num +
1234                                      mem_size % config->rx_ring_num);
1235                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1236                         continue;
1237                 case 1:
1238                         mem_share = (mem_size / config->rx_ring_num);
1239                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1240                         continue;
1241                 case 2:
1242                         mem_share = (mem_size / config->rx_ring_num);
1243                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1244                         continue;
1245                 case 3:
1246                         mem_share = (mem_size / config->rx_ring_num);
1247                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1248                         continue;
1249                 case 4:
1250                         mem_share = (mem_size / config->rx_ring_num);
1251                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1252                         continue;
1253                 case 5:
1254                         mem_share = (mem_size / config->rx_ring_num);
1255                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1256                         continue;
1257                 case 6:
1258                         mem_share = (mem_size / config->rx_ring_num);
1259                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1260                         continue;
1261                 case 7:
1262                         mem_share = (mem_size / config->rx_ring_num);
1263                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1264                         continue;
1265                 }
1266         }
1267         writeq(val64, &bar0->rx_queue_cfg);
1268
1269         /*
1270          * Filling Tx round robin registers
1271          * as per the number of FIFOs
1272          */
1273         switch (config->tx_fifo_num) {
1274         case 1:
1275                 val64 = 0x0000000000000000ULL;
1276                 writeq(val64, &bar0->tx_w_round_robin_0);
1277                 writeq(val64, &bar0->tx_w_round_robin_1);
1278                 writeq(val64, &bar0->tx_w_round_robin_2);
1279                 writeq(val64, &bar0->tx_w_round_robin_3);
1280                 writeq(val64, &bar0->tx_w_round_robin_4);
1281                 break;
1282         case 2:
1283                 val64 = 0x0000010000010000ULL;
1284                 writeq(val64, &bar0->tx_w_round_robin_0);
1285                 val64 = 0x0100000100000100ULL;
1286                 writeq(val64, &bar0->tx_w_round_robin_1);
1287                 val64 = 0x0001000001000001ULL;
1288                 writeq(val64, &bar0->tx_w_round_robin_2);
1289                 val64 = 0x0000010000010000ULL;
1290                 writeq(val64, &bar0->tx_w_round_robin_3);
1291                 val64 = 0x0100000000000000ULL;
1292                 writeq(val64, &bar0->tx_w_round_robin_4);
1293                 break;
1294         case 3:
1295                 val64 = 0x0001000102000001ULL;
1296                 writeq(val64, &bar0->tx_w_round_robin_0);
1297                 val64 = 0x0001020000010001ULL;
1298                 writeq(val64, &bar0->tx_w_round_robin_1);
1299                 val64 = 0x0200000100010200ULL;
1300                 writeq(val64, &bar0->tx_w_round_robin_2);
1301                 val64 = 0x0001000102000001ULL;
1302                 writeq(val64, &bar0->tx_w_round_robin_3);
1303                 val64 = 0x0001020000000000ULL;
1304                 writeq(val64, &bar0->tx_w_round_robin_4);
1305                 break;
1306         case 4:
1307                 val64 = 0x0001020300010200ULL;
1308                 writeq(val64, &bar0->tx_w_round_robin_0);
1309                 val64 = 0x0100000102030001ULL;
1310                 writeq(val64, &bar0->tx_w_round_robin_1);
1311                 val64 = 0x0200010000010203ULL;
1312                 writeq(val64, &bar0->tx_w_round_robin_2);
1313                 val64 = 0x0001020001000001ULL;
1314                 writeq(val64, &bar0->tx_w_round_robin_3);
1315                 val64 = 0x0203000100000000ULL;
1316                 writeq(val64, &bar0->tx_w_round_robin_4);
1317                 break;
1318         case 5:
1319                 val64 = 0x0001000203000102ULL;
1320                 writeq(val64, &bar0->tx_w_round_robin_0);
1321                 val64 = 0x0001020001030004ULL;
1322                 writeq(val64, &bar0->tx_w_round_robin_1);
1323                 val64 = 0x0001000203000102ULL;
1324                 writeq(val64, &bar0->tx_w_round_robin_2);
1325                 val64 = 0x0001020001030004ULL;
1326                 writeq(val64, &bar0->tx_w_round_robin_3);
1327                 val64 = 0x0001000000000000ULL;
1328                 writeq(val64, &bar0->tx_w_round_robin_4);
1329                 break;
1330         case 6:
1331                 val64 = 0x0001020304000102ULL;
1332                 writeq(val64, &bar0->tx_w_round_robin_0);
1333                 val64 = 0x0304050001020001ULL;
1334                 writeq(val64, &bar0->tx_w_round_robin_1);
1335                 val64 = 0x0203000100000102ULL;
1336                 writeq(val64, &bar0->tx_w_round_robin_2);
1337                 val64 = 0x0304000102030405ULL;
1338                 writeq(val64, &bar0->tx_w_round_robin_3);
1339                 val64 = 0x0001000200000000ULL;
1340                 writeq(val64, &bar0->tx_w_round_robin_4);
1341                 break;
1342         case 7:
1343                 val64 = 0x0001020001020300ULL;
1344                 writeq(val64, &bar0->tx_w_round_robin_0);
1345                 val64 = 0x0102030400010203ULL;
1346                 writeq(val64, &bar0->tx_w_round_robin_1);
1347                 val64 = 0x0405060001020001ULL;
1348                 writeq(val64, &bar0->tx_w_round_robin_2);
1349                 val64 = 0x0304050000010200ULL;
1350                 writeq(val64, &bar0->tx_w_round_robin_3);
1351                 val64 = 0x0102030000000000ULL;
1352                 writeq(val64, &bar0->tx_w_round_robin_4);
1353                 break;
1354         case 8:
1355                 val64 = 0x0001020300040105ULL;
1356                 writeq(val64, &bar0->tx_w_round_robin_0);
1357                 val64 = 0x0200030106000204ULL;
1358                 writeq(val64, &bar0->tx_w_round_robin_1);
1359                 val64 = 0x0103000502010007ULL;
1360                 writeq(val64, &bar0->tx_w_round_robin_2);
1361                 val64 = 0x0304010002060500ULL;
1362                 writeq(val64, &bar0->tx_w_round_robin_3);
1363                 val64 = 0x0103020400000000ULL;
1364                 writeq(val64, &bar0->tx_w_round_robin_4);
1365                 break;
1366         }
1367
1368         /* Enable all configured Tx FIFO partitions */
1369         val64 = readq(&bar0->tx_fifo_partition_0);
1370         val64 |= (TX_FIFO_PARTITION_EN);
1371         writeq(val64, &bar0->tx_fifo_partition_0);
1372
1373         /* Filling the Rx round robin registers as per the
1374          * number of Rings and steering based on QoS.
1375          */
1376         switch (config->rx_ring_num) {
1377         case 1:
1378                 val64 = 0x8080808080808080ULL;
1379                 writeq(val64, &bar0->rts_qos_steering);
1380                 break;
1381         case 2:
1382                 val64 = 0x0000010000010000ULL;
1383                 writeq(val64, &bar0->rx_w_round_robin_0);
1384                 val64 = 0x0100000100000100ULL;
1385                 writeq(val64, &bar0->rx_w_round_robin_1);
1386                 val64 = 0x0001000001000001ULL;
1387                 writeq(val64, &bar0->rx_w_round_robin_2);
1388                 val64 = 0x0000010000010000ULL;
1389                 writeq(val64, &bar0->rx_w_round_robin_3);
1390                 val64 = 0x0100000000000000ULL;
1391                 writeq(val64, &bar0->rx_w_round_robin_4);
1392
1393                 val64 = 0x8080808040404040ULL;
1394                 writeq(val64, &bar0->rts_qos_steering);
1395                 break;
1396         case 3:
1397                 val64 = 0x0001000102000001ULL;
1398                 writeq(val64, &bar0->rx_w_round_robin_0);
1399                 val64 = 0x0001020000010001ULL;
1400                 writeq(val64, &bar0->rx_w_round_robin_1);
1401                 val64 = 0x0200000100010200ULL;
1402                 writeq(val64, &bar0->rx_w_round_robin_2);
1403                 val64 = 0x0001000102000001ULL;
1404                 writeq(val64, &bar0->rx_w_round_robin_3);
1405                 val64 = 0x0001020000000000ULL;
1406                 writeq(val64, &bar0->rx_w_round_robin_4);
1407
1408                 val64 = 0x8080804040402020ULL;
1409                 writeq(val64, &bar0->rts_qos_steering);
1410                 break;
1411         case 4:
1412                 val64 = 0x0001020300010200ULL;
1413                 writeq(val64, &bar0->rx_w_round_robin_0);
1414                 val64 = 0x0100000102030001ULL;
1415                 writeq(val64, &bar0->rx_w_round_robin_1);
1416                 val64 = 0x0200010000010203ULL;
1417                 writeq(val64, &bar0->rx_w_round_robin_2);
1418                 val64 = 0x0001020001000001ULL;
1419                 writeq(val64, &bar0->rx_w_round_robin_3);
1420                 val64 = 0x0203000100000000ULL;
1421                 writeq(val64, &bar0->rx_w_round_robin_4);
1422
1423                 val64 = 0x8080404020201010ULL;
1424                 writeq(val64, &bar0->rts_qos_steering);
1425                 break;
1426         case 5:
1427                 val64 = 0x0001000203000102ULL;
1428                 writeq(val64, &bar0->rx_w_round_robin_0);
1429                 val64 = 0x0001020001030004ULL;
1430                 writeq(val64, &bar0->rx_w_round_robin_1);
1431                 val64 = 0x0001000203000102ULL;
1432                 writeq(val64, &bar0->rx_w_round_robin_2);
1433                 val64 = 0x0001020001030004ULL;
1434                 writeq(val64, &bar0->rx_w_round_robin_3);
1435                 val64 = 0x0001000000000000ULL;
1436                 writeq(val64, &bar0->rx_w_round_robin_4);
1437
1438                 val64 = 0x8080404020201008ULL;
1439                 writeq(val64, &bar0->rts_qos_steering);
1440                 break;
1441         case 6:
1442                 val64 = 0x0001020304000102ULL;
1443                 writeq(val64, &bar0->rx_w_round_robin_0);
1444                 val64 = 0x0304050001020001ULL;
1445                 writeq(val64, &bar0->rx_w_round_robin_1);
1446                 val64 = 0x0203000100000102ULL;
1447                 writeq(val64, &bar0->rx_w_round_robin_2);
1448                 val64 = 0x0304000102030405ULL;
1449                 writeq(val64, &bar0->rx_w_round_robin_3);
1450                 val64 = 0x0001000200000000ULL;
1451                 writeq(val64, &bar0->rx_w_round_robin_4);
1452
1453                 val64 = 0x8080404020100804ULL;
1454                 writeq(val64, &bar0->rts_qos_steering);
1455                 break;
1456         case 7:
1457                 val64 = 0x0001020001020300ULL;
1458                 writeq(val64, &bar0->rx_w_round_robin_0);
1459                 val64 = 0x0102030400010203ULL;
1460                 writeq(val64, &bar0->rx_w_round_robin_1);
1461                 val64 = 0x0405060001020001ULL;
1462                 writeq(val64, &bar0->rx_w_round_robin_2);
1463                 val64 = 0x0304050000010200ULL;
1464                 writeq(val64, &bar0->rx_w_round_robin_3);
1465                 val64 = 0x0102030000000000ULL;
1466                 writeq(val64, &bar0->rx_w_round_robin_4);
1467
1468                 val64 = 0x8080402010080402ULL;
1469                 writeq(val64, &bar0->rts_qos_steering);
1470                 break;
1471         case 8:
1472                 val64 = 0x0001020300040105ULL;
1473                 writeq(val64, &bar0->rx_w_round_robin_0);
1474                 val64 = 0x0200030106000204ULL;
1475                 writeq(val64, &bar0->rx_w_round_robin_1);
1476                 val64 = 0x0103000502010007ULL;
1477                 writeq(val64, &bar0->rx_w_round_robin_2);
1478                 val64 = 0x0304010002060500ULL;
1479                 writeq(val64, &bar0->rx_w_round_robin_3);
1480                 val64 = 0x0103020400000000ULL;
1481                 writeq(val64, &bar0->rx_w_round_robin_4);
1482
1483                 val64 = 0x8040201008040201ULL;
1484                 writeq(val64, &bar0->rts_qos_steering);
1485                 break;
1486         }
1487
1488         /* UDP Fix */
1489         val64 = 0;
1490         for (i = 0; i < 8; i++)
1491                 writeq(val64, &bar0->rts_frm_len_n[i]);
1492
1493         /* Set the default rts frame length for the rings configured */
1494         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1495         for (i = 0 ; i < config->rx_ring_num ; i++)
1496                 writeq(val64, &bar0->rts_frm_len_n[i]);
1497
1498         /* Set the frame length for the configured rings
1499          * desired by the user
1500          */
1501         for (i = 0; i < config->rx_ring_num; i++) {
1502                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1503                  * specified frame length steering.
1504                  * If the user provides the frame length then program
1505                  * the rts_frm_len register for those values or else
1506                  * leave it as it is.
1507                  */
1508                 if (rts_frm_len[i] != 0) {
1509                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1510                                 &bar0->rts_frm_len_n[i]);
1511                 }
1512         }
1513
1514         /* Disable differentiated services steering logic */
1515         for (i = 0; i < 64; i++) {
1516                 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1517                         DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1518                                 dev->name);
1519                         DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1520                         return -ENODEV;
1521                 }
1522         }
1523
1524         /* Program statistics memory */
1525         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1526
1527         if (nic->device_type == XFRAME_II_DEVICE) {
1528                 val64 = STAT_BC(0x320);
1529                 writeq(val64, &bar0->stat_byte_cnt);
1530         }
1531
1532         /*
1533          * Initializing the sampling rate for the device to calculate the
1534          * bandwidth utilization.
1535          */
1536         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1537             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1538         writeq(val64, &bar0->mac_link_util);
1539
1540
1541         /*
1542          * Initializing the Transmit and Receive Traffic Interrupt
1543          * Scheme.
1544          */
1545         /*
1546          * TTI Initialization. Default Tx timer gets us about
1547          * 250 interrupts per sec. Continuous interrupts are enabled
1548          * by default.
1549          */
1550         if (nic->device_type == XFRAME_II_DEVICE) {
1551                 int count = (nic->config.bus_speed * 125)/2;
1552                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1553         } else {
1554
1555                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1556         }
1557         val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1558             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1559             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1560                 if (use_continuous_tx_intrs)
1561                         val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1562         writeq(val64, &bar0->tti_data1_mem);
1563
1564         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1565             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1566             TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1567         writeq(val64, &bar0->tti_data2_mem);
1568
1569         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1570         writeq(val64, &bar0->tti_command_mem);
1571
1572         /*
1573          * Once the operation completes, the Strobe bit of the command
1574          * register will be reset. We poll for this particular condition
1575          * We wait for a maximum of 500ms for the operation to complete,
1576          * if it's not complete by then we return error.
1577          */
1578         time = 0;
1579         while (TRUE) {
1580                 val64 = readq(&bar0->tti_command_mem);
1581                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1582                         break;
1583                 }
1584                 if (time > 10) {
1585                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1586                                   dev->name);
1587                         return -ENODEV;
1588                 }
1589                 msleep(50);
1590                 time++;
1591         }
1592
1593         /* RTI Initialization */
1594         if (nic->device_type == XFRAME_II_DEVICE) {
1595                 /*
1596                  * Programmed to generate Apprx 500 Intrs per
1597                  * second
1598                  */
1599                 int count = (nic->config.bus_speed * 125)/4;
1600                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1601         } else
1602                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1603         val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1604                  RTI_DATA1_MEM_RX_URNG_B(0x10) |
1605                  RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1606
1607         writeq(val64, &bar0->rti_data1_mem);
1608
1609         val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1610                 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1611         if (nic->config.intr_type == MSI_X)
1612             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1613                         RTI_DATA2_MEM_RX_UFC_D(0x40));
1614         else
1615             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1616                         RTI_DATA2_MEM_RX_UFC_D(0x80));
1617         writeq(val64, &bar0->rti_data2_mem);
1618
1619         for (i = 0; i < config->rx_ring_num; i++) {
1620                 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1621                                 | RTI_CMD_MEM_OFFSET(i);
1622                 writeq(val64, &bar0->rti_command_mem);
1623
1624                 /*
1625                  * Once the operation completes, the Strobe bit of the
1626                  * command register will be reset. We poll for this
1627                  * particular condition. We wait for a maximum of 500ms
1628                  * for the operation to complete, if it's not complete
1629                  * by then we return error.
1630                  */
1631                 time = 0;
1632                 while (TRUE) {
1633                         val64 = readq(&bar0->rti_command_mem);
1634                         if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1635                                 break;
1636
1637                         if (time > 10) {
1638                                 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1639                                           dev->name);
1640                                 return -ENODEV;
1641                         }
1642                         time++;
1643                         msleep(50);
1644                 }
1645         }
1646
1647         /*
1648          * Initializing proper values as Pause threshold into all
1649          * the 8 Queues on Rx side.
1650          */
1651         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1652         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1653
1654         /* Disable RMAC PAD STRIPPING */
1655         add = &bar0->mac_cfg;
1656         val64 = readq(&bar0->mac_cfg);
1657         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1658         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1659         writel((u32) (val64), add);
1660         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1661         writel((u32) (val64 >> 32), (add + 4));
1662         val64 = readq(&bar0->mac_cfg);
1663
1664         /* Enable FCS stripping by adapter */
1665         add = &bar0->mac_cfg;
1666         val64 = readq(&bar0->mac_cfg);
1667         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1668         if (nic->device_type == XFRAME_II_DEVICE)
1669                 writeq(val64, &bar0->mac_cfg);
1670         else {
1671                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1672                 writel((u32) (val64), add);
1673                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1674                 writel((u32) (val64 >> 32), (add + 4));
1675         }
1676
1677         /*
1678          * Set the time value to be inserted in the pause frame
1679          * generated by xena.
1680          */
1681         val64 = readq(&bar0->rmac_pause_cfg);
1682         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1683         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1684         writeq(val64, &bar0->rmac_pause_cfg);
1685
1686         /*
1687          * Set the Threshold Limit for Generating the pause frame
1688          * If the amount of data in any Queue exceeds ratio of
1689          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1690          * pause frame is generated
1691          */
1692         val64 = 0;
1693         for (i = 0; i < 4; i++) {
1694                 val64 |=
1695                     (((u64) 0xFF00 | nic->mac_control.
1696                       mc_pause_threshold_q0q3)
1697                      << (i * 2 * 8));
1698         }
1699         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1700
1701         val64 = 0;
1702         for (i = 0; i < 4; i++) {
1703                 val64 |=
1704                     (((u64) 0xFF00 | nic->mac_control.
1705                       mc_pause_threshold_q4q7)
1706                      << (i * 2 * 8));
1707         }
1708         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1709
1710         /*
1711          * TxDMA will stop Read request if the number of read split has
1712          * exceeded the limit pointed by shared_splits
1713          */
1714         val64 = readq(&bar0->pic_control);
1715         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1716         writeq(val64, &bar0->pic_control);
1717
1718         if (nic->config.bus_speed == 266) {
1719                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1720                 writeq(0x0, &bar0->read_retry_delay);
1721                 writeq(0x0, &bar0->write_retry_delay);
1722         }
1723
1724         /*
1725          * Programming the Herc to split every write transaction
1726          * that does not start on an ADB to reduce disconnects.
1727          */
1728         if (nic->device_type == XFRAME_II_DEVICE) {
1729                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1730                         MISC_LINK_STABILITY_PRD(3);
1731                 writeq(val64, &bar0->misc_control);
1732                 val64 = readq(&bar0->pic_control2);
1733                 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1734                 writeq(val64, &bar0->pic_control2);
1735         }
1736         if (strstr(nic->product_name, "CX4")) {
1737                 val64 = TMAC_AVG_IPG(0x17);
1738                 writeq(val64, &bar0->tmac_avg_ipg);
1739         }
1740
1741         return SUCCESS;
1742 }
1743 #define LINK_UP_DOWN_INTERRUPT          1
1744 #define MAC_RMAC_ERR_TIMER              2
1745
1746 static int s2io_link_fault_indication(struct s2io_nic *nic)
1747 {
1748         if (nic->config.intr_type != INTA)
1749                 return MAC_RMAC_ERR_TIMER;
1750         if (nic->device_type == XFRAME_II_DEVICE)
1751                 return LINK_UP_DOWN_INTERRUPT;
1752         else
1753                 return MAC_RMAC_ERR_TIMER;
1754 }
1755
1756 /**
1757  *  do_s2io_write_bits -  update alarm bits in alarm register
1758  *  @value: alarm bits
1759  *  @flag: interrupt status
1760  *  @addr: address value
1761  *  Description: update alarm bits in alarm register
1762  *  Return Value:
1763  *  NONE.
1764  */
1765 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1766 {
1767         u64 temp64;
1768
1769         temp64 = readq(addr);
1770
1771         if(flag == ENABLE_INTRS)
1772                 temp64 &= ~((u64) value);
1773         else
1774                 temp64 |= ((u64) value);
1775         writeq(temp64, addr);
1776 }
1777
/**
 *  en_dis_err_alarms - enable/disable error alarm interrupt sources
 *  @nic: device private structure
 *  @mask: bitmap of interrupt blocks (TX_DMA_INTR, TX_MAC_INTR, ...) to touch
 *  @flag: ENABLE_INTRS or DISABLE_INTRS, forwarded to do_s2io_write_bits()
 *  Description: for each selected block, accumulates the block's bit in a
 *  local general-interrupt mask and programs the per-block alarm mask
 *  registers via do_s2io_write_bits().  The accumulated mask is stored in
 *  nic->general_int_mask (currently forced to 0 at the end, see below).
 *  Return Value:
 *  NONE.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        register u64 gen_int_mask = 0;

        /* Tx DMA path: TDA/PFC/PCC/TTI/LSO/TPA/SM sub-block alarms */
        if (mask & TX_DMA_INTR) {

                gen_int_mask |= TXDMA_INT_M;

                do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
                                TXDMA_PCC_INT | TXDMA_TTI_INT |
                                TXDMA_LSO_INT | TXDMA_TPA_INT |
                                TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

                do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
                                PFC_MISC_0_ERR | PFC_MISC_1_ERR |
                                PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
                                &bar0->pfc_err_mask);

                do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
                                TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
                                TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

                do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
                                PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
                                PCC_N_SERR | PCC_6_COF_OV_ERR |
                                PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
                                PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
                                PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);

                do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
                                TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

                do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
                                LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
                                LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
                                flag, &bar0->lso_err_mask);

                do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
                                flag, &bar0->tpa_err_mask);

                do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);

        }

        /* Tx MAC (TMAC) alarms: buffer overrun, state machine, ECC errors */
        if (mask & TX_MAC_INTR) {
                gen_int_mask |= TXMAC_INT_M;
                do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
                                &bar0->mac_int_mask);
                do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
                                TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
                                TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
                                flag, &bar0->mac_tmac_err_mask);
        }

        /* Tx XGXS (10G serdes interface) alarms */
        if (mask & TX_XGXS_INTR) {
                gen_int_mask |= TXXGXS_INT_M;
                do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
                                &bar0->xgxs_int_mask);
                do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
                                TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
                                flag, &bar0->xgxs_txgxs_err_mask);
        }

        /* Rx DMA path: RC/PRC/RPA/RDA/RTI sub-block alarms */
        if (mask & RX_DMA_INTR) {
                gen_int_mask |= RXDMA_INT_M;
                do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
                                RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
                                flag, &bar0->rxdma_int_mask);
                do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
                                RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
                                RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
                                RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
                do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
                                PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
                                PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
                                &bar0->prc_pcix_err_mask);
                do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
                                RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
                                &bar0->rpa_err_mask);
                do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
                                RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
                                RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
                                RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
                                flag, &bar0->rda_err_mask);
                do_s2io_write_bits(RTI_SM_ERR_ALARM |
                                RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
                                flag, &bar0->rti_err_mask);
        }

        /* Rx MAC (RMAC) alarms, including the link state change interrupt */
        if (mask & RX_MAC_INTR) {
                gen_int_mask |= RXMAC_INT_M;
                do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
                                &bar0->mac_int_mask);
                do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
                                RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
                                RMAC_DOUBLE_ECC_ERR |
                                RMAC_LINK_STATE_CHANGE_INT,
                                flag, &bar0->mac_rmac_err_mask);
        }

        /* Rx XGXS (10G serdes interface) alarms */
        if (mask & RX_XGXS_INTR)
        {
                gen_int_mask |= RXXGXS_INT_M;
                do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
                                &bar0->xgxs_int_mask);
                do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
                                &bar0->xgxs_rxgxs_err_mask);
        }

        /* Memory controller (RLDRAM) alarms */
        if (mask & MC_INTR) {
                gen_int_mask |= MC_INT_M;
                do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
                do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
                                MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
                                &bar0->mc_err_mask);
        }
        nic->general_int_mask = gen_int_mask;

        /* Remove this line when alarm interrupts are enabled */
        nic->general_int_mask = 0;
}
/**
 *  en_dis_able_nic_intrs - Enable or Disable the interrupts
 *  @nic: device private variable,
 *  @mask: A mask indicating which Intr block must be modified and,
 *  @flag: A flag indicating whether to enable or disable the Intrs.
 *  Description: This function will either disable or enable the interrupts
 *  depending on the flag argument. The mask argument can be used to
 *  enable/disable any Intr block.  The per-block masks are programmed
 *  first, then the top-level general_int_mask register is updated
 *  (clearing a bit there enables that block's interrupts).
 *  Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        register u64 temp64 = 0, intr_mask = 0;

        /* Start from the alarm-interrupt mask computed by
         * en_dis_err_alarms() and accumulate traffic/PIC bits on top.
         */
        intr_mask = nic->general_int_mask;

        /*  Top level interrupt classification */
        /*  PIC Interrupts */
        if (mask & TX_PIC_INTR) {
                /*  Enable PIC Intrs in the general intr mask register */
                intr_mask |= TXPIC_INT_M;
                if (flag == ENABLE_INTRS) {
                        /*
                         * If Hercules adapter enable GPIO otherwise
                         * disable all PCIX, Flash, MDIO, IIC and GPIO
                         * interrupts for now.
                         * TODO
                         */
                        if (s2io_link_fault_indication(nic) ==
                                        LINK_UP_DOWN_INTERRUPT ) {
                                /* Unmask only the GPIO link-up interrupt */
                                do_s2io_write_bits(PIC_INT_GPIO, flag,
                                                &bar0->pic_int_mask);
                                do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
                                                &bar0->gpio_int_mask);
                        } else
                                writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
                } else if (flag == DISABLE_INTRS) {
                        /*
                         * Disable PIC Intrs in the general
                         * intr mask register
                         */
                        writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
                }
        }

        /*  Tx traffic interrupts */
        if (mask & TX_TRAFFIC_INTR) {
                intr_mask |= TXTRAFFIC_INT_M;
                if (flag == ENABLE_INTRS) {
                        /*
                         * Enable all the Tx side interrupts
                         * writing 0 Enables all 64 TX interrupt levels
                         */
                        writeq(0x0, &bar0->tx_traffic_mask);
                } else if (flag == DISABLE_INTRS) {
                        /*
                         * Disable Tx Traffic Intrs in the general intr mask
                         * register.
                         */
                        writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
                }
        }

        /*  Rx traffic interrupts */
        if (mask & RX_TRAFFIC_INTR) {
                intr_mask |= RXTRAFFIC_INT_M;
                if (flag == ENABLE_INTRS) {
                        /* writing 0 Enables all 8 RX interrupt levels */
                        writeq(0x0, &bar0->rx_traffic_mask);
                } else if (flag == DISABLE_INTRS) {
                        /*
                         * Disable Rx Traffic Intrs in the general intr mask
                         * register.
                         */
                        writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
                }
        }

        /* Finally apply the accumulated mask to the top-level register:
         * clear bits to enable the selected blocks, or mask everything
         * on disable.
         */
        temp64 = readq(&bar0->general_int_mask);
        if (flag == ENABLE_INTRS)
                temp64 &= ~((u64) intr_mask);
        else
                temp64 = DISABLE_ALL_INTRS;
        writeq(temp64, &bar0->general_int_mask);

        /* Cache the hardware's view of the mask for later callers */
        nic->general_int_mask = readq(&bar0->general_int_mask);
}
1989
1990 /**
1991  *  verify_pcc_quiescent- Checks for PCC quiescent state
1992  *  Return: 1 If PCC is quiescence
1993  *          0 If PCC is not quiescence
1994  */
1995 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1996 {
1997         int ret = 0, herc;
1998         struct XENA_dev_config __iomem *bar0 = sp->bar0;
1999         u64 val64 = readq(&bar0->adapter_status);
2000
2001         herc = (sp->device_type == XFRAME_II_DEVICE);
2002
2003         if (flag == FALSE) {
2004                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2005                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2006                                 ret = 1;
2007                 } else {
2008                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2009                                 ret = 1;
2010                 }
2011         } else {
2012                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2013                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2014                              ADAPTER_STATUS_RMAC_PCC_IDLE))
2015                                 ret = 1;
2016                 } else {
2017                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2018                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2019                                 ret = 1;
2020                 }
2021         }
2022
2023         return ret;
2024 }
2025 /**
2026  *  verify_xena_quiescence - Checks whether the H/W is ready
2027  *  Description: Returns whether the H/W is ready to go or not. Depending
2028  *  on whether adapter enable bit was written or not the comparison
2029  *  differs and the calling function passes the input argument flag to
2030  *  indicate this.
2031  *  Return: 1 If xena is quiescence
2032  *          0 If Xena is not quiescence
2033  */
2034
2035 static int verify_xena_quiescence(struct s2io_nic *sp)
2036 {
2037         int  mode;
2038         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2039         u64 val64 = readq(&bar0->adapter_status);
2040         mode = s2io_verify_pci_mode(sp);
2041
2042         if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2043                 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2044                 return 0;
2045         }
2046         if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2047         DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2048                 return 0;
2049         }
2050         if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2051                 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2052                 return 0;
2053         }
2054         if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2055                 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2056                 return 0;
2057         }
2058         if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2059                 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2060                 return 0;
2061         }
2062         if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2063                 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2064                 return 0;
2065         }
2066         if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2067                 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2068                 return 0;
2069         }
2070         if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2071                 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2072                 return 0;
2073         }
2074
2075         /*
2076          * In PCI 33 mode, the P_PLL is not used, and therefore,
2077          * the the P_PLL_LOCK bit in the adapter_status register will
2078          * not be asserted.
2079          */
2080         if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2081                 sp->device_type == XFRAME_II_DEVICE && mode !=
2082                 PCI_MODE_PCI_33) {
2083                 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2084                 return 0;
2085         }
2086         if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2087                         ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2088                 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2089                 return 0;
2090         }
2091         return 1;
2092 }
2093
2094 /**
2095  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2096  * @sp: Pointer to device specifc structure
2097  * Description :
2098  * New procedure to clear mac address reading  problems on Alpha platforms
2099  *
2100  */
2101
2102 static void fix_mac_address(struct s2io_nic * sp)
2103 {
2104         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2105         u64 val64;
2106         int i = 0;
2107
2108         while (fix_mac[i] != END_SIGN) {
2109                 writeq(fix_mac[i++], &bar0->gpio_control);
2110                 udelay(10);
2111                 val64 = readq(&bar0->gpio_control);
2112         }
2113 }
2114
/**
 *  start_nic - Turns the device on
 *  @nic : device private variable.
 *  Description:
 *  This function actually turns the device on. Before this function is
 *  called, all Registers are configured from their reset states
 *  and shared memory is allocated but the NIC is still quiescent. On
 *  calling this function, the device interrupts are cleared and the NIC is
 *  literally switched on by writing into the adapter control register.
 *  Return Value:
 *  SUCCESS on success and -1 (FAILURE) if the adapter is not quiescent.
 */

static int start_nic(struct s2io_nic *nic)
{
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        struct net_device *dev = nic->dev;
        register u64 val64 = 0;
        u16 subid, i;
        struct mac_info *mac_control;
        struct config_param *config;

        mac_control = &nic->mac_control;
        config = &nic->config;

        /*  PRC Initialization and configuration */
        /* Point each Receive Protocol Channel at the DMA address of its
         * ring's first RxD block, then enable receive on the channel.
         */
        for (i = 0; i < config->rx_ring_num; i++) {
                writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
                       &bar0->prc_rxd0_n[i]);

                val64 = readq(&bar0->prc_ctrl_n[i]);
                if (nic->rxd_mode == RXD_MODE_1)
                        val64 |= PRC_CTRL_RC_ENABLED;
                else
                        val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
                if (nic->device_type == XFRAME_II_DEVICE)
                        val64 |= PRC_CTRL_GROUP_READS;
                /* Replace the reset-default RxD fetch backoff with 0x1000 */
                val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
                val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
                writeq(val64, &bar0->prc_ctrl_n[i]);
        }

        if (nic->rxd_mode == RXD_MODE_3B) {
                /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
                val64 = readq(&bar0->rx_pa_cfg);
                val64 |= RX_PA_CFG_IGNORE_L2_ERR;
                writeq(val64, &bar0->rx_pa_cfg);
        }

        /* Honour the module parameter: optionally stop the hardware from
         * stripping VLAN tags on receive.
         */
        if (vlan_tag_strip == 0) {
                val64 = readq(&bar0->rx_pa_cfg);
                val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
                writeq(val64, &bar0->rx_pa_cfg);
                vlan_strip_flag = 0;
        }

        /*
         * Enabling MC-RLDRAM. After enabling the device, we timeout
         * for around 100ms, which is approximately the time required
         * for the device to be ready for operation.
         */
        val64 = readq(&bar0->mc_rldram_mrs);
        val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
        /* Read back to post the write before the delay below */
        val64 = readq(&bar0->mc_rldram_mrs);

        msleep(100);    /* Delay by around 100 ms. */

        /* Enabling ECC Protection. */
        /* NOTE(review): this CLEARS ADAPTER_ECC_EN, which reads as
         * disabling ECC, contradicting the comment above. Either the bit
         * is active-low or the comment is stale -- verify against the
         * Xframe hardware specification before changing.
         */
        val64 = readq(&bar0->adapter_control);
        val64 &= ~ADAPTER_ECC_EN;
        writeq(val64, &bar0->adapter_control);

        /*
         * Verify if the device is ready to be enabled, if so enable
         * it.
         */
        val64 = readq(&bar0->adapter_status);
        if (!verify_xena_quiescence(nic)) {
                DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
                DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
                          (unsigned long long) val64);
                return FAILURE;
        }

        /*
         * With some switches, link might be already up at this point.
         * Because of this weird behavior, when we enable laser,
         * we may not get link. We need to handle this. We cannot
         * figure out which switch is misbehaving. So we are forced to
         * make a global change.
         */

        /* Enabling Laser. */
        val64 = readq(&bar0->adapter_control);
        val64 |= ADAPTER_EOI_TX_ON;
        writeq(val64, &bar0->adapter_control);

        if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
                /*
                 * Dont see link state interrupts initally on some switches,
                 * so directly scheduling the link state task here.
                 */
                schedule_work(&nic->set_link_task);
        }
        /* SXE-002: Initialize link and activity LED */
        /* Workaround for Xframe-I boards (subsystem id >= 0x07): poke the
         * GPIO control register and an undocumented register at offset
         * 0x2700 to get the LEDs working.
         */
        subid = nic->pdev->subsystem_device;
        if (((subid & 0xFF) >= 0x07) &&
            (nic->device_type == XFRAME_I_DEVICE)) {
                val64 = readq(&bar0->gpio_control);
                val64 |= 0x0000800000000000ULL;
                writeq(val64, &bar0->gpio_control);
                val64 = 0x0411040400000000ULL;
                writeq(val64, (void __iomem *)bar0 + 0x2700);
        }

        return SUCCESS;
}
2233 /**
2234  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2235  */
2236 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2237                                         TxD *txdlp, int get_off)
2238 {
2239         struct s2io_nic *nic = fifo_data->nic;
2240         struct sk_buff *skb;
2241         struct TxD *txds;
2242         u16 j, frg_cnt;
2243
2244         txds = txdlp;
2245         if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2246                 pci_unmap_single(nic->pdev, (dma_addr_t)
2247                         txds->Buffer_Pointer, sizeof(u64),
2248                         PCI_DMA_TODEVICE);
2249                 txds++;
2250         }
2251
2252         skb = (struct sk_buff *) ((unsigned long)
2253                         txds->Host_Control);
2254         if (!skb) {
2255                 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2256                 return NULL;
2257         }
2258         pci_unmap_single(nic->pdev, (dma_addr_t)
2259                          txds->Buffer_Pointer,
2260                          skb->len - skb->data_len,
2261                          PCI_DMA_TODEVICE);
2262         frg_cnt = skb_shinfo(skb)->nr_frags;
2263         if (frg_cnt) {
2264                 txds++;
2265                 for (j = 0; j < frg_cnt; j++, txds++) {
2266                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2267                         if (!txds->Buffer_Pointer)
2268                                 break;
2269                         pci_unmap_page(nic->pdev, (dma_addr_t)
2270                                         txds->Buffer_Pointer,
2271                                        frag->size, PCI_DMA_TODEVICE);
2272                 }
2273         }
2274         memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2275         return(skb);
2276 }
2277
2278 /**
2279  *  free_tx_buffers - Free all queued Tx buffers
2280  *  @nic : device private variable.
2281  *  Description:
2282  *  Free all queued Tx buffers.
2283  *  Return Value: void
2284 */
2285
2286 static void free_tx_buffers(struct s2io_nic *nic)
2287 {
2288         struct net_device *dev = nic->dev;
2289         struct sk_buff *skb;
2290         struct TxD *txdp;
2291         int i, j;
2292         struct mac_info *mac_control;
2293         struct config_param *config;
2294         int cnt = 0;
2295
2296         mac_control = &nic->mac_control;
2297         config = &nic->config;
2298
2299         for (i = 0; i < config->tx_fifo_num; i++) {
2300                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2301                         txdp = (struct TxD *) \
2302                         mac_control->fifos[i].list_info[j].list_virt_addr;
2303                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2304                         if (skb) {
2305                                 nic->mac_control.stats_info->sw_stat.mem_freed
2306                                         += skb->truesize;
2307                                 dev_kfree_skb(skb);
2308                                 cnt++;
2309                         }
2310                 }
2311                 DBG_PRINT(INTR_DBG,
2312                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2313                           dev->name, cnt, i);
2314                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2315                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2316         }
2317 }
2318
2319 /**
2320  *   stop_nic -  To stop the nic
2321  *   @nic ; device private variable.
2322  *   Description:
2323  *   This function does exactly the opposite of what the start_nic()
2324  *   function does. This function is called to stop the device.
2325  *   Return Value:
2326  *   void.
2327  */
2328
2329 static void stop_nic(struct s2io_nic *nic)
2330 {
2331         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2332         register u64 val64 = 0;
2333         u16 interruptible;
2334         struct mac_info *mac_control;
2335         struct config_param *config;
2336
2337         mac_control = &nic->mac_control;
2338         config = &nic->config;
2339
2340         /*  Disable all interrupts */
2341         en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2342         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2343         interruptible |= TX_PIC_INTR;
2344         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2345
2346         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2347         val64 = readq(&bar0->adapter_control);
2348         val64 &= ~(ADAPTER_CNTL_EN);
2349         writeq(val64, &bar0->adapter_control);
2350 }
2351
/**
 *  fill_rx_buffers - Allocates the Rx side skbs
 *  @nic:  device private variable
 *  @ring_no: ring number
 *  Description:
 *  The function allocates Rx side skbs and puts the physical
 *  address of these buffers into the RxD buffer pointers, so that the NIC
 *  can DMA the received frame into these locations.
 *  The NIC supports 3 receive modes, viz
 *  1. single buffer,
 *  2. three buffer and
 *  3. Five buffer modes.
 *  Each mode defines how many fragments the received frame will be split
 *  up into by the NIC. The frame is split into L3 header, L4 Header,
 *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 *  is split into 3 fragments. As of now only single buffer mode is
 *  supported.
 *   Return Value:
 *  SUCCESS on success or an appropriate -ve value on failure.
 */

static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
        struct net_device *dev = nic->dev;
        struct sk_buff *skb;
        struct RxD_t *rxdp;
        int off, off1, size, block_no, block_no1;
        u32 alloc_tab = 0;
        u32 alloc_cnt;
        struct mac_info *mac_control;
        struct config_param *config;
        u64 tmp;
        struct buffAdd *ba;
        unsigned long flags;
        struct RxD_t *first_rxdp = NULL;
        u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
        struct RxD1 *rxdp1;
        struct RxD3 *rxdp3;
        struct swStat *stats = &nic->mac_control.stats_info->sw_stat;

        mac_control = &nic->mac_control;
        config = &nic->config;
        /* Number of buffers still missing from the ring: ring capacity
         * minus the buffers currently posted.
         */
        alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
            atomic_read(&nic->rx_bufs_left[ring_no]);

        /* Consumer (get) position, used below to detect a full ring */
        block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
        off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
        while (alloc_tab < alloc_cnt) {
                block_no = mac_control->rings[ring_no].rx_curr_put_info.
                    block_index;
                off = mac_control->rings[ring_no].rx_curr_put_info.offset;

                rxdp = mac_control->rings[ring_no].
                                rx_blocks[block_no].rxds[off].virt_addr;

                /* Producer caught up with consumer on a descriptor that
                 * still holds an skb: the ring is full, stop refilling.
                 */
                if ((block_no == block_no1) && (off == off1) &&
                                        (rxdp->Host_Control)) {
                        DBG_PRINT(INTR_DBG, "%s: Get and Put",
                                  dev->name);
                        DBG_PRINT(INTR_DBG, " info equated\n");
                        goto end;
                }
                /* End of the current RxD block: wrap the put pointer to
                 * the next block (or back to block 0).
                 */
                if (off && (off == rxd_count[nic->rxd_mode])) {
                        mac_control->rings[ring_no].rx_curr_put_info.
                            block_index++;
                        if (mac_control->rings[ring_no].rx_curr_put_info.
                            block_index == mac_control->rings[ring_no].
                                        block_count)
                                mac_control->rings[ring_no].rx_curr_put_info.
                                        block_index = 0;
                        block_no = mac_control->rings[ring_no].
                                        rx_curr_put_info.block_index;
                        if (off == rxd_count[nic->rxd_mode])
                                off = 0;
                        mac_control->rings[ring_no].rx_curr_put_info.
                                offset = off;
                        rxdp = mac_control->rings[ring_no].
                                rx_blocks[block_no].block_virt_addr;
                        DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
                                  dev->name, rxdp);
                }
                /* Publish the absolute put position. Outside NAPI the
                 * Rx interrupt handler reads put_pos concurrently, so
                 * take put_lock; under NAPI no lock is needed.
                 */
                if(!napi) {
                        spin_lock_irqsave(&nic->put_lock, flags);
                        mac_control->rings[ring_no].put_pos =
                        (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
                        spin_unlock_irqrestore(&nic->put_lock, flags);
                } else {
                        mac_control->rings[ring_no].put_pos =
                        (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
                }
                /* Descriptor still owned by the NIC (and, in 2-buffer
                 * mode, still marked in-use via bit 0 of Control_2):
                 * nothing more can be posted now.
                 */
                if ((rxdp->Control_1 & RXD_OWN_XENA) &&
                        ((nic->rxd_mode == RXD_MODE_3B) &&
                                (rxdp->Control_2 & s2BIT(0)))) {
                        mac_control->rings[ring_no].rx_curr_put_info.
                                        offset = off;
                        goto end;
                }
                /* calculate size of skb based on ring mode */
                size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
                                HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
                if (nic->rxd_mode == RXD_MODE_1)
                        size += NET_IP_ALIGN;
                else
                        size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;

                /* allocate skb */
                skb = dev_alloc_skb(size);
                if(!skb) {
                        DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
                        DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
                        /* Hand any batched descriptors to the NIC before
                         * bailing out, so already-filled buffers are usable.
                         */
                        if (first_rxdp) {
                                wmb();
                                first_rxdp->Control_1 |= RXD_OWN_XENA;
                        }
                        nic->mac_control.stats_info->sw_stat. \
                                mem_alloc_fail_cnt++;
                        return -ENOMEM ;
                }
                nic->mac_control.stats_info->sw_stat.mem_allocated
                        += skb->truesize;
                if (nic->rxd_mode == RXD_MODE_1) {
                        /* 1 buffer mode - normal operation mode */
                        rxdp1 = (struct RxD1*)rxdp;
                        memset(rxdp, 0, sizeof(struct RxD1));
                        skb_reserve(skb, NET_IP_ALIGN);
                        rxdp1->Buffer0_ptr = pci_map_single
                            (nic->pdev, skb->data, size - NET_IP_ALIGN,
                                PCI_DMA_FROMDEVICE);
                        if( (rxdp1->Buffer0_ptr == 0) ||
                                (rxdp1->Buffer0_ptr ==
                                DMA_ERROR_CODE))
                                goto pci_map_failed;

                        rxdp->Control_2 =
                                SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);

                } else if (nic->rxd_mode == RXD_MODE_3B) {
                        /*
                         * 2 buffer mode -
                         * 2 buffer mode provides 128
                         * byte aligned receive buffers.
                         */

                        rxdp3 = (struct RxD3*)rxdp;
                        /* save buffer pointers to avoid frequent dma mapping */
                        Buffer0_ptr = rxdp3->Buffer0_ptr;
                        Buffer1_ptr = rxdp3->Buffer1_ptr;
                        memset(rxdp, 0, sizeof(struct RxD3));
                        /* restore the buffer pointers for dma sync*/
                        rxdp3->Buffer0_ptr = Buffer0_ptr;
                        rxdp3->Buffer1_ptr = Buffer1_ptr;

                        ba = &mac_control->rings[ring_no].ba[block_no][off];
                        skb_reserve(skb, BUF0_LEN);
                        /* Round skb->data up to the next ALIGN_SIZE-multiple
                         * boundary (room was reserved in "size" above).
                         * NOTE(review): assumes ALIGN_SIZE is 2^n - 1 style
                         * mask, as used elsewhere in this driver.
                         */
                        tmp = (u64)(unsigned long) skb->data;
                        tmp += ALIGN_SIZE;
                        tmp &= ~ALIGN_SIZE;
                        skb->data = (void *) (unsigned long)tmp;
                        skb_reset_tail_pointer(skb);

                        /* Buffer0 (header buffer) is mapped once and then
                         * only synced on reuse, to avoid remapping cost.
                         */
                        if (!(rxdp3->Buffer0_ptr))
                                rxdp3->Buffer0_ptr =
                                   pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
                                           PCI_DMA_FROMDEVICE);
                        else
                                pci_dma_sync_single_for_device(nic->pdev,
                                (dma_addr_t) rxdp3->Buffer0_ptr,
                                    BUF0_LEN, PCI_DMA_FROMDEVICE);
                        if( (rxdp3->Buffer0_ptr == 0) ||
                                (rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
                                goto pci_map_failed;

                        rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
                        if (nic->rxd_mode == RXD_MODE_3B) {
                                /* Two buffer mode */

                                /*
                                 * Buffer2 will have L3/L4 header plus
                                 * L4 payload
                                 */
                                rxdp3->Buffer2_ptr = pci_map_single
                                (nic->pdev, skb->data, dev->mtu + 4,
                                                PCI_DMA_FROMDEVICE);

                                if( (rxdp3->Buffer2_ptr == 0) ||
                                        (rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
                                        goto pci_map_failed;

                                /* Buffer1 is a dummy 1-byte buffer required
                                 * by the hardware in 2-buffer mode.
                                 */
                                rxdp3->Buffer1_ptr =
                                                pci_map_single(nic->pdev,
                                                ba->ba_1, BUF1_LEN,
                                                PCI_DMA_FROMDEVICE);
                                if( (rxdp3->Buffer1_ptr == 0) ||
                                        (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
                                        /* Undo the Buffer2 mapping before
                                         * taking the failure path.
                                         */
                                        pci_unmap_single
                                                (nic->pdev,
                                                (dma_addr_t)rxdp3->Buffer2_ptr,
                                                dev->mtu + 4,
                                                PCI_DMA_FROMDEVICE);
                                        goto pci_map_failed;
                                }
                                rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
                                rxdp->Control_2 |= SET_BUFFER2_SIZE_3
                                                                (dev->mtu + 4);
                        }
                        rxdp->Control_2 |= s2BIT(0);
                }
                rxdp->Host_Control = (unsigned long) (skb);
                /* Hand ownership to the NIC immediately, except on every
                 * (1 << rxsync_frequency)-th descriptor, which is held
                 * back as first_rxdp and released after a wmb() so a batch
                 * of descriptors becomes visible to the NIC atomically.
                 */
                if (alloc_tab & ((1 << rxsync_frequency) - 1))
                        rxdp->Control_1 |= RXD_OWN_XENA;
                off++;
                if (off == (rxd_count[nic->rxd_mode] + 1))
                        off = 0;
                mac_control->rings[ring_no].rx_curr_put_info.offset = off;

                rxdp->Control_2 |= SET_RXD_MARKER;
                if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
                        if (first_rxdp) {
                                wmb();
                                first_rxdp->Control_1 |= RXD_OWN_XENA;
                        }
                        first_rxdp = rxdp;
                }
                atomic_inc(&nic->rx_bufs_left[ring_no]);
                alloc_tab++;
        }

      end:
        /* Transfer ownership of first descriptor to adapter just before
         * exiting. Before that, use memory barrier so that ownership
         * and other fields are seen by adapter correctly.
         */
        if (first_rxdp) {
                wmb();
                first_rxdp->Control_1 |= RXD_OWN_XENA;
        }

        return SUCCESS;
pci_map_failed:
        stats->pci_map_fail_cnt++;
        stats->mem_freed += skb->truesize;
        dev_kfree_skb_irq(skb);
        return -ENOMEM;
}
2596
2597 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2598 {
2599         struct net_device *dev = sp->dev;
2600         int j;
2601         struct sk_buff *skb;
2602         struct RxD_t *rxdp;
2603         struct mac_info *mac_control;
2604         struct buffAdd *ba;
2605         struct RxD1 *rxdp1;
2606         struct RxD3 *rxdp3;
2607
2608         mac_control = &sp->mac_control;
2609         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2610                 rxdp = mac_control->rings[ring_no].
2611                                 rx_blocks[blk].rxds[j].virt_addr;
2612                 skb = (struct sk_buff *)
2613                         ((unsigned long) rxdp->Host_Control);
2614                 if (!skb) {
2615                         continue;
2616                 }
2617                 if (sp->rxd_mode == RXD_MODE_1) {
2618                         rxdp1 = (struct RxD1*)rxdp;
2619                         pci_unmap_single(sp->pdev, (dma_addr_t)
2620                                 rxdp1->Buffer0_ptr,
2621                                 dev->mtu +
2622                                 HEADER_ETHERNET_II_802_3_SIZE
2623                                 + HEADER_802_2_SIZE +
2624                                 HEADER_SNAP_SIZE,
2625                                 PCI_DMA_FROMDEVICE);
2626                         memset(rxdp, 0, sizeof(struct RxD1));
2627                 } else if(sp->rxd_mode == RXD_MODE_3B) {
2628                         rxdp3 = (struct RxD3*)rxdp;
2629                         ba = &mac_control->rings[ring_no].
2630                                 ba[blk][j];
2631                         pci_unmap_single(sp->pdev, (dma_addr_t)
2632                                 rxdp3->Buffer0_ptr,
2633                                 BUF0_LEN,
2634                                 PCI_DMA_FROMDEVICE);
2635                         pci_unmap_single(sp->pdev, (dma_addr_t)
2636                                 rxdp3->Buffer1_ptr,
2637                                 BUF1_LEN,
2638                                 PCI_DMA_FROMDEVICE);
2639                         pci_unmap_single(sp->pdev, (dma_addr_t)
2640                                 rxdp3->Buffer2_ptr,
2641                                 dev->mtu + 4,
2642                                 PCI_DMA_FROMDEVICE);
2643                         memset(rxdp, 0, sizeof(struct RxD3));
2644                 }
2645                 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2646                 dev_kfree_skb(skb);
2647                 atomic_dec(&sp->rx_bufs_left[ring_no]);
2648         }
2649 }
2650
2651 /**
2652  *  free_rx_buffers - Frees all Rx buffers
2653  *  @sp: device private variable.
2654  *  Description:
2655  *  This function will free all Rx buffers allocated by host.
2656  *  Return Value:
2657  *  NONE.
2658  */
2659
2660 static void free_rx_buffers(struct s2io_nic *sp)
2661 {
2662         struct net_device *dev = sp->dev;
2663         int i, blk = 0, buf_cnt = 0;
2664         struct mac_info *mac_control;
2665         struct config_param *config;
2666
2667         mac_control = &sp->mac_control;
2668         config = &sp->config;
2669
2670         for (i = 0; i < config->rx_ring_num; i++) {
2671                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2672                         free_rxd_blk(sp,i,blk);
2673
2674                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2675                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2676                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2677                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2678                 atomic_set(&sp->rx_bufs_left[i], 0);
2679                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2680                           dev->name, buf_cnt, i);
2681         }
2682 }
2683
2684 /**
2685  * s2io_poll - Rx interrupt handler for NAPI support
2686  * @napi : pointer to the napi structure.
2687  * @budget : The number of packets that were budgeted to be processed
2688  * during  one pass through the 'Poll" function.
2689  * Description:
2690  * Comes into picture only if NAPI support has been incorporated. It does
2691  * the same thing that rx_intr_handler does, but not in a interrupt context
2692  * also It will process only a given number of packets.
2693  * Return value:
2694  * 0 on success and 1 if there are No Rx packets to be processed.
2695  */
2696
2697 static int s2io_poll(struct napi_struct *napi, int budget)
2698 {
2699         struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2700         struct net_device *dev = nic->dev;
2701         int pkt_cnt = 0, org_pkts_to_process;
2702         struct mac_info *mac_control;
2703         struct config_param *config;
2704         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2705         int i;
2706
2707         if (!is_s2io_card_up(nic))
2708                 return 0;
2709
2710         mac_control = &nic->mac_control;
2711         config = &nic->config;
2712
2713         nic->pkts_to_process = budget;
2714         org_pkts_to_process = nic->pkts_to_process;
2715
2716         writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2717         readl(&bar0->rx_traffic_int);
2718
2719         for (i = 0; i < config->rx_ring_num; i++) {
2720                 rx_intr_handler(&mac_control->rings[i]);
2721                 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2722                 if (!nic->pkts_to_process) {
2723                         /* Quota for the current iteration has been met */
2724                         goto no_rx;
2725                 }
2726         }
2727
2728         netif_rx_complete(dev, napi);
2729
2730         for (i = 0; i < config->rx_ring_num; i++) {
2731                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2732                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2733                         DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2734                         break;
2735                 }
2736         }
2737         /* Re enable the Rx interrupts. */
2738         writeq(0x0, &bar0->rx_traffic_mask);
2739         readl(&bar0->rx_traffic_mask);
2740         return pkt_cnt;
2741
2742 no_rx:
2743         for (i = 0; i < config->rx_ring_num; i++) {
2744                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2745                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2746                         DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2747                         break;
2748                 }
2749         }
2750         return pkt_cnt;
2751 }
2752
2753 #ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * s2io_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 *      This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void s2io_netpoll(struct net_device *dev)
{
        struct s2io_nic *nic = dev->priv;
        struct mac_info *mac_control;
        struct config_param *config;
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
        int i;

        /* Don't touch the hardware while the PCI channel is down */
        if (pci_channel_offline(nic->pdev))
                return;

        /* Run the handlers with the device IRQ off; re-enabled at exit */
        disable_irq(dev->irq);

        mac_control = &nic->mac_control;
        config = &nic->config;

        /* Ack (clear) all pending Rx and Tx traffic interrupts */
        writeq(val64, &bar0->rx_traffic_int);
        writeq(val64, &bar0->tx_traffic_int);

        /* we need to free up the transmitted skbufs or else netpoll will
         * run out of skbs and will fail and eventually netpoll application such
         * as netdump will fail.
         */
        for (i = 0; i < config->tx_fifo_num; i++)
                tx_intr_handler(&mac_control->fifos[i]);

        /* check for received packet and indicate up to network */
        for (i = 0; i < config->rx_ring_num; i++)
                rx_intr_handler(&mac_control->rings[i]);

        /* Replenish the Rx rings; stop at the first allocation failure */
        for (i = 0; i < config->rx_ring_num; i++) {
                if (fill_rx_buffers(nic, i) == -ENOMEM) {
                        DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
                        DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
                        break;
                }
        }
        enable_irq(dev->irq);
        return;
}
2804 #endif
2805
2806 /**
2807  *  rx_intr_handler - Rx interrupt handler
2808  *  @nic: device private variable.
2809  *  Description:
2810  *  If the interrupt is because of a received frame or if the
2811  *  receive ring contains fresh as yet un-processed frames,this function is
2812  *  called. It picks out the RxD at which place the last Rx processing had
2813  *  stopped and sends the skb to the OSM's Rx handler and then increments
2814  *  the offset.
2815  *  Return Value:
2816  *  NONE.
2817  */
2818 static void rx_intr_handler(struct ring_info *ring_data)
2819 {
2820         struct s2io_nic *nic = ring_data->nic;
2821         struct net_device *dev = (struct net_device *) nic->dev;
2822         int get_block, put_block, put_offset;
2823         struct rx_curr_get_info get_info, put_info;
2824         struct RxD_t *rxdp;
2825         struct sk_buff *skb;
2826         int pkt_cnt = 0;
2827         int i;
2828         struct RxD1* rxdp1;
2829         struct RxD3* rxdp3;
2830
2831         spin_lock(&nic->rx_lock);
2832
2833         get_info = ring_data->rx_curr_get_info;
2834         get_block = get_info.block_index;
2835         memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2836         put_block = put_info.block_index;
2837         rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2838         if (!napi) {
2839                 spin_lock(&nic->put_lock);
2840                 put_offset = ring_data->put_pos;
2841                 spin_unlock(&nic->put_lock);
2842         } else
2843                 put_offset = ring_data->put_pos;
2844
2845         while (RXD_IS_UP2DT(rxdp)) {
2846                 /*
2847                  * If your are next to put index then it's
2848                  * FIFO full condition
2849                  */
2850                 if ((get_block == put_block) &&
2851                     (get_info.offset + 1) == put_info.offset) {
2852                         DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2853                         break;
2854                 }
2855                 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2856                 if (skb == NULL) {
2857                         DBG_PRINT(ERR_DBG, "%s: The skb is ",
2858                                   dev->name);
2859                         DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2860                         spin_unlock(&nic->rx_lock);
2861                         return;
2862                 }
2863                 if (nic->rxd_mode == RXD_MODE_1) {
2864                         rxdp1 = (struct RxD1*)rxdp;
2865                         pci_unmap_single(nic->pdev, (dma_addr_t)
2866                                 rxdp1->Buffer0_ptr,
2867                                 dev->mtu +
2868                                 HEADER_ETHERNET_II_802_3_SIZE +
2869                                 HEADER_802_2_SIZE +
2870                                 HEADER_SNAP_SIZE,
2871                                 PCI_DMA_FROMDEVICE);
2872                 } else if (nic->rxd_mode == RXD_MODE_3B) {
2873                         rxdp3 = (struct RxD3*)rxdp;
2874                         pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2875                                 rxdp3->Buffer0_ptr,
2876                                 BUF0_LEN, PCI_DMA_FROMDEVICE);
2877                         pci_unmap_single(nic->pdev, (dma_addr_t)
2878                                 rxdp3->Buffer2_ptr,
2879                                 dev->mtu + 4,
2880                                 PCI_DMA_FROMDEVICE);
2881                 }
2882                 prefetch(skb->data);
2883                 rx_osm_handler(ring_data, rxdp);
2884                 get_info.offset++;
2885                 ring_data->rx_curr_get_info.offset = get_info.offset;
2886                 rxdp = ring_data->rx_blocks[get_block].
2887                                 rxds[get_info.offset].virt_addr;
2888                 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2889                         get_info.offset = 0;
2890                         ring_data->rx_curr_get_info.offset = get_info.offset;
2891                         get_block++;
2892                         if (get_block == ring_data->block_count)
2893                                 get_block = 0;
2894                         ring_data->rx_curr_get_info.block_index = get_block;
2895                         rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2896                 }
2897
2898                 nic->pkts_to_process -= 1;
2899                 if ((napi) && (!nic->pkts_to_process))
2900                         break;
2901                 pkt_cnt++;
2902                 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2903                         break;
2904         }
2905         if (nic->lro) {
2906                 /* Clear all LRO sessions before exiting */
2907                 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2908                         struct lro *lro = &nic->lro0_n[i];
2909                         if (lro->in_use) {
2910                                 update_L3L4_header(nic, lro);
2911                                 queue_rx_frame(lro->parent);
2912                                 clear_lro_session(lro);
2913                         }
2914                 }
2915         }
2916
2917         spin_unlock(&nic->rx_lock);
2918 }
2919
2920 /**
2921  *  tx_intr_handler - Transmit interrupt handler
 *  @fifo_data : fifo data structure of the device
2923  *  Description:
2924  *  If an interrupt was raised to indicate DMA complete of the
2925  *  Tx packet, this function is called. It identifies the last TxD
2926  *  whose buffer was freed and frees all skbs whose data have already
2927  *  DMA'ed into the NICs internal memory.
2928  *  Return Value:
2929  *  NONE
2930  */
2931
static void tx_intr_handler(struct fifo_info *fifo_data)
{
        struct s2io_nic *nic = fifo_data->nic;
        struct net_device *dev = (struct net_device *) nic->dev;
        struct tx_curr_get_info get_info, put_info;
        struct sk_buff *skb;
        struct TxD *txdlp;
        u8 err_mask;

        /* Snapshot the consumer (get) and producer (put) positions */
        get_info = fifo_data->tx_curr_get_info;
        memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
        txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
            list_virt_addr;
        /*
         * Walk the TxDs the NIC has handed back (ownership bit clear)
         * until we catch up with the producer index or hit a descriptor
         * with no skb attached (Host_Control == 0).
         */
        while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
               (get_info.offset != put_info.offset) &&
               (txdlp->Host_Control)) {
                /* Check for TxD errors */
                if (txdlp->Control_1 & TXD_T_CODE) {
                        unsigned long long err;
                        err = txdlp->Control_1 & TXD_T_CODE;
                        if (err & 0x1) {
                                nic->mac_control.stats_info->sw_stat.
                                                parity_err_cnt++;
                        }

                        /* update t_code statistics */
                        err_mask = err >> 48;
                        switch(err_mask) {
                                case 2:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_buf_abort_cnt++;
                                break;

                                case 3:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_desc_abort_cnt++;
                                break;

                                case 7:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_parity_err_cnt++;
                                break;

                                case 10:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_link_loss_cnt++;
                                break;

                                case 15:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_list_proc_err_cnt++;
                                break;
                        }
                }

                /* Recover the skb stashed in the TxD and unmap its buffers */
                skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
                if (skb == NULL) {
                        /* Should not happen; bail out (queue is not woken) */
                        DBG_PRINT(ERR_DBG, "%s: Null skb ",
                        __FUNCTION__);
                        DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
                        return;
                }

                /* Updating the statistics block */
                nic->stats.tx_bytes += skb->len;
                nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
                dev_kfree_skb_irq(skb);

                /* Advance the consumer index, wrapping at fifo_len + 1 */
                get_info.offset++;
                if (get_info.offset == get_info.fifo_len + 1)
                        get_info.offset = 0;
                txdlp = (struct TxD *) fifo_data->list_info
                    [get_info.offset].list_virt_addr;
                fifo_data->tx_curr_get_info.offset =
                    get_info.offset;
        }

        /* Descriptors were reclaimed, so the queue may be restarted */
        spin_lock(&nic->tx_lock);
        if (netif_queue_stopped(dev))
                netif_wake_queue(dev);
        spin_unlock(&nic->tx_lock);
}
3014
3015 /**
3016  *  s2io_mdio_write - Function to write in to MDIO registers
3017  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3018  *  @addr     : address value
3019  *  @value    : data value
3020  *  @dev      : pointer to net_device structure
3021  *  Description:
3022  *  This function is used to write values to the MDIO registers
3023  *  NONE
3024  */
3025 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3026 {
3027         u64 val64 = 0x0;
3028         struct s2io_nic *sp = dev->priv;
3029         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3030
3031         //address transaction
3032         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3033                         | MDIO_MMD_DEV_ADDR(mmd_type)
3034                         | MDIO_MMS_PRT_ADDR(0x0);
3035         writeq(val64, &bar0->mdio_control);
3036         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3037         writeq(val64, &bar0->mdio_control);
3038         udelay(100);
3039
3040         //Data transaction
3041         val64 = 0x0;
3042         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3043                         | MDIO_MMD_DEV_ADDR(mmd_type)
3044                         | MDIO_MMS_PRT_ADDR(0x0)
3045                         | MDIO_MDIO_DATA(value)
3046                         | MDIO_OP(MDIO_OP_WRITE_TRANS);
3047         writeq(val64, &bar0->mdio_control);
3048         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3049         writeq(val64, &bar0->mdio_control);
3050         udelay(100);
3051
3052         val64 = 0x0;
3053         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3054         | MDIO_MMD_DEV_ADDR(mmd_type)
3055         | MDIO_MMS_PRT_ADDR(0x0)
3056         | MDIO_OP(MDIO_OP_READ_TRANS);
3057         writeq(val64, &bar0->mdio_control);
3058         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3059         writeq(val64, &bar0->mdio_control);
3060         udelay(100);
3061
3062 }
3063
3064 /**
 *  s2io_mdio_read - Function to read from the MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to read values from the MDIO registers
3071  *  NONE
3072  */
3073 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3074 {
3075         u64 val64 = 0x0;
3076         u64 rval64 = 0x0;
3077         struct s2io_nic *sp = dev->priv;
3078         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3079
3080         /* address transaction */
3081         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3082                         | MDIO_MMD_DEV_ADDR(mmd_type)
3083                         | MDIO_MMS_PRT_ADDR(0x0);
3084         writeq(val64, &bar0->mdio_control);
3085         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3086         writeq(val64, &bar0->mdio_control);
3087         udelay(100);
3088
3089         /* Data transaction */
3090         val64 = 0x0;
3091         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3092                         | MDIO_MMD_DEV_ADDR(mmd_type)
3093                         | MDIO_MMS_PRT_ADDR(0x0)
3094                         | MDIO_OP(MDIO_OP_READ_TRANS);
3095         writeq(val64, &bar0->mdio_control);
3096         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3097         writeq(val64, &bar0->mdio_control);
3098         udelay(100);
3099
3100         /* Read the value from regs */
3101         rval64 = readq(&bar0->mdio_control);
3102         rval64 = rval64 & 0xFFFF0000;
3103         rval64 = rval64 >> 16;
3104         return rval64;
3105 }
3106 /**
3107  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
 *  @counter      : counter value to be updated
3109  *  @flag         : flag to indicate the status
3110  *  @type         : counter type
3111  *  Description:
3112  *  This function is to check the status of the xpak counters value
3113  *  NONE
3114  */
3115
3116 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3117 {
3118         u64 mask = 0x3;
3119         u64 val64;
3120         int i;
3121         for(i = 0; i <index; i++)
3122                 mask = mask << 0x2;
3123
3124         if(flag > 0)
3125         {
3126                 *counter = *counter + 1;
3127                 val64 = *regs_stat & mask;
3128                 val64 = val64 >> (index * 0x2);
3129                 val64 = val64 + 1;
3130                 if(val64 == 3)
3131                 {
3132                         switch(type)
3133                         {
3134                         case 1:
3135                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3136                                           "service. Excessive temperatures may "
3137                                           "result in premature transceiver "
3138                                           "failure \n");
3139                         break;
3140                         case 2:
3141                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3142                                           "service Excessive bias currents may "
3143                                           "indicate imminent laser diode "
3144                                           "failure \n");
3145                         break;
3146                         case 3:
3147                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3148                                           "service Excessive laser output "
3149                                           "power may saturate far-end "
3150                                           "receiver\n");
3151                         break;
3152                         default:
3153                                 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3154                                           "type \n");
3155                         }
3156                         val64 = 0x0;
3157                 }
3158                 val64 = val64 << (index * 0x2);
3159                 *regs_stat = (*regs_stat & (~mask)) | (val64);
3160
3161         } else {
3162                 *regs_stat = *regs_stat & (~mask);
3163         }
3164 }
3165
3166 /**
3167  *  s2io_updt_xpak_counter - Function to update the xpak counters
3168  *  @dev         : pointer to net_device struct
3169  *  Description:
 *  This function is to update the status of the xpak counters value
3171  *  NONE
3172  */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
        u16 flag  = 0x0;
        u16 type  = 0x0;
        u16 val16 = 0x0;
        u64 val64 = 0x0;
        u64 addr  = 0x0;

        struct s2io_nic *sp = dev->priv;
        struct stat_block *stat_info = sp->mac_control.stats_info;

        /* Check the communication with the MDIO slave */
        addr = 0x0000;
        val64 = 0x0;
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
        /* All-ones or all-zeroes indicates a dead/absent MDIO slave */
        if((val64 == 0xFFFF) || (val64 == 0x0000))
        {
                DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
                          "Returned %llx\n", (unsigned long long)val64);
                return;
        }

        /* Check for the expected value of 2040 at PMA address 0x0000 */
        if(val64 != 0x2040)
        {
                DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
                DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
                          (unsigned long long)val64);
                return;
        }

        /* Loading the DOM register to MDIO register */
        /* NOTE(review): 0xA100/0xA070/0xA074 appear to be XPAK DOM/alarm/
         * warning registers - confirm against the XPAK MSA datasheet */
        addr = 0xA100;
        s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

        /* Reading the Alarm flags */
        addr = 0xA070;
        val64 = 0x0;
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

        /* Each alarm feeds the 2-bit consecutive-hit tracker; the matching
         * "low" alarms are simple counters with no tracker */
        flag = CHECKBIT(val64, 0x7);
        type = 1;
        s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
                                &stat_info->xpak_stat.xpak_regs_stat,
                                0x0, flag, type);

        if(CHECKBIT(val64, 0x6))
                stat_info->xpak_stat.alarm_transceiver_temp_low++;

        flag = CHECKBIT(val64, 0x3);
        type = 2;
        s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
                                &stat_info->xpak_stat.xpak_regs_stat,
                                0x2, flag, type);

        if(CHECKBIT(val64, 0x2))
                stat_info->xpak_stat.alarm_laser_bias_current_low++;

        flag = CHECKBIT(val64, 0x1);
        type = 3;
        s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
                                &stat_info->xpak_stat.xpak_regs_stat,
                                0x4, flag, type);

        if(CHECKBIT(val64, 0x0))
                stat_info->xpak_stat.alarm_laser_output_power_low++;

        /* Reading the Warning flags */
        addr = 0xA074;
        val64 = 0x0;
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

        if(CHECKBIT(val64, 0x7))
                stat_info->xpak_stat.warn_transceiver_temp_high++;

        if(CHECKBIT(val64, 0x6))
                stat_info->xpak_stat.warn_transceiver_temp_low++;

        if(CHECKBIT(val64, 0x3))
                stat_info->xpak_stat.warn_laser_bias_current_high++;

        if(CHECKBIT(val64, 0x2))
                stat_info->xpak_stat.warn_laser_bias_current_low++;

        if(CHECKBIT(val64, 0x1))
                stat_info->xpak_stat.warn_laser_output_power_high++;

        if(CHECKBIT(val64, 0x0))
                stat_info->xpak_stat.warn_laser_output_power_low++;
}
3264
3265 /**
3266  *  wait_for_cmd_complete - waits for a command to complete.
3267  *  @sp : private member of the device structure, which is a pointer to the
3268  *  s2io_nic structure.
3269  *  Description: Function that waits for a command to Write into RMAC
3270  *  ADDR DATA registers to be completed and returns either success or
3271  *  error depending on whether the command was complete or not.
3272  *  Return value:
3273  *   SUCCESS on success and FAILURE on failure.
3274  */
3275
3276 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3277                                 int bit_state)
3278 {
3279         int ret = FAILURE, cnt = 0, delay = 1;
3280         u64 val64;
3281
3282         if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3283                 return FAILURE;
3284
3285         do {
3286                 val64 = readq(addr);
3287                 if (bit_state == S2IO_BIT_RESET) {
3288                         if (!(val64 & busy_bit)) {
3289                                 ret = SUCCESS;
3290                                 break;
3291                         }
3292                 } else {
3293                         if (!(val64 & busy_bit)) {
3294                                 ret = SUCCESS;
3295                                 break;
3296                         }
3297                 }
3298
3299                 if(in_interrupt())
3300                         mdelay(delay);
3301                 else
3302                         msleep(delay);
3303
3304                 if (++cnt >= 10)
3305                         delay = 50;
3306         } while (cnt < 20);
3307         return ret;
3308 }
3309 /*
3310  * check_pci_device_id - Checks if the device id is supported
3311  * @id : device id
3312  * Description: Function to check if the pci device id is supported by driver.
3313  * Return value: Actual device id if supported else PCI_ANY_ID
3314  */
3315 static u16 check_pci_device_id(u16 id)
3316 {
3317         switch (id) {
3318         case PCI_DEVICE_ID_HERC_WIN:
3319         case PCI_DEVICE_ID_HERC_UNI:
3320                 return XFRAME_II_DEVICE;
3321         case PCI_DEVICE_ID_S2IO_UNI:
3322         case PCI_DEVICE_ID_S2IO_WIN:
3323                 return XFRAME_I_DEVICE;
3324         default:
3325                 return PCI_ANY_ID;
3326         }
3327 }
3328
3329 /**
3330  *  s2io_reset - Resets the card.
3331  *  @sp : private member of the device structure.
3332  *  Description: Function to Reset the card. This function then also
3333  *  restores the previously saved PCI configuration space registers as
3334  *  the card reset also resets the configuration space.
3335  *  Return value:
3336  *  void.
3337  */
3338
static void s2io_reset(struct s2io_nic * sp)
{
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64;
        u16 subid, pci_cmd;
        int i;
        u16 val16;
        unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
        unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;

        DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
                        __FUNCTION__, sp->dev->name);

        /* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
        pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

        /* Issue the global software reset */
        val64 = SW_RESET_ALL;
        writeq(val64, &bar0->sw_reset);
        /* CX4 boards need extra settle time after reset */
        if (strstr(sp->product_name, "CX4")) {
                msleep(750);
        }
        msleep(250);
        /* Re-init config space until reads return a known device id */
        for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

                /* Restore the PCI state saved during initialization. */
                pci_restore_state(sp->pdev);
                pci_read_config_word(sp->pdev, 0x2, &val16);
                if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
                        break;
                msleep(200);
        }

        if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
                DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
        }

        pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

        s2io_init_pci(sp);

        /* Set swapper to enable I/O register access */
        s2io_set_swapper(sp);

        /* Restore the MSIX table entries from local variables */
        restore_xmsi_data(sp);

        /* Clear certain PCI/PCI-X fields after reset */
        if (sp->device_type == XFRAME_II_DEVICE) {
                /* Clear "detected parity error" bit */
                pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

                /* Clearing PCIX Ecc status register */
                pci_write_config_dword(sp->pdev, 0x68, 0x7C);

                /* Clearing PCI_STATUS error reflected here */
                writeq(s2BIT(62), &bar0->txpic_int_reg);
        }

        /* Reset device statistics maintained by OS */
        memset(&sp->stats, 0, sizeof (struct net_device_stats));

        up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
        down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
        up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
        down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
        reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
        mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
        mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
        watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
        /* save link up/down time/cnt, reset/memory/watchdog cnt */
        memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
        /* restore link up/down time/cnt, reset/memory/watchdog cnt */
        sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
        sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
        sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
        sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
        sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
        sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
        sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
        sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;

        /* SXE-002: Configure link and activity LED to turn it off */
        subid = sp->pdev->subsystem_device;
        if (((subid & 0xFF) >= 0x07) &&
            (sp->device_type == XFRAME_I_DEVICE)) {
                val64 = readq(&bar0->gpio_control);
                val64 |= 0x0000800000000000ULL;
                writeq(val64, &bar0->gpio_control);
                val64 = 0x0411040400000000ULL;
                writeq(val64, (void __iomem *)bar0 + 0x2700);
        }

        /*
         * Clear spurious ECC interrupts that would have occured on
         * XFRAME II cards after reset.
         */
        if (sp->device_type == XFRAME_II_DEVICE) {
                val64 = readq(&bar0->pcc_err_reg);
                writeq(val64, &bar0->pcc_err_reg);
        }

        /* restore the previously assigned mac address */
        do_s2io_prog_unicast(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);

        sp->device_enabled_once = FALSE;
}
3445
3446 /**
 *  s2io_set_swapper - to set the swapper control on the card
3448  *  @sp : private member of the device structure,
3449  *  pointer to the s2io_nic structure.
3450  *  Description: Function to set the swapper control on the card
3451  *  correctly depending on the 'endianness' of the system.
3452  *  Return value:
3453  *  SUCCESS on success and FAILURE on failure.
3454  */
3455
static int s2io_set_swapper(struct s2io_nic * sp)
{
        struct net_device *dev = sp->dev;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64, valt, valr;

        /*
         * Set proper endian settings and verify the same by reading
         * the PIF Feed-back register.
         */

        /* Probe read path: feedback register must read the magic pattern */
        val64 = readq(&bar0->pif_rd_swapper_fb);
        if (val64 != 0x0123456789ABCDEFULL) {
                int i = 0;
                u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
                                0x8100008181000081ULL,  /* FE=1, SE=0 */
                                0x4200004242000042ULL,  /* FE=0, SE=1 */
                                0};                     /* FE=0, SE=0 */

                /* Try each read-side swap setting until feedback matches */
                while(i<4) {
                        writeq(value[i], &bar0->swapper_ctrl);
                        val64 = readq(&bar0->pif_rd_swapper_fb);
                        if (val64 == 0x0123456789ABCDEFULL)
                                break;
                        i++;
                }
                if (i == 4) {
                        DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
                                dev->name);
                        DBG_PRINT(ERR_DBG, "feedback read %llx\n",
                                (unsigned long long) val64);
                        return FAILURE;
                }
                valr = value[i];
        } else {
                valr = readq(&bar0->swapper_ctrl);
        }

        /* Probe write path: write the magic pattern and read it back */
        valt = 0x0123456789ABCDEFULL;
        writeq(valt, &bar0->xmsi_address);
        val64 = readq(&bar0->xmsi_address);

        if(val64 != valt) {
                int i = 0;
                u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
                                0x0081810000818100ULL,  /* FE=1, SE=0 */
                                0x0042420000424200ULL,  /* FE=0, SE=1 */
                                0};                     /* FE=0, SE=0 */

                /* Try each write-side setting on top of the working read one */
                while(i<4) {
                        writeq((value[i] | valr), &bar0->swapper_ctrl);
                        writeq(valt, &bar0->xmsi_address);
                        val64 = readq(&bar0->xmsi_address);
                        if(val64 == valt)
                                break;
                        i++;
                }
                if(i == 4) {
                        unsigned long long x = val64;
                        DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
                        DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
                        return FAILURE;
                }
        }
        /* Keep only the verified PIO bits; rebuild the rest below */
        val64 = readq(&bar0->swapper_ctrl);
        val64 &= 0xFFFF000000000000ULL;

#ifdef  __BIG_ENDIAN
        /*
         * The device by default set to a big endian format, so a
         * big endian driver need not set anything.
         */
        val64 |= (SWAPPER_CTRL_TXP_FE |
                 SWAPPER_CTRL_TXP_SE |
                 SWAPPER_CTRL_TXD_R_FE |
                 SWAPPER_CTRL_TXD_W_FE |
                 SWAPPER_CTRL_TXF_R_FE |
                 SWAPPER_CTRL_RXD_R_FE |
                 SWAPPER_CTRL_RXD_W_FE |
                 SWAPPER_CTRL_RXF_W_FE |
                 SWAPPER_CTRL_XMSI_FE |
                 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
        if (sp->config.intr_type == INTA)
                val64 |= SWAPPER_CTRL_XMSI_SE;
        writeq(val64, &bar0->swapper_ctrl);
#else
        /*
         * Initially we enable all bits to make it accessible by the
         * driver, then we selectively enable only those bits that
         * we want to set.
         */
        val64 |= (SWAPPER_CTRL_TXP_FE |
                 SWAPPER_CTRL_TXP_SE |
                 SWAPPER_CTRL_TXD_R_FE |
                 SWAPPER_CTRL_TXD_R_SE |
                 SWAPPER_CTRL_TXD_W_FE |
                 SWAPPER_CTRL_TXD_W_SE |
                 SWAPPER_CTRL_TXF_R_FE |
                 SWAPPER_CTRL_RXD_R_FE |
                 SWAPPER_CTRL_RXD_R_SE |
                 SWAPPER_CTRL_RXD_W_FE |
                 SWAPPER_CTRL_RXD_W_SE |
                 SWAPPER_CTRL_RXF_W_FE |
                 SWAPPER_CTRL_XMSI_FE |
                 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
        if (sp->config.intr_type == INTA)
                val64 |= SWAPPER_CTRL_XMSI_SE;
        writeq(val64, &bar0->swapper_ctrl);
#endif
        val64 = readq(&bar0->swapper_ctrl);

        /*
         * Verifying if endian settings are accurate by reading a
         * feedback register.
         */
        val64 = readq(&bar0->pif_rd_swapper_fb);
        if (val64 != 0x0123456789ABCDEFULL) {
                /* Endian settings are incorrect, calls for another dekko. */
                DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
                          dev->name);
                DBG_PRINT(ERR_DBG, "feedback read %llx\n",
                          (unsigned long long) val64);
                return FAILURE;
        }

        return SUCCESS;
}
3583
3584 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3585 {
3586         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3587         u64 val64;
3588         int ret = 0, cnt = 0;
3589
3590         do {
3591                 val64 = readq(&bar0->xmsi_access);
3592                 if (!(val64 & s2BIT(15)))
3593                         break;
3594                 mdelay(1);
3595                 cnt++;
3596         } while(cnt < 5);
3597         if (cnt == 5) {
3598                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3599                 ret = 1;
3600         }
3601
3602         return ret;
3603 }
3604
/*
 * restore_xmsi_data - reprogram the NIC's MSI-X table from the saved copy.
 * @nic: device private structure
 *
 * For each vector, writes back the address/data pair previously captured
 * by store_xmsi_data() and starts an access cycle via xmsi_access.
 * NOTE(review): bit 7 presumably selects a write cycle (store_xmsi_data
 * sets only bit 15) and vBIT(i, 26, 6) selects the vector — confirm
 * against the Xframe register spec.  A vector whose cycle times out is
 * logged and skipped.
 */
3605 static void restore_xmsi_data(struct s2io_nic *nic)
3606 {
3607         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3608         u64 val64;
3609         int i;
3610
3611         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3612                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3613                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
                /* Kick off the hardware access cycle for vector i */
3614                 val64 = (s2BIT(7) | s2BIT(15) | vBIT(i, 26, 6));
3615                 writeq(val64, &bar0->xmsi_access);
3616                 if (wait_for_msix_trans(nic, i)) {
3617                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3618                         continue;
3619                 }
3620         }
3621 }
3622
/*
 * store_xmsi_data - capture the NIC's MSI-X table into driver memory.
 * @nic: device private structure
 *
 * Triggers a read access cycle for every vector and saves the returned
 * address/data pair into nic->msix_info[] so restore_xmsi_data() can
 * reprogram the table later (e.g. after a reset).  Entries that read
 * back as zero are left untouched; timed-out cycles are logged and
 * skipped.
 */
3623 static void store_xmsi_data(struct s2io_nic *nic)
3624 {
3625         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3626         u64 val64, addr, data;
3627         int i;
3628
3629         /* Store and display */
3630         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
                /* Bit 15 starts the cycle; vBIT(i, 26, 6) selects vector i */
3631                 val64 = (s2BIT(15) | vBIT(i, 26, 6));
3632                 writeq(val64, &bar0->xmsi_access);
3633                 if (wait_for_msix_trans(nic, i)) {
3634                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3635                         continue;
3636                 }
3637                 addr = readq(&bar0->xmsi_address);
3638                 data = readq(&bar0->xmsi_data);
                /* Only keep programmed (non-zero) table entries */
3639                 if (addr && data) {
3640                         nic->msix_info[i].addr = addr;
3641                         nic->msix_info[i].data = data;
3642                 }
3643         }
3644 }
3645
/*
 * s2io_enable_msi_x - allocate and enable MSI-X vectors for the device.
 * @nic: device private structure
 *
 * Allocates the pci msix_entry array and the driver's parallel
 * s2io_msix_entry bookkeeping array, routes each Tx FIFO and Rx ring to
 * its own vector via the tx_mat/rx_mat steering registers, then enables
 * MSI-X on the PCI device.  Vector numbering starts at msix_indx = 1;
 * NOTE(review): vector 0 is apparently reserved (general/alarm
 * interrupt) — confirm against the interrupt registration code.
 *
 * Returns 0 on success, -ENOMEM on allocation or enable failure.
 */
3646 static int s2io_enable_msi_x(struct s2io_nic *nic)
3647 {
3648         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3649         u64 tx_mat, rx_mat;
3650         u16 msi_control; /* Temp variable */
3651         int ret, i, j, msix_indx = 1;
3652
3653         nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry),
3654                                GFP_KERNEL);
3655         if (!nic->entries) {
3656                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3657                         __FUNCTION__);
3658                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3659                 return -ENOMEM;
3660         }
3661         nic->mac_control.stats_info->sw_stat.mem_allocated
3662                 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3663
3664         nic->s2io_entries =
3665                 kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry),
3666                                    GFP_KERNEL);
3667         if (!nic->s2io_entries) {
3668                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3669                         __FUNCTION__);
3670                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3671                 kfree(nic->entries);
3672                 nic->mac_control.stats_info->sw_stat.mem_freed
3673                         += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3674                 return -ENOMEM;
3675         }
3676          nic->mac_control.stats_info->sw_stat.mem_allocated
3677                 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3678
        /* Initialize all slots as unused; arg/type are filled per vector below */
3679         for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3680                 nic->entries[i].entry = i;
3681                 nic->s2io_entries[i].entry = i;
3682                 nic->s2io_entries[i].arg = NULL;
3683                 nic->s2io_entries[i].in_use = 0;
3684         }
3685
        /* Steer each Tx FIFO's interrupt to its own MSI-X vector */
3686         tx_mat = readq(&bar0->tx_mat0_n[0]);
3687         for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3688                 tx_mat |= TX_MAT_SET(i, msix_indx);
3689                 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3690                 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3691                 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3692         }
3693         writeq(tx_mat, &bar0->tx_mat0_n[0]);
3694
        /* Steer each Rx ring's interrupt to the following vectors */
3695         rx_mat = readq(&bar0->rx_mat);
3696         for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
3697                 rx_mat |= RX_MAT_SET(j, msix_indx);
3698                 nic->s2io_entries[msix_indx].arg
3699                         = &nic->mac_control.rings[j];
3700                 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3701                 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3702         }
3703         writeq(rx_mat, &bar0->rx_mat);
3704
3705         nic->avail_msix_vectors = 0;
        /*
         * pci_enable_msix() returns 0 on success, a negative errno on
         * error, or a positive count of the vectors actually available.
         * If the positive count still covers one vector per fifo, per
         * ring, plus the reserved vector 0, retry with that count.
         */
3706         ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3707         /* We fail init if error or we get less vectors than min required */
3708         if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3709                 nic->avail_msix_vectors = ret;
3710                 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3711         }
3712         if (ret) {
3713                 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3714                 kfree(nic->entries);
3715                 nic->mac_control.stats_info->sw_stat.mem_freed
3716                         += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3717                 kfree(nic->s2io_entries);
3718                 nic->mac_control.stats_info->sw_stat.mem_freed
3719                 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3720                 nic->entries = NULL;
3721                 nic->s2io_entries = NULL;
3722                 nic->avail_msix_vectors = 0;
3723                 return -ENOMEM;
3724         }
3725         if (!nic->avail_msix_vectors)
3726                 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3727
3728         /*
3729          * To enable MSI-X, MSI also needs to be enabled, due to a bug
3730          * in the herc NIC. (Temp change, needs to be removed later)
3731          */
3732         pci_read_config_word(nic->pdev, 0x42, &msi_control);
3733         msi_control |= 0x1; /* Enable MSI */
3734         pci_write_config_word(nic->pdev, 0x42, msi_control);
3735
3736         return 0;
3737 }
3738
3739 /* Handle software interrupt used during MSI(X) test */
3740 static irqreturn_t __devinit s2io_test_intr(int irq, void *dev_id)
3741 {
3742         struct s2io_nic *sp = dev_id;
3743
3744         sp->msi_detected = 1;
3745         wake_up(&sp->msi_wait);
3746
3747         return IRQ_HANDLED;
3748 }
3749
3750 /* Test interrupt path by forcing a software IRQ */
/*
 * s2io_test_msi - verify that MSI-X delivery actually works.
 * @sp: device private structure
 *
 * Temporarily hooks s2io_test_intr() on vector 1, programs the
 * scheduled-interrupt timer for a one-shot interrupt routed to MSI 1,
 * and waits up to HZ/10 for the handler to fire.  The original
 * scheduled_int_ctrl value is restored before returning.
 *
 * Returns 0 if the interrupt was seen, -EOPNOTSUPP if it never arrived
 * (caller falls back to INTA), or the request_irq() error.
 */
3751 static int __devinit s2io_test_msi(struct s2io_nic *sp)
3752 {
3753         struct pci_dev *pdev = sp->pdev;
3754         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3755         int err;
3756         u64 val64, saved64;
3757
3758         err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3759                         sp->name, sp);
3760         if (err) {
3761                 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3762                        sp->dev->name, pci_name(pdev), pdev->irq);
3763                 return err;
3764         }
3765
3766         init_waitqueue_head (&sp->msi_wait);
3767         sp->msi_detected = 0;
3768
        /* Arm a one-shot scheduled interrupt routed to MSI vector 1 */
3769         saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3770         val64 |= SCHED_INT_CTRL_ONE_SHOT;
3771         val64 |= SCHED_INT_CTRL_TIMER_EN;
3772         val64 |= SCHED_INT_CTRL_INT2MSI(1);
3773         writeq(val64, &bar0->scheduled_int_ctrl);
3774
3775         wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3776
3777         if (!sp->msi_detected) {
3778                 /* MSI(X) test failed, go back to INTx mode */
3779                 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated"
3780                         "using MSI(X) during test\n", sp->dev->name,
3781                         pci_name(pdev));
3782
3783                 err = -EOPNOTSUPP;
3784         }
3785
3786         free_irq(sp->entries[1].vector, sp);
3787
        /* Put the scheduler register back exactly as we found it */
3788         writeq(saved64, &bar0->scheduled_int_ctrl);
3789
3790         return err;
3791 }
3792
3793 static void remove_msix_isr(struct s2io_nic *sp)
3794 {
3795         int i;
3796         u16 msi_control;
3797
3798         for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3799                 if (sp->s2io_entries[i].in_use ==
3800                         MSIX_REGISTERED_SUCCESS) {
3801                         int vector = sp->entries[i].vector;
3802                         void *arg = sp->s2io_entries[i].arg;
3803                         free_irq(vector, arg);
3804                 }
3805         }
3806
3807         kfree(sp->entries);
3808         kfree(sp->s2io_entries);
3809         sp->entries = NULL;
3810         sp->s2io_entries = NULL;
3811
3812         pci_read_config_word(sp->pdev, 0x42, &msi_control);
3813         msi_control &= 0xFFFE; /* Disable MSI */
3814         pci_write_config_word(sp->pdev, 0x42, msi_control);
3815
3816         pci_disable_msix(sp->pdev);
3817 }
3818
3819 static void remove_inta_isr(struct s2io_nic *sp)
3820 {
3821         struct net_device *dev = sp->dev;
3822
3823         free_irq(sp->pdev->irq, dev);
3824 }
3825
3826 /* ********************************************************* *
3827  * Functions defined below concern the OS part of the driver *
3828  * ********************************************************* */
3829
3830 /**
3831  *  s2io_open - open entry point of the driver
3832  *  @dev : pointer to the device structure.
3833  *  Description:
3834  *  This function is the open entry point of the driver. It mainly calls a
3835  *  function to allocate Rx buffers and inserts them into the buffer
3836  *  descriptors and then enables the Rx part of the NIC.
3837  *  Return value:
3838  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3839  *   file on failure.
3840  */
3841
3842 static int s2io_open(struct net_device *dev)
3843 {
3844         struct s2io_nic *sp = dev->priv;
3845         int err = 0;
3846
3847         /*
3848          * Make sure you have link off by default every time
3849          * Nic is initialized
3850          */
3851         netif_carrier_off(dev);
3852         sp->last_link_state = 0;
3853
3854         napi_enable(&sp->napi);
3855
3856         if (sp->config.intr_type == MSI_X) {
3857                 int ret = s2io_enable_msi_x(sp);
3858
3859                 if (!ret) {
3860                         ret = s2io_test_msi(sp);
3861                         /* rollback MSI-X, will re-enable during add_isr() */
3862                         remove_msix_isr(sp);
3863                 }
3864                 if (ret) {
3865
3866                         DBG_PRINT(ERR_DBG,
3867                           "%s: MSI-X requested but failed to enable\n",
3868                           dev->name);
3869                         sp->config.intr_type = INTA;
3870                 }
3871         }
3872
3873         /* NAPI doesn't work well with MSI(X) */
3874          if (sp->config.intr_type != INTA) {
3875                 if(sp->config.napi)
3876                         sp->config.napi = 0;
3877         }
3878
3879         /* Initialize H/W and enable interrupts */
3880         err = s2io_card_up(sp);
3881         if (err) {
3882                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3883                           dev->name);
3884                 goto hw_init_failed;
3885         }
3886
3887         if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3888                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3889                 s2io_card_down(sp);
3890                 err = -ENODEV;
3891                 goto hw_init_failed;
3892         }
3893
3894         netif_start_queue(dev);
3895         return 0;
3896
3897 hw_init_failed:
3898         napi_disable(&sp->napi);
3899         if (sp->config.intr_type == MSI_X) {
3900                 if (sp->entries) {
3901                         kfree(sp->entries);
3902                         sp->mac_control.stats_info->sw_stat.mem_freed
3903                         += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3904                 }
3905                 if (sp->s2io_entries) {
3906                         kfree(sp->s2io_entries);
3907                         sp->mac_control.stats_info->sw_stat.mem_freed
3908                         += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3909                 }
3910         }
3911         return err;
3912 }
3913
3914 /**
3915  *  s2io_close -close entry point of the driver
3916  *  @dev : device pointer.
3917  *  Description:
3918  *  This is the stop entry point of the driver. It needs to undo exactly
3919  *  whatever was done by the open entry point,thus it's usually referred to
3920  *  as the close function.Among other things this function mainly stops the
3921  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3922  *  Return value:
3923  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3924  *  file on failure.
3925  */
3926
3927 static int s2io_close(struct net_device *dev)
3928 {
3929         struct s2io_nic *sp = dev->priv;
3930
3931         /* Return if the device is already closed               *
3932         *  Can happen when s2io_card_up failed in change_mtu    *
3933         */
3934         if (!is_s2io_card_up(sp))
3935                 return 0;
3936
3937         netif_stop_queue(dev);
3938         napi_disable(&sp->napi);
3939         /* Reset card, kill tasklet and free Tx and Rx buffers. */
3940         s2io_card_down(sp);
3941
3942         return 0;
3943 }
3944
3945 /**
3946  *  s2io_xmit - Tx entry point of the driver
3947  *  @skb : the socket buffer containing the Tx data.
3948  *  @dev : device pointer.
3949  *  Description :
3950  *  This function is the Tx entry point of the driver. S2IO NIC supports
3951  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
3952  *  NOTE: when the device can't queue the pkt, just the trans_start variable
3953  *  will not be updated.
3954  *  Return value:
3955  *  0 on success & 1 on failure.
3956  */
3957
3958 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3959 {
3960         struct s2io_nic *sp = dev->priv;
3961         u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3962         register u64 val64;
3963         struct TxD *txdp;
3964         struct TxFIFO_element __iomem *tx_fifo;
3965         unsigned long flags;
3966         u16 vlan_tag = 0;
3967         int vlan_priority = 0;
3968         struct mac_info *mac_control;
3969         struct config_param *config;
3970         int offload_type;
3971         struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
3972
3973         mac_control = &sp->mac_control;
3974         config = &sp->config;
3975
3976         DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
3977
        /* Drop empty skbs up front; nothing to transmit */
3978         if (unlikely(skb->len <= 0)) {
3979                 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3980                 dev_kfree_skb_any(skb);
3981                 return 0;
3982 }
3983
3984         spin_lock_irqsave(&sp->tx_lock, flags);
        /* Card is resetting / going down: silently drop the packet */
3985         if (!is_s2io_card_up(sp)) {
3986                 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3987                           dev->name);
3988                 spin_unlock_irqrestore(&sp->tx_lock, flags);
3989                 dev_kfree_skb(skb);
3990                 return 0;
3991         }
3992
3993         queue = 0;
3994         /* Get Fifo number to Transmit based on vlan priority */
3995         if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3996                 vlan_tag = vlan_tx_tag_get(skb);
                /* 802.1p priority lives in the top 3 bits of the VLAN tag */
3997                 vlan_priority = vlan_tag >> 13;
3998                 queue = config->fifo_mapping[vlan_priority];
3999         }
4000
4001         put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
4002         get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
4003         txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
4004                 list_virt_addr;
4005
4006         queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
4007         /* Avoid "put" pointer going beyond "get" pointer */
        /* Non-zero Host_Control means the descriptor is still owned by a
         * previous, not-yet-completed transmit */
4008         if (txdp->Host_Control ||
4009                    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4010                 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4011                 netif_stop_queue(dev);
4012                 dev_kfree_skb(skb);
4013                 spin_unlock_irqrestore(&sp->tx_lock, flags);
4014                 return 0;
4015         }
4016
        /* Program per-packet offload flags into the first descriptor */
4017         offload_type = s2io_offload_type(skb);
4018         if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4019                 txdp->Control_1 |= TXD_TCP_LSO_EN;
4020                 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4021         }
4022         if (skb->ip_summed == CHECKSUM_PARTIAL) {
4023                 txdp->Control_2 |=
4024                     (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
4025                      TXD_TX_CKO_UDP_EN);
4026         }
4027         txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4028         txdp->Control_1 |= TXD_LIST_OWN_XENA;
4029         txdp->Control_2 |= config->tx_intr_type;
4030
4031         if (sp->vlgrp && vlan_tx_tag_present(skb)) {
4032                 txdp->Control_2 |= TXD_VLAN_ENABLE;
4033                 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4034         }
4035
        /* Linear part length = total length minus paged (fragment) data */
4036         frg_len = skb->len - skb->data_len;
        /* UFO: descriptor 0 carries an 8-byte in-band header (the IPv6
         * fragment id); the packet data then starts at the next TxD */
4037         if (offload_type == SKB_GSO_UDP) {
4038                 int ufo_size;
4039
4040                 ufo_size = s2io_udp_mss(skb);
4041                 ufo_size &= ~7;
4042                 txdp->Control_1 |= TXD_UFO_EN;
4043                 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4044                 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4045 #ifdef __BIG_ENDIAN
4046                 sp->ufo_in_band_v[put_off] =
4047                                 (u64)skb_shinfo(skb)->ip6_frag_id;
4048 #else
4049                 sp->ufo_in_band_v[put_off] =
4050                                 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
4051 #endif
4052                 txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
4053                 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4054                                         sp->ufo_in_band_v,
4055                                         sizeof(u64), PCI_DMA_TODEVICE);
4056                 if((txdp->Buffer_Pointer == 0) ||
4057                         (txdp->Buffer_Pointer == DMA_ERROR_CODE))
4058                         goto pci_map_failed;
4059                 txdp++;
4060         }
4061
        /* DMA-map the linear part of the skb */
4062         txdp->Buffer_Pointer = pci_map_single
4063             (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
4064         if((txdp->Buffer_Pointer == 0) ||
4065                 (txdp->Buffer_Pointer == DMA_ERROR_CODE))
4066                 goto pci_map_failed;
4067
        /* Stash the skb so the completion path can find and free it */
4068         txdp->Host_Control = (unsigned long) skb;
4069         txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4070         if (offload_type == SKB_GSO_UDP)
4071                 txdp->Control_1 |= TXD_UFO_EN;
4072
4073         frg_cnt = skb_shinfo(skb)->nr_frags;
4074         /* For fragmented SKB. */
4075         for (i = 0; i < frg_cnt; i++) {
4076                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4077                 /* A '0' length fragment will be ignored */
4078                 if (!frag->size)
4079                         continue;
4080                 txdp++;
4081                 txdp->Buffer_Pointer = (u64) pci_map_page
4082                     (sp->pdev, frag->page, frag->page_offset,
4083                      frag->size, PCI_DMA_TODEVICE);
4084                 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4085                 if (offload_type == SKB_GSO_UDP)
4086                         txdp->Control_1 |= TXD_UFO_EN;
4087         }
4088         txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4089
4090         if (offload_type == SKB_GSO_UDP)
4091                 frg_cnt++; /* as Txd0 was used for inband header */
4092
        /* Hand the descriptor list to the NIC: write its physical address
         * and the list control word into the transmit FIFO doorbell */
4093         tx_fifo = mac_control->tx_FIFO_start[queue];
4094         val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
4095         writeq(val64, &tx_fifo->TxDL_Pointer);
4096
4097         val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4098                  TX_FIFO_LAST_LIST);
4099         if (offload_type)
4100                 val64 |= TX_FIFO_SPECIAL_FUNC;
4101
4102         writeq(val64, &tx_fifo->List_Control);
4103
        /* Order the doorbell writes before any subsequent MMIO */
4104         mmiowb();
4105
        /* Advance the put pointer, wrapping at the end of the ring */
4106         put_off++;
4107         if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
4108                 put_off = 0;
4109         mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
4110
4111         /* Avoid "put" pointer going beyond "get" pointer */
4112         if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4113                 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
4114                 DBG_PRINT(TX_DBG,
4115                           "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4116                           put_off, get_off);
4117                 netif_stop_queue(dev);
4118         }
4119         mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
4120         dev->trans_start = jiffies;
4121         spin_unlock_irqrestore(&sp->tx_lock, flags);
4122
4123         return 0;
4124 pci_map_failed:
4125         stats->pci_map_fail_cnt++;
4126         netif_stop_queue(dev);
4127         stats->mem_freed += skb->truesize;
4128         dev_kfree_skb(skb);
4129         spin_unlock_irqrestore(&sp->tx_lock, flags);
4130         return 0;
4131 }
4132
4133 static void
4134 s2io_alarm_handle(unsigned long data)
4135 {
4136         struct s2io_nic *sp = (struct s2io_nic *)data;
4137         struct net_device *dev = sp->dev;
4138
4139         s2io_handle_errors(dev);
4140         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4141 }
4142
/*
 * s2io_chk_rx_buffers - replenish Rx buffers for a ring after an interrupt.
 * @sp: device private structure
 * @rng_n: ring number to check
 *
 * In non-LRO mode the refill strategy depends on how depleted the ring
 * is: at PANIC level buffers are refilled synchronously here (unless the
 * refill tasklet already holds the ring), at LOW level the tasklet is
 * scheduled instead.  In LRO mode the ring is refilled unconditionally.
 * NOTE(review): bit 0 of sp->tasklet_status appears to serialize refill
 * against the tasklet (see TASKLET_IN_USE) — confirm with the tasklet
 * definition elsewhere in the file.
 *
 * Returns 0 normally, -1 if a synchronous refill ran out of memory.
 */
4143 static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
4144 {
4145         int rxb_size, level;
4146
4147         if (!sp->lro) {
4148                 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4149                 level = rx_buffer_level(sp, rxb_size, rng_n);
4150
4151                 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4152                         int ret;
4153                         DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4154                         DBG_PRINT(INTR_DBG, "PANIC levels\n");
4155                         if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4156                                 DBG_PRINT(INFO_DBG, "Out of memory in %s",
4157                                           __FUNCTION__);
4158                                 clear_bit(0, (&sp->tasklet_status));
4159                                 return -1;
4160                         }
4161                         clear_bit(0, (&sp->tasklet_status));
4162                 } else if (level == LOW)
4163                         tasklet_schedule(&sp->task);
4164
4165         } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4166                         DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
4167                         DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
4168         }
4169         return 0;
4170 }
4171
4172 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4173 {
4174         struct ring_info *ring = (struct ring_info *)dev_id;
4175         struct s2io_nic *sp = ring->nic;
4176
4177         if (!is_s2io_card_up(sp))
4178                 return IRQ_HANDLED;
4179
4180         rx_intr_handler(ring);
4181         s2io_chk_rx_buffers(sp, ring->ring_no);
4182
4183         return IRQ_HANDLED;
4184 }
4185
4186 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4187 {
4188         struct fifo_info *fifo = (struct fifo_info *)dev_id;
4189         struct s2io_nic *sp = fifo->nic;
4190
4191         if (!is_s2io_card_up(sp))
4192                 return IRQ_HANDLED;
4193
4194         tx_intr_handler(fifo);
4195         return IRQ_HANDLED;
4196 }
/*
 * s2io_txpic_intr_handle - service GPIO link-state change interrupts.
 * @sp: device private structure
 *
 * Reads the PIC interrupt status and, for GPIO events, distinguishes
 * three cases: both up+down pending (glitch: clear both and unmask to
 * re-evaluate), link-up (enable the adapter, turn the LED on, report
 * LINK_UP, then mask up/unmask down), and link-down (report LINK_DOWN,
 * mask down/unmask up, turn the LED off).
 */
4197 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4198 {
4199         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4200         u64 val64;
4201
4202         val64 = readq(&bar0->pic_int_status);
4203         if (val64 & PIC_INT_GPIO) {
4204                 val64 = readq(&bar0->gpio_int_reg);
4205                 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4206                     (val64 & GPIO_INT_REG_LINK_UP)) {
4207                         /*
4208                          * This is unstable state so clear both up/down
4209                          * interrupt and adapter to re-evaluate the link state.
4210                          */
4211                         val64 |=  GPIO_INT_REG_LINK_DOWN;
4212                         val64 |= GPIO_INT_REG_LINK_UP;
4213                         writeq(val64, &bar0->gpio_int_reg);
4214                         val64 = readq(&bar0->gpio_int_mask);
4215                         val64 &= ~(GPIO_INT_MASK_LINK_UP |
4216                                    GPIO_INT_MASK_LINK_DOWN);
4217                         writeq(val64, &bar0->gpio_int_mask);
4218                 }
4219                 else if (val64 & GPIO_INT_REG_LINK_UP) {
4220                         val64 = readq(&bar0->adapter_status);
4221                                 /* Enable Adapter */
4222                         val64 = readq(&bar0->adapter_control);
4223                         val64 |= ADAPTER_CNTL_EN;
4224                         writeq(val64, &bar0->adapter_control);
4225                         val64 |= ADAPTER_LED_ON;
4226                         writeq(val64, &bar0->adapter_control);
4227                         if (!sp->device_enabled_once)
4228                                 sp->device_enabled_once = 1;
4229
4230                         s2io_link(sp, LINK_UP);
4231                         /*
4232                          * unmask link down interrupt and mask link-up
4233                          * intr
4234                          */
4235                         val64 = readq(&bar0->gpio_int_mask);
4236                         val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4237                         val64 |= GPIO_INT_MASK_LINK_UP;
4238                         writeq(val64, &bar0->gpio_int_mask);
4239
4240                 }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4241                         val64 = readq(&bar0->adapter_status);
4242                         s2io_link(sp, LINK_DOWN);
4243                         /* Link is down so unmask link up interrupt */
4244                         val64 = readq(&bar0->gpio_int_mask);
4245                         val64 &= ~GPIO_INT_MASK_LINK_UP;
4246                         val64 |= GPIO_INT_MASK_LINK_DOWN;
4247                         writeq(val64, &bar0->gpio_int_mask);
4248
4249                         /* turn off LED */
4250                         val64 = readq(&bar0->adapter_control);
4251                         val64 = val64 &(~ADAPTER_LED_ON);
4252                         writeq(val64, &bar0->adapter_control);
4253                 }
4254         }
        /* Final read of gpio_int_mask — presumably a posted-write flush;
         * the value is intentionally discarded. TODO confirm. */
4255         val64 = readq(&bar0->gpio_int_mask);
4256 }
4257
4258 /**
4259  *  do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4260  *  @value: alarm bits
4261  *  @addr: address value
4262  *  @cnt: counter variable
4263  *  Description: Check for alarm and increment the counter
4264  *  Return Value:
4265  *  1 - if alarm bit set
4266  *  0 - if alarm bit is not set
4267  */
4268 static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4269                           unsigned long long *cnt)
4270 {
4271         u64 val64;
4272         val64 = readq(addr);
4273         if ( val64 & value ) {
4274                 writeq(val64, addr);
4275                 (*cnt)++;
4276                 return 1;
4277         }
4278         return 0;
4279
4280 }
4281
4282 /**
4283  *  s2io_handle_errors - Xframe error indication handler
4284  *  @nic: device private variable
4285  *  Description: Handle alarms such as loss of link, single or
4286  *  double ECC errors, critical and serious errors.
4287  *  Return Value:
4288  *  NONE
4289  */
4290 static void s2io_handle_errors(void * dev_id)
4291 {
4292         struct net_device *dev = (struct net_device *) dev_id;
4293         struct s2io_nic *sp = dev->priv;
4294         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4295         u64 temp64 = 0,val64=0;
4296         int i = 0;
4297
4298         struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4299         struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4300
4301         if (!is_s2io_card_up(sp))
4302                 return;
4303
4304         if (pci_channel_offline(sp->pdev))
4305                 return;
4306
4307         memset(&sw_stat->ring_full_cnt, 0,
4308                 sizeof(sw_stat->ring_full_cnt));
4309
4310         /* Handling the XPAK counters update */
4311         if(stats->xpak_timer_count < 72000) {
4312                 /* waiting for an hour */
4313                 stats->xpak_timer_count++;
4314         } else {
4315                 s2io_updt_xpak_counter(dev);
4316                 /* reset the count to zero */
4317                 stats->xpak_timer_count = 0;
4318         }
4319
4320         /* Handling link status change error Intr */
4321         if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4322                 val64 = readq(&bar0->mac_rmac_err_reg);
4323                 writeq(val64, &bar0->mac_rmac_err_reg);
4324                 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4325                         schedule_work(&sp->set_link_task);
4326         }
4327
4328         /* In case of a serious error, the device will be Reset. */
4329         if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4330                                 &sw_stat->serious_err_cnt))
4331                 goto reset;
4332
4333         /* Check for data parity error */
4334         if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4335                                 &sw_stat->parity_err_cnt))
4336                 goto reset;
4337
4338         /* Check for ring full counter */
4339         if (sp->device_type == XFRAME_II_DEVICE) {
4340                 val64 = readq(&bar0->ring_bump_counter1);
4341                 for (i=0; i<4; i++) {
4342                         temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4343                         temp64 >>= 64 - ((i+1)*16);
4344                         sw_stat->ring_full_cnt[i] += temp64;
4345                 }
4346
4347                 val64 = readq(&bar0->ring_bump_counter2);
4348                 for (i=0; i<4; i++) {
4349                         temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4350                         temp64 >>= 64 - ((i+1)*16);
4351                          sw_stat->ring_full_cnt[i+4] += temp64;
4352                 }
4353         }
4354
4355         val64 = readq(&bar0->txdma_int_status);
4356         /*check for pfc_err*/
4357         if (val64 & TXDMA_PFC_INT) {
4358                 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4359                                 PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4360                                 PFC_PCIX_ERR, &bar0->pfc_err_reg,
4361                                 &sw_stat->pfc_err_cnt))
4362                         goto reset;
4363                 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4364                                 &sw_stat->pfc_err_cnt);
4365         }
4366
4367         /*check for tda_err*/
4368         if (val64 & TXDMA_TDA_INT) {
4369                 if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4370                                 TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4371                                 &sw_stat->tda_err_cnt))
4372                         goto reset;
4373                 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4374                                 &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4375         }
4376         /*check for pcc_err*/
4377         if (val64 & TXDMA_PCC_INT) {
4378                 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4379                                 | PCC_N_SERR | PCC_6_COF_OV_ERR
4380                                 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4381                                 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4382                                 | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4383                                 &sw_stat->pcc_err_cnt))
4384                         goto reset;
4385                 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4386                                 &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4387         }
4388
4389         /*check for tti_err*/
4390         if (val64 & TXDMA_TTI_INT) {
4391                 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4392                                 &sw_stat->tti_err_cnt))
4393                         goto reset;
4394                 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4395                                 &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4396         }
4397
4398         /*check for lso_err*/
4399         if (val64 & TXDMA_LSO_INT) {
4400                 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4401                                 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4402                                 &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4403                         goto reset;
4404                 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4405                                 &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4406         }
4407
4408         /*check for tpa_err*/
4409         if (val64 & TXDMA_TPA_INT) {
4410                 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4411                         &sw_stat->tpa_err_cnt))
4412                         goto reset;
4413                 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4414                         &sw_stat->tpa_err_cnt);
4415         }
4416
4417         /*check for sm_err*/
4418         if (val64 & TXDMA_SM_INT) {
4419                 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4420                         &sw_stat->sm_err_cnt))
4421                         goto reset;
4422         }
4423
4424         val64 = readq(&bar0->mac_int_status);
4425         if (val64 & MAC_INT_STATUS_TMAC_INT) {
4426                 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4427                                 &bar0->mac_tmac_err_reg,
4428                                 &sw_stat->mac_tmac_err_cnt))
4429                         goto reset;
4430                 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4431                                 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4432                                 &bar0->mac_tmac_err_reg,
4433                                 &sw_stat->mac_tmac_err_cnt);
4434         }
4435
4436         val64 = readq(&bar0->xgxs_int_status);
4437         if (val64 & XGXS_INT_STATUS_TXGXS) {
4438                 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4439                                 &bar0->xgxs_txgxs_err_reg,
4440                                 &sw_stat->xgxs_txgxs_err_cnt))
4441                         goto reset;
4442                 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4443                                 &bar0->xgxs_txgxs_err_reg,
4444                                 &sw_stat->xgxs_txgxs_err_cnt);
4445         }
4446
4447         val64 = readq(&bar0->rxdma_int_status);
4448         if (val64 & RXDMA_INT_RC_INT_M) {
4449                 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4450                                 | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4451                                 &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4452                         goto reset;
4453                 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4454                                 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4455                                 &sw_stat->rc_err_cnt);
4456                 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4457                                 | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4458                                 &sw_stat->prc_pcix_err_cnt))
4459                         goto reset;
4460                 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4461                                 | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4462                                 &sw_stat->prc_pcix_err_cnt);
4463         }
4464
4465         if (val64 & RXDMA_INT_RPA_INT_M) {
4466                 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4467                                 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4468                         goto reset;
4469                 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4470                                 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4471         }
4472
4473         if (val64 & RXDMA_INT_RDA_INT_M) {
4474                 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4475                                 | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4476                                 | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4477                                 &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4478                         goto reset;
4479                 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4480                                 | RDA_MISC_ERR | RDA_PCIX_ERR,
4481                                 &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4482         }
4483
4484         if (val64 & RXDMA_INT_RTI_INT_M) {
4485                 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4486                                 &sw_stat->rti_err_cnt))
4487                         goto reset;
4488                 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4489                                 &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
4490         }
4491
4492         val64 = readq(&bar0->mac_int_status);
4493         if (val64 & MAC_INT_STATUS_RMAC_INT) {
4494                 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4495                                 &bar0->mac_rmac_err_reg,
4496                                 &sw_stat->mac_rmac_err_cnt))
4497                         goto reset;
4498                 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4499                                 RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4500                                 &sw_stat->mac_rmac_err_cnt);
4501         }
4502
4503         val64 = readq(&bar0->xgxs_int_status);
4504         if (val64 & XGXS_INT_STATUS_RXGXS) {
4505                 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4506                                 &bar0->xgxs_rxgxs_err_reg,
4507                                 &sw_stat->xgxs_rxgxs_err_cnt))
4508                         goto reset;
4509         }
4510
4511         val64 = readq(&bar0->mc_int_status);
4512         if(val64 & MC_INT_STATUS_MC_INT) {
4513                 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4514                                 &sw_stat->mc_err_cnt))
4515                         goto reset;
4516
4517                 /* Handling Ecc errors */
4518                 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4519                         writeq(val64, &bar0->mc_err_reg);
4520                         if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4521                                 sw_stat->double_ecc_errs++;
4522                                 if (sp->device_type != XFRAME_II_DEVICE) {
4523                                         /*
4524                                          * Reset XframeI only if critical error
4525                                          */
4526                                         if (val64 &
4527                                                 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4528                                                 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4529                                                                 goto reset;
4530                                         }
4531                         } else
4532                                 sw_stat->single_ecc_errs++;
4533                 }
4534         }
4535         return;
4536
4537 reset:
4538         netif_stop_queue(dev);
4539         schedule_work(&sp->rst_timer_task);
4540         sw_stat->soft_reset_cnt++;
4541         return;
4542 }
4543
4544 /**
4545  *  s2io_isr - ISR handler of the device .
4546  *  @irq: the irq of the device.
4547  *  @dev_id: a void pointer to the dev structure of the NIC.
4548  *  Description:  This function is the ISR handler of the device. It
4549  *  identifies the reason for the interrupt and calls the relevant
4550  *  service routines. As a contongency measure, this ISR allocates the
4551  *  recv buffers, if their numbers are below the panic value which is
4552  *  presently set to 25% of the original number of rcv buffers allocated.
4553  *  Return value:
4554  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4555  *   IRQ_NONE: will be returned if interrupt is not from our device
4556  */
4557 static irqreturn_t s2io_isr(int irq, void *dev_id)
4558 {
4559         struct net_device *dev = (struct net_device *) dev_id;
4560         struct s2io_nic *sp = dev->priv;
4561         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4562         int i;
4563         u64 reason = 0;
4564         struct mac_info *mac_control;
4565         struct config_param *config;
4566
4567         /* Pretend we handled any irq's from a disconnected card */
4568         if (pci_channel_offline(sp->pdev))
4569                 return IRQ_NONE;
4570
4571         if (!is_s2io_card_up(sp))
4572                 return IRQ_NONE;
4573
4574         mac_control = &sp->mac_control;
4575         config = &sp->config;
4576
4577         /*
4578          * Identify the cause for interrupt and call the appropriate
4579          * interrupt handler. Causes for the interrupt could be;
4580          * 1. Rx of packet.
4581          * 2. Tx complete.
4582          * 3. Link down.
4583          */
4584         reason = readq(&bar0->general_int_status);
4585
4586         if (unlikely(reason == S2IO_MINUS_ONE) ) {
4587                 /* Nothing much can be done. Get out */
4588                 return IRQ_HANDLED;
4589         }
4590
4591         if (reason & (GEN_INTR_RXTRAFFIC |
4592                 GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
4593         {
4594                 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4595
4596                 if (config->napi) {
4597                         if (reason & GEN_INTR_RXTRAFFIC) {
4598                                 if (likely(netif_rx_schedule_prep(dev,
4599                                                         &sp->napi))) {
4600                                         __netif_rx_schedule(dev, &sp->napi);
4601                                         writeq(S2IO_MINUS_ONE,
4602                                                &bar0->rx_traffic_mask);
4603                                 } else
4604                                         writeq(S2IO_MINUS_ONE,
4605                                                &bar0->rx_traffic_int);
4606                         }
4607                 } else {
4608                         /*
4609                          * rx_traffic_int reg is an R1 register, writing all 1's
4610                          * will ensure that the actual interrupt causing bit
4611                          * get's cleared and hence a read can be avoided.
4612                          */
4613                         if (reason & GEN_INTR_RXTRAFFIC)
4614                                 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4615
4616                         for (i = 0; i < config->rx_ring_num; i++)
4617                                 rx_intr_handler(&mac_control->rings[i]);
4618                 }
4619
4620                 /*
4621                  * tx_traffic_int reg is an R1 register, writing all 1's
4622                  * will ensure that the actual interrupt causing bit get's
4623                  * cleared and hence a read can be avoided.
4624                  */
4625                 if (reason & GEN_INTR_TXTRAFFIC)
4626                         writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4627
4628                 for (i = 0; i < config->tx_fifo_num; i++)
4629                         tx_intr_handler(&mac_control->fifos[i]);
4630
4631                 if (reason & GEN_INTR_TXPIC)
4632                         s2io_txpic_intr_handle(sp);
4633
4634                 /*
4635                  * Reallocate the buffers from the interrupt handler itself.
4636                  */
4637                 if (!config->napi) {
4638                         for (i = 0; i < config->rx_ring_num; i++)
4639                                 s2io_chk_rx_buffers(sp, i);
4640                 }
4641                 writeq(sp->general_int_mask, &bar0->general_int_mask);
4642                 readl(&bar0->general_int_status);
4643
4644                 return IRQ_HANDLED;
4645
4646         }
4647         else if (!reason) {
4648                 /* The interrupt was not raised by us */
4649                 return IRQ_NONE;
4650         }
4651
4652         return IRQ_HANDLED;
4653 }
4654
4655 /**
4656  * s2io_updt_stats -
4657  */
4658 static void s2io_updt_stats(struct s2io_nic *sp)
4659 {
4660         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4661         u64 val64;
4662         int cnt = 0;
4663
4664         if (is_s2io_card_up(sp)) {
4665                 /* Apprx 30us on a 133 MHz bus */
4666                 val64 = SET_UPDT_CLICKS(10) |
4667                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4668                 writeq(val64, &bar0->stat_cfg);
4669                 do {
4670                         udelay(100);
4671                         val64 = readq(&bar0->stat_cfg);
4672                         if (!(val64 & s2BIT(0)))
4673                                 break;
4674                         cnt++;
4675                         if (cnt == 5)
4676                                 break; /* Updt failed */
4677                 } while(1);
4678         }
4679 }
4680
4681 /**
4682  *  s2io_get_stats - Updates the device statistics structure.
4683  *  @dev : pointer to the device structure.
4684  *  Description:
4685  *  This function updates the device statistics structure in the s2io_nic
4686  *  structure and returns a pointer to the same.
4687  *  Return value:
4688  *  pointer to the updated net_device_stats structure.
4689  */
4690
4691 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4692 {
4693         struct s2io_nic *sp = dev->priv;
4694         struct mac_info *mac_control;
4695         struct config_param *config;
4696
4697
4698         mac_control = &sp->mac_control;
4699         config = &sp->config;
4700
4701         /* Configure Stats for immediate updt */
4702         s2io_updt_stats(sp);
4703
4704         sp->stats.tx_packets =
4705                 le32_to_cpu(mac_control->stats_info->tmac_frms);
4706         sp->stats.tx_errors =
4707                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4708         sp->stats.rx_errors =
4709                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4710         sp->stats.multicast =
4711                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4712         sp->stats.rx_length_errors =
4713                 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4714
4715         return (&sp->stats);
4716 }
4717
4718 /**
4719  *  s2io_set_multicast - entry point for multicast address enable/disable.
4720  *  @dev : pointer to the device structure
4721  *  Description:
4722  *  This function is a driver entry point which gets called by the kernel
4723  *  whenever multicast addresses must be enabled/disabled. This also gets
4724  *  called to set/reset promiscuous mode. Depending on the deivce flag, we
4725  *  determine, if multicast address must be enabled or if promiscuous mode
4726  *  is to be disabled etc.
4727  *  Return value:
4728  *  void.
4729  */
4730
4731 static void s2io_set_multicast(struct net_device *dev)
4732 {
4733         int i, j, prev_cnt;
4734         struct dev_mc_list *mclist;
4735         struct s2io_nic *sp = dev->priv;
4736         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4737         u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4738             0xfeffffffffffULL;
4739         u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
4740         void __iomem *add;
4741
4742         if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4743                 /*  Enable all Multicast addresses */
4744                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4745                        &bar0->rmac_addr_data0_mem);
4746                 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4747                        &bar0->rmac_addr_data1_mem);
4748                 val64 = RMAC_ADDR_CMD_MEM_WE |
4749                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4750                     RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4751                 writeq(val64, &bar0->rmac_addr_cmd_mem);
4752                 /* Wait till command completes */
4753                 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4754                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4755                                         S2IO_BIT_RESET);
4756
4757                 sp->m_cast_flg = 1;
4758                 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4759         } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4760                 /*  Disable all Multicast addresses */
4761                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4762                        &bar0->rmac_addr_data0_mem);
4763                 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4764                        &bar0->rmac_addr_data1_mem);
4765                 val64 = RMAC_ADDR_CMD_MEM_WE |
4766                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4767                     RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4768                 writeq(val64, &bar0->rmac_addr_cmd_mem);
4769                 /* Wait till command completes */
4770                 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4771                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4772                                         S2IO_BIT_RESET);
4773
4774                 sp->m_cast_flg = 0;
4775                 sp->all_multi_pos = 0;
4776         }
4777
4778         if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4779                 /*  Put the NIC into promiscuous mode */
4780                 add = &bar0->mac_cfg;
4781                 val64 = readq(&bar0->mac_cfg);
4782                 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4783
4784                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4785                 writel((u32) val64, add);
4786                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4787                 writel((u32) (val64 >> 32), (add + 4));
4788
4789                 if (vlan_tag_strip != 1) {
4790                         val64 = readq(&bar0->rx_pa_cfg);
4791                         val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4792                         writeq(val64, &bar0->rx_pa_cfg);
4793                         vlan_strip_flag = 0;
4794                 }
4795
4796                 val64 = readq(&bar0->mac_cfg);
4797                 sp->promisc_flg = 1;
4798                 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4799                           dev->name);
4800         } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4801                 /*  Remove the NIC from promiscuous mode */
4802                 add = &bar0->mac_cfg;
4803                 val64 = readq(&bar0->mac_cfg);
4804                 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4805
4806                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4807                 writel((u32) val64, add);
4808                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4809                 writel((u32) (val64 >> 32), (add + 4));
4810
4811                 if (vlan_tag_strip != 0) {
4812                         val64 = readq(&bar0->rx_pa_cfg);
4813                         val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4814                         writeq(val64, &bar0->rx_pa_cfg);
4815                         vlan_strip_flag = 1;
4816                 }
4817
4818                 val64 = readq(&bar0->mac_cfg);
4819                 sp->promisc_flg = 0;
4820                 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4821                           dev->name);
4822         }
4823
4824         /*  Update individual M_CAST address list */
4825         if ((!sp->m_cast_flg) && dev->mc_count) {
4826                 if (dev->mc_count >
4827                     (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4828                         DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4829                                   dev->name);
4830                         DBG_PRINT(ERR_DBG, "can be added, please enable ");
4831                         DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4832                         return;
4833                 }
4834
4835                 prev_cnt = sp->mc_addr_count;
4836                 sp->mc_addr_count = dev->mc_count;
4837
4838                 /* Clear out the previous list of Mc in the H/W. */
4839                 for (i = 0; i < prev_cnt; i++) {
4840                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4841                                &bar0->rmac_addr_data0_mem);
4842                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4843                                 &bar0->rmac_addr_data1_mem);
4844                         val64 = RMAC_ADDR_CMD_MEM_WE |
4845                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4846                             RMAC_ADDR_CMD_MEM_OFFSET
4847                             (MAC_MC_ADDR_START_OFFSET + i);
4848                         writeq(val64, &bar0->rmac_addr_cmd_mem);
4849
4850                         /* Wait for command completes */
4851                         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4852                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4853                                         S2IO_BIT_RESET)) {
4854                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
4855                                           dev->name);
4856                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4857                                 return;
4858                         }
4859                 }
4860
4861                 /* Create the new Rx filter list and update the same in H/W. */
4862                 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4863                      i++, mclist = mclist->next) {
4864                         memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4865                                ETH_ALEN);
4866                         mac_addr = 0;
4867                         for (j = 0; j < ETH_ALEN; j++) {
4868                                 mac_addr |= mclist->dmi_addr[j];
4869                                 mac_addr <<= 8;
4870                         }
4871                         mac_addr >>= 8;
4872                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4873                                &bar0->rmac_addr_data0_mem);
4874                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4875                                 &bar0->rmac_addr_data1_mem);
4876                         val64 = RMAC_ADDR_CMD_MEM_WE |
4877                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4878                             RMAC_ADDR_CMD_MEM_OFFSET
4879                             (i + MAC_MC_ADDR_START_OFFSET);
4880                         writeq(val64, &bar0->rmac_addr_cmd_mem);
4881
4882                         /* Wait for command completes */
4883                         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4884                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4885                                         S2IO_BIT_RESET)) {
4886                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
4887                                           dev->name);
4888                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4889                                 return;
4890                         }
4891                 }
4892         }
4893 }
4894
4895 /* add unicast MAC address to CAM */
4896 static int do_s2io_add_unicast(struct s2io_nic *sp, u64 addr, int off)
4897 {
4898         u64 val64;
4899         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4900
4901         writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
4902                 &bar0->rmac_addr_data0_mem);
4903
4904         val64 =
4905                 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4906                 RMAC_ADDR_CMD_MEM_OFFSET(off);
4907         writeq(val64, &bar0->rmac_addr_cmd_mem);
4908
4909         /* Wait till command completes */
4910         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4911                 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4912                 S2IO_BIT_RESET)) {
4913                 DBG_PRINT(INFO_DBG, "add_mac_addr failed\n");
4914                 return FAILURE;
4915         }
4916         return SUCCESS;
4917 }
4918
4919 /**
4920  * s2io_set_mac_addr driver entry point
4921  */
4922 static int s2io_set_mac_addr(struct net_device *dev, void *p)
4923 {
4924         struct sockaddr *addr = p;
4925
4926         if (!is_valid_ether_addr(addr->sa_data))
4927                 return -EINVAL;
4928
4929         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4930
4931         /* store the MAC address in CAM */
4932         return (do_s2io_prog_unicast(dev, dev->dev_addr));
4933 }
4934
4935 /**
4936  *  do_s2io_prog_unicast - Programs the Xframe mac address
4937  *  @dev : pointer to the device structure.
4938  *  @addr: a uchar pointer to the new mac address which is to be set.
4939  *  Description : This procedure will program the Xframe to receive
4940  *  frames with new Mac Address
4941  *  Return value: SUCCESS on success and an appropriate (-)ve integer
4942  *  as defined in errno.h file on failure.
4943  */
4944 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
4945 {
4946         struct s2io_nic *sp = dev->priv;
4947         register u64 mac_addr = 0, perm_addr = 0;
4948         int i;
4949
4950         /*
4951         * Set the new MAC address as the new unicast filter and reflect this
4952         * change on the device address registered with the OS. It will be
4953         * at offset 0.
4954         */
4955         for (i = 0; i < ETH_ALEN; i++) {
4956                 mac_addr <<= 8;
4957                 mac_addr |= addr[i];
4958                 perm_addr <<= 8;
4959                 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
4960         }
4961
4962         /* check if the dev_addr is different than perm_addr */
4963         if (mac_addr == perm_addr)
4964                 return SUCCESS;
4965
4966         /* Update the internal structure with this new mac address */
4967         do_s2io_copy_mac_addr(sp, 0, mac_addr);
4968         return (do_s2io_add_unicast(sp, mac_addr, 0));
4969 }
4970
4971 /**
4972  * s2io_ethtool_sset - Sets different link parameters.
4973  * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
4974  * @info: pointer to the structure with parameters given by ethtool to set
4975  * link information.
4976  * Description:
4977  * The function sets different link parameters provided by the user onto
4978  * the NIC.
4979  * Return value:
4980  * 0 on success.
4981 */
4982
4983 static int s2io_ethtool_sset(struct net_device *dev,
4984                              struct ethtool_cmd *info)
4985 {
4986         struct s2io_nic *sp = dev->priv;
4987         if ((info->autoneg == AUTONEG_ENABLE) ||
4988             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4989                 return -EINVAL;
4990         else {
4991                 s2io_close(sp->dev);
4992                 s2io_open(sp->dev);
4993         }
4994
4995         return 0;
4996 }
4997
4998 /**
4999  * s2io_ethtol_gset - Return link specific information.
5000  * @sp : private member of the device structure, pointer to the
5001  *      s2io_nic structure.
5002  * @info : pointer to the structure with parameters given by ethtool
5003  * to return link information.
5004  * Description:
5005  * Returns link specific information like speed, duplex etc.. to ethtool.
5006  * Return value :
5007  * return 0 on success.
5008  */
5009
5010 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5011 {
5012         struct s2io_nic *sp = dev->priv;
5013         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5014         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5015         info->port = PORT_FIBRE;
5016
5017         /* info->transceiver */
5018         info->transceiver = XCVR_EXTERNAL;
5019
5020         if (netif_carrier_ok(sp->dev)) {
5021                 info->speed = 10000;
5022                 info->duplex = DUPLEX_FULL;
5023         } else {
5024                 info->speed = -1;
5025                 info->duplex = -1;
5026         }
5027
5028         info->autoneg = AUTONEG_DISABLE;
5029         return 0;
5030 }
5031
5032 /**
5033  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5034  * @sp : private member of the device structure, which is a pointer to the
5035  * s2io_nic structure.
5036  * @info : pointer to the structure with parameters given by ethtool to
5037  * return driver information.
5038  * Description:
5039  * Returns driver specefic information like name, version etc.. to ethtool.
5040  * Return value:
5041  *  void
5042  */
5043
5044 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5045                                   struct ethtool_drvinfo *info)
5046 {
5047         struct s2io_nic *sp = dev->priv;
5048
5049         strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5050         strncpy(info->version, s2io_driver_version, sizeof(info->version));
5051         strncpy(info->fw_version, "", sizeof(info->fw_version));
5052         strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5053         info->regdump_len = XENA_REG_SPACE;
5054         info->eedump_len = XENA_EEPROM_SPACE;
5055 }
5056
5057 /**
5058  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5059  *  @sp: private member of the device structure, which is a pointer to the
5060  *  s2io_nic structure.
5061  *  @regs : pointer to the structure with parameters given by ethtool for
5062  *  dumping the registers.
5063  *  @reg_space: The input argumnet into which all the registers are dumped.
5064  *  Description:
5065  *  Dumps the entire register space of xFrame NIC into the user given
5066  *  buffer area.
5067  * Return value :
5068  * void .
5069 */
5070
5071 static void s2io_ethtool_gregs(struct net_device *dev,
5072                                struct ethtool_regs *regs, void *space)
5073 {
5074         int i;
5075         u64 reg;
5076         u8 *reg_space = (u8 *) space;
5077         struct s2io_nic *sp = dev->priv;
5078
5079         regs->len = XENA_REG_SPACE;
5080         regs->version = sp->pdev->subsystem_device;
5081
5082         for (i = 0; i < regs->len; i += 8) {
5083                 reg = readq(sp->bar0 + i);
5084                 memcpy((reg_space + i), &reg, 8);
5085         }
5086 }
5087
5088 /**
5089  *  s2io_phy_id  - timer function that alternates adapter LED.
5090  *  @data : address of the private member of the device structure, which
5091  *  is a pointer to the s2io_nic structure, provided as an u32.
5092  * Description: This is actually the timer function that alternates the
5093  * adapter LED bit of the adapter control bit to set/reset every time on
5094  * invocation. The timer is set for 1/2 a second, hence the NIC blinks
5095  *  once every second.
5096 */
5097 static void s2io_phy_id(unsigned long data)
5098 {
5099         struct s2io_nic *sp = (struct s2io_nic *) data;
5100         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5101         u64 val64 = 0;
5102         u16 subid;
5103
5104         subid = sp->pdev->subsystem_device;
5105         if ((sp->device_type == XFRAME_II_DEVICE) ||
5106                    ((subid & 0xFF) >= 0x07)) {
5107                 val64 = readq(&bar0->gpio_control);
5108                 val64 ^= GPIO_CTRL_GPIO_0;
5109                 writeq(val64, &bar0->gpio_control);
5110         } else {
5111                 val64 = readq(&bar0->adapter_control);
5112                 val64 ^= ADAPTER_LED_ON;
5113                 writeq(val64, &bar0->adapter_control);
5114         }
5115
5116         mod_timer(&sp->id_timer, jiffies + HZ / 2);
5117 }
5118
5119 /**
5120  * s2io_ethtool_idnic - To physically identify the nic on the system.
5121  * @sp : private member of the device structure, which is a pointer to the
5122  * s2io_nic structure.
5123  * @id : pointer to the structure with identification parameters given by
5124  * ethtool.
5125  * Description: Used to physically identify the NIC on the system.
5126  * The Link LED will blink for a time specified by the user for
5127  * identification.
5128  * NOTE: The Link has to be Up to be able to blink the LED. Hence
5129  * identification is possible only if it's link is up.
5130  * Return value:
5131  * int , returns 0 on success
5132  */
5133
5134 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5135 {
5136         u64 val64 = 0, last_gpio_ctrl_val;
5137         struct s2io_nic *sp = dev->priv;
5138         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5139         u16 subid;
5140
5141         subid = sp->pdev->subsystem_device;
5142         last_gpio_ctrl_val = readq(&bar0->gpio_control);
5143         if ((sp->device_type == XFRAME_I_DEVICE) &&
5144                 ((subid & 0xFF) < 0x07)) {
5145                 val64 = readq(&bar0->adapter_control);
5146                 if (!(val64 & ADAPTER_CNTL_EN)) {
5147                         printk(KERN_ERR
5148                                "Adapter Link down, cannot blink LED\n");
5149                         return -EFAULT;
5150                 }
5151         }
5152         if (sp->id_timer.function == NULL) {
5153                 init_timer(&sp->id_timer);
5154                 sp->id_timer.function = s2io_phy_id;
5155                 sp->id_timer.data = (unsigned long) sp;
5156         }
5157         mod_timer(&sp->id_timer, jiffies);
5158         if (data)
5159                 msleep_interruptible(data * HZ);
5160         else
5161                 msleep_interruptible(MAX_FLICKER_TIME);
5162         del_timer_sync(&sp->id_timer);
5163
5164         if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
5165                 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5166                 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5167         }
5168
5169         return 0;
5170 }
5171
5172 static void s2io_ethtool_gringparam(struct net_device *dev,
5173                                     struct ethtool_ringparam *ering)
5174 {
5175         struct s2io_nic *sp = dev->priv;
5176         int i,tx_desc_count=0,rx_desc_count=0;
5177
5178         if (sp->rxd_mode == RXD_MODE_1)
5179                 ering->rx_max_pending = MAX_RX_DESC_1;
5180         else if (sp->rxd_mode == RXD_MODE_3B)
5181                 ering->rx_max_pending = MAX_RX_DESC_2;
5182
5183         ering->tx_max_pending = MAX_TX_DESC;
5184         for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5185                 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5186
5187         DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5188         ering->tx_pending = tx_desc_count;
5189         rx_desc_count = 0;
5190         for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5191                 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5192
5193         ering->rx_pending = rx_desc_count;
5194
5195         ering->rx_mini_max_pending = 0;
5196         ering->rx_mini_pending = 0;
5197         if(sp->rxd_mode == RXD_MODE_1)
5198                 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5199         else if (sp->rxd_mode == RXD_MODE_3B)
5200                 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5201         ering->rx_jumbo_pending = rx_desc_count;
5202 }
5203
5204 /**
5205  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5206  * @sp : private member of the device structure, which is a pointer to the
5207  *      s2io_nic structure.
5208  * @ep : pointer to the structure with pause parameters given by ethtool.
5209  * Description:
5210  * Returns the Pause frame generation and reception capability of the NIC.
5211  * Return value:
5212  *  void
5213  */
5214 static void s2io_ethtool_getpause_data(struct net_device *dev,
5215                                        struct ethtool_pauseparam *ep)
5216 {
5217         u64 val64;
5218         struct s2io_nic *sp = dev->priv;
5219         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5220
5221         val64 = readq(&bar0->rmac_pause_cfg);
5222         if (val64 & RMAC_PAUSE_GEN_ENABLE)
5223                 ep->tx_pause = TRUE;
5224         if (val64 & RMAC_PAUSE_RX_ENABLE)
5225                 ep->rx_pause = TRUE;
5226         ep->autoneg = FALSE;
5227 }
5228
5229 /**
5230  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5231  * @sp : private member of the device structure, which is a pointer to the
5232  *      s2io_nic structure.
5233  * @ep : pointer to the structure with pause parameters given by ethtool.
5234  * Description:
5235  * It can be used to set or reset Pause frame generation or reception
5236  * support of the NIC.
5237  * Return value:
5238  * int, returns 0 on Success
5239  */
5240
5241 static int s2io_ethtool_setpause_data(struct net_device *dev,
5242                                struct ethtool_pauseparam *ep)
5243 {
5244         u64 val64;
5245         struct s2io_nic *sp = dev->priv;
5246         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5247
5248         val64 = readq(&bar0->rmac_pause_cfg);
5249         if (ep->tx_pause)
5250                 val64 |= RMAC_PAUSE_GEN_ENABLE;
5251         else
5252                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5253         if (ep->rx_pause)
5254                 val64 |= RMAC_PAUSE_RX_ENABLE;
5255         else
5256                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5257         writeq(val64, &bar0->rmac_pause_cfg);
5258         return 0;
5259 }
5260
5261 /**
5262  * read_eeprom - reads 4 bytes of data from user given offset.
5263  * @sp : private member of the device structure, which is a pointer to the
5264  *      s2io_nic structure.
5265  * @off : offset at which the data must be written
5266  * @data : Its an output parameter where the data read at the given
5267  *      offset is stored.
5268  * Description:
5269  * Will read 4 bytes of data from the user given offset and return the
5270  * read data.
5271  * NOTE: Will allow to read only part of the EEPROM visible through the
5272  *   I2C bus.
5273  * Return value:
5274  *  -1 on failure and 0 on success.
5275  */
5276
5277 #define S2IO_DEV_ID             5
5278 static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
5279 {
5280         int ret = -1;
5281         u32 exit_cnt = 0;
5282         u64 val64;
5283         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5284
5285         if (sp->device_type == XFRAME_I_DEVICE) {
5286                 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5287                     I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
5288                     I2C_CONTROL_CNTL_START;
5289                 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5290
5291                 while (exit_cnt < 5) {
5292                         val64 = readq(&bar0->i2c_control);
5293                         if (I2C_CONTROL_CNTL_END(val64)) {
5294                                 *data = I2C_CONTROL_GET_DATA(val64);
5295                                 ret = 0;
5296                                 break;
5297                         }
5298                         msleep(50);
5299                         exit_cnt++;
5300                 }
5301         }
5302
5303         if (sp->device_type == XFRAME_II_DEVICE) {
5304                 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5305                         SPI_CONTROL_BYTECNT(0x3) |
5306                         SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5307                 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5308                 val64 |= SPI_CONTROL_REQ;
5309                 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5310                 while (exit_cnt < 5) {
5311                         val64 = readq(&bar0->spi_control);
5312                         if (val64 & SPI_CONTROL_NACK) {
5313                                 ret = 1;
5314                                 break;
5315                         } else if (val64 & SPI_CONTROL_DONE) {
5316                                 *data = readq(&bar0->spi_data);
5317                                 *data &= 0xffffff;
5318                                 ret = 0;
5319                                 break;
5320                         }
5321                         msleep(50);
5322                         exit_cnt++;
5323                 }
5324         }
5325         return ret;
5326 }
5327
5328 /**
5329  *  write_eeprom - actually writes the relevant part of the data value.
5330  *  @sp : private member of the device structure, which is a pointer to the
5331  *       s2io_nic structure.
5332  *  @off : offset at which the data must be written
5333  *  @data : The data that is to be written
5334  *  @cnt : Number of bytes of the data that are actually to be written into
5335  *  the Eeprom. (max of 3)
5336  * Description:
5337  *  Actually writes the relevant part of the data value into the Eeprom
5338  *  through the I2C bus.
5339  * Return value:
5340  *  0 on success, -1 on failure.
5341  */
5342
5343 static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
5344 {
5345         int exit_cnt = 0, ret = -1;
5346         u64 val64;
5347         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5348
5349         if (sp->device_type == XFRAME_I_DEVICE) {
5350                 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5351                     I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
5352                     I2C_CONTROL_CNTL_START;
5353                 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5354
5355                 while (exit_cnt < 5) {
5356                         val64 = readq(&bar0->i2c_control);
5357                         if (I2C_CONTROL_CNTL_END(val64)) {
5358                                 if (!(val64 & I2C_CONTROL_NACK))
5359                                         ret = 0;
5360                                 break;
5361                         }
5362                         msleep(50);
5363                         exit_cnt++;
5364                 }
5365         }
5366
5367         if (sp->device_type == XFRAME_II_DEVICE) {
5368                 int write_cnt = (cnt == 8) ? 0 : cnt;
5369                 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
5370
5371                 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5372                         SPI_CONTROL_BYTECNT(write_cnt) |
5373                         SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5374                 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5375                 val64 |= SPI_CONTROL_REQ;
5376                 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5377                 while (exit_cnt < 5) {
5378                         val64 = readq(&bar0->spi_control);
5379                         if (val64 & SPI_CONTROL_NACK) {
5380                                 ret = 1;
5381                                 break;
5382                         } else if (val64 & SPI_CONTROL_DONE) {
5383                                 ret = 0;
5384                                 break;
5385                         }
5386                         msleep(50);
5387                         exit_cnt++;
5388                 }
5389         }
5390         return ret;
5391 }
5392 static void s2io_vpd_read(struct s2io_nic *nic)
5393 {
5394         u8 *vpd_data;
5395         u8 data;
5396         int i=0, cnt, fail = 0;
5397         int vpd_addr = 0x80;
5398
5399         if (nic->device_type == XFRAME_II_DEVICE) {
5400                 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5401                 vpd_addr = 0x80;
5402         }
5403         else {
5404                 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5405                 vpd_addr = 0x50;
5406         }
5407         strcpy(nic->serial_num, "NOT AVAILABLE");
5408
5409         vpd_data = kmalloc(256, GFP_KERNEL);
5410         if (!vpd_data) {
5411                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5412                 return;
5413         }
5414         nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5415
5416         for (i = 0; i < 256; i +=4 ) {
5417                 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5418                 pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5419                 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5420                 for (cnt = 0; cnt <5; cnt++) {
5421                         msleep(2);
5422                         pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5423                         if (data == 0x80)
5424                                 break;
5425                 }
5426                 if (cnt >= 5) {
5427                         DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5428                         fail = 1;
5429                         break;
5430                 }
5431                 pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5432                                       (u32 *)&vpd_data[i]);
5433         }
5434
5435         if(!fail) {
5436                 /* read serial number of adapter */
5437                 for (cnt = 0; cnt < 256; cnt++) {
5438                 if ((vpd_data[cnt] == 'S') &&
5439                         (vpd_data[cnt+1] == 'N') &&
5440                         (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5441                                 memset(nic->serial_num, 0, VPD_STRING_LEN);
5442                                 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5443                                         vpd_data[cnt+2]);
5444                                 break;
5445                         }
5446                 }
5447         }
5448
5449         if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5450                 memset(nic->product_name, 0, vpd_data[1]);
5451                 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5452         }
5453         kfree(vpd_data);
5454         nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5455 }
5456
5457 /**
5458  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5459  *  @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
5460  *  @eeprom : pointer to the user level structure provided by ethtool,
5461  *  containing all relevant information.
5462  *  @data_buf : user defined value to be written into Eeprom.
5463  *  Description: Reads the values stored in the Eeprom at given offset
5464  *  for a given length. Stores these values int the input argument data
5465  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5466  *  Return value:
5467  *  int  0 on success
5468  */
5469
5470 static int s2io_ethtool_geeprom(struct net_device *dev,
5471                          struct ethtool_eeprom *eeprom, u8 * data_buf)
5472 {
5473         u32 i, valid;
5474         u64 data;
5475         struct s2io_nic *sp = dev->priv;
5476
5477         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5478
5479         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5480                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5481
5482         for (i = 0; i < eeprom->len; i += 4) {
5483                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5484                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5485                         return -EFAULT;
5486                 }
5487                 valid = INV(data);
5488                 memcpy((data_buf + i), &valid, 4);
5489         }
5490         return 0;
5491 }
5492
5493 /**
5494  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5495  *  @sp : private member of the device structure, which is a pointer to the
5496  *  s2io_nic structure.
5497  *  @eeprom : pointer to the user level structure provided by ethtool,
5498  *  containing all relevant information.
5499  *  @data_buf ; user defined value to be written into Eeprom.
5500  *  Description:
5501  *  Tries to write the user provided value in the Eeprom, at the offset
5502  *  given by the user.
5503  *  Return value:
5504  *  0 on success, -EFAULT on failure.
5505  */
5506
5507 static int s2io_ethtool_seeprom(struct net_device *dev,
5508                                 struct ethtool_eeprom *eeprom,
5509                                 u8 * data_buf)
5510 {
5511         int len = eeprom->len, cnt = 0;
5512         u64 valid = 0, data;
5513         struct s2io_nic *sp = dev->priv;
5514
5515         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5516                 DBG_PRINT(ERR_DBG,
5517                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5518                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5519                           eeprom->magic);
5520                 return -EFAULT;
5521         }
5522
5523         while (len) {
5524                 data = (u32) data_buf[cnt] & 0x000000FF;
5525                 if (data) {
5526                         valid = (u32) (data << 24);
5527                 } else
5528                         valid = data;
5529
5530                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5531                         DBG_PRINT(ERR_DBG,
5532                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5533                         DBG_PRINT(ERR_DBG,
5534                                   "write into the specified offset\n");
5535                         return -EFAULT;
5536                 }
5537                 cnt++;
5538                 len--;
5539         }
5540
5541         return 0;
5542 }
5543
5544 /**
5545  * s2io_register_test - reads and writes into all clock domains.
5546  * @sp : private member of the device structure, which is a pointer to the
5547  * s2io_nic structure.
5548  * @data : variable that returns the result of each of the test conducted
5549  * by the driver.
5550  * Description:
5551  * Read and write into all clock domains. The NIC has 3 clock domains,
5552  * see that registers in all the three regions are accessible.
5553  * Return value:
5554  * 0 on success.
5555  */
5556
5557 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5558 {
5559         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5560         u64 val64 = 0, exp_val;
5561         int fail = 0;
5562
5563         val64 = readq(&bar0->pif_rd_swapper_fb);
5564         if (val64 != 0x123456789abcdefULL) {
5565                 fail = 1;
5566                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5567         }
5568
5569         val64 = readq(&bar0->rmac_pause_cfg);
5570         if (val64 != 0xc000ffff00000000ULL) {
5571                 fail = 1;
5572                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5573         }
5574
5575         val64 = readq(&bar0->rx_queue_cfg);
5576         if (sp->device_type == XFRAME_II_DEVICE)
5577                 exp_val = 0x0404040404040404ULL;
5578         else
5579                 exp_val = 0x0808080808080808ULL;
5580         if (val64 != exp_val) {
5581                 fail = 1;
5582                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5583         }
5584
5585         val64 = readq(&bar0->xgxs_efifo_cfg);
5586         if (val64 != 0x000000001923141EULL) {
5587                 fail = 1;
5588                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5589         }
5590
5591         val64 = 0x5A5A5A5A5A5A5A5AULL;
5592         writeq(val64, &bar0->xmsi_data);
5593         val64 = readq(&bar0->xmsi_data);
5594         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5595                 fail = 1;
5596                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5597         }
5598
5599         val64 = 0xA5A5A5A5A5A5A5A5ULL;
5600         writeq(val64, &bar0->xmsi_data);
5601         val64 = readq(&bar0->xmsi_data);
5602         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5603                 fail = 1;
5604                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5605         }
5606
5607         *data = fail;
5608         return fail;
5609 }
5610
5611 /**
5612  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5613  * @sp : private member of the device structure, which is a pointer to the
5614  * s2io_nic structure.
5615  * @data:variable that returns the result of each of the test conducted by
5616  * the driver.
5617  * Description:
5618  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5619  * register.
5620  * Return value:
5621  * 0 on success.
5622  */
5623
5624 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5625 {
5626         int fail = 0;
5627         u64 ret_data, org_4F0, org_7F0;
5628         u8 saved_4F0 = 0, saved_7F0 = 0;
5629         struct net_device *dev = sp->dev;
5630
5631         /* Test Write Error at offset 0 */
5632         /* Note that SPI interface allows write access to all areas
5633          * of EEPROM. Hence doing all negative testing only for Xframe I.
5634          */
5635         if (sp->device_type == XFRAME_I_DEVICE)
5636                 if (!write_eeprom(sp, 0, 0, 3))
5637                         fail = 1;
5638
5639         /* Save current values at offsets 0x4F0 and 0x7F0 */
5640         if (!read_eeprom(sp, 0x4F0, &org_4F0))
5641                 saved_4F0 = 1;
5642         if (!read_eeprom(sp, 0x7F0, &org_7F0))
5643                 saved_7F0 = 1;
5644
5645         /* Test Write at offset 4f0 */
5646         if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5647                 fail = 1;
5648         if (read_eeprom(sp, 0x4F0, &ret_data))
5649                 fail = 1;
5650
5651         if (ret_data != 0x012345) {
5652                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5653                         "Data written %llx Data read %llx\n",
5654                         dev->name, (unsigned long long)0x12345,
5655                         (unsigned long long)ret_data);
5656                 fail = 1;
5657         }
5658
5659         /* Reset the EEPROM data go FFFF */
5660         write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5661
5662         /* Test Write Request Error at offset 0x7c */
5663         if (sp->device_type == XFRAME_I_DEVICE)
5664                 if (!write_eeprom(sp, 0x07C, 0, 3))
5665                         fail = 1;
5666
5667         /* Test Write Request at offset 0x7f0 */
5668         if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5669                 fail = 1;
5670         if (read_eeprom(sp, 0x7F0, &ret_data))
5671                 fail = 1;
5672
5673         if (ret_data != 0x012345) {
5674                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5675                         "Data written %llx Data read %llx\n",
5676                         dev->name, (unsigned long long)0x12345,
5677                         (unsigned long long)ret_data);
5678                 fail = 1;
5679         }
5680
5681         /* Reset the EEPROM data go FFFF */
5682         write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5683
5684         if (sp->device_type == XFRAME_I_DEVICE) {
5685                 /* Test Write Error at offset 0x80 */
5686                 if (!write_eeprom(sp, 0x080, 0, 3))
5687                         fail = 1;
5688
5689                 /* Test Write Error at offset 0xfc */
5690                 if (!write_eeprom(sp, 0x0FC, 0, 3))
5691                         fail = 1;
5692
5693                 /* Test Write Error at offset 0x100 */
5694                 if (!write_eeprom(sp, 0x100, 0, 3))
5695                         fail = 1;
5696
5697                 /* Test Write Error at offset 4ec */
5698                 if (!write_eeprom(sp, 0x4EC, 0, 3))
5699                         fail = 1;
5700         }
5701
5702         /* Restore values at offsets 0x4F0 and 0x7F0 */
5703         if (saved_4F0)
5704                 write_eeprom(sp, 0x4F0, org_4F0, 3);
5705         if (saved_7F0)
5706                 write_eeprom(sp, 0x7F0, org_7F0, 3);
5707
5708         *data = fail;
5709         return fail;
5710 }
5711
5712 /**
5713  * s2io_bist_test - invokes the MemBist test of the card .
5714  * @sp : private member of the device structure, which is a pointer to the
5715  * s2io_nic structure.
5716  * @data:variable that returns the result of each of the test conducted by
5717  * the driver.
5718  * Description:
5719  * This invokes the MemBist test of the card. We give around
5720  * 2 secs time for the Test to complete. If it's still not complete
5721  * within this period, we consider that the test failed.
5722  * Return value:
5723  * 0 on success and -1 on failure.
5724  */
5725
5726 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5727 {
5728         u8 bist = 0;
5729         int cnt = 0, ret = -1;
5730
5731         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5732         bist |= PCI_BIST_START;
5733         pci_write_config_word(sp->pdev, PCI_BIST, bist);
5734
5735         while (cnt < 20) {
5736                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5737                 if (!(bist & PCI_BIST_START)) {
5738                         *data = (bist & PCI_BIST_CODE_MASK);
5739                         ret = 0;
5740                         break;
5741                 }
5742                 msleep(100);
5743                 cnt++;
5744         }
5745
5746         return ret;
5747 }
5748
5749 /**
5750  * s2io_link_test - verifies the link state of the nic
5751  * @sp : private member of the device structure, which is a pointer to the
5752  * s2io_nic structure.
5753  * @data: variable that returns the result of each of the test conducted by
5754  * the driver.
5755  * Description:
5756  * The function verifies the link state of the NIC and updates the input
5757  * argument 'data' appropriately.
5758  * Return value:
5759  * 0 on success.
5760  */
5761
5762 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5763 {
5764         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5765         u64 val64;
5766
5767         val64 = readq(&bar0->adapter_status);
5768         if(!(LINK_IS_UP(val64)))
5769                 *data = 1;
5770         else
5771                 *data = 0;
5772
5773         return *data;
5774 }
5775
5776 /**
5777  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5778  * @sp - private member of the device structure, which is a pointer to the
5779  * s2io_nic structure.
5780  * @data - variable that returns the result of each of the test
5781  * conducted by the driver.
5782  * Description:
5783  *  This is one of the offline test that tests the read and write
5784  *  access to the RldRam chip on the NIC.
5785  * Return value:
5786  *  0 on success.
5787  */
5788
5789 static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
5790 {
5791         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5792         u64 val64;
5793         int cnt, iteration = 0, test_fail = 0;
5794
5795         val64 = readq(&bar0->adapter_control);
5796         val64 &= ~ADAPTER_ECC_EN;
5797         writeq(val64, &bar0->adapter_control);
5798
5799         val64 = readq(&bar0->mc_rldram_test_ctrl);
5800         val64 |= MC_RLDRAM_TEST_MODE;
5801         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5802
5803         val64 = readq(&bar0->mc_rldram_mrs);
5804         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5805         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5806
5807         val64 |= MC_RLDRAM_MRS_ENABLE;
5808         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5809
5810         while (iteration < 2) {
5811                 val64 = 0x55555555aaaa0000ULL;
5812                 if (iteration == 1) {
5813                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
5814                 }
5815                 writeq(val64, &bar0->mc_rldram_test_d0);
5816
5817                 val64 = 0xaaaa5a5555550000ULL;
5818                 if (iteration == 1) {
5819                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
5820                 }
5821                 writeq(val64, &bar0->mc_rldram_test_d1);
5822
5823                 val64 = 0x55aaaaaaaa5a0000ULL;
5824                 if (iteration == 1) {
5825                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
5826                 }
5827                 writeq(val64, &bar0->mc_rldram_test_d2);
5828
5829                 val64 = (u64) (0x0000003ffffe0100ULL);
5830                 writeq(val64, &bar0->mc_rldram_test_add);
5831
5832                 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5833                         MC_RLDRAM_TEST_GO;
5834                 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5835
5836                 for (cnt = 0; cnt < 5; cnt++) {
5837                         val64 = readq(&bar0->mc_rldram_test_ctrl);
5838                         if (val64 & MC_RLDRAM_TEST_DONE)
5839                                 break;
5840                         msleep(200);
5841                 }
5842
5843                 if (cnt == 5)
5844                         break;
5845
5846                 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5847                 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5848
5849                 for (cnt = 0; cnt < 5; cnt++) {
5850                         val64 = readq(&bar0->mc_rldram_test_ctrl);
5851                         if (val64 & MC_RLDRAM_TEST_DONE)
5852                                 break;
5853                         msleep(500);
5854                 }
5855
5856                 if (cnt == 5)
5857                         break;
5858
5859                 val64 = readq(&bar0->mc_rldram_test_ctrl);
5860                 if (!(val64 & MC_RLDRAM_TEST_PASS))
5861                         test_fail = 1;
5862
5863                 iteration++;
5864         }
5865
5866         *data = test_fail;
5867
5868         /* Bring the adapter out of test mode */
5869         SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5870
5871         return test_fail;
5872 }
5873
5874 /**
5875  *  s2io_ethtool_test - conducts 6 tests to determine the health of card.
5876  *  @sp : private member of the device structure, which is a pointer to the
5877  *  s2io_nic structure.
5878  *  @ethtest : pointer to a ethtool command specific structure that will be
5879  *  returned to the user.
5880  *  @data : variable that returns the result of each of the test
5881  * conducted by the driver.
5882  * Description:
5883  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
5884  *  the health of the card.
5885  * Return value:
5886  *  void
5887  */
5888
5889 static void s2io_ethtool_test(struct net_device *dev,
5890                               struct ethtool_test *ethtest,
5891                               uint64_t * data)
5892 {
5893         struct s2io_nic *sp = dev->priv;
5894         int orig_state = netif_running(sp->dev);
5895
5896         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5897                 /* Offline Tests. */
5898                 if (orig_state)
5899                         s2io_close(sp->dev);
5900
5901                 if (s2io_register_test(sp, &data[0]))
5902                         ethtest->flags |= ETH_TEST_FL_FAILED;
5903
5904                 s2io_reset(sp);
5905
5906                 if (s2io_rldram_test(sp, &data[3]))
5907                         ethtest->flags |= ETH_TEST_FL_FAILED;
5908
5909                 s2io_reset(sp);
5910
5911                 if (s2io_eeprom_test(sp, &data[1]))
5912                         ethtest->flags |= ETH_TEST_FL_FAILED;
5913
5914                 if (s2io_bist_test(sp, &data[4]))
5915                         ethtest->flags |= ETH_TEST_FL_FAILED;
5916
5917                 if (orig_state)
5918                         s2io_open(sp->dev);
5919
5920                 data[2] = 0;
5921         } else {
5922                 /* Online Tests. */
5923                 if (!orig_state) {
5924                         DBG_PRINT(ERR_DBG,
5925                                   "%s: is not up, cannot run test\n",
5926                                   dev->name);
5927                         data[0] = -1;
5928                         data[1] = -1;
5929                         data[2] = -1;
5930                         data[3] = -1;
5931                         data[4] = -1;
5932                 }
5933
5934                 if (s2io_link_test(sp, &data[2]))
5935                         ethtest->flags |= ETH_TEST_FL_FAILED;
5936
5937                 data[0] = 0;
5938                 data[1] = 0;
5939                 data[3] = 0;
5940                 data[4] = 0;
5941         }
5942 }
5943
/**
 *  s2io_get_ethtool_stats - collects the statistics reported by ethtool -S.
 *  @dev : device pointer.
 *  @estats : ethtool stats request structure (sizing is done via
 *  s2io_get_sset_count; this argument is not used here).
 *  @tmp_stats : output array filled sequentially.
 *  Description:
 *  Entries are written in the exact order of the string tables
 *  (ethtool_xena_stats_keys, then the enhanced keys on Xframe II only,
 *  then ethtool_driver_stats_keys). Do NOT reorder or insert entries
 *  here without updating those tables in lockstep.
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
                                   struct ethtool_stats *estats,
                                   u64 * tmp_stats)
{
        int i = 0, k;
        struct s2io_nic *sp = dev->priv;
        struct stat_block *stat_info = sp->mac_control.stats_info;

        /* Refresh the DMA'ed hardware statistics block before reading it. */
        s2io_updt_stats(sp);
        /* Tx MAC counters. Many hardware counters are split into a 32-bit
         * value plus a 32-bit overflow word; the pairs below are merged
         * into a single 64-bit result. */
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
                le32_to_cpu(stat_info->tmac_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_data_octets);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_mcst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_bcst_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_ttl_octets);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_ucst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_nucst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_any_err_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_vld_ip);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_drop_ip);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_icmp);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_rst_tcp);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
        tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_udp);
        /* Rx MAC counters. */
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_vld_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_data_octets);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_vld_mcst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_vld_bcst_frms);
        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_ttl_octets);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
                << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
                 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_discarded_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
                 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_usized_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_osized_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_frag_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_jabber_frms);
        /* Rx frame-size histogram. */
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_ip);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_drop_ip);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_icmp);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_udp);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_err_drp_udp);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
        /* Per-queue Rx frame and queue-full counters. */
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_pause_cnt);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_accepted_ip);
        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
        /* PCI/PCI-X bus transaction counters. */
        tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);

        /* Enhanced statistics exist only for Hercules */
        if(sp->device_type == XFRAME_II_DEVICE) {
                tmp_stats[i++] =
                                le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
                tmp_stats[i++] =
                                le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
                tmp_stats[i++] =
                                le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
                tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
                tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
                tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
                tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
                tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
                tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
                tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
                tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
                tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
                tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
                tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
                tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
                tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
        }

        /* Software-maintained driver statistics (host byte order, no
         * le*_to_cpu conversion needed). The first slot is a placeholder
         * — presumably padding for a retired key; verify against
         * ethtool_driver_stats_keys before reusing it. */
        tmp_stats[i++] = 0;
        tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
        tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
        tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
        tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
        for (k = 0; k < MAX_RX_RINGS; k++)
                tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
        /* XPAK transceiver alarm/warning counters. */
        tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
        tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
        tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
        tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
        tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
        tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
        tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
        tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
        tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
        tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
        tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
        tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
        tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
        tmp_stats[i++] = stat_info->sw_stat.sending_both;
        tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
        tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
        if (stat_info->sw_stat.num_aggregations) {
                u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
                int count = 0;
                /*
                 * Since 64-bit divide does not work on all platforms,
                 * do repeated subtraction.
                 */
                while (tmp >= stat_info->sw_stat.num_aggregations) {
                        tmp -= stat_info->sw_stat.num_aggregations;
                        count++;
                }
                tmp_stats[i++] = count;
        }
        else
                tmp_stats[i++] = 0;
        tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
        tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
        tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
        tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
        tmp_stats[i++] = stat_info->sw_stat.mem_freed;
        tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
        tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
        tmp_stats[i++] = stat_info->sw_stat.link_up_time;
        tmp_stats[i++] = stat_info->sw_stat.link_down_time;

        /* Tx-side error counters. */
        tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
        tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
        tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
        tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;

        /* Rx-side and per-block error counters. */
        tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
}
6219
6220 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6221 {
6222         return (XENA_REG_SPACE);
6223 }
6224
6225
6226 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6227 {
6228         struct s2io_nic *sp = dev->priv;
6229
6230         return (sp->rx_csum);
6231 }
6232
6233 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6234 {
6235         struct s2io_nic *sp = dev->priv;
6236
6237         if (data)
6238                 sp->rx_csum = 1;
6239         else
6240                 sp->rx_csum = 0;
6241
6242         return 0;
6243 }
6244
6245 static int s2io_get_eeprom_len(struct net_device *dev)
6246 {
6247         return (XENA_EEPROM_SPACE);
6248 }
6249
6250 static int s2io_get_sset_count(struct net_device *dev, int sset)
6251 {
6252         struct s2io_nic *sp = dev->priv;
6253
6254         switch (sset) {
6255         case ETH_SS_TEST:
6256                 return S2IO_TEST_LEN;
6257         case ETH_SS_STATS:
6258                 switch(sp->device_type) {
6259                 case XFRAME_I_DEVICE:
6260                         return XFRAME_I_STAT_LEN;
6261                 case XFRAME_II_DEVICE:
6262                         return XFRAME_II_STAT_LEN;
6263                 default:
6264                         return 0;
6265                 }
6266         default:
6267                 return -EOPNOTSUPP;
6268         }
6269 }
6270
6271 static void s2io_ethtool_get_strings(struct net_device *dev,
6272                                      u32 stringset, u8 * data)
6273 {
6274         int stat_size = 0;
6275         struct s2io_nic *sp = dev->priv;
6276
6277         switch (stringset) {
6278         case ETH_SS_TEST:
6279                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6280                 break;
6281         case ETH_SS_STATS:
6282                 stat_size = sizeof(ethtool_xena_stats_keys);
6283                 memcpy(data, &ethtool_xena_stats_keys,stat_size);
6284                 if(sp->device_type == XFRAME_II_DEVICE) {
6285                         memcpy(data + stat_size,
6286                                 &ethtool_enhanced_stats_keys,
6287                                 sizeof(ethtool_enhanced_stats_keys));
6288                         stat_size += sizeof(ethtool_enhanced_stats_keys);
6289                 }
6290
6291                 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6292                         sizeof(ethtool_driver_stats_keys));
6293         }
6294 }
6295
6296 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6297 {
6298         if (data)
6299                 dev->features |= NETIF_F_IP_CSUM;
6300         else
6301                 dev->features &= ~NETIF_F_IP_CSUM;
6302
6303         return 0;
6304 }
6305
6306 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6307 {
6308         return (dev->features & NETIF_F_TSO) != 0;
6309 }
6310 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6311 {
6312         if (data)
6313                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6314         else
6315                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6316
6317         return 0;
6318 }
6319
/* ethtool callbacks supported by this driver. Note that .self_test,
 * .get_strings, .get_ethtool_stats and .get_sset_count must stay
 * mutually consistent (same test/statistic ordering and counts). */
static const struct ethtool_ops netdev_ethtool_ops = {
        .get_settings = s2io_ethtool_gset,
        .set_settings = s2io_ethtool_sset,
        .get_drvinfo = s2io_ethtool_gdrvinfo,
        .get_regs_len = s2io_ethtool_get_regs_len,
        .get_regs = s2io_ethtool_gregs,
        .get_link = ethtool_op_get_link,
        .get_eeprom_len = s2io_get_eeprom_len,
        .get_eeprom = s2io_ethtool_geeprom,
        .set_eeprom = s2io_ethtool_seeprom,
        .get_ringparam = s2io_ethtool_gringparam,
        .get_pauseparam = s2io_ethtool_getpause_data,
        .set_pauseparam = s2io_ethtool_setpause_data,
        .get_rx_csum = s2io_ethtool_get_rx_csum,
        .set_rx_csum = s2io_ethtool_set_rx_csum,
        .set_tx_csum = s2io_ethtool_op_set_tx_csum,
        .set_sg = ethtool_op_set_sg,
        .get_tso = s2io_ethtool_op_get_tso,
        .set_tso = s2io_ethtool_op_set_tso,
        .set_ufo = ethtool_op_set_ufo,
        .self_test = s2io_ethtool_test,
        .get_strings = s2io_ethtool_get_strings,
        .phys_id = s2io_ethtool_idnic,
        .get_ethtool_stats = s2io_get_ethtool_stats,
        .get_sset_count = s2io_get_sset_count,
};
6346
/**
 *  s2io_ioctl - Entry point for the Ioctl
 *  @dev :  Device pointer.
 *  @rq :  An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd :  This is used to distinguish between the different commands that
 *  can be passed to the IOCTL functions.
 *  Description:
 *  Currently no special functionality is supported in IOCTL, hence
 *  this function always returns -EOPNOTSUPP.
 */

static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        return -EOPNOTSUPP;
}
6363
/**
 *  s2io_change_mtu - entry point to change MTU size for the device.
 *   @dev : device pointer.
 *   @new_mtu : the new MTU size for the device.
 *   Description: A driver entry point to change MTU size for the device.
 *   Before changing the MTU the device must be stopped.
 *  Return value:
 *   0 on success and an appropriate (-)ve integer as defined in errno.h
 *   file on failure.
 */

static int s2io_change_mtu(struct net_device *dev, int new_mtu)
{
        struct s2io_nic *sp = dev->priv;
        int ret = 0;

        /* Reject anything outside the supported [MIN_MTU, jumbo] range.
         * NOTE(review): -EPERM is unusual here (-EINVAL would be more
         * conventional) but is userspace-visible, so it is left as-is. */
        if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
                DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
                          dev->name);
                return -EPERM;
        }

        dev->mtu = new_mtu;
        if (netif_running(dev)) {
                /* Bounce the adapter so it comes back up with the new
                 * MTU; restart the Tx queue only if the bring-up worked. */
                s2io_card_down(sp);
                netif_stop_queue(dev);
                ret = s2io_card_up(sp);
                if (ret) {
                        DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
                                  __FUNCTION__);
                        return ret;
                }
                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
        } else { /* Device is down */
                struct XENA_dev_config __iomem *bar0 = sp->bar0;
                u64 val64 = new_mtu;

                /* Program the max receive payload length directly; the
                 * full reinitialization happens on the next open. */
                writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
        }

        return ret;
}
6407
/**
 *  s2io_tasklet - Bottom half of the ISR.
 *  @dev_addr : address of the device structure in dma_addr_t format.
 *  Description:
 *  This is the tasklet or the bottom half of the ISR. This is
 *  an extension of the ISR which is scheduled by the scheduler to be run
 *  when the load on the CPU is low. All low priority tasks of the ISR can
 *  be pushed into the tasklet. For now the tasklet is used only to
 *  replenish the Rx buffers in the Rx buffer descriptors.
 *  Return value:
 *  void.
 */

static void s2io_tasklet(unsigned long dev_addr)
{
        struct net_device *dev = (struct net_device *) dev_addr;
        struct s2io_nic *sp = dev->priv;
        int i, ret;
        struct mac_info *mac_control;
        struct config_param *config;

        mac_control = &sp->mac_control;
        config = &sp->config;

        /* NOTE(review): TASKLET_IN_USE presumably does an atomic
         * test-and-set on bit 0 of sp->tasklet_status (it is cleared with
         * clear_bit(0, ...) below) — confirm against its definition. */
        if (!TASKLET_IN_USE) {
                /* Try to refill every Rx ring; stop early on allocation
                 * failure or when a ring is already full. */
                for (i = 0; i < config->rx_ring_num; i++) {
                        ret = fill_rx_buffers(sp, i);
                        if (ret == -ENOMEM) {
                                DBG_PRINT(INFO_DBG, "%s: Out of ",
                                          dev->name);
                                DBG_PRINT(INFO_DBG, "memory in tasklet\n");
                                break;
                        } else if (ret == -EFILL) {
                                DBG_PRINT(INFO_DBG,
                                          "%s: Rx Ring %d is full\n",
                                          dev->name, i);
                                break;
                        }
                }
                /* Release the in-use marker taken by TASKLET_IN_USE. */
                clear_bit(0, (&sp->tasklet_status));
        }
}
6450
/**
 * s2io_set_link - Set the Link status
 * @work: work struct embedded in the s2io_nic (set_link_task)
 * Description: Sets the link status for the adapter
 */

static void s2io_set_link(struct work_struct *work)
{
        struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
        struct net_device *dev = nic->dev;
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        register u64 val64;
        u16 subid;

        /* Serialize against interface up/down (this runs from a
         * workqueue, so sleeping under rtnl is fine). */
        rtnl_lock();

        if (!netif_running(dev))
                goto out_unlock;

        if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
                /* The card is being reset, no point doing anything */
                goto out_unlock;
        }

        subid = nic->pdev->subsystem_device;
        if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
                /*
                 * Allow a small delay for the NICs self initiated
                 * cleanup to complete.
                 */
                msleep(100);
        }

        val64 = readq(&bar0->adapter_status);
        if (LINK_IS_UP(val64)) {
                /* Link came up: enable the adapter if it is quiescent,
                 * drive the link LED/GPIO, then report LINK_UP. */
                if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
                        if (verify_xena_quiescence(nic)) {
                                val64 = readq(&bar0->adapter_control);
                                val64 |= ADAPTER_CNTL_EN;
                                writeq(val64, &bar0->adapter_control);
                                /* Some boards signal link via GPIO 0
                                 * instead of the adapter LED bit. */
                                if (CARDS_WITH_FAULTY_LINK_INDICATORS(
                                        nic->device_type, subid)) {
                                        val64 = readq(&bar0->gpio_control);
                                        val64 |= GPIO_CTRL_GPIO_0;
                                        writeq(val64, &bar0->gpio_control);
                                        val64 = readq(&bar0->gpio_control);
                                } else {
                                        val64 |= ADAPTER_LED_ON;
                                        writeq(val64, &bar0->adapter_control);
                                }
                                nic->device_enabled_once = TRUE;
                        } else {
                                DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
                                DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
                                netif_stop_queue(dev);
                        }
                }
                val64 = readq(&bar0->adapter_control);
                val64 |= ADAPTER_LED_ON;
                writeq(val64, &bar0->adapter_control);
                s2io_link(nic, LINK_UP);
        } else {
                /* Link is down: clear the GPIO indicator if used. */
                if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
                                                      subid)) {
                        val64 = readq(&bar0->gpio_control);
                        val64 &= ~GPIO_CTRL_GPIO_0;
                        writeq(val64, &bar0->gpio_control);
                        val64 = readq(&bar0->gpio_control);
                }
                /* turn off LED */
                val64 = readq(&bar0->adapter_control);
                val64 = val64 &(~ADAPTER_LED_ON);
                writeq(val64, &bar0->adapter_control);
                s2io_link(nic, LINK_DOWN);
        }
        /* Let a subsequent reset/link task proceed. */
        clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));

out_unlock:
        rtnl_unlock();
}
6531
/*
 * set_rxd_buffer_pointer - re-attach DMA buffers to a reset Rx descriptor.
 * @sp: device private structure.
 * @rxdp: descriptor to replenish (layout depends on sp->rxd_mode).
 * @ba: side-buffer addresses, used only in 2-buffer (RXD_MODE_3B) mode.
 * @skb: in/out skb slot; when non-NULL on entry the previously mapped
 *       addresses cached in @temp0/@temp1/@temp2 are reused instead of
 *       allocating and mapping a fresh skb.
 * @temp0, @temp1, @temp2: cached DMA addresses shared by consecutive
 *       descriptors whose Host_Control is NULL.
 * @size: skb allocation size for the current ring mode.
 *
 * Used while quiescing the card (see rxd_owner_bit_reset()): the frames
 * will never be processed, so several descriptors may deliberately point
 * at the same mapped buffer.
 *
 * Returns 0 on success, -ENOMEM on allocation or DMA-mapping failure.
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				struct buffAdd *ba,
				struct sk_buff **skb, u64 *temp0, u64 *temp1,
				u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frames are not going to be processed,
			 * use the same mapped address for the Rxd
			 * buffer pointer
			 */
			rxdp1->Buffer0_ptr = *temp0;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated
				+= (*skb)->truesize;
			/* store the mapped addr in a temp variable
			 * such that it will be used for the next rxd whose
			 * Host Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, (*skb)->data,
					size - NET_IP_ALIGN,
					PCI_DMA_FROMDEVICE);
			/* 0 / DMA_ERROR_CODE both indicate a failed mapping */
			if( (rxdp1->Buffer0_ptr == 0) ||
				(rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
		if (*skb) {
			/* reuse the previously mapped addresses */
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated
				+= (*skb)->truesize;
			/* Buffer-2 carries the payload (mtu + 4) */
			rxdp3->Buffer2_ptr = *temp2 =
				pci_map_single(sp->pdev, (*skb)->data,
					       dev->mtu + 4,
					       PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer2_ptr == 0) ||
				(rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
				goto memalloc_failed;
			}
			/* Buffer-0 carries the header (BUF0_LEN) */
			rxdp3->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
						PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer0_ptr == 0) ||
				(rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
				/* unwind the Buffer-2 mapping before bailing */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			rxdp3->Buffer1_ptr = *temp1 =
				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer1_ptr == 0) ||
				(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
				/* unwind both earlier mappings before bailing */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer0_ptr,
					BUF0_LEN, PCI_DMA_FROMDEVICE);
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
		}
	}
	return 0;
	/* common failure path: account the mapping failure and free the skb */
	memalloc_failed:
		stats->pci_map_fail_cnt++;
		stats->mem_freed += (*skb)->truesize;
		dev_kfree_skb(*skb);
		return -ENOMEM;
}
6639
6640 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6641                                 int size)
6642 {
6643         struct net_device *dev = sp->dev;
6644         if (sp->rxd_mode == RXD_MODE_1) {
6645                 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6646         } else if (sp->rxd_mode == RXD_MODE_3B) {
6647                 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6648                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6649                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6650         }
6651 }
6652
6653 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6654 {
6655         int i, j, k, blk_cnt = 0, size;
6656         struct mac_info * mac_control = &sp->mac_control;
6657         struct config_param *config = &sp->config;
6658         struct net_device *dev = sp->dev;
6659         struct RxD_t *rxdp = NULL;
6660         struct sk_buff *skb = NULL;
6661         struct buffAdd *ba = NULL;
6662         u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6663
6664         /* Calculate the size based on ring mode */
6665         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6666                 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6667         if (sp->rxd_mode == RXD_MODE_1)
6668                 size += NET_IP_ALIGN;
6669         else if (sp->rxd_mode == RXD_MODE_3B)
6670                 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6671
6672         for (i = 0; i < config->rx_ring_num; i++) {
6673                 blk_cnt = config->rx_cfg[i].num_rxd /
6674                         (rxd_count[sp->rxd_mode] +1);
6675
6676                 for (j = 0; j < blk_cnt; j++) {
6677                         for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6678                                 rxdp = mac_control->rings[i].
6679                                         rx_blocks[j].rxds[k].virt_addr;
6680                                 if(sp->rxd_mode == RXD_MODE_3B)
6681                                         ba = &mac_control->rings[i].ba[j][k];
6682                                 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6683                                                        &skb,(u64 *)&temp0_64,
6684                                                        (u64 *)&temp1_64,
6685                                                        (u64 *)&temp2_64,
6686                                                         size) == ENOMEM) {
6687                                         return 0;
6688                                 }
6689
6690                                 set_rxd_buffer_size(sp, rxdp, size);
6691                                 wmb();
6692                                 /* flip the Ownership bit to Hardware */
6693                                 rxdp->Control_1 |= RXD_OWN_XENA;
6694                         }
6695                 }
6696         }
6697         return 0;
6698
6699 }
6700
/*
 * s2io_add_isr - register the driver's interrupt handler(s).
 * @sp: device private structure.
 *
 * When MSI-X is configured, tries to enable it and request one vector
 * per in-use FIFO/ring entry; on any failure (enable or request_irq)
 * the driver falls back to legacy INTA.
 *
 * Returns 0 on success, -1 if even the INTA registration fails.
 */
static int s2io_add_isr(struct s2io_nic * sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		/* MSI-X enable failed; drop back to legacy interrupts */
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/* Store the values of the MSIX table in the struct s2io_nic structure */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_tx_cnt=0,msix_rx_cnt=0;

		/* walk the in-use MSI-X entries; traffic vectors start at 1 */
		for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
			if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
				sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_fifo_handle, 0, sp->desc[i],
						  sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_tx_cnt++;
				}
			} else {
				sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_ring_handle, 0, sp->desc[i],
						  sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_rx_cnt++;
				}
			}
			if (err) {
				/* undo the vectors registered so far and
				 * fall back to INTA below */
				remove_msix_isr(sp);
				DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
					  "failed\n", dev->name, i);
				DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n",
						 dev->name);
				sp->config.intr_type = INTA;
				break;
			}
			sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
		}
		if (!err) {
			printk(KERN_INFO "MSI-X-TX %d entries enabled\n",
				msix_tx_cnt);
			printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
				msix_rx_cnt);
		}
	}
	/* Note: also reached via fallback when MSI-X registration failed */
	if (sp->config.intr_type == INTA) {
		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
				sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
6788 static void s2io_rem_isr(struct s2io_nic * sp)
6789 {
6790         if (sp->config.intr_type == MSI_X)
6791                 remove_msix_isr(sp);
6792         else
6793                 remove_inta_isr(sp);
6794 }
6795
/*
 * do_s2io_card_down - tear down a running card.
 * @sp: device private structure.
 * @do_io: non-zero when it is safe to touch the hardware registers;
 *         zero to clean up only the software state.
 */
static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	unsigned long flags;
	register u64 val64 = 0;

	if (!is_s2io_card_up(sp))
		return;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
		msleep(50);
	}
	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* Kill tasklet. */
	tasklet_kill(&sp->task);

	/* Check if the device is Quiescent and then Reset the NIC */
	while(do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this point we
		 * are just setting the ownership bit of rxd in each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if(verify_pcc_quiescent(sp, sp->device_enabled_once))
			break;
		}

		msleep(50);
		cnt++;
		/* give up after roughly 500 ms (10 * 50 ms) */
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG,
				  "s2io_close:Device not Quiescent ");
			DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
				  (unsigned long long) val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	spin_lock_irqsave(&sp->tx_lock, flags);
	/* Free all Tx buffers */
	free_tx_buffers(sp);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	/* Free all Rx buffers */
	spin_lock_irqsave(&sp->rx_lock, flags);
	free_rx_buffers(sp);
	spin_unlock_irqrestore(&sp->rx_lock, flags);

	/* allow the s2io_set_link task to run again */
	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
6864
/* Bring the card down, including the hardware register I/O (do_io = 1). */
static void s2io_card_down(struct s2io_nic * sp)
{
	do_s2io_card_down(sp, 1);
}
6869
/*
 * s2io_card_up - initialize the hardware and bring the interface up.
 * @sp: device private structure.
 *
 * Programs the H/W registers, fills the Rx rings, restores the receive
 * mode, starts the NIC, registers the interrupt handler(s), arms the
 * alarm timer and enables interrupts.  Error paths reset the hardware
 * and free any Rx buffers already allocated.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int s2io_card_up(struct s2io_nic * sp)
{
	int i, ret = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	ret = init_nic(sp);
	if (ret != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		/* -EIO from init_nic means the device is gone; skip reset */
		if (ret != -EIO)
			s2io_reset(sp);
		return ret;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		if ((ret = fill_rx_buffers(sp, i))) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			/* frees buffers from all rings, incl. earlier ones */
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  atomic_read(&sp->rx_bufs_left[i]));
	}
	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos= 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (sp->lro) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use(if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		/* for INTA no irq was registered, so nothing to remove */
		if (sp->config.intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* arm the periodic alarm handler (fires every half second) */
	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	/* Enable tasklet for the device */
	tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);

	/*  Enable select interrupts */
	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
	if (sp->config.intr_type != INTA)
		en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
	else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}

	set_bit(__S2IO_STATE_CARD_UP, &sp->state);
	return 0;
}
6960
6961 /**
6962  * s2io_restart_nic - Resets the NIC.
6963  * @data : long pointer to the device private structure
6964  * Description:
6965  * This function is scheduled to be run by the s2io_tx_watchdog
6966  * function after 0.5 secs to reset the NIC. The idea is to reduce
6967  * the run time of the watch dog routine which is run holding a
6968  * spin lock.
6969  */
6970
6971 static void s2io_restart_nic(struct work_struct *work)
6972 {
6973         struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6974         struct net_device *dev = sp->dev;
6975
6976         rtnl_lock();
6977
6978         if (!netif_running(dev))
6979                 goto out_unlock;
6980
6981         s2io_card_down(sp);
6982         if (s2io_card_up(sp)) {
6983                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6984                           dev->name);
6985         }
6986         netif_wake_queue(dev);
6987         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6988                   dev->name);
6989 out_unlock:
6990         rtnl_unlock();
6991 }
6992
6993 /**
6994  *  s2io_tx_watchdog - Watchdog for transmit side.
6995  *  @dev : Pointer to net device structure
6996  *  Description:
6997  *  This function is triggered if the Tx Queue is stopped
6998  *  for a pre-defined amount of time when the Interface is still up.
6999  *  If the Interface is jammed in such a situation, the hardware is
7000  *  reset (by s2io_close) and restarted again (by s2io_open) to
7001  *  overcome any problem that might have been caused in the hardware.
7002  *  Return value:
7003  *  void
7004  */
7005
7006 static void s2io_tx_watchdog(struct net_device *dev)
7007 {
7008         struct s2io_nic *sp = dev->priv;
7009
7010         if (netif_carrier_ok(dev)) {
7011                 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
7012                 schedule_work(&sp->rst_timer_task);
7013                 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
7014         }
7015 }
7016
7017 /**
7018  *   rx_osm_handler - To perform some OS related operations on SKB.
7019  *   @sp: private member of the device structure,pointer to s2io_nic structure.
7020  *   @skb : the socket buffer pointer.
7021  *   @len : length of the packet
7022  *   @cksum : FCS checksum of the frame.
7023  *   @ring_no : the ring from which this RxD was extracted.
7024  *   Description:
 *   This function is called by the Rx interrupt service routine to perform
7026  *   some OS related operations on the SKB before passing it to the upper
7027  *   layers. It mainly checks if the checksum is OK, if so adds it to the
7028  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
7029  *   to the upper layer. If the checksum is wrong, it increments the Rx
7030  *   packet error count, frees the SKB and returns error.
7031  *   Return value:
7032  *   SUCCESS on success and -1 on failure.
7033  */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
	struct s2io_nic *sp = ring_data->nic;
	struct net_device *dev = (struct net_device *) sp->dev;
	/* the skb pointer was stashed in Host_Control at fill time */
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long) rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
	struct lro *lro;
	u8 err_mask;

	skb->dev = dev;

	if (err) {
		/* Check for parity error */
		if (err & 0x1) {
			sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
		}
		/* transfer code field; bump the matching per-code counter */
		err_mask = err >> 48;
		switch(err_mask) {
			case 1:
				sp->mac_control.stats_info->sw_stat.
				rx_parity_err_cnt++;
			break;

			case 2:
				sp->mac_control.stats_info->sw_stat.
				rx_abort_cnt++;
			break;

			case 3:
				sp->mac_control.stats_info->sw_stat.
				rx_parity_abort_cnt++;
			break;

			case 4:
				sp->mac_control.stats_info->sw_stat.
				rx_rda_fail_cnt++;
			break;

			case 5:
				sp->mac_control.stats_info->sw_stat.
				rx_unkn_prot_cnt++;
			break;

			case 6:
				sp->mac_control.stats_info->sw_stat.
				rx_fcs_err_cnt++;
			break;

			case 7:
				sp->mac_control.stats_info->sw_stat.
				rx_buf_size_err_cnt++;
			break;

			case 8:
				sp->mac_control.stats_info->sw_stat.
				rx_rxd_corrupt_cnt++;
			break;

			case 15:
				sp->mac_control.stats_info->sw_stat.
				rx_unkn_err_cnt++;
			break;
		}
		/*
		* Drop the packet if bad transfer code. Exception being
		* 0x5, which could be due to unsupported IPv6 extension header.
		* In this case, we let stack handle the packet.
		* Note that in this case, since checksum will be incorrect,
		* stack will validate the same.
		*/
		if (err_mask != 0x5) {
			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
				dev->name, err_mask);
			sp->stats.rx_crc_errors++;
			sp->mac_control.stats_info->sw_stat.mem_freed
				+= skb->truesize;
			dev_kfree_skb(skb);
			atomic_dec(&sp->rx_bufs_left[ring_no]);
			rxdp->Host_Control = 0;
			return 0;
		}
	}

	/* Updating statistics */
	sp->stats.rx_packets++;
	rxdp->Host_Control = 0;
	if (sp->rxd_mode == RXD_MODE_1) {
		/* single-buffer mode: whole frame is in buffer 0 */
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		sp->stats.rx_bytes += len;
		skb_put(skb, len);

	} else if (sp->rxd_mode == RXD_MODE_3B) {
		/* two-buffer mode: header in side buffer ba_0, payload in
		 * the skb data buffer; stitch them together */
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
		unsigned char *buff = skb_push(skb, buf0_len);

		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
		sp->stats.rx_bytes += buf0_len + buf2_len;
		memcpy(buff, ba->ba_0, buf0_len);
		skb_put(skb, buf2_len);
	}

	/* checksum offload / LRO only for non-fragmented TCP/UDP frames
	 * when rx checksumming is enabled */
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
	    (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
	    (sp->rx_csum)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (sp->lro) {
				u32 tcp_len;
				u8 *tcp;
				int ret = 0;

				/* classify the frame against the open LRO
				 * sessions; ret selects the action below */
				ret = s2io_club_tcp_session(skb->data, &tcp,
							    &tcp_len, &lro,
							    rxdp, sp);
				switch (ret) {
					case 3: /* Begin anew */
						lro->parent = skb;
						goto aggregate;
					case 1: /* Aggregate */
					{
						lro_append_pkt(sp, lro,
							skb, tcp_len);
						goto aggregate;
					}
					case 4: /* Flush session */
					{
						lro_append_pkt(sp, lro,
							skb, tcp_len);
						queue_rx_frame(lro->parent);
						clear_lro_session(lro);
						sp->mac_control.stats_info->
						    sw_stat.flush_max_pkts++;
						goto aggregate;
					}
					case 2: /* Flush both */
						lro->parent->data_len =
							lro->frags_len;
						sp->mac_control.stats_info->
						     sw_stat.sending_both++;
						queue_rx_frame(lro->parent);
						clear_lro_session(lro);
						/* current skb sent separately
						 * via send_up below */
						goto send_up;
					case 0: /* sessions exceeded */
					case -1: /* non-TCP or not
						  * L2 aggregatable
						  */
					case 5: /*
						 * First pkt in session not
						 * L3/L4 aggregatable
						 */
						break;
					default:
						DBG_PRINT(ERR_DBG,
							"%s: Samadhana!!\n",
							 __FUNCTION__);
						BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}
	sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
	if (!sp->lro) {
		skb->protocol = eth_type_trans(skb, dev);
		if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
			vlan_strip_flag)) {
			/* Queueing the vlan frame to the upper layer */
			if (napi)
				vlan_hwaccel_receive_skb(skb, sp->vlgrp,
					RXD_GET_VLAN_TAG(rxdp->Control_2));
			else
				vlan_hwaccel_rx(skb, sp->vlgrp,
					RXD_GET_VLAN_TAG(rxdp->Control_2));
		} else {
			if (napi)
				netif_receive_skb(skb);
			else
				netif_rx(skb);
		}
	} else {
send_up:
		queue_rx_frame(skb);
	}
	dev->last_rx = jiffies;
aggregate:
	atomic_dec(&sp->rx_bufs_left[ring_no]);
	return SUCCESS;
}
7243
7244 /**
7245  *  s2io_link - stops/starts the Tx queue.
7246  *  @sp : private member of the device structure, which is a pointer to the
7247  *  s2io_nic structure.
7248  *  @link : indicates whether link is UP/DOWN.
7249  *  Description:
7250  *  This function stops/starts the Tx queue depending on whether the link
7251  *  status of the NIC is down or up. This is called by the Alarm
7252  *  interrupt handler whenever a link change interrupt comes up.
7253  *  Return value:
7254  *  void.
7255  */
7256
7257 static void s2io_link(struct s2io_nic * sp, int link)
7258 {
7259         struct net_device *dev = (struct net_device *) sp->dev;
7260
        /* Act only on a real transition; repeated notifications are ignored */
7261         if (link != sp->last_link_state) {
7262                 if (link == LINK_DOWN) {
7263                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7264                         netif_carrier_off(dev);
                        /*
                         * Record how long the link stayed up, but only if it
                         * ever came up.  Note: the braceless "if" below guards
                         * the single assignment spanning the next two lines;
                         * the indentation is misleading but the logic is right.
                         */
7265                         if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7266                         sp->mac_control.stats_info->sw_stat.link_up_time =
7267                                 jiffies - sp->start_time;
7268                         sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7269                 } else {
7270                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
                        /* Symmetric bookkeeping for the time spent down */
7271                         if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7272                         sp->mac_control.stats_info->sw_stat.link_down_time =
7273                                 jiffies - sp->start_time;
7274                         sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7275                         netif_carrier_on(dev);
7276                 }
7277         }
7278         sp->last_link_state = link;
        /* start_time is refreshed on every call, even with no state change */
7279         sp->start_time = jiffies;
7280 }
7281
7282 /**
7283  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7284  *  @sp : private member of the device structure, which is a pointer to the
7285  *  s2io_nic structure.
7286  *  Description:
7287  *  This function initializes a few of the PCI and PCI-X configuration registers
7288  *  with recommended values.
7289  *  Return value:
7290  *  void
7291  */
7292
7293 static void s2io_init_pci(struct s2io_nic * sp)
7294 {
7295         u16 pci_cmd = 0, pcix_cmd = 0;
7296
7297         /* Enable Data Parity Error Recovery in PCI-X command register. */
7298         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7299                              &(pcix_cmd));
7300         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7301                               (pcix_cmd | 1));
        /*
         * NOTE(review): the trailing read-back after each write appears to be
         * a deliberate settle/verify step; the value read is not checked.
         */
7302         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7303                              &(pcix_cmd));
7304
7305         /* Set the PErr Response bit in PCI command register. */
7306         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7307         pci_write_config_word(sp->pdev, PCI_COMMAND,
7308                               (pci_cmd | PCI_COMMAND_PARITY));
7309         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7310 }
7311
7312 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
7313 {
7314         if ( tx_fifo_num > 8) {
7315                 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
7316                          "supported\n");
7317                 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
7318                 tx_fifo_num = 8;
7319         }
7320         if ( rx_ring_num > 8) {
7321                 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
7322                          "supported\n");
7323                 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
7324                 rx_ring_num = 8;
7325         }
7326         if (*dev_intr_type != INTA)
7327                 napi = 0;
7328
7329         if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7330                 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7331                           "Defaulting to INTA\n");
7332                 *dev_intr_type = INTA;
7333         }
7334
7335         if ((*dev_intr_type == MSI_X) &&
7336                         ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7337                         (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7338                 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7339                                         "Defaulting to INTA\n");
7340                 *dev_intr_type = INTA;
7341         }
7342
7343         if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7344                 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7345                 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7346                 rx_ring_mode = 1;
7347         }
7348         return SUCCESS;
7349 }
7350
7351 /**
7352  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7353  * or Traffic class respectively.
7354  * @nic: device private variable
7355  * Description: The function configures the receive steering to
7356  * desired receive ring.
7357  * Return Value:  SUCCESS on success and
7358  * '-1' on failure (endian settings incorrect).
7359  */
7360 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7361 {
7362         struct XENA_dev_config __iomem *bar0 = nic->bar0;
7363         register u64 val64 = 0;
7364
        /* DSCP / traffic-class codepoints are only 6 bits wide */
7365         if (ds_codepoint > 63)
7366                 return FAILURE;
7367
        /* Program the target ring into the data register first ... */
7368         val64 = RTS_DS_MEM_DATA(ring);
7369         writeq(val64, &bar0->rts_ds_mem_data);
7370
        /* ... then issue the write-enable command for this codepoint */
7371         val64 = RTS_DS_MEM_CTRL_WE |
7372                 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7373                 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7374
7375         writeq(val64, &bar0->rts_ds_mem_ctrl);
7376
        /* Poll until the adapter acknowledges completion of the command */
7377         return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7378                                 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7379                                 S2IO_BIT_RESET);
7380 }
7381
7382 /**
7383  *  s2io_init_nic - Initialization of the adapter .
7384  *  @pdev : structure containing the PCI related information of the device.
7385  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7386  *  Description:
7387  *  The function initializes an adapter identified by the pci_dev structure.
7388  *  All OS related initialization including memory and device structure and
7389  *  initialization of the device private variable is done. Also the swapper
7390  *  control register is initialized to enable read and write into the I/O
7391  *  registers of the device.
7392  *  Return value:
7393  *  returns 0 on success and negative on failure.
7394  */
7395
7396 static int __devinit
7397 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7398 {
7399         struct s2io_nic *sp;
7400         struct net_device *dev;
7401         int i, j, ret;
7402         int dma_flag = FALSE;
7403         u32 mac_up, mac_down;
7404         u64 val64 = 0, tmp64 = 0;
7405         struct XENA_dev_config __iomem *bar0 = NULL;
7406         u16 subid;
7407         struct mac_info *mac_control;
7408         struct config_param *config;
7409         int mode;
7410         u8 dev_intr_type = intr_type;
7411         DECLARE_MAC_BUF(mac);
7412
        /* Validate/clamp module parameters before touching the hardware */
7413         if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
7414                 return ret;
7415
7416         if ((ret = pci_enable_device(pdev))) {
7417                 DBG_PRINT(ERR_DBG,
7418                           "s2io_init_nic: pci_enable_device failed\n");
7419                 return ret;
7420         }
7421
        /* Prefer a 64-bit DMA mask; fall back to 32-bit, else fail the probe */
7422         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7423                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7424                 dma_flag = TRUE;
7425                 if (pci_set_consistent_dma_mask
7426                     (pdev, DMA_64BIT_MASK)) {
7427                         DBG_PRINT(ERR_DBG,
7428                                   "Unable to obtain 64bit DMA for \
7429                                         consistent allocations\n");
7430                         pci_disable_device(pdev);
7431                         return -ENOMEM;
7432                 }
7433         } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7434                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7435         } else {
7436                 pci_disable_device(pdev);
7437                 return -ENOMEM;
7438         }
7439         if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7440                 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
7441                 pci_disable_device(pdev);
7442                 return -ENODEV;
7443         }
7444
7445         dev = alloc_etherdev(sizeof(struct s2io_nic));
7446         if (dev == NULL) {
7447                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7448                 pci_disable_device(pdev);
7449                 pci_release_regions(pdev);
7450                 return -ENODEV;
7451         }
7452
7453         pci_set_master(pdev);
7454         pci_set_drvdata(pdev, dev);
7455         SET_NETDEV_DEV(dev, &pdev->dev);
7456
7457         /*  Private member variable initialized to s2io NIC structure */
7458         sp = dev->priv;
7459         memset(sp, 0, sizeof(struct s2io_nic));
7460         sp->dev = dev;
7461         sp->pdev = pdev;
7462         sp->high_dma_flag = dma_flag;
7463         sp->device_enabled_once = FALSE;
7464         if (rx_ring_mode == 1)
7465                 sp->rxd_mode = RXD_MODE_1;
7466         if (rx_ring_mode == 2)
7467                 sp->rxd_mode = RXD_MODE_3B;
7468
7469         sp->config.intr_type = dev_intr_type;
7470
7471         if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7472                 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7473                 sp->device_type = XFRAME_II_DEVICE;
7474         else
7475                 sp->device_type = XFRAME_I_DEVICE;
7476
7477         sp->lro = lro_enable;
7478
7479         /* Initialize some PCI/PCI-X fields of the NIC. */
7480         s2io_init_pci(sp);
7481
7482         /*
7483          * Setting the device configuration parameters.
7484          * Most of these parameters can be specified by the user during
7485          * module insertion as they are module loadable parameters. If
7486          * these parameters are not specified during load time, they
7487          * are initialized with default values.
7488          */
7489         mac_control = &sp->mac_control;
7490         config = &sp->config;
7491
7492         config->napi = napi;
7493
7494         /* Tx side parameters. */
7495         config->tx_fifo_num = tx_fifo_num;
7496         for (i = 0; i < MAX_TX_FIFOS; i++) {
7497                 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7498                 config->tx_cfg[i].fifo_priority = i;
7499         }
7500
7501         /* mapping the QoS priority to the configured fifos */
7502         for (i = 0; i < MAX_TX_FIFOS; i++)
7503                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
7504
        /* Any FIFO shorter than 65 TxDs forces per-list Tx interrupts */
7505         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7506         for (i = 0; i < config->tx_fifo_num; i++) {
7507                 config->tx_cfg[i].f_no_snoop =
7508                     (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7509                 if (config->tx_cfg[i].fifo_len < 65) {
7510                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7511                         break;
7512                 }
7513         }
7514         /* + 2 because one Txd for skb->data and one Txd for UFO */
7515         config->max_txds = MAX_SKB_FRAGS + 2;
7516
7517         /* Rx side parameters. */
7518         config->rx_ring_num = rx_ring_num;
7519         for (i = 0; i < MAX_RX_RINGS; i++) {
7520                 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7521                     (rxd_count[sp->rxd_mode] + 1);
7522                 config->rx_cfg[i].ring_priority = i;
7523         }
7524
7525         for (i = 0; i < rx_ring_num; i++) {
7526                 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7527                 config->rx_cfg[i].f_no_snoop =
7528                     (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7529         }
7530
7531         /*  Setting Mac Control parameters */
7532         mac_control->rmac_pause_time = rmac_pause_time;
7533         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7534         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7535
7536
7537         /* Initialize Ring buffer parameters. */
7538         for (i = 0; i < config->rx_ring_num; i++)
7539                 atomic_set(&sp->rx_bufs_left[i], 0);
7540
7541         /*  initialize the shared memory used by the NIC and the host */
7542         if (init_shared_mem(sp)) {
7543                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7544                           dev->name);
7545                 ret = -ENOMEM;
7546                 goto mem_alloc_failed;
7547         }
7548
7549         sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7550                                      pci_resource_len(pdev, 0));
7551         if (!sp->bar0) {
7552                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7553                           dev->name);
7554                 ret = -ENOMEM;
7555                 goto bar0_remap_failed;
7556         }
7557
7558         sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7559                                      pci_resource_len(pdev, 2));
7560         if (!sp->bar1) {
7561                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7562                           dev->name);
7563                 ret = -ENOMEM;
7564                 goto bar1_remap_failed;
7565         }
7566
7567         dev->irq = pdev->irq;
7568         dev->base_addr = (unsigned long) sp->bar0;
7569
7570         /* Initializing the BAR1 address as the start of the FIFO pointer. */
7571         for (j = 0; j < MAX_TX_FIFOS; j++) {
7572                 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7573                     (sp->bar1 + (j * 0x00020000));
7574         }
7575
7576         /*  Driver entry points */
7577         dev->open = &s2io_open;
7578         dev->stop = &s2io_close;
7579         dev->hard_start_xmit = &s2io_xmit;
7580         dev->get_stats = &s2io_get_stats;
7581         dev->set_multicast_list = &s2io_set_multicast;
7582         dev->do_ioctl = &s2io_ioctl;
7583         dev->set_mac_address = &s2io_set_mac_addr;
7584         dev->change_mtu = &s2io_change_mtu;
7585         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7586         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7587         dev->vlan_rx_register = s2io_vlan_rx_register;
7588
7589         /*
7590          * will use eth_mac_addr() for  dev->set_mac_address
7591          * mac address will be set every time dev->open() is called
7592          */
7593         netif_napi_add(dev, &sp->napi, s2io_poll, 32);
7594
7595 #ifdef CONFIG_NET_POLL_CONTROLLER
7596         dev->poll_controller = s2io_netpoll;
7597 #endif
7598
7599         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7600         if (sp->high_dma_flag == TRUE)
7601                 dev->features |= NETIF_F_HIGHDMA;
7602         dev->features |= NETIF_F_TSO;
7603         dev->features |= NETIF_F_TSO6;
        /* UFO is only advertised on Xframe II and only when enabled by param */
7604         if ((sp->device_type & XFRAME_II_DEVICE) && (ufo))  {
7605                 dev->features |= NETIF_F_UFO;
7606                 dev->features |= NETIF_F_HW_CSUM;
7607         }
7608
7609         dev->tx_timeout = &s2io_tx_watchdog;
7610         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7611         INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7612         INIT_WORK(&sp->set_link_task, s2io_set_link);
7613
7614         pci_save_state(sp->pdev);
7615
7616         /* Setting swapper control on the NIC, for proper reset operation */
7617         if (s2io_set_swapper(sp)) {
7618                 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7619                           dev->name);
7620                 ret = -EAGAIN;
7621                 goto set_swap_failed;
7622         }
7623
7624         /* Verify if the Herc works on the slot its placed into */
7625         if (sp->device_type & XFRAME_II_DEVICE) {
7626                 mode = s2io_verify_pci_mode(sp);
7627                 if (mode < 0) {
7628                         DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7629                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7630                         ret = -EBADSLT;
7631                         goto set_swap_failed;
7632                 }
7633         }
7634
7635         /* Not needed for Herc */
7636         if (sp->device_type & XFRAME_I_DEVICE) {
7637                 /*
7638                  * Fix for all "FFs" MAC address problems observed on
7639                  * Alpha platforms
7640                  */
7641                 fix_mac_address(sp);
7642                 s2io_reset(sp);
7643         }
7644
7645         /*
7646          * MAC address initialization.
7647          * For now only one mac address will be read and used.
7648          */
7649         bar0 = sp->bar0;
7650         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7651             RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7652         writeq(val64, &bar0->rmac_addr_cmd_mem);
7653         wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7654                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
        /*
         * NOTE(review): the wait_for_cmd_complete() return value is ignored
         * here; if the command timed out the MAC address read below could be
         * stale — confirm whether a failure check is warranted.
         */
7655         tmp64 = readq(&bar0->rmac_addr_data0_mem);
7656         mac_down = (u32) tmp64;
7657         mac_up = (u32) (tmp64 >> 32);
7658
7659         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7660         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7661         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7662         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7663         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7664         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7665
7666         /*  Set the factory defined MAC address initially   */
7667         dev->addr_len = ETH_ALEN;
7668         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7669         memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
7670
7671          /* Store the values of the MSIX table in the s2io_nic structure */
7672         store_xmsi_data(sp);
7673         /* reset Nic and bring it to known state */
7674         s2io_reset(sp);
7675
7676         /*
7677          * Initialize the tasklet status and link state flags
7678          * and the card state parameter
7679          */
7680         sp->tasklet_status = 0;
7681         sp->state = 0;
7682
7683         /* Initialize spinlocks */
7684         spin_lock_init(&sp->tx_lock);
7685
7686         if (!napi)
7687                 spin_lock_init(&sp->put_lock);
7688         spin_lock_init(&sp->rx_lock);
7689
7690         /*
7691          * SXE-002: Configure link and activity LED to init state
7692          * on driver load.
7693          */
7694         subid = sp->pdev->subsystem_device;
7695         if ((subid & 0xFF) >= 0x07) {
7696                 val64 = readq(&bar0->gpio_control);
7697                 val64 |= 0x0000800000000000ULL;
7698                 writeq(val64, &bar0->gpio_control);
7699                 val64 = 0x0411040400000000ULL;
7700                 writeq(val64, (void __iomem *) bar0 + 0x2700);
7701                 val64 = readq(&bar0->gpio_control);
7702         }
7703
7704         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
7705
7706         if (register_netdev(dev)) {
7707                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7708                 ret = -ENODEV;
7709                 goto register_failed;
7710         }
7711         s2io_vpd_read(sp);
7712         DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
7713         DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7714                   sp->product_name, pdev->revision);
7715         DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7716                   s2io_driver_version);
7717         DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
7718                   dev->name, print_mac(mac, dev->dev_addr));
7719         DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
7720         if (sp->device_type & XFRAME_II_DEVICE) {
7721                 mode = s2io_print_pci_mode(sp);
7722                 if (mode < 0) {
7723                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7724                         ret = -EBADSLT;
7725                         unregister_netdev(dev);
7726                         goto set_swap_failed;
7727                 }
7728         }
7729         switch(sp->rxd_mode) {
7730                 case RXD_MODE_1:
7731                     DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7732                                                 dev->name);
7733                     break;
7734                 case RXD_MODE_3B:
7735                     DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7736                                                 dev->name);
7737                     break;
7738         }
7739
7740         if (napi)
7741                 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7742         switch(sp->config.intr_type) {
7743                 case INTA:
7744                     DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7745                     break;
7746                 case MSI_X:
7747                     DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7748                     break;
7749         }
7750         if (sp->lro)
7751                 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7752                           dev->name);
7753         if (ufo)
7754                 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7755                                         " enabled\n", dev->name);
7756         /* Initialize device name */
7757         sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7758
7759         /*
7760          * Make Link state as off at this point, when the Link change
7761          * interrupt comes the state will be automatically changed to
7762          * the right state.
7763          */
7764         netif_carrier_off(dev);
7765
7766         return 0;
7767
        /* Error unwind: each label releases what was set up before it failed */
7768       register_failed:
7769       set_swap_failed:
7770         iounmap(sp->bar1);
7771       bar1_remap_failed:
7772         iounmap(sp->bar0);
7773       bar0_remap_failed:
7774       mem_alloc_failed:
7775         free_shared_mem(sp);
7776         pci_disable_device(pdev);
7777         pci_release_regions(pdev);
7778         pci_set_drvdata(pdev, NULL);
7779         free_netdev(dev);
7780
7781         return ret;
7782 }
7783
7784 /**
7785  * s2io_rem_nic - Free the PCI device
7786  * @pdev: structure containing the PCI related information of the device.
7787  * Description: This function is called by the Pci subsystem to release a
7788  * PCI device and free up all resource held up by the device. This could
7789  * be in response to a Hot plug event or when the driver is to be removed
7790  * from memory.
7791  */
7792
7793 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7794 {
7795         struct net_device *dev =
7796             (struct net_device *) pci_get_drvdata(pdev);
7797         struct s2io_nic *sp;
7798
7799         if (dev == NULL) {
7800                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7801                 return;
7802         }
7803
        /* Make sure deferred reset/link work items have finished running */
7804         flush_scheduled_work();
7805
7806         sp = dev->priv;
        /* Detach from the network stack before tearing down resources */
7807         unregister_netdev(dev);
7808
7809         free_shared_mem(sp);
7810         iounmap(sp->bar0);
7811         iounmap(sp->bar1);
7812         pci_release_regions(pdev);
7813         pci_set_drvdata(pdev, NULL);
7814         free_netdev(dev);
7815         pci_disable_device(pdev);
7816 }
7817
7818 /**
7819  * s2io_starter - Entry point for the driver
7820  * Description: This function is the entry point for the driver. It verifies
7821  * the module loadable parameters and initializes PCI configuration space.
7822  */
7823
7824 static int __init s2io_starter(void)
7825 {
        /* Per-device parameter validation happens later, in the probe path */
7826         return pci_register_driver(&s2io_driver);
7827 }
7828
7829 /**
7830  * s2io_closer - Cleanup routine for the driver
7831  * Description: This function is the cleanup routine for the driver. It unregisters the driver.
7832  */
7833
7834 static __exit void s2io_closer(void)
7835 {
        /* Unregistering triggers s2io_rem_nic() for every bound device */
7836         pci_unregister_driver(&s2io_driver);
7837         DBG_PRINT(INIT_DBG, "cleanup done\n");
7838 }
7839
7840 module_init(s2io_starter);
7841 module_exit(s2io_closer);
7842
7843 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7844                 struct tcphdr **tcp, struct RxD_t *rxdp)
7845 {
7846         int ip_off;
7847         u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7848
7849         if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7850                 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7851                           __FUNCTION__);
7852                 return -1;
7853         }
7854
7855         /* TODO:
7856          * By default the VLAN field in the MAC is stripped by the card, if this
7857          * feature is turned off in rx_pa_cfg register, then the ip_off field
7858          * has to be shifted by a further 2 bytes
7859          */
7860         switch (l2_type) {
7861                 case 0: /* DIX type */
7862                 case 4: /* DIX type with VLAN */
7863                         ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7864                         break;
7865                 /* LLC, SNAP etc are considered non-mergeable */
7866                 default:
7867                         return -1;
7868         }
7869
7870         *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7871         ip_len = (u8)((*ip)->ihl);
7872         ip_len <<= 2;
7873         *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7874
7875         return 0;
7876 }
7877
7878 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7879                                   struct tcphdr *tcp)
7880 {
7881         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7882         if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7883            (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7884                 return -1;
7885         return 0;
7886 }
7887
7888 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7889 {
7890         return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
7891 }
7892
7893 static void initiate_new_session(struct lro *lro, u8 *l2h,
7894                      struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7895 {
7896         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7897         lro->l2h = l2h;
7898         lro->iph = ip;
7899         lro->tcph = tcp;
7900         lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7901         lro->tcp_ack = ntohl(tcp->ack_seq);
7902         lro->sg_num = 1;
7903         lro->total_len = ntohs(ip->tot_len);
7904         lro->frags_len = 0;
7905         /*
7906          * check if we saw TCP timestamp. Other consistency checks have
7907          * already been done.
7908          */
7909         if (tcp->doff == 8) {
7910                 u32 *ptr;
7911                 ptr = (u32 *)(tcp+1);
7912                 lro->saw_ts = 1;
7913                 lro->cur_tsval = *(ptr+1);
7914                 lro->cur_tsecr = *(ptr+2);
7915         }
7916         lro->in_use = 1;
7917 }
7918
7919 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7920 {
7921         struct iphdr *ip = lro->iph;
7922         struct tcphdr *tcp = lro->tcph;
7923         __sum16 nchk;
7924         struct stat_block *statinfo = sp->mac_control.stats_info;
7925         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7926
7927         /* Update L3 header */
7928         ip->tot_len = htons(lro->total_len);
7929         ip->check = 0;
7930         nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7931         ip->check = nchk;
7932
7933         /* Update L4 header */
7934         tcp->ack_seq = lro->tcp_ack;
7935         tcp->window = lro->window;
7936
7937         /* Update tsecr field if this session has timestamps enabled */
7938         if (lro->saw_ts) {
7939                 u32 *ptr = (u32 *)(tcp + 1);
7940                 *(ptr+2) = lro->cur_tsecr;
7941         }
7942
7943         /* Update counters required for calculation of
7944          * average no. of packets aggregated.
7945          */
7946         statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7947         statinfo->sw_stat.num_aggregations++;
7948 }
7949
7950 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7951                 struct tcphdr *tcp, u32 l4_pyld)
7952 {
7953         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7954         lro->total_len += l4_pyld;
7955         lro->frags_len += l4_pyld;
7956         lro->tcp_next_seq += l4_pyld;
7957         lro->sg_num++;
7958
7959         /* Update ack seq no. and window ad(from this pkt) in LRO object */
7960         lro->tcp_ack = tcp->ack_seq;
7961         lro->window = tcp->window;
7962
7963         if (lro->saw_ts) {
7964                 u32 *ptr;
7965                 /* Update tsecr and tsval from this packet */
7966                 ptr = (u32 *) (tcp + 1);
7967                 lro->cur_tsval = *(ptr + 1);
7968                 lro->cur_tsecr = *(ptr + 2);
7969         }
7970 }
7971
7972 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7973                                     struct tcphdr *tcp, u32 tcp_pyld_len)
7974 {
7975         u8 *ptr;
7976
7977         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7978
7979         if (!tcp_pyld_len) {
7980                 /* Runt frame or a pure ack */
7981                 return -1;
7982         }
7983
7984         if (ip->ihl != 5) /* IP has options */
7985                 return -1;
7986
7987         /* If we see CE codepoint in IP header, packet is not mergeable */
7988         if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7989                 return -1;
7990
7991         /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7992         if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
7993                                     tcp->ece || tcp->cwr || !tcp->ack) {
7994                 /*
7995                  * Currently recognize only the ack control word and
7996                  * any other control field being set would result in
7997                  * flushing the LRO session
7998                  */
7999                 return -1;
8000         }
8001
8002         /*
8003          * Allow only one TCP timestamp option. Don't aggregate if
8004          * any other options are detected.
8005          */
8006         if (tcp->doff != 5 && tcp->doff != 8)
8007                 return -1;
8008
8009         if (tcp->doff == 8) {
        /* Skip any leading NOP padding before the timestamp option */
8010                 ptr = (u8 *)(tcp + 1);
8011                 while (*ptr == TCPOPT_NOP)
8012                         ptr++;
8013                 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8014                         return -1;
8015
8016                 /* Ensure timestamp value increases monotonically */
                /*
                 * NOTE(review): both sides of this comparison are raw
                 * network-byte-order 32-bit words (no ntohl), so on a
                 * little-endian host this is not a true numeric
                 * monotonicity check — confirm intent (later kernels
                 * convert with ntohl before comparing).
                 */
8017                 if (l_lro)
8018                         if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
8019                                 return -1;
8020
8021                 /* timestamp echo reply should be non-zero */
8022                 if (*((u32 *)(ptr+6)) == 0)
8023                         return -1;
8024         }
8025
8026         return 0;
8027 }
8028
/*
 * s2io_club_tcp_session - LRO classification entry point for a received
 * frame.
 * @buffer:  start of the received frame (L2 header).
 * @tcp:     out: points at the frame's TCP header on success.
 * @tcp_len: out: length of the TCP payload.
 * @lro:     out: the matched/allocated LRO session, or NULL.
 * @rxdp:    receive descriptor for the frame.
 * @sp:      device private structure.
 *
 * Return codes consumed by the caller:
 *   0 - not LRO-capable at L2 / no session slot free (pkt sent up as-is)
 *   1 - aggregated into an existing session
 *   2 - flush existing session, then send this pkt up
 *   3 - new session begun with this pkt
 *   4 - aggregated and session reached max size; flush it
 *   5 - pkt not L3/L4 aggregatable; send up without creating a session
 */
static int
s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
                      struct RxD_t *rxdp, struct s2io_nic *sp)
{
        struct iphdr *ip;
        struct tcphdr *tcph;
        int ret = 0, i;

        /* L2-level screening; also locates the IP and TCP headers. */
        if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
                                         rxdp))) {
                DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
                          ip->saddr, ip->daddr);
        } else {
                return ret;
        }

        tcph = (struct tcphdr *)*tcp;
        *tcp_len = get_l4_pyld_length(ip, tcph);
        /* Look for an in-use session matching this packet's 4-tuple. */
        for (i=0; i<MAX_LRO_SESSIONS; i++) {
                struct lro *l_lro = &sp->lro0_n[i];
                if (l_lro->in_use) {
                        if (check_for_socket_match(l_lro, ip, tcph))
                                continue;
                        /* Sock pair matched */
                        *lro = l_lro;

                        /* Out-of-order segment: flush rather than merge. */
                        if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
                                DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
                                          "0x%x, actual 0x%x\n", __FUNCTION__,
                                          (*lro)->tcp_next_seq,
                                          ntohl(tcph->seq));

                                sp->mac_control.stats_info->
                                   sw_stat.outof_sequence_pkts++;
                                ret = 2;
                                break;
                        }

                        if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
                                ret = 1; /* Aggregate */
                        else
                                ret = 2; /* Flush both */
                        break;
                }
        }

        if (ret == 0) {
                /* Before searching for available LRO objects,
                 * check if the pkt is L3/L4 aggregatable. If not
                 * don't create new LRO session. Just send this
                 * packet up.
                 */
                if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
                        return 5;
                }

                /* Claim the first free session slot. */
                for (i=0; i<MAX_LRO_SESSIONS; i++) {
                        struct lro *l_lro = &sp->lro0_n[i];
                        if (!(l_lro->in_use)) {
                                *lro = l_lro;
                                ret = 3; /* Begin anew */
                                break;
                        }
                }
        }

        if (ret == 0) { /* sessions exceeded */
                DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
                          __FUNCTION__);
                *lro = NULL;
                return ret;
        }

        switch (ret) {
                case 3:
                        initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
                        break;
                case 2:
                        /* Finalize headers of the session being flushed. */
                        update_L3L4_header(sp, *lro);
                        break;
                case 1:
                        aggregate_new_rx(*lro, ip, tcph, *tcp_len);
                        /* Session full: finalize and ask caller to flush. */
                        if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
                                update_L3L4_header(sp, *lro);
                                ret = 4; /* Flush the LRO */
                        }
                        break;
                default:
                        DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
                                __FUNCTION__);
                        break;
        }

        return ret;
}
8124
8125 static void clear_lro_session(struct lro *lro)
8126 {
8127         static u16 lro_struct_size = sizeof(struct lro);
8128
8129         memset(lro, 0, lro_struct_size);
8130 }
8131
8132 static void queue_rx_frame(struct sk_buff *skb)
8133 {
8134         struct net_device *dev = skb->dev;
8135
8136         skb->protocol = eth_type_trans(skb, dev);
8137         if (napi)
8138                 netif_receive_skb(skb);
8139         else
8140                 netif_rx(skb);
8141 }
8142
8143 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8144                            struct sk_buff *skb,
8145                            u32 tcp_len)
8146 {
8147         struct sk_buff *first = lro->parent;
8148
8149         first->len += tcp_len;
8150         first->data_len = lro->frags_len;
8151         skb_pull(skb, (skb->len - tcp_len));
8152         if (skb_shinfo(first)->frag_list)
8153                 lro->last_frag->next = skb;
8154         else
8155                 skb_shinfo(first)->frag_list = skb;
8156         first->truesize += skb->truesize;
8157         lro->last_frag = skb;
8158         sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8159         return;
8160 }
8161
8162 /**
8163  * s2io_io_error_detected - called when PCI error is detected
8164  * @pdev: Pointer to PCI device
8165  * @state: The current pci connection state
8166  *
8167  * This function is called after a PCI bus error affecting
8168  * this device has been detected.
8169  */
8170 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8171                                                pci_channel_state_t state)
8172 {
8173         struct net_device *netdev = pci_get_drvdata(pdev);
8174         struct s2io_nic *sp = netdev->priv;
8175
8176         netif_device_detach(netdev);
8177
8178         if (netif_running(netdev)) {
8179                 /* Bring down the card, while avoiding PCI I/O */
8180                 do_s2io_card_down(sp, 0);
8181         }
8182         pci_disable_device(pdev);
8183
8184         return PCI_ERS_RESULT_NEED_RESET;
8185 }
8186
8187 /**
8188  * s2io_io_slot_reset - called after the pci bus has been reset.
8189  * @pdev: Pointer to PCI device
8190  *
8191  * Restart the card from scratch, as if from a cold-boot.
8192  * At this point, the card has exprienced a hard reset,
8193  * followed by fixups by BIOS, and has its config space
8194  * set up identically to what it was at cold boot.
8195  */
8196 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8197 {
8198         struct net_device *netdev = pci_get_drvdata(pdev);
8199         struct s2io_nic *sp = netdev->priv;
8200
8201         if (pci_enable_device(pdev)) {
8202                 printk(KERN_ERR "s2io: "
8203                        "Cannot re-enable PCI device after reset.\n");
8204                 return PCI_ERS_RESULT_DISCONNECT;
8205         }
8206
8207         pci_set_master(pdev);
8208         s2io_reset(sp);
8209
8210         return PCI_ERS_RESULT_RECOVERED;
8211 }
8212
8213 /**
8214  * s2io_io_resume - called when traffic can start flowing again.
8215  * @pdev: Pointer to PCI device
8216  *
8217  * This callback is called when the error recovery driver tells
8218  * us that its OK to resume normal operation.
8219  */
8220 static void s2io_io_resume(struct pci_dev *pdev)
8221 {
8222         struct net_device *netdev = pci_get_drvdata(pdev);
8223         struct s2io_nic *sp = netdev->priv;
8224
8225         if (netif_running(netdev)) {
8226                 if (s2io_card_up(sp)) {
8227                         printk(KERN_ERR "s2io: "
8228                                "Can't bring device back up after reset.\n");
8229                         return;
8230                 }
8231
8232                 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8233                         s2io_card_down(sp);
8234                         printk(KERN_ERR "s2io: "
8235                                "Can't resetore mac addr after reset.\n");
8236                         return;
8237                 }
8238         }
8239
8240         netif_device_attach(netdev);
8241         netif_wake_queue(netdev);
8242 }