Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland...
[pandora-kernel.git] / drivers / net / s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2007 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
 *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
41  * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  * napi: This parameter used to enable/disable NAPI (polling Rx)
46  *     Possible values '1' for enable and '0' for disable. Default is '1'
47  * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48  *      Possible values '1' for enable and '0' for disable. Default is '0'
49  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50  *                 Possible values '1' for enable , '0' for disable.
51  *                 Default is '2' - which means disable in promisc mode
52  *                 and enable in non-promiscuous mode.
53  ************************************************************************/
54
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
73 #include <linux/ip.h>
74 #include <linux/tcp.h>
75 #include <net/tcp.h>
76
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
79 #include <asm/io.h>
80 #include <asm/div64.h>
81 #include <asm/irq.h>
82
83 /* local include */
84 #include "s2io.h"
85 #include "s2io-regs.h"
86
#define DRV_VERSION "2.0.26.17"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

/* Per-mode RxD tables, indexed by sp->rxd_mode: bytes per RxD and RxDs
 * per block.  NOTE(review): index 0 presumably corresponds to RXD_MODE_1
 * (struct RxD1) and index 1 to RXD_MODE_3B (struct RxD3) — the size/count
 * usage in init_shared_mem() is consistent with that; confirm against the
 * RXD_MODE_* definitions in s2io.h.
 */
static int rxd_size[2] = {32,48};
static int rxd_count[2] = {127,85};
95
96 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
97 {
98         int ret;
99
100         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
102
103         return ret;
104 }
105
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 * Arguments and the full expansion are parenthesized so that the macro
 * is safe with expression arguments and inside larger expressions.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
        (((dev_type) == XFRAME_I_DEVICE) ?                      \
                ((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
                 (((subid) >= 0x640B) && ((subid) <= 0x640D)) ? 1 : 0) : 0)
115
/* True when neither remote nor local RMAC fault bit is set in the
 * adapter status value.  val64 is parenthesized so expression
 * arguments expand correctly. */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
                                        ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically claims the tasklet; non-zero means it was already in use. */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/* rx_buffer_level() return codes: ring critically empty / running low. */
#define PANIC   1
#define LOW     2
121 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
122 {
123         struct mac_info *mac_control;
124
125         mac_control = &sp->mac_control;
126         if (rxb_size <= rxd_count[sp->rxd_mode])
127                 return PANIC;
128         else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
129                 return  LOW;
130         return 0;
131 }
132
133 static inline int is_s2io_card_up(const struct s2io_nic * sp)
134 {
135         return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
136 }
137
/* Ethtool related variables and Macros. */
/* Names of the ethtool self-tests; "(offline)"/"(online)" marks whether
 * the test can run while the interface is up. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
        "Register test\t(offline)",
        "Eeprom test\t(offline)",
        "Link test\t(online)",
        "RLDRAM test\t(offline)",
        "BIST Test\t(offline)"
};
146
/* Key strings for the hardware statistics common to Xframe I and II,
 * reported via `ethtool -S`.  NOTE(review): the order must stay in sync
 * with the code that fills the ethtool stats buffer (not visible in this
 * chunk) — do not reorder entries independently.
 */
static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
        {"tmac_frms"},
        {"tmac_data_octets"},
        {"tmac_drop_frms"},
        {"tmac_mcst_frms"},
        {"tmac_bcst_frms"},
        {"tmac_pause_ctrl_frms"},
        {"tmac_ttl_octets"},
        {"tmac_ucst_frms"},
        {"tmac_nucst_frms"},
        {"tmac_any_err_frms"},
        {"tmac_ttl_less_fb_octets"},
        {"tmac_vld_ip_octets"},
        {"tmac_vld_ip"},
        {"tmac_drop_ip"},
        {"tmac_icmp"},
        {"tmac_rst_tcp"},
        {"tmac_tcp"},
        {"tmac_udp"},
        {"rmac_vld_frms"},
        {"rmac_data_octets"},
        {"rmac_fcs_err_frms"},
        {"rmac_drop_frms"},
        {"rmac_vld_mcst_frms"},
        {"rmac_vld_bcst_frms"},
        {"rmac_in_rng_len_err_frms"},
        {"rmac_out_rng_len_err_frms"},
        {"rmac_long_frms"},
        {"rmac_pause_ctrl_frms"},
        {"rmac_unsup_ctrl_frms"},
        {"rmac_ttl_octets"},
        {"rmac_accepted_ucst_frms"},
        {"rmac_accepted_nucst_frms"},
        {"rmac_discarded_frms"},
        {"rmac_drop_events"},
        {"rmac_ttl_less_fb_octets"},
        {"rmac_ttl_frms"},
        {"rmac_usized_frms"},
        {"rmac_osized_frms"},
        {"rmac_frag_frms"},
        {"rmac_jabber_frms"},
        {"rmac_ttl_64_frms"},
        {"rmac_ttl_65_127_frms"},
        {"rmac_ttl_128_255_frms"},
        {"rmac_ttl_256_511_frms"},
        {"rmac_ttl_512_1023_frms"},
        {"rmac_ttl_1024_1518_frms"},
        {"rmac_ip"},
        {"rmac_ip_octets"},
        {"rmac_hdr_err_ip"},
        {"rmac_drop_ip"},
        {"rmac_icmp"},
        {"rmac_tcp"},
        {"rmac_udp"},
        {"rmac_err_drp_udp"},
        {"rmac_xgmii_err_sym"},
        {"rmac_frms_q0"},
        {"rmac_frms_q1"},
        {"rmac_frms_q2"},
        {"rmac_frms_q3"},
        {"rmac_frms_q4"},
        {"rmac_frms_q5"},
        {"rmac_frms_q6"},
        {"rmac_frms_q7"},
        {"rmac_full_q0"},
        {"rmac_full_q1"},
        {"rmac_full_q2"},
        {"rmac_full_q3"},
        {"rmac_full_q4"},
        {"rmac_full_q5"},
        {"rmac_full_q6"},
        {"rmac_full_q7"},
        {"rmac_pause_cnt"},
        {"rmac_xgmii_data_err_cnt"},
        {"rmac_xgmii_ctrl_err_cnt"},
        {"rmac_accepted_ip"},
        {"rmac_err_tcp"},
        {"rd_req_cnt"},
        {"new_rd_req_cnt"},
        {"new_rd_req_rtry_cnt"},
        {"rd_rtry_cnt"},
        {"wr_rtry_rd_ack_cnt"},
        {"wr_req_cnt"},
        {"new_wr_req_cnt"},
        {"new_wr_req_rtry_cnt"},
        {"wr_rtry_cnt"},
        {"wr_disc_cnt"},
        {"rd_rtry_wr_ack_cnt"},
        {"txp_wr_cnt"},
        {"txd_rd_cnt"},
        {"txd_wr_cnt"},
        {"rxd_rd_cnt"},
        {"rxd_wr_cnt"},
        {"txf_rd_cnt"},
        {"rxf_wr_cnt"}
};
243
/* Additional hardware statistics keys counted only in the Xframe II
 * stat length (see XFRAME_II_STAT_LEN); order must match the stats
 * buffer fill code. */
static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
        {"rmac_ttl_1519_4095_frms"},
        {"rmac_ttl_4096_8191_frms"},
        {"rmac_ttl_8192_max_frms"},
        {"rmac_ttl_gt_max_frms"},
        {"rmac_osized_alt_frms"},
        {"rmac_jabber_alt_frms"},
        {"rmac_gt_max_alt_frms"},
        {"rmac_vlan_frms"},
        {"rmac_len_discard"},
        {"rmac_fcs_discard"},
        {"rmac_pf_discard"},
        {"rmac_da_discard"},
        {"rmac_red_discard"},
        {"rmac_rts_discard"},
        {"rmac_ingm_full_discard"},
        {"link_fault_cnt"}
};
262
/* Software (driver-maintained) statistics keys, appended after the
 * hardware keys for both Xframe I and II (see XFRAME_I_STAT_LEN /
 * XFRAME_II_STAT_LEN); order must match the stats buffer fill code. */
static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
        {"\n DRIVER STATISTICS"},
        {"single_bit_ecc_errs"},
        {"double_bit_ecc_errs"},
        {"parity_err_cnt"},
        {"serious_err_cnt"},
        {"soft_reset_cnt"},
        {"fifo_full_cnt"},
        {"ring_0_full_cnt"},
        {"ring_1_full_cnt"},
        {"ring_2_full_cnt"},
        {"ring_3_full_cnt"},
        {"ring_4_full_cnt"},
        {"ring_5_full_cnt"},
        {"ring_6_full_cnt"},
        {"ring_7_full_cnt"},
        {"alarm_transceiver_temp_high"},
        {"alarm_transceiver_temp_low"},
        {"alarm_laser_bias_current_high"},
        {"alarm_laser_bias_current_low"},
        {"alarm_laser_output_power_high"},
        {"alarm_laser_output_power_low"},
        {"warn_transceiver_temp_high"},
        {"warn_transceiver_temp_low"},
        {"warn_laser_bias_current_high"},
        {"warn_laser_bias_current_low"},
        {"warn_laser_output_power_high"},
        {"warn_laser_output_power_low"},
        {"lro_aggregated_pkts"},
        {"lro_flush_both_count"},
        {"lro_out_of_sequence_pkts"},
        {"lro_flush_due_to_max_pkts"},
        {"lro_avg_aggr_pkts"},
        {"mem_alloc_fail_cnt"},
        {"pci_map_fail_cnt"},
        {"watchdog_timer_cnt"},
        {"mem_allocated"},
        {"mem_freed"},
        {"link_up_cnt"},
        {"link_down_cnt"},
        {"link_up_time"},
        {"link_down_time"},
        {"tx_tcode_buf_abort_cnt"},
        {"tx_tcode_desc_abort_cnt"},
        {"tx_tcode_parity_err_cnt"},
        {"tx_tcode_link_loss_cnt"},
        {"tx_tcode_list_proc_err_cnt"},
        {"rx_tcode_parity_err_cnt"},
        {"rx_tcode_abort_cnt"},
        {"rx_tcode_parity_abort_cnt"},
        {"rx_tcode_rda_fail_cnt"},
        {"rx_tcode_unkn_prot_cnt"},
        {"rx_tcode_fcs_err_cnt"},
        {"rx_tcode_buf_size_err_cnt"},
        {"rx_tcode_rxd_corrupt_cnt"},
        {"rx_tcode_unkn_err_cnt"},
        {"tda_err_cnt"},
        {"pfc_err_cnt"},
        {"pcc_err_cnt"},
        {"tti_err_cnt"},
        {"tpa_err_cnt"},
        {"sm_err_cnt"},
        {"lso_err_cnt"},
        {"mac_tmac_err_cnt"},
        {"mac_rmac_err_cnt"},
        {"xgxs_txgxs_err_cnt"},
        {"xgxs_rxgxs_err_cnt"},
        {"rc_err_cnt"},
        {"prc_pcix_err_cnt"},
        {"rpa_err_cnt"},
        {"rda_err_cnt"},
        {"rti_err_cnt"},
        {"mc_err_cnt"}
};
337
/* Entry counts for each key table and the combined per-device totals.
 * Every expansion is fully parenthesized so these macros compose safely
 * inside arithmetic expressions (the unparenthesized originals would
 * mis-associate, e.g. when multiplied or subtracted). */
#define S2IO_XENA_STAT_LEN (sizeof(ethtool_xena_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_ENHANCED_STAT_LEN (sizeof(ethtool_enhanced_stats_keys) / \
                                        ETH_GSTRING_LEN)
#define S2IO_DRIVER_STAT_LEN (sizeof(ethtool_driver_stats_keys) / ETH_GSTRING_LEN)

#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN   (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN        (S2IO_TEST_LEN * ETH_GSTRING_LEN)
351
/* Initialize and arm a kernel timer: set handler and callback data,
 * then schedule it 'exp' jiffies from now.  Wrapped in do { } while (0)
 * so the macro expands to a single statement and is safe in unbraced
 * if/else bodies (the original four bare statements were not). */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)                \
        do {                                                    \
                init_timer(&timer);                             \
                timer.function = handle;                        \
                timer.data = (unsigned long) arg;               \
                mod_timer(&timer, (jiffies + exp));             \
        } while (0)
357
358 /* copy mac addr to def_mac_addr array */
359 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
360 {
361         sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
362         sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
363         sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
364         sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
365         sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
366         sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
367 }
/* Add the vlan */
/* Record the VLAN group handed down by the 8021q layer.  The tx_lock
 * serializes the vlgrp update against other users of the same lock
 * (presumably the transmit path — confirm at the other tx_lock sites). */
static void s2io_vlan_rx_register(struct net_device *dev,
                                        struct vlan_group *grp)
{
        struct s2io_nic *nic = dev->priv;
        unsigned long flags;

        spin_lock_irqsave(&nic->tx_lock, flags);
        nic->vlgrp = grp;
        spin_unlock_irqrestore(&nic->tx_lock, flags);
}
379
/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not.
 * Non-zero when the hardware is currently stripping VLAN tags on receive. */
static int vlan_strip_flag;
382
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

/* Terminator value for the DTX configuration sequences below. */
#define END_SIGN        0x0
/* XAUI DTX configuration sequence for Xframe II (Hercules): pairs of
 * "set address" then "write data" commands, ending with END_SIGN. */
static const u64 herc_act_dtx_cfg[] = {
        /* Set address */
        0x8000051536750000ULL, 0x80000515367500E0ULL,
        /* Write data */
        0x8000051536750004ULL, 0x80000515367500E4ULL,
        /* Set address */
        0x80010515003F0000ULL, 0x80010515003F00E0ULL,
        /* Write data */
        0x80010515003F0004ULL, 0x80010515003F00E4ULL,
        /* Set address */
        0x801205150D440000ULL, 0x801205150D4400E0ULL,
        /* Write data */
        0x801205150D440004ULL, 0x801205150D4400E4ULL,
        /* Set address */
        0x80020515F2100000ULL, 0x80020515F21000E0ULL,
        /* Write data */
        0x80020515F2100004ULL, 0x80020515F21000E4ULL,
        /* Done */
        END_SIGN
};
409
/* XAUI DTX configuration sequence for Xframe I (Xena): alternating
 * "set address" / "write data" command pairs, terminated by END_SIGN. */
static const u64 xena_dtx_cfg[] = {
        /* Set address */
        0x8000051500000000ULL, 0x80000515000000E0ULL,
        /* Write data */
        0x80000515D9350004ULL, 0x80000515D93500E4ULL,
        /* Set address */
        0x8001051500000000ULL, 0x80010515000000E0ULL,
        /* Write data */
        0x80010515001E0004ULL, 0x80010515001E00E4ULL,
        /* Set address */
        0x8002051500000000ULL, 0x80020515000000E0ULL,
        /* Write data */
        0x80020515F2100004ULL, 0x80020515F21000E4ULL,
        END_SIGN
};
425
/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.
 */
/* Register write sequence applied as a workaround; terminated by
 * END_SIGN like the DTX tables above. */
static const u64 fix_mac[] = {
        0x0060000000000000ULL, 0x0060600000000000ULL,
        0x0040600000000000ULL, 0x0000600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0000600000000000ULL,
        0x0040600000000000ULL, 0x0060600000000000ULL,
        END_SIGN
};
447
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
/* Number of Tx FIFOs and Rx rings (see the file header comment). */
S2IO_PARM_INT(tx_fifo_num, 1);
S2IO_PARM_INT(rx_ring_num, 1);


/* Rx descriptor mode for all rings; valid values are 1 and 2. */
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
/* Pause frame time value programmed into the RMAC. */
S2IO_PARM_INT(rmac_pause_time, 0x100);
/* Per-queue-group pause thresholds (queues 0-3 and 4-7). */
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
/* Utilization monitoring periods for the Tx and Rx MACs. */
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */
static unsigned int lro_enable;
module_param_named(lro, lro_enable, uint, 0);

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-FIFO Tx descriptor counts and per-ring Rx block counts / RTS
 * frame lengths; all overridable as module parameter arrays below. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
494
/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
        /* Xframe I variants (WIN / UNI device IDs) */
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
         PCI_ANY_ID, PCI_ANY_ID},
        /* Xframe II (Hercules) variants */
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
         PCI_ANY_ID, PCI_ANY_ID},
        {0,}
};
510
MODULE_DEVICE_TABLE(pci, s2io_tbl);

/* PCI error-recovery callbacks (invoked by the PCI core on bus errors). */
static struct pci_error_handlers s2io_err_handler = {
        .error_detected = s2io_io_error_detected,
        .slot_reset = s2io_io_slot_reset,
        .resume = s2io_io_resume,
};

/* Driver registration structure tying the device table to the
 * probe/remove entry points defined later in this file. */
static struct pci_driver s2io_driver = {
      .name = "S2IO",
      .id_table = s2io_tbl,
      .probe = s2io_init_nic,
      .remove = __devexit_p(s2io_rem_nic),
      .err_handler = &s2io_err_handler,
};
526
/* A simplifier macro used both by init and free shared_mem Fns().
 * Ceiling division: number of pages needed to hold 'len' items at
 * 'per_each' items per page.  Both arguments are parenthesized so
 * expression arguments (e.g. 'a + b') expand correctly — the original
 * left 'per_each' bare as the divisor. */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
529
530 /**
531  * init_shared_mem - Allocation and Initialization of Memory
532  * @nic: Device private variable.
533  * Description: The function allocates all the memory areas shared
534  * between the NIC and the driver. This includes Tx descriptors,
535  * Rx descriptors and the statistics block.
536  */
537
538 static int init_shared_mem(struct s2io_nic *nic)
539 {
540         u32 size;
541         void *tmp_v_addr, *tmp_v_addr_next;
542         dma_addr_t tmp_p_addr, tmp_p_addr_next;
543         struct RxD_block *pre_rxd_blk = NULL;
544         int i, j, blk_cnt;
545         int lst_size, lst_per_page;
546         struct net_device *dev = nic->dev;
547         unsigned long tmp;
548         struct buffAdd *ba;
549
550         struct mac_info *mac_control;
551         struct config_param *config;
552         unsigned long long mem_allocated = 0;
553
554         mac_control = &nic->mac_control;
555         config = &nic->config;
556
557
558         /* Allocation and initialization of TXDLs in FIOFs */
559         size = 0;
560         for (i = 0; i < config->tx_fifo_num; i++) {
561                 size += config->tx_cfg[i].fifo_len;
562         }
563         if (size > MAX_AVAILABLE_TXDS) {
564                 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
565                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
566                 return -EINVAL;
567         }
568
569         lst_size = (sizeof(struct TxD) * config->max_txds);
570         lst_per_page = PAGE_SIZE / lst_size;
571
572         for (i = 0; i < config->tx_fifo_num; i++) {
573                 int fifo_len = config->tx_cfg[i].fifo_len;
574                 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
575                 mac_control->fifos[i].list_info = kzalloc(list_holder_size,
576                                                           GFP_KERNEL);
577                 if (!mac_control->fifos[i].list_info) {
578                         DBG_PRINT(INFO_DBG,
579                                   "Malloc failed for list_info\n");
580                         return -ENOMEM;
581                 }
582                 mem_allocated += list_holder_size;
583         }
584         for (i = 0; i < config->tx_fifo_num; i++) {
585                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
586                                                 lst_per_page);
587                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
588                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
589                     config->tx_cfg[i].fifo_len - 1;
590                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
591                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
592                     config->tx_cfg[i].fifo_len - 1;
593                 mac_control->fifos[i].fifo_no = i;
594                 mac_control->fifos[i].nic = nic;
595                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
596
597                 for (j = 0; j < page_num; j++) {
598                         int k = 0;
599                         dma_addr_t tmp_p;
600                         void *tmp_v;
601                         tmp_v = pci_alloc_consistent(nic->pdev,
602                                                      PAGE_SIZE, &tmp_p);
603                         if (!tmp_v) {
604                                 DBG_PRINT(INFO_DBG,
605                                           "pci_alloc_consistent ");
606                                 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
607                                 return -ENOMEM;
608                         }
609                         /* If we got a zero DMA address(can happen on
610                          * certain platforms like PPC), reallocate.
611                          * Store virtual address of page we don't want,
612                          * to be freed later.
613                          */
614                         if (!tmp_p) {
615                                 mac_control->zerodma_virt_addr = tmp_v;
616                                 DBG_PRINT(INIT_DBG,
617                                 "%s: Zero DMA address for TxDL. ", dev->name);
618                                 DBG_PRINT(INIT_DBG,
619                                 "Virtual address %p\n", tmp_v);
620                                 tmp_v = pci_alloc_consistent(nic->pdev,
621                                                      PAGE_SIZE, &tmp_p);
622                                 if (!tmp_v) {
623                                         DBG_PRINT(INFO_DBG,
624                                           "pci_alloc_consistent ");
625                                         DBG_PRINT(INFO_DBG, "failed for TxDL\n");
626                                         return -ENOMEM;
627                                 }
628                                 mem_allocated += PAGE_SIZE;
629                         }
630                         while (k < lst_per_page) {
631                                 int l = (j * lst_per_page) + k;
632                                 if (l == config->tx_cfg[i].fifo_len)
633                                         break;
634                                 mac_control->fifos[i].list_info[l].list_virt_addr =
635                                     tmp_v + (k * lst_size);
636                                 mac_control->fifos[i].list_info[l].list_phy_addr =
637                                     tmp_p + (k * lst_size);
638                                 k++;
639                         }
640                 }
641         }
642
643         nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
644         if (!nic->ufo_in_band_v)
645                 return -ENOMEM;
646          mem_allocated += (size * sizeof(u64));
647
648         /* Allocation and initialization of RXDs in Rings */
649         size = 0;
650         for (i = 0; i < config->rx_ring_num; i++) {
651                 if (config->rx_cfg[i].num_rxd %
652                     (rxd_count[nic->rxd_mode] + 1)) {
653                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
654                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
655                                   i);
656                         DBG_PRINT(ERR_DBG, "RxDs per Block");
657                         return FAILURE;
658                 }
659                 size += config->rx_cfg[i].num_rxd;
660                 mac_control->rings[i].block_count =
661                         config->rx_cfg[i].num_rxd /
662                         (rxd_count[nic->rxd_mode] + 1 );
663                 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
664                         mac_control->rings[i].block_count;
665         }
666         if (nic->rxd_mode == RXD_MODE_1)
667                 size = (size * (sizeof(struct RxD1)));
668         else
669                 size = (size * (sizeof(struct RxD3)));
670
671         for (i = 0; i < config->rx_ring_num; i++) {
672                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
673                 mac_control->rings[i].rx_curr_get_info.offset = 0;
674                 mac_control->rings[i].rx_curr_get_info.ring_len =
675                     config->rx_cfg[i].num_rxd - 1;
676                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
677                 mac_control->rings[i].rx_curr_put_info.offset = 0;
678                 mac_control->rings[i].rx_curr_put_info.ring_len =
679                     config->rx_cfg[i].num_rxd - 1;
680                 mac_control->rings[i].nic = nic;
681                 mac_control->rings[i].ring_no = i;
682
683                 blk_cnt = config->rx_cfg[i].num_rxd /
684                                 (rxd_count[nic->rxd_mode] + 1);
685                 /*  Allocating all the Rx blocks */
686                 for (j = 0; j < blk_cnt; j++) {
687                         struct rx_block_info *rx_blocks;
688                         int l;
689
690                         rx_blocks = &mac_control->rings[i].rx_blocks[j];
691                         size = SIZE_OF_BLOCK; //size is always page size
692                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
693                                                           &tmp_p_addr);
694                         if (tmp_v_addr == NULL) {
695                                 /*
696                                  * In case of failure, free_shared_mem()
697                                  * is called, which should free any
698                                  * memory that was alloced till the
699                                  * failure happened.
700                                  */
701                                 rx_blocks->block_virt_addr = tmp_v_addr;
702                                 return -ENOMEM;
703                         }
704                         mem_allocated += size;
705                         memset(tmp_v_addr, 0, size);
706                         rx_blocks->block_virt_addr = tmp_v_addr;
707                         rx_blocks->block_dma_addr = tmp_p_addr;
708                         rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
709                                                   rxd_count[nic->rxd_mode],
710                                                   GFP_KERNEL);
711                         if (!rx_blocks->rxds)
712                                 return -ENOMEM;
713                         mem_allocated +=
714                         (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
715                         for (l=0; l<rxd_count[nic->rxd_mode];l++) {
716                                 rx_blocks->rxds[l].virt_addr =
717                                         rx_blocks->block_virt_addr +
718                                         (rxd_size[nic->rxd_mode] * l);
719                                 rx_blocks->rxds[l].dma_addr =
720                                         rx_blocks->block_dma_addr +
721                                         (rxd_size[nic->rxd_mode] * l);
722                         }
723                 }
724                 /* Interlinking all Rx Blocks */
725                 for (j = 0; j < blk_cnt; j++) {
726                         tmp_v_addr =
727                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
728                         tmp_v_addr_next =
729                                 mac_control->rings[i].rx_blocks[(j + 1) %
730                                               blk_cnt].block_virt_addr;
731                         tmp_p_addr =
732                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
733                         tmp_p_addr_next =
734                                 mac_control->rings[i].rx_blocks[(j + 1) %
735                                               blk_cnt].block_dma_addr;
736
737                         pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
738                         pre_rxd_blk->reserved_2_pNext_RxD_block =
739                             (unsigned long) tmp_v_addr_next;
740                         pre_rxd_blk->pNext_RxD_Blk_physical =
741                             (u64) tmp_p_addr_next;
742                 }
743         }
744         if (nic->rxd_mode == RXD_MODE_3B) {
745                 /*
746                  * Allocation of Storages for buffer addresses in 2BUFF mode
747                  * and the buffers as well.
748                  */
749                 for (i = 0; i < config->rx_ring_num; i++) {
750                         blk_cnt = config->rx_cfg[i].num_rxd /
751                            (rxd_count[nic->rxd_mode]+ 1);
752                         mac_control->rings[i].ba =
753                                 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
754                                      GFP_KERNEL);
755                         if (!mac_control->rings[i].ba)
756                                 return -ENOMEM;
757                         mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
758                         for (j = 0; j < blk_cnt; j++) {
759                                 int k = 0;
760                                 mac_control->rings[i].ba[j] =
761                                         kmalloc((sizeof(struct buffAdd) *
762                                                 (rxd_count[nic->rxd_mode] + 1)),
763                                                 GFP_KERNEL);
764                                 if (!mac_control->rings[i].ba[j])
765                                         return -ENOMEM;
766                                 mem_allocated += (sizeof(struct buffAdd) *  \
767                                         (rxd_count[nic->rxd_mode] + 1));
768                                 while (k != rxd_count[nic->rxd_mode]) {
769                                         ba = &mac_control->rings[i].ba[j][k];
770
771                                         ba->ba_0_org = (void *) kmalloc
772                                             (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
773                                         if (!ba->ba_0_org)
774                                                 return -ENOMEM;
775                                         mem_allocated +=
776                                                 (BUF0_LEN + ALIGN_SIZE);
777                                         tmp = (unsigned long)ba->ba_0_org;
778                                         tmp += ALIGN_SIZE;
779                                         tmp &= ~((unsigned long) ALIGN_SIZE);
780                                         ba->ba_0 = (void *) tmp;
781
782                                         ba->ba_1_org = (void *) kmalloc
783                                             (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
784                                         if (!ba->ba_1_org)
785                                                 return -ENOMEM;
786                                         mem_allocated
787                                                 += (BUF1_LEN + ALIGN_SIZE);
788                                         tmp = (unsigned long) ba->ba_1_org;
789                                         tmp += ALIGN_SIZE;
790                                         tmp &= ~((unsigned long) ALIGN_SIZE);
791                                         ba->ba_1 = (void *) tmp;
792                                         k++;
793                                 }
794                         }
795                 }
796         }
797
798         /* Allocation and initialization of Statistics block */
799         size = sizeof(struct stat_block);
800         mac_control->stats_mem = pci_alloc_consistent
801             (nic->pdev, size, &mac_control->stats_mem_phy);
802
803         if (!mac_control->stats_mem) {
804                 /*
805                  * In case of failure, free_shared_mem() is called, which
806                  * should free any memory that was alloced till the
807                  * failure happened.
808                  */
809                 return -ENOMEM;
810         }
811         mem_allocated += size;
812         mac_control->stats_mem_sz = size;
813
814         tmp_v_addr = mac_control->stats_mem;
815         mac_control->stats_info = (struct stat_block *) tmp_v_addr;
816         memset(tmp_v_addr, 0, size);
817         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
818                   (unsigned long long) tmp_p_addr);
819         mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
820         return SUCCESS;
821 }
822
823 /**
824  * free_shared_mem - Free the allocated Memory
825  * @nic:  Device private variable.
826  * Description: This function is to free all memory locations allocated by
827  * the init_shared_mem() function and return it to the kernel.
828  */
829
830 static void free_shared_mem(struct s2io_nic *nic)
831 {
832         int i, j, blk_cnt, size;
833         u32 ufo_size = 0;
834         void *tmp_v_addr;
835         dma_addr_t tmp_p_addr;
836         struct mac_info *mac_control;
837         struct config_param *config;
838         int lst_size, lst_per_page;
839         struct net_device *dev;
840         int page_num = 0;
841
842         if (!nic)
843                 return;
844
845         dev = nic->dev;
846
847         mac_control = &nic->mac_control;
848         config = &nic->config;
849
850         lst_size = (sizeof(struct TxD) * config->max_txds);
851         lst_per_page = PAGE_SIZE / lst_size;
852
853         for (i = 0; i < config->tx_fifo_num; i++) {
854                 ufo_size += config->tx_cfg[i].fifo_len;
855                 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
856                                                         lst_per_page);
857                 for (j = 0; j < page_num; j++) {
858                         int mem_blks = (j * lst_per_page);
859                         if (!mac_control->fifos[i].list_info)
860                                 return;
861                         if (!mac_control->fifos[i].list_info[mem_blks].
862                                  list_virt_addr)
863                                 break;
864                         pci_free_consistent(nic->pdev, PAGE_SIZE,
865                                             mac_control->fifos[i].
866                                             list_info[mem_blks].
867                                             list_virt_addr,
868                                             mac_control->fifos[i].
869                                             list_info[mem_blks].
870                                             list_phy_addr);
871                         nic->mac_control.stats_info->sw_stat.mem_freed
872                                                 += PAGE_SIZE;
873                 }
874                 /* If we got a zero DMA address during allocation,
875                  * free the page now
876                  */
877                 if (mac_control->zerodma_virt_addr) {
878                         pci_free_consistent(nic->pdev, PAGE_SIZE,
879                                             mac_control->zerodma_virt_addr,
880                                             (dma_addr_t)0);
881                         DBG_PRINT(INIT_DBG,
882                                 "%s: Freeing TxDL with zero DMA addr. ",
883                                 dev->name);
884                         DBG_PRINT(INIT_DBG, "Virtual address %p\n",
885                                 mac_control->zerodma_virt_addr);
886                         nic->mac_control.stats_info->sw_stat.mem_freed
887                                                 += PAGE_SIZE;
888                 }
889                 kfree(mac_control->fifos[i].list_info);
890                 nic->mac_control.stats_info->sw_stat.mem_freed +=
891                 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
892         }
893
894         size = SIZE_OF_BLOCK;
895         for (i = 0; i < config->rx_ring_num; i++) {
896                 blk_cnt = mac_control->rings[i].block_count;
897                 for (j = 0; j < blk_cnt; j++) {
898                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
899                                 block_virt_addr;
900                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
901                                 block_dma_addr;
902                         if (tmp_v_addr == NULL)
903                                 break;
904                         pci_free_consistent(nic->pdev, size,
905                                             tmp_v_addr, tmp_p_addr);
906                         nic->mac_control.stats_info->sw_stat.mem_freed += size;
907                         kfree(mac_control->rings[i].rx_blocks[j].rxds);
908                         nic->mac_control.stats_info->sw_stat.mem_freed +=
909                         ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
910                 }
911         }
912
913         if (nic->rxd_mode == RXD_MODE_3B) {
914                 /* Freeing buffer storage addresses in 2BUFF mode. */
915                 for (i = 0; i < config->rx_ring_num; i++) {
916                         blk_cnt = config->rx_cfg[i].num_rxd /
917                             (rxd_count[nic->rxd_mode] + 1);
918                         for (j = 0; j < blk_cnt; j++) {
919                                 int k = 0;
920                                 if (!mac_control->rings[i].ba[j])
921                                         continue;
922                                 while (k != rxd_count[nic->rxd_mode]) {
923                                         struct buffAdd *ba =
924                                                 &mac_control->rings[i].ba[j][k];
925                                         kfree(ba->ba_0_org);
926                                         nic->mac_control.stats_info->sw_stat.\
927                                         mem_freed += (BUF0_LEN + ALIGN_SIZE);
928                                         kfree(ba->ba_1_org);
929                                         nic->mac_control.stats_info->sw_stat.\
930                                         mem_freed += (BUF1_LEN + ALIGN_SIZE);
931                                         k++;
932                                 }
933                                 kfree(mac_control->rings[i].ba[j]);
934                                 nic->mac_control.stats_info->sw_stat.mem_freed +=
935                                         (sizeof(struct buffAdd) *
936                                         (rxd_count[nic->rxd_mode] + 1));
937                         }
938                         kfree(mac_control->rings[i].ba);
939                         nic->mac_control.stats_info->sw_stat.mem_freed +=
940                         (sizeof(struct buffAdd *) * blk_cnt);
941                 }
942         }
943
944         if (mac_control->stats_mem) {
945                 pci_free_consistent(nic->pdev,
946                                     mac_control->stats_mem_sz,
947                                     mac_control->stats_mem,
948                                     mac_control->stats_mem_phy);
949                 nic->mac_control.stats_info->sw_stat.mem_freed +=
950                         mac_control->stats_mem_sz;
951         }
952         if (nic->ufo_in_band_v) {
953                 kfree(nic->ufo_in_band_v);
954                 nic->mac_control.stats_info->sw_stat.mem_freed
955                         += (ufo_size * sizeof(u64));
956         }
957 }
958
959 /**
960  * s2io_verify_pci_mode -
961  */
962
963 static int s2io_verify_pci_mode(struct s2io_nic *nic)
964 {
965         struct XENA_dev_config __iomem *bar0 = nic->bar0;
966         register u64 val64 = 0;
967         int     mode;
968
969         val64 = readq(&bar0->pci_mode);
970         mode = (u8)GET_PCI_MODE(val64);
971
972         if ( val64 & PCI_MODE_UNKNOWN_MODE)
973                 return -1;      /* Unknown PCI mode */
974         return mode;
975 }
976
977 #define NEC_VENID   0x1033
978 #define NEC_DEVID   0x0125
979 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
980 {
981         struct pci_dev *tdev = NULL;
982         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
983                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
984                         if (tdev->bus == s2io_pdev->bus->parent)
985                                 pci_dev_put(tdev);
986                                 return 1;
987                 }
988         }
989         return 0;
990 }
991
/* PCI/PCI-X bus clock in MHz, indexed by the mode value decoded from the
 * pci_mode register (PCI_MODE_PCI_33 .. PCI_MODE_PCIX_M2_133).
 */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
993 /**
994  * s2io_print_pci_mode -
995  */
996 static int s2io_print_pci_mode(struct s2io_nic *nic)
997 {
998         struct XENA_dev_config __iomem *bar0 = nic->bar0;
999         register u64 val64 = 0;
1000         int     mode;
1001         struct config_param *config = &nic->config;
1002
1003         val64 = readq(&bar0->pci_mode);
1004         mode = (u8)GET_PCI_MODE(val64);
1005
1006         if ( val64 & PCI_MODE_UNKNOWN_MODE)
1007                 return -1;      /* Unknown PCI mode */
1008
1009         config->bus_speed = bus_speed[mode];
1010
1011         if (s2io_on_nec_bridge(nic->pdev)) {
1012                 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1013                                                         nic->dev->name);
1014                 return mode;
1015         }
1016
1017         if (val64 & PCI_MODE_32_BITS) {
1018                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1019         } else {
1020                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1021         }
1022
1023         switch(mode) {
1024                 case PCI_MODE_PCI_33:
1025                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1026                         break;
1027                 case PCI_MODE_PCI_66:
1028                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1029                         break;
1030                 case PCI_MODE_PCIX_M1_66:
1031                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1032                         break;
1033                 case PCI_MODE_PCIX_M1_100:
1034                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1035                         break;
1036                 case PCI_MODE_PCIX_M1_133:
1037                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1038                         break;
1039                 case PCI_MODE_PCIX_M2_66:
1040                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1041                         break;
1042                 case PCI_MODE_PCIX_M2_100:
1043                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1044                         break;
1045                 case PCI_MODE_PCIX_M2_133:
1046                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1047                         break;
1048                 default:
1049                         return -1;      /* Unsupported bus speed */
1050         }
1051
1052         return mode;
1053 }
1054
1055 /**
1056  *  init_nic - Initialization of hardware
 *  @nic: device private variable
1058  *  Description: The function sequentially configures every block
1059  *  of the H/W from their reset values.
1060  *  Return Value:  SUCCESS on success and
1061  *  '-1' on failure (endian settings incorrect).
1062  */
1063
1064 static int init_nic(struct s2io_nic *nic)
1065 {
1066         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1067         struct net_device *dev = nic->dev;
1068         register u64 val64 = 0;
1069         void __iomem *add;
1070         u32 time;
1071         int i, j;
1072         struct mac_info *mac_control;
1073         struct config_param *config;
1074         int dtx_cnt = 0;
1075         unsigned long long mem_share;
1076         int mem_size;
1077
1078         mac_control = &nic->mac_control;
1079         config = &nic->config;
1080
	/* to set the swapper control on the card */
1082         if(s2io_set_swapper(nic)) {
1083                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1084                 return -EIO;
1085         }
1086
1087         /*
1088          * Herc requires EOI to be removed from reset before XGXS, so..
1089          */
1090         if (nic->device_type & XFRAME_II_DEVICE) {
1091                 val64 = 0xA500000000ULL;
1092                 writeq(val64, &bar0->sw_reset);
1093                 msleep(500);
1094                 val64 = readq(&bar0->sw_reset);
1095         }
1096
1097         /* Remove XGXS from reset state */
1098         val64 = 0;
1099         writeq(val64, &bar0->sw_reset);
1100         msleep(500);
1101         val64 = readq(&bar0->sw_reset);
1102
1103         /* Ensure that it's safe to access registers by checking
1104          * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1105          */
1106         if (nic->device_type == XFRAME_II_DEVICE) {
1107                 for (i = 0; i < 50; i++) {
1108                         val64 = readq(&bar0->adapter_status);
1109                         if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1110                                 break;
1111                         msleep(10);
1112                 }
1113                 if (i == 50)
1114                         return -ENODEV;
1115         }
1116
1117         /*  Enable Receiving broadcasts */
1118         add = &bar0->mac_cfg;
1119         val64 = readq(&bar0->mac_cfg);
1120         val64 |= MAC_RMAC_BCAST_ENABLE;
1121         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1122         writel((u32) val64, add);
1123         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1124         writel((u32) (val64 >> 32), (add + 4));
1125
1126         /* Read registers in all blocks */
1127         val64 = readq(&bar0->mac_int_mask);
1128         val64 = readq(&bar0->mc_int_mask);
1129         val64 = readq(&bar0->xgxs_int_mask);
1130
1131         /*  Set MTU */
1132         val64 = dev->mtu;
1133         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1134
1135         if (nic->device_type & XFRAME_II_DEVICE) {
1136                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1137                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1138                                           &bar0->dtx_control, UF);
1139                         if (dtx_cnt & 0x1)
1140                                 msleep(1); /* Necessary!! */
1141                         dtx_cnt++;
1142                 }
1143         } else {
1144                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1145                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1146                                           &bar0->dtx_control, UF);
1147                         val64 = readq(&bar0->dtx_control);
1148                         dtx_cnt++;
1149                 }
1150         }
1151
1152         /*  Tx DMA Initialization */
1153         val64 = 0;
1154         writeq(val64, &bar0->tx_fifo_partition_0);
1155         writeq(val64, &bar0->tx_fifo_partition_1);
1156         writeq(val64, &bar0->tx_fifo_partition_2);
1157         writeq(val64, &bar0->tx_fifo_partition_3);
1158
1159
1160         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1161                 val64 |=
1162                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1163                          13) | vBIT(config->tx_cfg[i].fifo_priority,
1164                                     ((i * 32) + 5), 3);
1165
1166                 if (i == (config->tx_fifo_num - 1)) {
1167                         if (i % 2 == 0)
1168                                 i++;
1169                 }
1170
1171                 switch (i) {
1172                 case 1:
1173                         writeq(val64, &bar0->tx_fifo_partition_0);
1174                         val64 = 0;
1175                         break;
1176                 case 3:
1177                         writeq(val64, &bar0->tx_fifo_partition_1);
1178                         val64 = 0;
1179                         break;
1180                 case 5:
1181                         writeq(val64, &bar0->tx_fifo_partition_2);
1182                         val64 = 0;
1183                         break;
1184                 case 7:
1185                         writeq(val64, &bar0->tx_fifo_partition_3);
1186                         break;
1187                 }
1188         }
1189
1190         /*
1191          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1192          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1193          */
1194         if ((nic->device_type == XFRAME_I_DEVICE) &&
1195                 (nic->pdev->revision < 4))
1196                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1197
1198         val64 = readq(&bar0->tx_fifo_partition_0);
1199         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1200                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1201
1202         /*
1203          * Initialization of Tx_PA_CONFIG register to ignore packet
1204          * integrity checking.
1205          */
1206         val64 = readq(&bar0->tx_pa_cfg);
1207         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1208             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1209         writeq(val64, &bar0->tx_pa_cfg);
1210
	/* Rx DMA initialization. */
1212         val64 = 0;
1213         for (i = 0; i < config->rx_ring_num; i++) {
1214                 val64 |=
1215                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1216                          3);
1217         }
1218         writeq(val64, &bar0->rx_queue_priority);
1219
1220         /*
1221          * Allocating equal share of memory to all the
1222          * configured Rings.
1223          */
1224         val64 = 0;
1225         if (nic->device_type & XFRAME_II_DEVICE)
1226                 mem_size = 32;
1227         else
1228                 mem_size = 64;
1229
1230         for (i = 0; i < config->rx_ring_num; i++) {
1231                 switch (i) {
1232                 case 0:
1233                         mem_share = (mem_size / config->rx_ring_num +
1234                                      mem_size % config->rx_ring_num);
1235                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1236                         continue;
1237                 case 1:
1238                         mem_share = (mem_size / config->rx_ring_num);
1239                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1240                         continue;
1241                 case 2:
1242                         mem_share = (mem_size / config->rx_ring_num);
1243                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1244                         continue;
1245                 case 3:
1246                         mem_share = (mem_size / config->rx_ring_num);
1247                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1248                         continue;
1249                 case 4:
1250                         mem_share = (mem_size / config->rx_ring_num);
1251                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1252                         continue;
1253                 case 5:
1254                         mem_share = (mem_size / config->rx_ring_num);
1255                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1256                         continue;
1257                 case 6:
1258                         mem_share = (mem_size / config->rx_ring_num);
1259                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1260                         continue;
1261                 case 7:
1262                         mem_share = (mem_size / config->rx_ring_num);
1263                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1264                         continue;
1265                 }
1266         }
1267         writeq(val64, &bar0->rx_queue_cfg);
1268
1269         /*
1270          * Filling Tx round robin registers
1271          * as per the number of FIFOs
1272          */
1273         switch (config->tx_fifo_num) {
1274         case 1:
1275                 val64 = 0x0000000000000000ULL;
1276                 writeq(val64, &bar0->tx_w_round_robin_0);
1277                 writeq(val64, &bar0->tx_w_round_robin_1);
1278                 writeq(val64, &bar0->tx_w_round_robin_2);
1279                 writeq(val64, &bar0->tx_w_round_robin_3);
1280                 writeq(val64, &bar0->tx_w_round_robin_4);
1281                 break;
1282         case 2:
1283                 val64 = 0x0000010000010000ULL;
1284                 writeq(val64, &bar0->tx_w_round_robin_0);
1285                 val64 = 0x0100000100000100ULL;
1286                 writeq(val64, &bar0->tx_w_round_robin_1);
1287                 val64 = 0x0001000001000001ULL;
1288                 writeq(val64, &bar0->tx_w_round_robin_2);
1289                 val64 = 0x0000010000010000ULL;
1290                 writeq(val64, &bar0->tx_w_round_robin_3);
1291                 val64 = 0x0100000000000000ULL;
1292                 writeq(val64, &bar0->tx_w_round_robin_4);
1293                 break;
1294         case 3:
1295                 val64 = 0x0001000102000001ULL;
1296                 writeq(val64, &bar0->tx_w_round_robin_0);
1297                 val64 = 0x0001020000010001ULL;
1298                 writeq(val64, &bar0->tx_w_round_robin_1);
1299                 val64 = 0x0200000100010200ULL;
1300                 writeq(val64, &bar0->tx_w_round_robin_2);
1301                 val64 = 0x0001000102000001ULL;
1302                 writeq(val64, &bar0->tx_w_round_robin_3);
1303                 val64 = 0x0001020000000000ULL;
1304                 writeq(val64, &bar0->tx_w_round_robin_4);
1305                 break;
1306         case 4:
1307                 val64 = 0x0001020300010200ULL;
1308                 writeq(val64, &bar0->tx_w_round_robin_0);
1309                 val64 = 0x0100000102030001ULL;
1310                 writeq(val64, &bar0->tx_w_round_robin_1);
1311                 val64 = 0x0200010000010203ULL;
1312                 writeq(val64, &bar0->tx_w_round_robin_2);
1313                 val64 = 0x0001020001000001ULL;
1314                 writeq(val64, &bar0->tx_w_round_robin_3);
1315                 val64 = 0x0203000100000000ULL;
1316                 writeq(val64, &bar0->tx_w_round_robin_4);
1317                 break;
1318         case 5:
1319                 val64 = 0x0001000203000102ULL;
1320                 writeq(val64, &bar0->tx_w_round_robin_0);
1321                 val64 = 0x0001020001030004ULL;
1322                 writeq(val64, &bar0->tx_w_round_robin_1);
1323                 val64 = 0x0001000203000102ULL;
1324                 writeq(val64, &bar0->tx_w_round_robin_2);
1325                 val64 = 0x0001020001030004ULL;
1326                 writeq(val64, &bar0->tx_w_round_robin_3);
1327                 val64 = 0x0001000000000000ULL;
1328                 writeq(val64, &bar0->tx_w_round_robin_4);
1329                 break;
1330         case 6:
1331                 val64 = 0x0001020304000102ULL;
1332                 writeq(val64, &bar0->tx_w_round_robin_0);
1333                 val64 = 0x0304050001020001ULL;
1334                 writeq(val64, &bar0->tx_w_round_robin_1);
1335                 val64 = 0x0203000100000102ULL;
1336                 writeq(val64, &bar0->tx_w_round_robin_2);
1337                 val64 = 0x0304000102030405ULL;
1338                 writeq(val64, &bar0->tx_w_round_robin_3);
1339                 val64 = 0x0001000200000000ULL;
1340                 writeq(val64, &bar0->tx_w_round_robin_4);
1341                 break;
1342         case 7:
1343                 val64 = 0x0001020001020300ULL;
1344                 writeq(val64, &bar0->tx_w_round_robin_0);
1345                 val64 = 0x0102030400010203ULL;
1346                 writeq(val64, &bar0->tx_w_round_robin_1);
1347                 val64 = 0x0405060001020001ULL;
1348                 writeq(val64, &bar0->tx_w_round_robin_2);
1349                 val64 = 0x0304050000010200ULL;
1350                 writeq(val64, &bar0->tx_w_round_robin_3);
1351                 val64 = 0x0102030000000000ULL;
1352                 writeq(val64, &bar0->tx_w_round_robin_4);
1353                 break;
1354         case 8:
1355                 val64 = 0x0001020300040105ULL;
1356                 writeq(val64, &bar0->tx_w_round_robin_0);
1357                 val64 = 0x0200030106000204ULL;
1358                 writeq(val64, &bar0->tx_w_round_robin_1);
1359                 val64 = 0x0103000502010007ULL;
1360                 writeq(val64, &bar0->tx_w_round_robin_2);
1361                 val64 = 0x0304010002060500ULL;
1362                 writeq(val64, &bar0->tx_w_round_robin_3);
1363                 val64 = 0x0103020400000000ULL;
1364                 writeq(val64, &bar0->tx_w_round_robin_4);
1365                 break;
1366         }
1367
1368         /* Enable all configured Tx FIFO partitions */
1369         val64 = readq(&bar0->tx_fifo_partition_0);
1370         val64 |= (TX_FIFO_PARTITION_EN);
1371         writeq(val64, &bar0->tx_fifo_partition_0);
1372
1373         /* Filling the Rx round robin registers as per the
1374          * number of Rings and steering based on QoS.
1375          */
1376         switch (config->rx_ring_num) {
1377         case 1:
1378                 val64 = 0x8080808080808080ULL;
1379                 writeq(val64, &bar0->rts_qos_steering);
1380                 break;
1381         case 2:
1382                 val64 = 0x0000010000010000ULL;
1383                 writeq(val64, &bar0->rx_w_round_robin_0);
1384                 val64 = 0x0100000100000100ULL;
1385                 writeq(val64, &bar0->rx_w_round_robin_1);
1386                 val64 = 0x0001000001000001ULL;
1387                 writeq(val64, &bar0->rx_w_round_robin_2);
1388                 val64 = 0x0000010000010000ULL;
1389                 writeq(val64, &bar0->rx_w_round_robin_3);
1390                 val64 = 0x0100000000000000ULL;
1391                 writeq(val64, &bar0->rx_w_round_robin_4);
1392
1393                 val64 = 0x8080808040404040ULL;
1394                 writeq(val64, &bar0->rts_qos_steering);
1395                 break;
1396         case 3:
1397                 val64 = 0x0001000102000001ULL;
1398                 writeq(val64, &bar0->rx_w_round_robin_0);
1399                 val64 = 0x0001020000010001ULL;
1400                 writeq(val64, &bar0->rx_w_round_robin_1);
1401                 val64 = 0x0200000100010200ULL;
1402                 writeq(val64, &bar0->rx_w_round_robin_2);
1403                 val64 = 0x0001000102000001ULL;
1404                 writeq(val64, &bar0->rx_w_round_robin_3);
1405                 val64 = 0x0001020000000000ULL;
1406                 writeq(val64, &bar0->rx_w_round_robin_4);
1407
1408                 val64 = 0x8080804040402020ULL;
1409                 writeq(val64, &bar0->rts_qos_steering);
1410                 break;
1411         case 4:
1412                 val64 = 0x0001020300010200ULL;
1413                 writeq(val64, &bar0->rx_w_round_robin_0);
1414                 val64 = 0x0100000102030001ULL;
1415                 writeq(val64, &bar0->rx_w_round_robin_1);
1416                 val64 = 0x0200010000010203ULL;
1417                 writeq(val64, &bar0->rx_w_round_robin_2);
1418                 val64 = 0x0001020001000001ULL;
1419                 writeq(val64, &bar0->rx_w_round_robin_3);
1420                 val64 = 0x0203000100000000ULL;
1421                 writeq(val64, &bar0->rx_w_round_robin_4);
1422
1423                 val64 = 0x8080404020201010ULL;
1424                 writeq(val64, &bar0->rts_qos_steering);
1425                 break;
1426         case 5:
1427                 val64 = 0x0001000203000102ULL;
1428                 writeq(val64, &bar0->rx_w_round_robin_0);
1429                 val64 = 0x0001020001030004ULL;
1430                 writeq(val64, &bar0->rx_w_round_robin_1);
1431                 val64 = 0x0001000203000102ULL;
1432                 writeq(val64, &bar0->rx_w_round_robin_2);
1433                 val64 = 0x0001020001030004ULL;
1434                 writeq(val64, &bar0->rx_w_round_robin_3);
1435                 val64 = 0x0001000000000000ULL;
1436                 writeq(val64, &bar0->rx_w_round_robin_4);
1437
1438                 val64 = 0x8080404020201008ULL;
1439                 writeq(val64, &bar0->rts_qos_steering);
1440                 break;
1441         case 6:
1442                 val64 = 0x0001020304000102ULL;
1443                 writeq(val64, &bar0->rx_w_round_robin_0);
1444                 val64 = 0x0304050001020001ULL;
1445                 writeq(val64, &bar0->rx_w_round_robin_1);
1446                 val64 = 0x0203000100000102ULL;
1447                 writeq(val64, &bar0->rx_w_round_robin_2);
1448                 val64 = 0x0304000102030405ULL;
1449                 writeq(val64, &bar0->rx_w_round_robin_3);
1450                 val64 = 0x0001000200000000ULL;
1451                 writeq(val64, &bar0->rx_w_round_robin_4);
1452
1453                 val64 = 0x8080404020100804ULL;
1454                 writeq(val64, &bar0->rts_qos_steering);
1455                 break;
1456         case 7:
1457                 val64 = 0x0001020001020300ULL;
1458                 writeq(val64, &bar0->rx_w_round_robin_0);
1459                 val64 = 0x0102030400010203ULL;
1460                 writeq(val64, &bar0->rx_w_round_robin_1);
1461                 val64 = 0x0405060001020001ULL;
1462                 writeq(val64, &bar0->rx_w_round_robin_2);
1463                 val64 = 0x0304050000010200ULL;
1464                 writeq(val64, &bar0->rx_w_round_robin_3);
1465                 val64 = 0x0102030000000000ULL;
1466                 writeq(val64, &bar0->rx_w_round_robin_4);
1467
1468                 val64 = 0x8080402010080402ULL;
1469                 writeq(val64, &bar0->rts_qos_steering);
1470                 break;
1471         case 8:
1472                 val64 = 0x0001020300040105ULL;
1473                 writeq(val64, &bar0->rx_w_round_robin_0);
1474                 val64 = 0x0200030106000204ULL;
1475                 writeq(val64, &bar0->rx_w_round_robin_1);
1476                 val64 = 0x0103000502010007ULL;
1477                 writeq(val64, &bar0->rx_w_round_robin_2);
1478                 val64 = 0x0304010002060500ULL;
1479                 writeq(val64, &bar0->rx_w_round_robin_3);
1480                 val64 = 0x0103020400000000ULL;
1481                 writeq(val64, &bar0->rx_w_round_robin_4);
1482
1483                 val64 = 0x8040201008040201ULL;
1484                 writeq(val64, &bar0->rts_qos_steering);
1485                 break;
1486         }
1487
1488         /* UDP Fix */
1489         val64 = 0;
1490         for (i = 0; i < 8; i++)
1491                 writeq(val64, &bar0->rts_frm_len_n[i]);
1492
1493         /* Set the default rts frame length for the rings configured */
1494         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1495         for (i = 0 ; i < config->rx_ring_num ; i++)
1496                 writeq(val64, &bar0->rts_frm_len_n[i]);
1497
1498         /* Set the frame length for the configured rings
1499          * desired by the user
1500          */
1501         for (i = 0; i < config->rx_ring_num; i++) {
1502                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1503                  * specified frame length steering.
1504                  * If the user provides the frame length then program
1505                  * the rts_frm_len register for those values or else
1506                  * leave it as it is.
1507                  */
1508                 if (rts_frm_len[i] != 0) {
1509                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1510                                 &bar0->rts_frm_len_n[i]);
1511                 }
1512         }
1513
1514         /* Disable differentiated services steering logic */
1515         for (i = 0; i < 64; i++) {
1516                 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1517                         DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1518                                 dev->name);
1519                         DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1520                         return -ENODEV;
1521                 }
1522         }
1523
1524         /* Program statistics memory */
1525         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1526
1527         if (nic->device_type == XFRAME_II_DEVICE) {
1528                 val64 = STAT_BC(0x320);
1529                 writeq(val64, &bar0->stat_byte_cnt);
1530         }
1531
1532         /*
1533          * Initializing the sampling rate for the device to calculate the
1534          * bandwidth utilization.
1535          */
1536         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1537             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1538         writeq(val64, &bar0->mac_link_util);
1539
1540
1541         /*
1542          * Initializing the Transmit and Receive Traffic Interrupt
1543          * Scheme.
1544          */
1545         /*
1546          * TTI Initialization. Default Tx timer gets us about
1547          * 250 interrupts per sec. Continuous interrupts are enabled
1548          * by default.
1549          */
1550         if (nic->device_type == XFRAME_II_DEVICE) {
1551                 int count = (nic->config.bus_speed * 125)/2;
1552                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1553         } else {
1554
1555                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1556         }
1557         val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1558             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1559             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1560                 if (use_continuous_tx_intrs)
1561                         val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1562         writeq(val64, &bar0->tti_data1_mem);
1563
1564         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1565             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1566             TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1567         writeq(val64, &bar0->tti_data2_mem);
1568
1569         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1570         writeq(val64, &bar0->tti_command_mem);
1571
1572         /*
1573          * Once the operation completes, the Strobe bit of the command
1574          * register will be reset. We poll for this particular condition
1575          * We wait for a maximum of 500ms for the operation to complete,
1576          * if it's not complete by then we return error.
1577          */
1578         time = 0;
1579         while (TRUE) {
1580                 val64 = readq(&bar0->tti_command_mem);
1581                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1582                         break;
1583                 }
1584                 if (time > 10) {
1585                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1586                                   dev->name);
1587                         return -ENODEV;
1588                 }
1589                 msleep(50);
1590                 time++;
1591         }
1592
1593         /* RTI Initialization */
1594         if (nic->device_type == XFRAME_II_DEVICE) {
1595                 /*
1596                  * Programmed to generate Apprx 500 Intrs per
1597                  * second
1598                  */
1599                 int count = (nic->config.bus_speed * 125)/4;
1600                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1601         } else
1602                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1603         val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1604                  RTI_DATA1_MEM_RX_URNG_B(0x10) |
1605                  RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1606
1607         writeq(val64, &bar0->rti_data1_mem);
1608
1609         val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1610                 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1611         if (nic->config.intr_type == MSI_X)
1612             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1613                         RTI_DATA2_MEM_RX_UFC_D(0x40));
1614         else
1615             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1616                         RTI_DATA2_MEM_RX_UFC_D(0x80));
1617         writeq(val64, &bar0->rti_data2_mem);
1618
1619         for (i = 0; i < config->rx_ring_num; i++) {
1620                 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1621                                 | RTI_CMD_MEM_OFFSET(i);
1622                 writeq(val64, &bar0->rti_command_mem);
1623
1624                 /*
1625                  * Once the operation completes, the Strobe bit of the
1626                  * command register will be reset. We poll for this
1627                  * particular condition. We wait for a maximum of 500ms
1628                  * for the operation to complete, if it's not complete
1629                  * by then we return error.
1630                  */
1631                 time = 0;
1632                 while (TRUE) {
1633                         val64 = readq(&bar0->rti_command_mem);
1634                         if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1635                                 break;
1636
1637                         if (time > 10) {
1638                                 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1639                                           dev->name);
1640                                 return -ENODEV;
1641                         }
1642                         time++;
1643                         msleep(50);
1644                 }
1645         }
1646
1647         /*
1648          * Initializing proper values as Pause threshold into all
1649          * the 8 Queues on Rx side.
1650          */
1651         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1652         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1653
1654         /* Disable RMAC PAD STRIPPING */
1655         add = &bar0->mac_cfg;
1656         val64 = readq(&bar0->mac_cfg);
1657         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1658         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1659         writel((u32) (val64), add);
1660         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1661         writel((u32) (val64 >> 32), (add + 4));
1662         val64 = readq(&bar0->mac_cfg);
1663
1664         /* Enable FCS stripping by adapter */
1665         add = &bar0->mac_cfg;
1666         val64 = readq(&bar0->mac_cfg);
1667         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1668         if (nic->device_type == XFRAME_II_DEVICE)
1669                 writeq(val64, &bar0->mac_cfg);
1670         else {
1671                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1672                 writel((u32) (val64), add);
1673                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1674                 writel((u32) (val64 >> 32), (add + 4));
1675         }
1676
1677         /*
1678          * Set the time value to be inserted in the pause frame
1679          * generated by xena.
1680          */
1681         val64 = readq(&bar0->rmac_pause_cfg);
1682         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1683         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1684         writeq(val64, &bar0->rmac_pause_cfg);
1685
1686         /*
1687          * Set the Threshold Limit for Generating the pause frame
1688          * If the amount of data in any Queue exceeds ratio of
1689          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1690          * pause frame is generated
1691          */
1692         val64 = 0;
1693         for (i = 0; i < 4; i++) {
1694                 val64 |=
1695                     (((u64) 0xFF00 | nic->mac_control.
1696                       mc_pause_threshold_q0q3)
1697                      << (i * 2 * 8));
1698         }
1699         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1700
1701         val64 = 0;
1702         for (i = 0; i < 4; i++) {
1703                 val64 |=
1704                     (((u64) 0xFF00 | nic->mac_control.
1705                       mc_pause_threshold_q4q7)
1706                      << (i * 2 * 8));
1707         }
1708         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1709
1710         /*
1711          * TxDMA will stop Read request if the number of read split has
1712          * exceeded the limit pointed by shared_splits
1713          */
1714         val64 = readq(&bar0->pic_control);
1715         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1716         writeq(val64, &bar0->pic_control);
1717
1718         if (nic->config.bus_speed == 266) {
1719                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1720                 writeq(0x0, &bar0->read_retry_delay);
1721                 writeq(0x0, &bar0->write_retry_delay);
1722         }
1723
1724         /*
1725          * Programming the Herc to split every write transaction
1726          * that does not start on an ADB to reduce disconnects.
1727          */
1728         if (nic->device_type == XFRAME_II_DEVICE) {
1729                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1730                         MISC_LINK_STABILITY_PRD(3);
1731                 writeq(val64, &bar0->misc_control);
1732                 val64 = readq(&bar0->pic_control2);
1733                 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1734                 writeq(val64, &bar0->pic_control2);
1735         }
1736         if (strstr(nic->product_name, "CX4")) {
1737                 val64 = TMAC_AVG_IPG(0x17);
1738                 writeq(val64, &bar0->tmac_avg_ipg);
1739         }
1740
1741         return SUCCESS;
1742 }
1743 #define LINK_UP_DOWN_INTERRUPT          1
1744 #define MAC_RMAC_ERR_TIMER              2
1745
1746 static int s2io_link_fault_indication(struct s2io_nic *nic)
1747 {
1748         if (nic->config.intr_type != INTA)
1749                 return MAC_RMAC_ERR_TIMER;
1750         if (nic->device_type == XFRAME_II_DEVICE)
1751                 return LINK_UP_DOWN_INTERRUPT;
1752         else
1753                 return MAC_RMAC_ERR_TIMER;
1754 }
1755
1756 /**
1757  *  do_s2io_write_bits -  update alarm bits in alarm register
1758  *  @value: alarm bits
1759  *  @flag: interrupt status
1760  *  @addr: address value
1761  *  Description: update alarm bits in alarm register
1762  *  Return Value:
1763  *  NONE.
1764  */
1765 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1766 {
1767         u64 temp64;
1768
1769         temp64 = readq(addr);
1770
1771         if(flag == ENABLE_INTRS)
1772                 temp64 &= ~((u64) value);
1773         else
1774                 temp64 |= ((u64) value);
1775         writeq(temp64, addr);
1776 }
1777
/**
 *  en_dis_err_alarms - mask or unmask per-block error alarm sources
 *  @nic: device private variable
 *  @mask: bitmap selecting which alarm blocks to modify (TX_DMA_INTR,
 *         TX_MAC_INTR, TX_XGXS_INTR, RX_DMA_INTR, RX_MAC_INTR,
 *         RX_XGXS_INTR, MC_INTR)
 *  @flag: ENABLE_INTRS or DISABLE_INTRS
 *  Description: For every block selected in @mask, programs that
 *  block's error alarm mask registers via do_s2io_write_bits() and
 *  accumulates the block's general-interrupt bit into
 *  nic->general_int_mask (currently forced to 0 at the end — see the
 *  note before the final assignment).
 *  Return Value: NONE.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;

	/* Tx DMA subsystem alarms (TDA/PFC/PCC/TTI/LSO/TPA/SM units) */
	if (mask & TX_DMA_INTR) {

		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				TXDMA_PCC_INT | TXDMA_TTI_INT |
				TXDMA_LSO_INT | TXDMA_TPA_INT |
				TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				&bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				PCC_N_SERR | PCC_6_COF_OV_ERR |
				PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);

	}

	/* Tx MAC alarms (buffer overrun, state machine, ECC errors) */
	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				&bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				flag, &bar0->mac_tmac_err_mask);
	}

	/* Tx XGXS alarms */
	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				&bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				flag, &bar0->xgxs_txgxs_err_mask);
	}

	/* Rx DMA subsystem alarms (RC/PRC/RPA/RDA/RTI units) */
	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				&bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				&bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
				flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				flag, &bar0->rti_err_mask);
	}

	/* Rx MAC alarms, including link state change */
	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				&bar0->mac_int_mask);
		do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				RMAC_DOUBLE_ECC_ERR |
				RMAC_LINK_STATE_CHANGE_INT,
				flag, &bar0->mac_rmac_err_mask);
	}

	/* Rx XGXS alarms */
	if (mask & RX_XGXS_INTR)
	{
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				&bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				&bar0->xgxs_rxgxs_err_mask);
	}

	/* Memory controller alarms (ECC and PLL lock) */
	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				&bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}
1900 /**
1901  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1902  *  @nic: device private variable,
1903  *  @mask: A mask indicating which Intr block must be modified and,
1904  *  @flag: A flag indicating whether to enable or disable the Intrs.
1905  *  Description: This function will either disable or enable the interrupts
1906  *  depending on the flag argument. The mask argument can be used to
1907  *  enable/disable any Intr block.
1908  *  Return Value: NONE.
1909  */
1910
1911 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1912 {
1913         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1914         register u64 temp64 = 0, intr_mask = 0;
1915
1916         intr_mask = nic->general_int_mask;
1917
1918         /*  Top level interrupt classification */
1919         /*  PIC Interrupts */
1920         if (mask & TX_PIC_INTR) {
1921                 /*  Enable PIC Intrs in the general intr mask register */
1922                 intr_mask |= TXPIC_INT_M;
1923                 if (flag == ENABLE_INTRS) {
1924                         /*
1925                          * If Hercules adapter enable GPIO otherwise
1926                          * disable all PCIX, Flash, MDIO, IIC and GPIO
1927                          * interrupts for now.
1928                          * TODO
1929                          */
1930                         if (s2io_link_fault_indication(nic) ==
1931                                         LINK_UP_DOWN_INTERRUPT ) {
1932                                 do_s2io_write_bits(PIC_INT_GPIO, flag,
1933                                                 &bar0->pic_int_mask);
1934                                 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
1935                                                 &bar0->gpio_int_mask);
1936                         } else
1937                                 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1938                 } else if (flag == DISABLE_INTRS) {
1939                         /*
1940                          * Disable PIC Intrs in the general
1941                          * intr mask register
1942                          */
1943                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1944                 }
1945         }
1946
1947         /*  Tx traffic interrupts */
1948         if (mask & TX_TRAFFIC_INTR) {
1949                 intr_mask |= TXTRAFFIC_INT_M;
1950                 if (flag == ENABLE_INTRS) {
1951                         /*
1952                          * Enable all the Tx side interrupts
1953                          * writing 0 Enables all 64 TX interrupt levels
1954                          */
1955                         writeq(0x0, &bar0->tx_traffic_mask);
1956                 } else if (flag == DISABLE_INTRS) {
1957                         /*
1958                          * Disable Tx Traffic Intrs in the general intr mask
1959                          * register.
1960                          */
1961                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1962                 }
1963         }
1964
1965         /*  Rx traffic interrupts */
1966         if (mask & RX_TRAFFIC_INTR) {
1967                 intr_mask |= RXTRAFFIC_INT_M;
1968                 if (flag == ENABLE_INTRS) {
1969                         /* writing 0 Enables all 8 RX interrupt levels */
1970                         writeq(0x0, &bar0->rx_traffic_mask);
1971                 } else if (flag == DISABLE_INTRS) {
1972                         /*
1973                          * Disable Rx Traffic Intrs in the general intr mask
1974                          * register.
1975                          */
1976                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1977                 }
1978         }
1979
1980         temp64 = readq(&bar0->general_int_mask);
1981         if (flag == ENABLE_INTRS)
1982                 temp64 &= ~((u64) intr_mask);
1983         else
1984                 temp64 = DISABLE_ALL_INTRS;
1985         writeq(temp64, &bar0->general_int_mask);
1986
1987         nic->general_int_mask = readq(&bar0->general_int_mask);
1988 }
1989
1990 /**
1991  *  verify_pcc_quiescent- Checks for PCC quiescent state
1992  *  Return: 1 If PCC is quiescence
1993  *          0 If PCC is not quiescence
1994  */
1995 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1996 {
1997         int ret = 0, herc;
1998         struct XENA_dev_config __iomem *bar0 = sp->bar0;
1999         u64 val64 = readq(&bar0->adapter_status);
2000
2001         herc = (sp->device_type == XFRAME_II_DEVICE);
2002
2003         if (flag == FALSE) {
2004                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2005                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2006                                 ret = 1;
2007                 } else {
2008                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2009                                 ret = 1;
2010                 }
2011         } else {
2012                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2013                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2014                              ADAPTER_STATUS_RMAC_PCC_IDLE))
2015                                 ret = 1;
2016                 } else {
2017                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2018                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2019                                 ret = 1;
2020                 }
2021         }
2022
2023         return ret;
2024 }
2025 /**
2026  *  verify_xena_quiescence - Checks whether the H/W is ready
2027  *  Description: Returns whether the H/W is ready to go or not. Depending
2028  *  on whether adapter enable bit was written or not the comparison
2029  *  differs and the calling function passes the input argument flag to
2030  *  indicate this.
2031  *  Return: 1 If xena is quiescence
2032  *          0 If Xena is not quiescence
2033  */
2034
2035 static int verify_xena_quiescence(struct s2io_nic *sp)
2036 {
2037         int  mode;
2038         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2039         u64 val64 = readq(&bar0->adapter_status);
2040         mode = s2io_verify_pci_mode(sp);
2041
2042         if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2043                 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2044                 return 0;
2045         }
2046         if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2047         DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2048                 return 0;
2049         }
2050         if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2051                 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2052                 return 0;
2053         }
2054         if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2055                 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2056                 return 0;
2057         }
2058         if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2059                 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2060                 return 0;
2061         }
2062         if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2063                 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2064                 return 0;
2065         }
2066         if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2067                 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2068                 return 0;
2069         }
2070         if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2071                 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2072                 return 0;
2073         }
2074
2075         /*
2076          * In PCI 33 mode, the P_PLL is not used, and therefore,
2077          * the the P_PLL_LOCK bit in the adapter_status register will
2078          * not be asserted.
2079          */
2080         if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2081                 sp->device_type == XFRAME_II_DEVICE && mode !=
2082                 PCI_MODE_PCI_33) {
2083                 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2084                 return 0;
2085         }
2086         if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2087                         ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2088                 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2089                 return 0;
2090         }
2091         return 1;
2092 }
2093
2094 /**
2095  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2096  * @sp: Pointer to device specifc structure
2097  * Description :
2098  * New procedure to clear mac address reading  problems on Alpha platforms
2099  *
2100  */
2101
2102 static void fix_mac_address(struct s2io_nic * sp)
2103 {
2104         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2105         u64 val64;
2106         int i = 0;
2107
2108         while (fix_mac[i] != END_SIGN) {
2109                 writeq(fix_mac[i++], &bar0->gpio_control);
2110                 udelay(10);
2111                 val64 = readq(&bar0->gpio_control);
2112         }
2113 }
2114
/**
 *  start_nic - Turns the device on
 *  @nic : device private variable.
 *  Description:
 *  This function actually turns the device on. Before this function is
 *  called, all Registers are configured from their reset states
 *  and shared memory is allocated but the NIC is still quiescent. On
 *  calling this function, the device interrupts are cleared and the NIC is
 *  literally switched on by writing into the adapter control register.
 *  Return Value:
 *  SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Point each PRC at the first Rx block of its ring. */
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		/* Re-program the RxD fetch backoff interval to 0x1000. */
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	if (vlan_tag_strip == 0) {
		/* Module parameter asked us to leave VLAN tags in the frame. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/*
	 * ECC protection setup.
	 * NOTE(review): the historical comment here said "Enabling ECC
	 * Protection" yet the code clears ADAPTER_ECC_EN — confirm the
	 * intended bit polarity against the Xframe register reference.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initally on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		/* Magic LED programming sequence via GPIO + raw offset 0x2700. */
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
2233 /**
2234  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2235  */
2236 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2237                                         TxD *txdlp, int get_off)
2238 {
2239         struct s2io_nic *nic = fifo_data->nic;
2240         struct sk_buff *skb;
2241         struct TxD *txds;
2242         u16 j, frg_cnt;
2243
2244         txds = txdlp;
2245         if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2246                 pci_unmap_single(nic->pdev, (dma_addr_t)
2247                         txds->Buffer_Pointer, sizeof(u64),
2248                         PCI_DMA_TODEVICE);
2249                 txds++;
2250         }
2251
2252         skb = (struct sk_buff *) ((unsigned long)
2253                         txds->Host_Control);
2254         if (!skb) {
2255                 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2256                 return NULL;
2257         }
2258         pci_unmap_single(nic->pdev, (dma_addr_t)
2259                          txds->Buffer_Pointer,
2260                          skb->len - skb->data_len,
2261                          PCI_DMA_TODEVICE);
2262         frg_cnt = skb_shinfo(skb)->nr_frags;
2263         if (frg_cnt) {
2264                 txds++;
2265                 for (j = 0; j < frg_cnt; j++, txds++) {
2266                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2267                         if (!txds->Buffer_Pointer)
2268                                 break;
2269                         pci_unmap_page(nic->pdev, (dma_addr_t)
2270                                         txds->Buffer_Pointer,
2271                                        frag->size, PCI_DMA_TODEVICE);
2272                 }
2273         }
2274         memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2275         return(skb);
2276 }
2277
2278 /**
2279  *  free_tx_buffers - Free all queued Tx buffers
2280  *  @nic : device private variable.
2281  *  Description:
2282  *  Free all queued Tx buffers.
2283  *  Return Value: void
2284 */
2285
2286 static void free_tx_buffers(struct s2io_nic *nic)
2287 {
2288         struct net_device *dev = nic->dev;
2289         struct sk_buff *skb;
2290         struct TxD *txdp;
2291         int i, j;
2292         struct mac_info *mac_control;
2293         struct config_param *config;
2294         int cnt = 0;
2295
2296         mac_control = &nic->mac_control;
2297         config = &nic->config;
2298
2299         for (i = 0; i < config->tx_fifo_num; i++) {
2300                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2301                         txdp = (struct TxD *) \
2302                         mac_control->fifos[i].list_info[j].list_virt_addr;
2303                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2304                         if (skb) {
2305                                 nic->mac_control.stats_info->sw_stat.mem_freed
2306                                         += skb->truesize;
2307                                 dev_kfree_skb(skb);
2308                                 cnt++;
2309                         }
2310                 }
2311                 DBG_PRINT(INTR_DBG,
2312                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2313                           dev->name, cnt, i);
2314                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2315                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2316         }
2317 }
2318
2319 /**
2320  *   stop_nic -  To stop the nic
2321  *   @nic ; device private variable.
2322  *   Description:
2323  *   This function does exactly the opposite of what the start_nic()
2324  *   function does. This function is called to stop the device.
2325  *   Return Value:
2326  *   void.
2327  */
2328
2329 static void stop_nic(struct s2io_nic *nic)
2330 {
2331         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2332         register u64 val64 = 0;
2333         u16 interruptible;
2334         struct mac_info *mac_control;
2335         struct config_param *config;
2336
2337         mac_control = &nic->mac_control;
2338         config = &nic->config;
2339
2340         /*  Disable all interrupts */
2341         en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2342         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2343         interruptible |= TX_PIC_INTR;
2344         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2345
2346         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2347         val64 = readq(&bar0->adapter_control);
2348         val64 &= ~(ADAPTER_CNTL_EN);
2349         writeq(val64, &bar0->adapter_control);
2350 }
2351
/**
 *  fill_rx_buffers - Allocates the Rx side skbs
 *  @nic:  device private variable
 *  @ring_no: ring number
 *  Description:
 *  The function allocates Rx side skbs and puts the physical
 *  address of these buffers into the RxD buffer pointers, so that the NIC
 *  can DMA the received frame into these locations.
 *  The NIC supports 3 receive modes, viz
 *  1. single buffer,
 *  2. three buffer and
 *  3. Five buffer modes.
 *  Each mode defines how many fragments the received frame will be split
 *  up into by the NIC. The frame is split into L3 header, L4 Header,
 *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 *  is split into 3 fragments. As of now only single buffer mode is
 *  supported.
 *   Return Value:
 *  SUCCESS on success or an appropriate -ve value on failure.
 */

static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, off1, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	struct mac_info *mac_control;
	struct config_param *config;
	u64 tmp;
	struct buffAdd *ba;
	unsigned long flags;
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *stats = &nic->mac_control.stats_info->sw_stat;

	mac_control = &nic->mac_control;
	config = &nic->config;
	/* Number of RxDs that still need a buffer on this ring. */
	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
	    atomic_read(&nic->rx_bufs_left[ring_no]);

	/* Snapshot of the consumer (get) position, for the full-ring check. */
	block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
	off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
	while (alloc_tab < alloc_cnt) {
		block_no = mac_control->rings[ring_no].rx_curr_put_info.
		    block_index;
		off = mac_control->rings[ring_no].rx_curr_put_info.offset;

		rxdp = mac_control->rings[ring_no].
				rx_blocks[block_no].rxds[off].virt_addr;

		/* Put caught up with get on a still-occupied RxD: ring full. */
		if ((block_no == block_no1) && (off == off1) &&
					(rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				  dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		/* End of the current block reached: advance (and wrap) blocks. */
		if (off && (off == rxd_count[nic->rxd_mode])) {
			mac_control->rings[ring_no].rx_curr_put_info.
			    block_index++;
			if (mac_control->rings[ring_no].rx_curr_put_info.
			    block_index == mac_control->rings[ring_no].
					block_count)
				mac_control->rings[ring_no].rx_curr_put_info.
					block_index = 0;
			block_no = mac_control->rings[ring_no].
					rx_curr_put_info.block_index;
			if (off == rxd_count[nic->rxd_mode])
				off = 0;
			mac_control->rings[ring_no].rx_curr_put_info.
				offset = off;
			rxdp = mac_control->rings[ring_no].
				rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  dev->name, rxdp);
		}
		/* Publish the new put position; the interrupt path reads it
		 * under put_lock in non-NAPI mode. */
		if(!napi) {
			spin_lock_irqsave(&nic->put_lock, flags);
			mac_control->rings[ring_no].put_pos =
			(block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
			spin_unlock_irqrestore(&nic->put_lock, flags);
		} else {
			mac_control->rings[ring_no].put_pos =
			(block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
		}
		/* RxD still owned by the adapter (3B mode marks posted
		 * descriptors with bit 0 of Control_2): stop here. */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
			((nic->rxd_mode == RXD_MODE_3B) &&
				(rxdp->Control_2 & s2BIT(0)))) {
			mac_control->rings[ring_no].rx_curr_put_info.
					offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (nic->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if(!skb) {
			DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
			DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
			/* Hand over whatever we managed to post so far. */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			nic->mac_control.stats_info->sw_stat. \
				mem_alloc_fail_cnt++;
			return -ENOMEM ;
		}
		nic->mac_control.stats_info->sw_stat.mem_allocated
			+= skb->truesize;
		if (nic->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1*)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr = pci_map_single
			    (nic->pdev, skb->data, size - NET_IP_ALIGN,
				PCI_DMA_FROMDEVICE);
			if( (rxdp1->Buffer0_ptr == 0) ||
				(rxdp1->Buffer0_ptr ==
				DMA_ERROR_CODE))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);

		} else if (nic->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3*)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &mac_control->rings[ring_no].ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Round skb->data up to the next 128-byte boundary
			 * ((tmp + ALIGN_SIZE) & ~ALIGN_SIZE). */
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			/* Buffer0 (header area) is mapped once and then only
			 * re-synced on subsequent refills. */
			if (!(rxdp3->Buffer0_ptr))
				rxdp3->Buffer0_ptr =
				   pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
					   PCI_DMA_FROMDEVICE);
			else
				pci_dma_sync_single_for_device(nic->pdev,
				(dma_addr_t) rxdp3->Buffer0_ptr,
				    BUF0_LEN, PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer0_ptr == 0) ||
				(rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
				goto pci_map_failed;

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (nic->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single
				(nic->pdev, skb->data, dev->mtu + 4,
						PCI_DMA_FROMDEVICE);

				if( (rxdp3->Buffer2_ptr == 0) ||
					(rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
					goto pci_map_failed;

				rxdp3->Buffer1_ptr =
						pci_map_single(nic->pdev,
						ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);
				if( (rxdp3->Buffer1_ptr == 0) ||
					(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
					/* Undo the Buffer2 mapping before
					 * bailing out. */
					pci_unmap_single
						(nic->pdev,
						(dma_addr_t)rxdp3->Buffer2_ptr,
						dev->mtu + 4,
						PCI_DMA_FROMDEVICE);
					goto pci_map_failed;
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
								(dev->mtu + 4);
			}
			/* Mark the RxD as posted (checked on refill above). */
			rxdp->Control_2 |= s2BIT(0);
		}
		rxdp->Host_Control = (unsigned long) (skb);
		/* Ownership is given immediately except for every
		 * 2^rxsync_frequency-th RxD, which is held back as
		 * first_rxdp and handed over after a write barrier. */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (rxd_count[nic->rxd_mode] + 1))
			off = 0;
		mac_control->rings[ring_no].rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		atomic_inc(&nic->rx_bufs_left[ring_no]);
		alloc_tab++;
	}

      end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
pci_map_failed:
	stats->pci_map_fail_cnt++;
	stats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2596
2597 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2598 {
2599         struct net_device *dev = sp->dev;
2600         int j;
2601         struct sk_buff *skb;
2602         struct RxD_t *rxdp;
2603         struct mac_info *mac_control;
2604         struct buffAdd *ba;
2605         struct RxD1 *rxdp1;
2606         struct RxD3 *rxdp3;
2607
2608         mac_control = &sp->mac_control;
2609         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2610                 rxdp = mac_control->rings[ring_no].
2611                                 rx_blocks[blk].rxds[j].virt_addr;
2612                 skb = (struct sk_buff *)
2613                         ((unsigned long) rxdp->Host_Control);
2614                 if (!skb) {
2615                         continue;
2616                 }
2617                 if (sp->rxd_mode == RXD_MODE_1) {
2618                         rxdp1 = (struct RxD1*)rxdp;
2619                         pci_unmap_single(sp->pdev, (dma_addr_t)
2620                                 rxdp1->Buffer0_ptr,
2621                                 dev->mtu +
2622                                 HEADER_ETHERNET_II_802_3_SIZE
2623                                 + HEADER_802_2_SIZE +
2624                                 HEADER_SNAP_SIZE,
2625                                 PCI_DMA_FROMDEVICE);
2626                         memset(rxdp, 0, sizeof(struct RxD1));
2627                 } else if(sp->rxd_mode == RXD_MODE_3B) {
2628                         rxdp3 = (struct RxD3*)rxdp;
2629                         ba = &mac_control->rings[ring_no].
2630                                 ba[blk][j];
2631                         pci_unmap_single(sp->pdev, (dma_addr_t)
2632                                 rxdp3->Buffer0_ptr,
2633                                 BUF0_LEN,
2634                                 PCI_DMA_FROMDEVICE);
2635                         pci_unmap_single(sp->pdev, (dma_addr_t)
2636                                 rxdp3->Buffer1_ptr,
2637                                 BUF1_LEN,
2638                                 PCI_DMA_FROMDEVICE);
2639                         pci_unmap_single(sp->pdev, (dma_addr_t)
2640                                 rxdp3->Buffer2_ptr,
2641                                 dev->mtu + 4,
2642                                 PCI_DMA_FROMDEVICE);
2643                         memset(rxdp, 0, sizeof(struct RxD3));
2644                 }
2645                 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2646                 dev_kfree_skb(skb);
2647                 atomic_dec(&sp->rx_bufs_left[ring_no]);
2648         }
2649 }
2650
2651 /**
2652  *  free_rx_buffers - Frees all Rx buffers
2653  *  @sp: device private variable.
2654  *  Description:
2655  *  This function will free all Rx buffers allocated by host.
2656  *  Return Value:
2657  *  NONE.
2658  */
2659
2660 static void free_rx_buffers(struct s2io_nic *sp)
2661 {
2662         struct net_device *dev = sp->dev;
2663         int i, blk = 0, buf_cnt = 0;
2664         struct mac_info *mac_control;
2665         struct config_param *config;
2666
2667         mac_control = &sp->mac_control;
2668         config = &sp->config;
2669
2670         for (i = 0; i < config->rx_ring_num; i++) {
2671                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2672                         free_rxd_blk(sp,i,blk);
2673
2674                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2675                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2676                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2677                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2678                 atomic_set(&sp->rx_bufs_left[i], 0);
2679                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2680                           dev->name, buf_cnt, i);
2681         }
2682 }
2683
2684 /**
2685  * s2io_poll - Rx interrupt handler for NAPI support
2686  * @napi : pointer to the napi structure.
2687  * @budget : The number of packets that were budgeted to be processed
2688  * during  one pass through the 'Poll" function.
2689  * Description:
2690  * Comes into picture only if NAPI support has been incorporated. It does
2691  * the same thing that rx_intr_handler does, but not in a interrupt context
2692  * also It will process only a given number of packets.
2693  * Return value:
2694  * 0 on success and 1 if there are No Rx packets to be processed.
2695  */
2696
2697 static int s2io_poll(struct napi_struct *napi, int budget)
2698 {
2699         struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2700         struct net_device *dev = nic->dev;
2701         int pkt_cnt = 0, org_pkts_to_process;
2702         struct mac_info *mac_control;
2703         struct config_param *config;
2704         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2705         int i;
2706
2707         mac_control = &nic->mac_control;
2708         config = &nic->config;
2709
2710         nic->pkts_to_process = budget;
2711         org_pkts_to_process = nic->pkts_to_process;
2712
2713         writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2714         readl(&bar0->rx_traffic_int);
2715
2716         for (i = 0; i < config->rx_ring_num; i++) {
2717                 rx_intr_handler(&mac_control->rings[i]);
2718                 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2719                 if (!nic->pkts_to_process) {
2720                         /* Quota for the current iteration has been met */
2721                         goto no_rx;
2722                 }
2723         }
2724
2725         netif_rx_complete(dev, napi);
2726
2727         for (i = 0; i < config->rx_ring_num; i++) {
2728                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2729                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2730                         DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2731                         break;
2732                 }
2733         }
2734         /* Re enable the Rx interrupts. */
2735         writeq(0x0, &bar0->rx_traffic_mask);
2736         readl(&bar0->rx_traffic_mask);
2737         return pkt_cnt;
2738
2739 no_rx:
2740         for (i = 0; i < config->rx_ring_num; i++) {
2741                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2742                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2743                         DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2744                         break;
2745                 }
2746         }
2747         return pkt_cnt;
2748 }
2749
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * s2io_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 *      This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = dev->priv;
	struct mac_info *mac_control;
	struct config_param *config;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;

	/* Nothing to do while the PCI channel is offline (error recovery). */
	if (pci_channel_offline(nic->pdev))
		return;

	/* Run the whole poll with the device's interrupt line disabled. */
	disable_irq(dev->irq);

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Acknowledge all pending Rx and Tx traffic interrupts. */
	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/* we need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail and eventually netpoll application such
	 * as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++)
		rx_intr_handler(&mac_control->rings[i]);

	/* Replenish the Rx rings drained by the handlers above. */
	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
			break;
		}
	}
	enable_irq(dev->irq);
	return;
}
#endif
2802
2803 /**
2804  *  rx_intr_handler - Rx interrupt handler
2805  *  @nic: device private variable.
2806  *  Description:
2807  *  If the interrupt is because of a received frame or if the
2808  *  receive ring contains fresh as yet un-processed frames,this function is
2809  *  called. It picks out the RxD at which place the last Rx processing had
2810  *  stopped and sends the skb to the OSM's Rx handler and then increments
2811  *  the offset.
2812  *  Return Value:
2813  *  NONE.
2814  */
2815 static void rx_intr_handler(struct ring_info *ring_data)
2816 {
2817         struct s2io_nic *nic = ring_data->nic;
2818         struct net_device *dev = (struct net_device *) nic->dev;
2819         int get_block, put_block, put_offset;
2820         struct rx_curr_get_info get_info, put_info;
2821         struct RxD_t *rxdp;
2822         struct sk_buff *skb;
2823         int pkt_cnt = 0;
2824         int i;
2825         struct RxD1* rxdp1;
2826         struct RxD3* rxdp3;
2827
2828         spin_lock(&nic->rx_lock);
2829
2830         get_info = ring_data->rx_curr_get_info;
2831         get_block = get_info.block_index;
2832         memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2833         put_block = put_info.block_index;
2834         rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2835         if (!napi) {
2836                 spin_lock(&nic->put_lock);
2837                 put_offset = ring_data->put_pos;
2838                 spin_unlock(&nic->put_lock);
2839         } else
2840                 put_offset = ring_data->put_pos;
2841
2842         while (RXD_IS_UP2DT(rxdp)) {
2843                 /*
2844                  * If your are next to put index then it's
2845                  * FIFO full condition
2846                  */
2847                 if ((get_block == put_block) &&
2848                     (get_info.offset + 1) == put_info.offset) {
2849                         DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2850                         break;
2851                 }
2852                 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2853                 if (skb == NULL) {
2854                         DBG_PRINT(ERR_DBG, "%s: The skb is ",
2855                                   dev->name);
2856                         DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2857                         spin_unlock(&nic->rx_lock);
2858                         return;
2859                 }
2860                 if (nic->rxd_mode == RXD_MODE_1) {
2861                         rxdp1 = (struct RxD1*)rxdp;
2862                         pci_unmap_single(nic->pdev, (dma_addr_t)
2863                                 rxdp1->Buffer0_ptr,
2864                                 dev->mtu +
2865                                 HEADER_ETHERNET_II_802_3_SIZE +
2866                                 HEADER_802_2_SIZE +
2867                                 HEADER_SNAP_SIZE,
2868                                 PCI_DMA_FROMDEVICE);
2869                 } else if (nic->rxd_mode == RXD_MODE_3B) {
2870                         rxdp3 = (struct RxD3*)rxdp;
2871                         pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2872                                 rxdp3->Buffer0_ptr,
2873                                 BUF0_LEN, PCI_DMA_FROMDEVICE);
2874                         pci_unmap_single(nic->pdev, (dma_addr_t)
2875                                 rxdp3->Buffer2_ptr,
2876                                 dev->mtu + 4,
2877                                 PCI_DMA_FROMDEVICE);
2878                 }
2879                 prefetch(skb->data);
2880                 rx_osm_handler(ring_data, rxdp);
2881                 get_info.offset++;
2882                 ring_data->rx_curr_get_info.offset = get_info.offset;
2883                 rxdp = ring_data->rx_blocks[get_block].
2884                                 rxds[get_info.offset].virt_addr;
2885                 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2886                         get_info.offset = 0;
2887                         ring_data->rx_curr_get_info.offset = get_info.offset;
2888                         get_block++;
2889                         if (get_block == ring_data->block_count)
2890                                 get_block = 0;
2891                         ring_data->rx_curr_get_info.block_index = get_block;
2892                         rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2893                 }
2894
2895                 nic->pkts_to_process -= 1;
2896                 if ((napi) && (!nic->pkts_to_process))
2897                         break;
2898                 pkt_cnt++;
2899                 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2900                         break;
2901         }
2902         if (nic->lro) {
2903                 /* Clear all LRO sessions before exiting */
2904                 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2905                         struct lro *lro = &nic->lro0_n[i];
2906                         if (lro->in_use) {
2907                                 update_L3L4_header(nic, lro);
2908                                 queue_rx_frame(lro->parent);
2909                                 clear_lro_session(lro);
2910                         }
2911                 }
2912         }
2913
2914         spin_unlock(&nic->rx_lock);
2915 }
2916
2917 /**
2918  *  tx_intr_handler - Transmit interrupt handler
2919  *  @nic : device private variable
2920  *  Description:
2921  *  If an interrupt was raised to indicate DMA complete of the
2922  *  Tx packet, this function is called. It identifies the last TxD
2923  *  whose buffer was freed and frees all skbs whose data have already
2924  *  DMA'ed into the NICs internal memory.
2925  *  Return Value:
2926  *  NONE
2927  */
2928
2929 static void tx_intr_handler(struct fifo_info *fifo_data)
2930 {
2931         struct s2io_nic *nic = fifo_data->nic;
2932         struct net_device *dev = (struct net_device *) nic->dev;
2933         struct tx_curr_get_info get_info, put_info;
2934         struct sk_buff *skb;
2935         struct TxD *txdlp;
2936         u8 err_mask;
2937
2938         get_info = fifo_data->tx_curr_get_info;
2939         memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2940         txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
2941             list_virt_addr;
2942         while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2943                (get_info.offset != put_info.offset) &&
2944                (txdlp->Host_Control)) {
2945                 /* Check for TxD errors */
2946                 if (txdlp->Control_1 & TXD_T_CODE) {
2947                         unsigned long long err;
2948                         err = txdlp->Control_1 & TXD_T_CODE;
2949                         if (err & 0x1) {
2950                                 nic->mac_control.stats_info->sw_stat.
2951                                                 parity_err_cnt++;
2952                         }
2953
2954                         /* update t_code statistics */
2955                         err_mask = err >> 48;
2956                         switch(err_mask) {
2957                                 case 2:
2958                                         nic->mac_control.stats_info->sw_stat.
2959                                                         tx_buf_abort_cnt++;
2960                                 break;
2961
2962                                 case 3:
2963                                         nic->mac_control.stats_info->sw_stat.
2964                                                         tx_desc_abort_cnt++;
2965                                 break;
2966
2967                                 case 7:
2968                                         nic->mac_control.stats_info->sw_stat.
2969                                                         tx_parity_err_cnt++;
2970                                 break;
2971
2972                                 case 10:
2973                                         nic->mac_control.stats_info->sw_stat.
2974                                                         tx_link_loss_cnt++;
2975                                 break;
2976
2977                                 case 15:
2978                                         nic->mac_control.stats_info->sw_stat.
2979                                                         tx_list_proc_err_cnt++;
2980                                 break;
2981                         }
2982                 }
2983
2984                 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2985                 if (skb == NULL) {
2986                         DBG_PRINT(ERR_DBG, "%s: Null skb ",
2987                         __FUNCTION__);
2988                         DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2989                         return;
2990                 }
2991
2992                 /* Updating the statistics block */
2993                 nic->stats.tx_bytes += skb->len;
2994                 nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2995                 dev_kfree_skb_irq(skb);
2996
2997                 get_info.offset++;
2998                 if (get_info.offset == get_info.fifo_len + 1)
2999                         get_info.offset = 0;
3000                 txdlp = (struct TxD *) fifo_data->list_info
3001                     [get_info.offset].list_virt_addr;
3002                 fifo_data->tx_curr_get_info.offset =
3003                     get_info.offset;
3004         }
3005
3006         spin_lock(&nic->tx_lock);
3007         if (netif_queue_stopped(dev))
3008                 netif_wake_queue(dev);
3009         spin_unlock(&nic->tx_lock);
3010 }
3011
3012 /**
3013  *  s2io_mdio_write - Function to write in to MDIO registers
3014  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3015  *  @addr     : address value
3016  *  @value    : data value
3017  *  @dev      : pointer to net_device structure
3018  *  Description:
3019  *  This function is used to write values to the MDIO registers
3020  *  NONE
3021  */
3022 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3023 {
3024         u64 val64 = 0x0;
3025         struct s2io_nic *sp = dev->priv;
3026         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3027
3028         //address transaction
3029         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3030                         | MDIO_MMD_DEV_ADDR(mmd_type)
3031                         | MDIO_MMS_PRT_ADDR(0x0);
3032         writeq(val64, &bar0->mdio_control);
3033         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3034         writeq(val64, &bar0->mdio_control);
3035         udelay(100);
3036
3037         //Data transaction
3038         val64 = 0x0;
3039         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3040                         | MDIO_MMD_DEV_ADDR(mmd_type)
3041                         | MDIO_MMS_PRT_ADDR(0x0)
3042                         | MDIO_MDIO_DATA(value)
3043                         | MDIO_OP(MDIO_OP_WRITE_TRANS);
3044         writeq(val64, &bar0->mdio_control);
3045         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3046         writeq(val64, &bar0->mdio_control);
3047         udelay(100);
3048
3049         val64 = 0x0;
3050         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3051         | MDIO_MMD_DEV_ADDR(mmd_type)
3052         | MDIO_MMS_PRT_ADDR(0x0)
3053         | MDIO_OP(MDIO_OP_READ_TRANS);
3054         writeq(val64, &bar0->mdio_control);
3055         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3056         writeq(val64, &bar0->mdio_control);
3057         udelay(100);
3058
3059 }
3060
3061 /**
3062  *  s2io_mdio_read - Function to write in to MDIO registers
3063  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3064  *  @addr     : address value
3065  *  @dev      : pointer to net_device structure
3066  *  Description:
3067  *  This function is used to read values to the MDIO registers
3068  *  NONE
3069  */
3070 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3071 {
3072         u64 val64 = 0x0;
3073         u64 rval64 = 0x0;
3074         struct s2io_nic *sp = dev->priv;
3075         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3076
3077         /* address transaction */
3078         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3079                         | MDIO_MMD_DEV_ADDR(mmd_type)
3080                         | MDIO_MMS_PRT_ADDR(0x0);
3081         writeq(val64, &bar0->mdio_control);
3082         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3083         writeq(val64, &bar0->mdio_control);
3084         udelay(100);
3085
3086         /* Data transaction */
3087         val64 = 0x0;
3088         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3089                         | MDIO_MMD_DEV_ADDR(mmd_type)
3090                         | MDIO_MMS_PRT_ADDR(0x0)
3091                         | MDIO_OP(MDIO_OP_READ_TRANS);
3092         writeq(val64, &bar0->mdio_control);
3093         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3094         writeq(val64, &bar0->mdio_control);
3095         udelay(100);
3096
3097         /* Read the value from regs */
3098         rval64 = readq(&bar0->mdio_control);
3099         rval64 = rval64 & 0xFFFF0000;
3100         rval64 = rval64 >> 16;
3101         return rval64;
3102 }
3103 /**
3104  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3105  *  @counter      : couter value to be updated
3106  *  @flag         : flag to indicate the status
3107  *  @type         : counter type
3108  *  Description:
3109  *  This function is to check the status of the xpak counters value
3110  *  NONE
3111  */
3112
3113 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3114 {
3115         u64 mask = 0x3;
3116         u64 val64;
3117         int i;
3118         for(i = 0; i <index; i++)
3119                 mask = mask << 0x2;
3120
3121         if(flag > 0)
3122         {
3123                 *counter = *counter + 1;
3124                 val64 = *regs_stat & mask;
3125                 val64 = val64 >> (index * 0x2);
3126                 val64 = val64 + 1;
3127                 if(val64 == 3)
3128                 {
3129                         switch(type)
3130                         {
3131                         case 1:
3132                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3133                                           "service. Excessive temperatures may "
3134                                           "result in premature transceiver "
3135                                           "failure \n");
3136                         break;
3137                         case 2:
3138                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3139                                           "service Excessive bias currents may "
3140                                           "indicate imminent laser diode "
3141                                           "failure \n");
3142                         break;
3143                         case 3:
3144                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3145                                           "service Excessive laser output "
3146                                           "power may saturate far-end "
3147                                           "receiver\n");
3148                         break;
3149                         default:
3150                                 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3151                                           "type \n");
3152                         }
3153                         val64 = 0x0;
3154                 }
3155                 val64 = val64 << (index * 0x2);
3156                 *regs_stat = (*regs_stat & (~mask)) | (val64);
3157
3158         } else {
3159                 *regs_stat = *regs_stat & (~mask);
3160         }
3161 }
3162
3163 /**
3164  *  s2io_updt_xpak_counter - Function to update the xpak counters
3165  *  @dev         : pointer to net_device struct
3166  *  Description:
3167  *  This function is to upate the status of the xpak counters value
3168  *  NONE
3169  */
3170 static void s2io_updt_xpak_counter(struct net_device *dev)
3171 {
3172         u16 flag  = 0x0;
3173         u16 type  = 0x0;
3174         u16 val16 = 0x0;
3175         u64 val64 = 0x0;
3176         u64 addr  = 0x0;
3177
3178         struct s2io_nic *sp = dev->priv;
3179         struct stat_block *stat_info = sp->mac_control.stats_info;
3180
3181         /* Check the communication with the MDIO slave */
3182         addr = 0x0000;
3183         val64 = 0x0;
3184         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3185         if((val64 == 0xFFFF) || (val64 == 0x0000))
3186         {
3187                 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3188                           "Returned %llx\n", (unsigned long long)val64);
3189                 return;
3190         }
3191
3192         /* Check for the expecte value of 2040 at PMA address 0x0000 */
3193         if(val64 != 0x2040)
3194         {
3195                 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3196                 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3197                           (unsigned long long)val64);
3198                 return;
3199         }
3200
3201         /* Loading the DOM register to MDIO register */
3202         addr = 0xA100;
3203         s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3204         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3205
3206         /* Reading the Alarm flags */
3207         addr = 0xA070;
3208         val64 = 0x0;
3209         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3210
3211         flag = CHECKBIT(val64, 0x7);
3212         type = 1;
3213         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3214                                 &stat_info->xpak_stat.xpak_regs_stat,
3215                                 0x0, flag, type);
3216
3217         if(CHECKBIT(val64, 0x6))
3218                 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3219
3220         flag = CHECKBIT(val64, 0x3);
3221         type = 2;
3222         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3223                                 &stat_info->xpak_stat.xpak_regs_stat,
3224                                 0x2, flag, type);
3225
3226         if(CHECKBIT(val64, 0x2))
3227                 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3228
3229         flag = CHECKBIT(val64, 0x1);
3230         type = 3;
3231         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3232                                 &stat_info->xpak_stat.xpak_regs_stat,
3233                                 0x4, flag, type);
3234
3235         if(CHECKBIT(val64, 0x0))
3236                 stat_info->xpak_stat.alarm_laser_output_power_low++;
3237
3238         /* Reading the Warning flags */
3239         addr = 0xA074;
3240         val64 = 0x0;
3241         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3242
3243         if(CHECKBIT(val64, 0x7))
3244                 stat_info->xpak_stat.warn_transceiver_temp_high++;
3245
3246         if(CHECKBIT(val64, 0x6))
3247                 stat_info->xpak_stat.warn_transceiver_temp_low++;
3248
3249         if(CHECKBIT(val64, 0x3))
3250                 stat_info->xpak_stat.warn_laser_bias_current_high++;
3251
3252         if(CHECKBIT(val64, 0x2))
3253                 stat_info->xpak_stat.warn_laser_bias_current_low++;
3254
3255         if(CHECKBIT(val64, 0x1))
3256                 stat_info->xpak_stat.warn_laser_output_power_high++;
3257
3258         if(CHECKBIT(val64, 0x0))
3259                 stat_info->xpak_stat.warn_laser_output_power_low++;
3260 }
3261
3262 /**
3263  *  wait_for_cmd_complete - waits for a command to complete.
3264  *  @sp : private member of the device structure, which is a pointer to the
3265  *  s2io_nic structure.
3266  *  Description: Function that waits for a command to Write into RMAC
3267  *  ADDR DATA registers to be completed and returns either success or
3268  *  error depending on whether the command was complete or not.
3269  *  Return value:
3270  *   SUCCESS on success and FAILURE on failure.
3271  */
3272
3273 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3274                                 int bit_state)
3275 {
3276         int ret = FAILURE, cnt = 0, delay = 1;
3277         u64 val64;
3278
3279         if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3280                 return FAILURE;
3281
3282         do {
3283                 val64 = readq(addr);
3284                 if (bit_state == S2IO_BIT_RESET) {
3285                         if (!(val64 & busy_bit)) {
3286                                 ret = SUCCESS;
3287                                 break;
3288                         }
3289                 } else {
3290                         if (!(val64 & busy_bit)) {
3291                                 ret = SUCCESS;
3292                                 break;
3293                         }
3294                 }
3295
3296                 if(in_interrupt())
3297                         mdelay(delay);
3298                 else
3299                         msleep(delay);
3300
3301                 if (++cnt >= 10)
3302                         delay = 50;
3303         } while (cnt < 20);
3304         return ret;
3305 }
3306 /*
3307  * check_pci_device_id - Checks if the device id is supported
3308  * @id : device id
3309  * Description: Function to check if the pci device id is supported by driver.
3310  * Return value: Actual device id if supported else PCI_ANY_ID
3311  */
3312 static u16 check_pci_device_id(u16 id)
3313 {
3314         switch (id) {
3315         case PCI_DEVICE_ID_HERC_WIN:
3316         case PCI_DEVICE_ID_HERC_UNI:
3317                 return XFRAME_II_DEVICE;
3318         case PCI_DEVICE_ID_S2IO_UNI:
3319         case PCI_DEVICE_ID_S2IO_WIN:
3320                 return XFRAME_I_DEVICE;
3321         default:
3322                 return PCI_ANY_ID;
3323         }
3324 }
3325
3326 /**
3327  *  s2io_reset - Resets the card.
3328  *  @sp : private member of the device structure.
3329  *  Description: Function to Reset the card. This function then also
3330  *  restores the previously saved PCI configuration space registers as
3331  *  the card reset also resets the configuration space.
3332  *  Return value:
3333  *  void.
3334  */
3335
3336 static void s2io_reset(struct s2io_nic * sp)
3337 {
3338         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3339         u64 val64;
3340         u16 subid, pci_cmd;
3341         int i;
3342         u16 val16;
3343         unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3344         unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3345
3346         DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3347                         __FUNCTION__, sp->dev->name);
3348
3349         /* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3350         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3351
3352         val64 = SW_RESET_ALL;
3353         writeq(val64, &bar0->sw_reset);
3354         if (strstr(sp->product_name, "CX4")) {
3355                 msleep(750);
3356         }
3357         msleep(250);
3358         for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3359
3360                 /* Restore the PCI state saved during initialization. */
3361                 pci_restore_state(sp->pdev);
3362                 pci_read_config_word(sp->pdev, 0x2, &val16);
3363                 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3364                         break;
3365                 msleep(200);
3366         }
3367
3368         if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3369                 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3370         }
3371
3372         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3373
3374         s2io_init_pci(sp);
3375
3376         /* Set swapper to enable I/O register access */
3377         s2io_set_swapper(sp);
3378
3379         /* Restore the MSIX table entries from local variables */
3380         restore_xmsi_data(sp);
3381
3382         /* Clear certain PCI/PCI-X fields after reset */
3383         if (sp->device_type == XFRAME_II_DEVICE) {
3384                 /* Clear "detected parity error" bit */
3385                 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3386
3387                 /* Clearing PCIX Ecc status register */
3388                 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3389
3390                 /* Clearing PCI_STATUS error reflected here */
3391                 writeq(s2BIT(62), &bar0->txpic_int_reg);
3392         }
3393
3394         /* Reset device statistics maintained by OS */
3395         memset(&sp->stats, 0, sizeof (struct net_device_stats));
3396
3397         up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
3398         down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
3399         up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
3400         down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
3401         reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
3402         mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
3403         mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
3404         watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
3405         /* save link up/down time/cnt, reset/memory/watchdog cnt */
3406         memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
3407         /* restore link up/down time/cnt, reset/memory/watchdog cnt */
3408         sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
3409         sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
3410         sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
3411         sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
3412         sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
3413         sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
3414         sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
3415         sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;
3416
3417         /* SXE-002: Configure link and activity LED to turn it off */
3418         subid = sp->pdev->subsystem_device;
3419         if (((subid & 0xFF) >= 0x07) &&
3420             (sp->device_type == XFRAME_I_DEVICE)) {
3421                 val64 = readq(&bar0->gpio_control);
3422                 val64 |= 0x0000800000000000ULL;
3423                 writeq(val64, &bar0->gpio_control);
3424                 val64 = 0x0411040400000000ULL;
3425                 writeq(val64, (void __iomem *)bar0 + 0x2700);
3426         }
3427
3428         /*
3429          * Clear spurious ECC interrupts that would have occured on
3430          * XFRAME II cards after reset.
3431          */
3432         if (sp->device_type == XFRAME_II_DEVICE) {
3433                 val64 = readq(&bar0->pcc_err_reg);
3434                 writeq(val64, &bar0->pcc_err_reg);
3435         }
3436
3437         /* restore the previously assigned mac address */
3438         do_s2io_prog_unicast(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);
3439
3440         sp->device_enabled_once = FALSE;
3441 }
3442
3443 /**
3444  *  s2io_set_swapper - to set the swapper controle on the card
3445  *  @sp : private member of the device structure,
3446  *  pointer to the s2io_nic structure.
3447  *  Description: Function to set the swapper control on the card
3448  *  correctly depending on the 'endianness' of the system.
3449  *  Return value:
3450  *  SUCCESS on success and FAILURE on failure.
3451  */
3452
3453 static int s2io_set_swapper(struct s2io_nic * sp)
3454 {
3455         struct net_device *dev = sp->dev;
3456         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3457         u64 val64, valt, valr;
3458
3459         /*
3460          * Set proper endian settings and verify the same by reading
3461          * the PIF Feed-back register.
3462          */
3463
3464         val64 = readq(&bar0->pif_rd_swapper_fb);
3465         if (val64 != 0x0123456789ABCDEFULL) {
3466                 int i = 0;
3467                 u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
3468                                 0x8100008181000081ULL,  /* FE=1, SE=0 */
3469                                 0x4200004242000042ULL,  /* FE=0, SE=1 */
3470                                 0};                     /* FE=0, SE=0 */
3471
3472                 while(i<4) {
3473                         writeq(value[i], &bar0->swapper_ctrl);
3474                         val64 = readq(&bar0->pif_rd_swapper_fb);
3475                         if (val64 == 0x0123456789ABCDEFULL)
3476                                 break;
3477                         i++;
3478                 }
3479                 if (i == 4) {
3480                         DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3481                                 dev->name);
3482                         DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3483                                 (unsigned long long) val64);
3484                         return FAILURE;
3485                 }
3486                 valr = value[i];
3487         } else {
3488                 valr = readq(&bar0->swapper_ctrl);
3489         }
3490
3491         valt = 0x0123456789ABCDEFULL;
3492         writeq(valt, &bar0->xmsi_address);
3493         val64 = readq(&bar0->xmsi_address);
3494
3495         if(val64 != valt) {
3496                 int i = 0;
3497                 u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
3498                                 0x0081810000818100ULL,  /* FE=1, SE=0 */
3499                                 0x0042420000424200ULL,  /* FE=0, SE=1 */
3500                                 0};                     /* FE=0, SE=0 */
3501
3502                 while(i<4) {
3503                         writeq((value[i] | valr), &bar0->swapper_ctrl);
3504                         writeq(valt, &bar0->xmsi_address);
3505                         val64 = readq(&bar0->xmsi_address);
3506                         if(val64 == valt)
3507                                 break;
3508                         i++;
3509                 }
3510                 if(i == 4) {
3511                         unsigned long long x = val64;
3512                         DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3513                         DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3514                         return FAILURE;
3515                 }
3516         }
3517         val64 = readq(&bar0->swapper_ctrl);
3518         val64 &= 0xFFFF000000000000ULL;
3519
3520 #ifdef  __BIG_ENDIAN
3521         /*
3522          * The device by default set to a big endian format, so a
3523          * big endian driver need not set anything.
3524          */
3525         val64 |= (SWAPPER_CTRL_TXP_FE |
3526                  SWAPPER_CTRL_TXP_SE |
3527                  SWAPPER_CTRL_TXD_R_FE |
3528                  SWAPPER_CTRL_TXD_W_FE |
3529                  SWAPPER_CTRL_TXF_R_FE |
3530                  SWAPPER_CTRL_RXD_R_FE |
3531                  SWAPPER_CTRL_RXD_W_FE |
3532                  SWAPPER_CTRL_RXF_W_FE |
3533                  SWAPPER_CTRL_XMSI_FE |
3534                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3535         if (sp->config.intr_type == INTA)
3536                 val64 |= SWAPPER_CTRL_XMSI_SE;
3537         writeq(val64, &bar0->swapper_ctrl);
3538 #else
3539         /*
3540          * Initially we enable all bits to make it accessible by the
3541          * driver, then we selectively enable only those bits that
3542          * we want to set.
3543          */
3544         val64 |= (SWAPPER_CTRL_TXP_FE |
3545                  SWAPPER_CTRL_TXP_SE |
3546                  SWAPPER_CTRL_TXD_R_FE |
3547                  SWAPPER_CTRL_TXD_R_SE |
3548                  SWAPPER_CTRL_TXD_W_FE |
3549                  SWAPPER_CTRL_TXD_W_SE |
3550                  SWAPPER_CTRL_TXF_R_FE |
3551                  SWAPPER_CTRL_RXD_R_FE |
3552                  SWAPPER_CTRL_RXD_R_SE |
3553                  SWAPPER_CTRL_RXD_W_FE |
3554                  SWAPPER_CTRL_RXD_W_SE |
3555                  SWAPPER_CTRL_RXF_W_FE |
3556                  SWAPPER_CTRL_XMSI_FE |
3557                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3558         if (sp->config.intr_type == INTA)
3559                 val64 |= SWAPPER_CTRL_XMSI_SE;
3560         writeq(val64, &bar0->swapper_ctrl);
3561 #endif
3562         val64 = readq(&bar0->swapper_ctrl);
3563
3564         /*
3565          * Verifying if endian settings are accurate by reading a
3566          * feedback register.
3567          */
3568         val64 = readq(&bar0->pif_rd_swapper_fb);
3569         if (val64 != 0x0123456789ABCDEFULL) {
3570                 /* Endian settings are incorrect, calls for another dekko. */
3571                 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3572                           dev->name);
3573                 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3574                           (unsigned long long) val64);
3575                 return FAILURE;
3576         }
3577
3578         return SUCCESS;
3579 }
3580
3581 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3582 {
3583         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3584         u64 val64;
3585         int ret = 0, cnt = 0;
3586
3587         do {
3588                 val64 = readq(&bar0->xmsi_access);
3589                 if (!(val64 & s2BIT(15)))
3590                         break;
3591                 mdelay(1);
3592                 cnt++;
3593         } while(cnt < 5);
3594         if (cnt == 5) {
3595                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3596                 ret = 1;
3597         }
3598
3599         return ret;
3600 }
3601
3602 static void restore_xmsi_data(struct s2io_nic *nic)
3603 {
3604         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3605         u64 val64;
3606         int i;
3607
3608         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3609                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3610                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3611                 val64 = (s2BIT(7) | s2BIT(15) | vBIT(i, 26, 6));
3612                 writeq(val64, &bar0->xmsi_access);
3613                 if (wait_for_msix_trans(nic, i)) {
3614                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3615                         continue;
3616                 }
3617         }
3618 }
3619
3620 static void store_xmsi_data(struct s2io_nic *nic)
3621 {
3622         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3623         u64 val64, addr, data;
3624         int i;
3625
3626         /* Store and display */
3627         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3628                 val64 = (s2BIT(15) | vBIT(i, 26, 6));
3629                 writeq(val64, &bar0->xmsi_access);
3630                 if (wait_for_msix_trans(nic, i)) {
3631                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3632                         continue;
3633                 }
3634                 addr = readq(&bar0->xmsi_address);
3635                 data = readq(&bar0->xmsi_data);
3636                 if (addr && data) {
3637                         nic->msix_info[i].addr = addr;
3638                         nic->msix_info[i].data = data;
3639                 }
3640         }
3641 }
3642
/*
 * s2io_enable_msi_x - allocate vector tables and enable MSI-X
 * @nic: device private structure
 *
 * Allocates the PCI msix_entry array and the driver's parallel
 * s2io_msix_entry bookkeeping array, programs the Tx/Rx interrupt
 * steering registers so that each fifo and ring gets its own vector
 * (vector 0 is left for alarm/general interrupts; data vectors start
 * at index 1), then enables MSI-X on the PCI device.
 *
 * Returns 0 on success, -ENOMEM on allocation or enable failure.
 * On failure both arrays are freed and the pointers cleared.
 */
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 tx_mat, rx_mat;
	u16 msi_control; /* Temp variable */
	int ret, i, j, msix_indx = 1;

	nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry),
			       GFP_KERNEL);
	if (!nic->entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
			__FUNCTION__);
		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
		return -ENOMEM;
	}
	nic->mac_control.stats_info->sw_stat.mem_allocated
		+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));

	nic->s2io_entries =
		kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry),
				   GFP_KERNEL);
	if (!nic->s2io_entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			__FUNCTION__);
		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
		kfree(nic->entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
		return -ENOMEM;
	}
	nic->mac_control.stats_info->sw_stat.mem_allocated
		+= (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));

	/* Initialize every table slot as unused */
	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
		nic->entries[i].entry = i;
		nic->s2io_entries[i].entry = i;
		nic->s2io_entries[i].arg = NULL;
		nic->s2io_entries[i].in_use = 0;
	}

	/* Steer each Tx fifo's interrupt to its own MSI-X vector */
	tx_mat = readq(&bar0->tx_mat0_n[0]);
	for (i = 0; i < nic->config.tx_fifo_num; i++, msix_indx++) {
		tx_mat |= TX_MAT_SET(i, msix_indx);
		nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
		nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
		nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
	}
	writeq(tx_mat, &bar0->tx_mat0_n[0]);

	/* Steer each Rx ring's interrupt to the next vectors */
	rx_mat = readq(&bar0->rx_mat);
	for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
		rx_mat |= RX_MAT_SET(j, msix_indx);
		nic->s2io_entries[msix_indx].arg
			= &nic->mac_control.rings[j];
		nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
		nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
	}
	writeq(rx_mat, &bar0->rx_mat);

	nic->avail_msix_vectors = 0;
	ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
	/* We fail init if error or we get less vectors than min required */
	if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
		/* Positive return = number of vectors available; retry with
		 * exactly that many since it still satisfies the minimum. */
		nic->avail_msix_vectors = ret;
		ret = pci_enable_msix(nic->pdev, nic->entries, ret);
	}
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
		kfree(nic->entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
		kfree(nic->s2io_entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
		+= (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
		nic->entries = NULL;
		nic->s2io_entries = NULL;
		nic->avail_msix_vectors = 0;
		return -ENOMEM;
	}
	if (!nic->avail_msix_vectors)
		nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;

	/*
	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
	 * in the herc NIC. (Temp change, needs to be removed later)
	 */
	pci_read_config_word(nic->pdev, 0x42, &msi_control);
	msi_control |= 0x1; /* Enable MSI */
	pci_write_config_word(nic->pdev, 0x42, msi_control);

	return 0;
}
3735
3736 /* Handle software interrupt used during MSI(X) test */
3737 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3738 {
3739         struct s2io_nic *sp = dev_id;
3740
3741         sp->msi_detected = 1;
3742         wake_up(&sp->msi_wait);
3743
3744         return IRQ_HANDLED;
3745 }
3746
/* Test interrupt path by forcing a software IRQ */
3748 static int s2io_test_msi(struct s2io_nic *sp)
3749 {
3750         struct pci_dev *pdev = sp->pdev;
3751         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3752         int err;
3753         u64 val64, saved64;
3754
3755         err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3756                         sp->name, sp);
3757         if (err) {
3758                 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3759                        sp->dev->name, pci_name(pdev), pdev->irq);
3760                 return err;
3761         }
3762
3763         init_waitqueue_head (&sp->msi_wait);
3764         sp->msi_detected = 0;
3765
3766         saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3767         val64 |= SCHED_INT_CTRL_ONE_SHOT;
3768         val64 |= SCHED_INT_CTRL_TIMER_EN;
3769         val64 |= SCHED_INT_CTRL_INT2MSI(1);
3770         writeq(val64, &bar0->scheduled_int_ctrl);
3771
3772         wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3773
3774         if (!sp->msi_detected) {
3775                 /* MSI(X) test failed, go back to INTx mode */
3776                 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated"
3777                         "using MSI(X) during test\n", sp->dev->name,
3778                         pci_name(pdev));
3779
3780                 err = -EOPNOTSUPP;
3781         }
3782
3783         free_irq(sp->entries[1].vector, sp);
3784
3785         writeq(saved64, &bar0->scheduled_int_ctrl);
3786
3787         return err;
3788 }
3789
3790 static void remove_msix_isr(struct s2io_nic *sp)
3791 {
3792         int i;
3793         u16 msi_control;
3794
3795         for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3796                 if (sp->s2io_entries[i].in_use ==
3797                         MSIX_REGISTERED_SUCCESS) {
3798                         int vector = sp->entries[i].vector;
3799                         void *arg = sp->s2io_entries[i].arg;
3800                         free_irq(vector, arg);
3801                 }
3802         }
3803
3804         kfree(sp->entries);
3805         kfree(sp->s2io_entries);
3806         sp->entries = NULL;
3807         sp->s2io_entries = NULL;
3808
3809         pci_read_config_word(sp->pdev, 0x42, &msi_control);
3810         msi_control &= 0xFFFE; /* Disable MSI */
3811         pci_write_config_word(sp->pdev, 0x42, msi_control);
3812
3813         pci_disable_msix(sp->pdev);
3814 }
3815
3816 static void remove_inta_isr(struct s2io_nic *sp)
3817 {
3818         struct net_device *dev = sp->dev;
3819
3820         free_irq(sp->pdev->irq, dev);
3821 }
3822
3823 /* ********************************************************* *
3824  * Functions defined below concern the OS part of the driver *
3825  * ********************************************************* */
3826
3827 /**
3828  *  s2io_open - open entry point of the driver
3829  *  @dev : pointer to the device structure.
3830  *  Description:
3831  *  This function is the open entry point of the driver. It mainly calls a
3832  *  function to allocate Rx buffers and inserts them into the buffer
3833  *  descriptors and then enables the Rx part of the NIC.
3834  *  Return value:
3835  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3836  *   file on failure.
3837  */
3838
3839 static int s2io_open(struct net_device *dev)
3840 {
3841         struct s2io_nic *sp = dev->priv;
3842         int err = 0;
3843
3844         /*
3845          * Make sure you have link off by default every time
3846          * Nic is initialized
3847          */
3848         netif_carrier_off(dev);
3849         sp->last_link_state = 0;
3850
3851         if (sp->config.intr_type == MSI_X) {
3852                 int ret = s2io_enable_msi_x(sp);
3853
3854                 if (!ret) {
3855                         ret = s2io_test_msi(sp);
3856                         /* rollback MSI-X, will re-enable during add_isr() */
3857                         remove_msix_isr(sp);
3858                 }
3859                 if (ret) {
3860
3861                         DBG_PRINT(ERR_DBG,
3862                           "%s: MSI-X requested but failed to enable\n",
3863                           dev->name);
3864                         sp->config.intr_type = INTA;
3865                 }
3866         }
3867
3868         /* NAPI doesn't work well with MSI(X) */
3869          if (sp->config.intr_type != INTA) {
3870                 if(sp->config.napi)
3871                         sp->config.napi = 0;
3872         }
3873
3874         /* Initialize H/W and enable interrupts */
3875         err = s2io_card_up(sp);
3876         if (err) {
3877                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3878                           dev->name);
3879                 goto hw_init_failed;
3880         }
3881
3882         if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3883                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3884                 s2io_card_down(sp);
3885                 err = -ENODEV;
3886                 goto hw_init_failed;
3887         }
3888
3889         netif_start_queue(dev);
3890         return 0;
3891
3892 hw_init_failed:
3893         if (sp->config.intr_type == MSI_X) {
3894                 if (sp->entries) {
3895                         kfree(sp->entries);
3896                         sp->mac_control.stats_info->sw_stat.mem_freed
3897                         += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3898                 }
3899                 if (sp->s2io_entries) {
3900                         kfree(sp->s2io_entries);
3901                         sp->mac_control.stats_info->sw_stat.mem_freed
3902                         += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3903                 }
3904         }
3905         return err;
3906 }
3907
3908 /**
3909  *  s2io_close -close entry point of the driver
3910  *  @dev : device pointer.
3911  *  Description:
3912  *  This is the stop entry point of the driver. It needs to undo exactly
3913  *  whatever was done by the open entry point,thus it's usually referred to
3914  *  as the close function.Among other things this function mainly stops the
3915  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3916  *  Return value:
3917  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3918  *  file on failure.
3919  */
3920
3921 static int s2io_close(struct net_device *dev)
3922 {
3923         struct s2io_nic *sp = dev->priv;
3924
3925         /* Return if the device is already closed               *
3926         *  Can happen when s2io_card_up failed in change_mtu    *
3927         */
3928         if (!is_s2io_card_up(sp))
3929                 return 0;
3930
3931         netif_stop_queue(dev);
3932         /* Reset card, kill tasklet and free Tx and Rx buffers. */
3933         s2io_card_down(sp);
3934
3935         return 0;
3936 }
3937
3938 /**
 *  s2io_xmit - Tx entry point of the driver
 *  @skb : the socket buffer containing the Tx data.
 *  @dev : device pointer.
 *  Description :
 *  This function is the Tx entry point of the driver. S2IO NIC supports
 *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
 *  NOTE: when device can't queue the pkt, just the trans_start variable will
 *  not be updated.
3947  *  Return value:
3948  *  0 on success & 1 on failure.
3949  */
3950
static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags;
	u16 vlan_tag = 0;
	int vlan_priority = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	int offload_type;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	mac_control = &sp->mac_control;
	config = &sp->config;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

	/* Drop zero-length packets outright */
	if (unlikely(skb->len <= 0)) {
		DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
		dev_kfree_skb_any(skb);
		return 0;
	}

	spin_lock_irqsave(&sp->tx_lock, flags);
	/* Card is being reset/brought down: silently drop */
	if (!is_s2io_card_up(sp)) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		dev_kfree_skb(skb);
		return 0;
	}

	queue = 0;
	/* Get Fifo number to Transmit based on vlan priority */
	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		/* 802.1p priority lives in the top 3 bits of the tag */
		vlan_priority = vlan_tag >> 13;
		queue = config->fifo_mapping[vlan_priority];
	}

	put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
	get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
	txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
		list_virt_addr;

	queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control ||
		   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		netif_stop_queue(dev);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		return 0;
	}

	/* Program per-descriptor offload flags (LSO / checksum / VLAN) */
	offload_type = s2io_offload_type(skb);
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |=
		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
		     TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= config->tx_intr_type;

	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	frg_len = skb->len - skb->data_len;
	if (offload_type == SKB_GSO_UDP) {
		int ufo_size;

		/* UFO needs TxD0 to carry an 8-byte in-band fragment header
		 * holding the IPv6 fragment id. */
		ufo_size = s2io_udp_mss(skb);
		ufo_size &= ~7;
		txdp->Control_1 |= TXD_UFO_EN;
		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
		sp->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id;
#else
		sp->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
		txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
					sp->ufo_in_band_v,
					sizeof(u64), PCI_DMA_TODEVICE);
		if((txdp->Buffer_Pointer == 0) ||
			(txdp->Buffer_Pointer == DMA_ERROR_CODE))
			goto pci_map_failed;
		txdp++;
	}

	/* DMA-map the linear part of the skb into the (next) descriptor */
	txdp->Buffer_Pointer = pci_map_single
	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
	if((txdp->Buffer_Pointer == 0) ||
		(txdp->Buffer_Pointer == DMA_ERROR_CODE))
		goto pci_map_failed;

	txdp->Host_Control = (unsigned long) skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
	if (offload_type == SKB_GSO_UDP)
		txdp->Control_1 |= TXD_UFO_EN;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!frag->size)
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64) pci_map_page
		    (sp->pdev, frag->page, frag->page_offset,
		     frag->size, PCI_DMA_TODEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
		if (offload_type == SKB_GSO_UDP)
			txdp->Control_1 |= TXD_UFO_EN;
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	if (offload_type == SKB_GSO_UDP)
		frg_cnt++; /* as Txd0 was used for inband header */

	/* Hand the descriptor list to the hardware: point the fifo at the
	 * list, then write List_Control to kick off the transmit. */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	/* Order the MMIO writes before releasing the lock */
	mmiowb();

	/* Advance the circular "put" pointer */
	put_off++;
	if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	mac_control->fifos[queue].tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		netif_stop_queue(dev);
	}
	mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return 0;
pci_map_failed:
	/* DMA mapping failed: account it, throttle the queue, drop skb */
	stats->pci_map_fail_cnt++;
	netif_stop_queue(dev);
	stats->mem_freed += skb->truesize;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&sp->tx_lock, flags);
	return 0;
}
4125
4126 static void
4127 s2io_alarm_handle(unsigned long data)
4128 {
4129         struct s2io_nic *sp = (struct s2io_nic *)data;
4130         struct net_device *dev = sp->dev;
4131
4132         s2io_handle_errors(dev);
4133         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4134 }
4135
/*
 * Replenish Rx buffers for ring @rng_n from interrupt context.
 * With LRO off, urgency is graded via rx_buffer_level(): at PANIC level
 * buffers are refilled inline (guarded against the refill tasklet via
 * TASKLET_IN_USE), at LOW level the tasklet is scheduled instead.
 * With LRO on, always refill inline.
 * Returns 0 on success, -1 if an inline PANIC refill ran out of memory.
 */
static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
{
	int rxb_size, level;

	if (!sp->lro) {
		rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
		level = rx_buffer_level(sp, rxb_size, rng_n);

		if ((level == PANIC) && (!TASKLET_IN_USE)) {
			int ret;
			DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
			DBG_PRINT(INTR_DBG, "PANIC levels\n");
			if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
				DBG_PRINT(INFO_DBG, "Out of memory in %s",
					  __FUNCTION__);
				/* Release the tasklet-exclusion bit taken
				 * by TASKLET_IN_USE above */
				clear_bit(0, (&sp->tasklet_status));
				return -1;
			}
			clear_bit(0, (&sp->tasklet_status));
		} else if (level == LOW)
			tasklet_schedule(&sp->task);

	} else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
	}
	return 0;
}
4164
4165 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4166 {
4167         struct ring_info *ring = (struct ring_info *)dev_id;
4168         struct s2io_nic *sp = ring->nic;
4169
4170         if (!is_s2io_card_up(sp))
4171                 return IRQ_HANDLED;
4172
4173         rx_intr_handler(ring);
4174         s2io_chk_rx_buffers(sp, ring->ring_no);
4175
4176         return IRQ_HANDLED;
4177 }
4178
4179 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4180 {
4181         struct fifo_info *fifo = (struct fifo_info *)dev_id;
4182         struct s2io_nic *sp = fifo->nic;
4183
4184         if (!is_s2io_card_up(sp))
4185                 return IRQ_HANDLED;
4186
4187         tx_intr_handler(fifo);
4188         return IRQ_HANDLED;
4189 }
/*
 * Handle TXPIC (GPIO link state) interrupts: evaluate link up/down
 * transitions, update adapter enable/LED bits accordingly, and flip
 * the GPIO interrupt masks so only the opposite transition is armed.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |=  GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		}
		else if (val64 & GPIO_INT_REG_LINK_UP) {
			val64 = readq(&bar0->adapter_status);
				/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		}else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 &(~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* Flush/acknowledge by reading the mask register back */
	val64 = readq(&bar0->gpio_int_mask);
}
4250
4251 /**
 *  do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4253  *  @value: alarm bits
4254  *  @addr: address value
4255  *  @cnt: counter variable
4256  *  Description: Check for alarm and increment the counter
4257  *  Return Value:
4258  *  1 - if alarm bit set
4259  *  0 - if alarm bit is not set
4260  */
4261 static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4262                           unsigned long long *cnt)
4263 {
4264         u64 val64;
4265         val64 = readq(addr);
4266         if ( val64 & value ) {
4267                 writeq(val64, addr);
4268                 (*cnt)++;
4269                 return 1;
4270         }
4271         return 0;
4272
4273 }
4274
4275 /**
4276  *  s2io_handle_errors - Xframe error indication handler
4277  *  @nic: device private variable
4278  *  Description: Handle alarms such as loss of link, single or
4279  *  double ECC errors, critical and serious errors.
4280  *  Return Value:
4281  *  NONE
4282  */
4283 static void s2io_handle_errors(void * dev_id)
4284 {
4285         struct net_device *dev = (struct net_device *) dev_id;
4286         struct s2io_nic *sp = dev->priv;
4287         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4288         u64 temp64 = 0,val64=0;
4289         int i = 0;
4290
4291         struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4292         struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4293
4294         if (!is_s2io_card_up(sp))
4295                 return;
4296
4297         if (pci_channel_offline(sp->pdev))
4298                 return;
4299
4300         memset(&sw_stat->ring_full_cnt, 0,
4301                 sizeof(sw_stat->ring_full_cnt));
4302
4303         /* Handling the XPAK counters update */
4304         if(stats->xpak_timer_count < 72000) {
4305                 /* waiting for an hour */
4306                 stats->xpak_timer_count++;
4307         } else {
4308                 s2io_updt_xpak_counter(dev);
4309                 /* reset the count to zero */
4310                 stats->xpak_timer_count = 0;
4311         }
4312
4313         /* Handling link status change error Intr */
4314         if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4315                 val64 = readq(&bar0->mac_rmac_err_reg);
4316                 writeq(val64, &bar0->mac_rmac_err_reg);
4317                 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4318                         schedule_work(&sp->set_link_task);
4319         }
4320
4321         /* In case of a serious error, the device will be Reset. */
4322         if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4323                                 &sw_stat->serious_err_cnt))
4324                 goto reset;
4325
4326         /* Check for data parity error */
4327         if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4328                                 &sw_stat->parity_err_cnt))
4329                 goto reset;
4330
4331         /* Check for ring full counter */
4332         if (sp->device_type == XFRAME_II_DEVICE) {
4333                 val64 = readq(&bar0->ring_bump_counter1);
4334                 for (i=0; i<4; i++) {
4335                         temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4336                         temp64 >>= 64 - ((i+1)*16);
4337                         sw_stat->ring_full_cnt[i] += temp64;
4338                 }
4339
4340                 val64 = readq(&bar0->ring_bump_counter2);
4341                 for (i=0; i<4; i++) {
4342                         temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4343                         temp64 >>= 64 - ((i+1)*16);
4344                          sw_stat->ring_full_cnt[i+4] += temp64;
4345                 }
4346         }
4347
4348         val64 = readq(&bar0->txdma_int_status);
4349         /*check for pfc_err*/
4350         if (val64 & TXDMA_PFC_INT) {
4351                 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4352                                 PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4353                                 PFC_PCIX_ERR, &bar0->pfc_err_reg,
4354                                 &sw_stat->pfc_err_cnt))
4355                         goto reset;
4356                 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4357                                 &sw_stat->pfc_err_cnt);
4358         }
4359
4360         /*check for tda_err*/
4361         if (val64 & TXDMA_TDA_INT) {
4362                 if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4363                                 TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4364                                 &sw_stat->tda_err_cnt))
4365                         goto reset;
4366                 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4367                                 &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4368         }
4369         /*check for pcc_err*/
4370         if (val64 & TXDMA_PCC_INT) {
4371                 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4372                                 | PCC_N_SERR | PCC_6_COF_OV_ERR
4373                                 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4374                                 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4375                                 | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4376                                 &sw_stat->pcc_err_cnt))
4377                         goto reset;
4378                 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4379                                 &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4380         }
4381
4382         /*check for tti_err*/
4383         if (val64 & TXDMA_TTI_INT) {
4384                 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4385                                 &sw_stat->tti_err_cnt))
4386                         goto reset;
4387                 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4388                                 &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4389         }
4390
4391         /*check for lso_err*/
4392         if (val64 & TXDMA_LSO_INT) {
4393                 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4394                                 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4395                                 &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4396                         goto reset;
4397                 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4398                                 &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4399         }
4400
4401         /*check for tpa_err*/
4402         if (val64 & TXDMA_TPA_INT) {
4403                 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4404                         &sw_stat->tpa_err_cnt))
4405                         goto reset;
4406                 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4407                         &sw_stat->tpa_err_cnt);
4408         }
4409
4410         /*check for sm_err*/
4411         if (val64 & TXDMA_SM_INT) {
4412                 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4413                         &sw_stat->sm_err_cnt))
4414                         goto reset;
4415         }
4416
4417         val64 = readq(&bar0->mac_int_status);
4418         if (val64 & MAC_INT_STATUS_TMAC_INT) {
4419                 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4420                                 &bar0->mac_tmac_err_reg,
4421                                 &sw_stat->mac_tmac_err_cnt))
4422                         goto reset;
4423                 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4424                                 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4425                                 &bar0->mac_tmac_err_reg,
4426                                 &sw_stat->mac_tmac_err_cnt);
4427         }
4428
4429         val64 = readq(&bar0->xgxs_int_status);
4430         if (val64 & XGXS_INT_STATUS_TXGXS) {
4431                 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4432                                 &bar0->xgxs_txgxs_err_reg,
4433                                 &sw_stat->xgxs_txgxs_err_cnt))
4434                         goto reset;
4435                 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4436                                 &bar0->xgxs_txgxs_err_reg,
4437                                 &sw_stat->xgxs_txgxs_err_cnt);
4438         }
4439
4440         val64 = readq(&bar0->rxdma_int_status);
4441         if (val64 & RXDMA_INT_RC_INT_M) {
4442                 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4443                                 | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4444                                 &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4445                         goto reset;
4446                 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4447                                 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4448                                 &sw_stat->rc_err_cnt);
4449                 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4450                                 | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4451                                 &sw_stat->prc_pcix_err_cnt))
4452                         goto reset;
4453                 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4454                                 | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4455                                 &sw_stat->prc_pcix_err_cnt);
4456         }
4457
4458         if (val64 & RXDMA_INT_RPA_INT_M) {
4459                 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4460                                 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4461                         goto reset;
4462                 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4463                                 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4464         }
4465
4466         if (val64 & RXDMA_INT_RDA_INT_M) {
4467                 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4468                                 | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4469                                 | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4470                                 &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4471                         goto reset;
4472                 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4473                                 | RDA_MISC_ERR | RDA_PCIX_ERR,
4474                                 &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4475         }
4476
4477         if (val64 & RXDMA_INT_RTI_INT_M) {
4478                 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4479                                 &sw_stat->rti_err_cnt))
4480                         goto reset;
4481                 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4482                                 &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
4483         }
4484
4485         val64 = readq(&bar0->mac_int_status);
4486         if (val64 & MAC_INT_STATUS_RMAC_INT) {
4487                 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4488                                 &bar0->mac_rmac_err_reg,
4489                                 &sw_stat->mac_rmac_err_cnt))
4490                         goto reset;
4491                 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4492                                 RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4493                                 &sw_stat->mac_rmac_err_cnt);
4494         }
4495
4496         val64 = readq(&bar0->xgxs_int_status);
4497         if (val64 & XGXS_INT_STATUS_RXGXS) {
4498                 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4499                                 &bar0->xgxs_rxgxs_err_reg,
4500                                 &sw_stat->xgxs_rxgxs_err_cnt))
4501                         goto reset;
4502         }
4503
4504         val64 = readq(&bar0->mc_int_status);
4505         if(val64 & MC_INT_STATUS_MC_INT) {
4506                 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4507                                 &sw_stat->mc_err_cnt))
4508                         goto reset;
4509
4510                 /* Handling Ecc errors */
4511                 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4512                         writeq(val64, &bar0->mc_err_reg);
4513                         if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4514                                 sw_stat->double_ecc_errs++;
4515                                 if (sp->device_type != XFRAME_II_DEVICE) {
4516                                         /*
4517                                          * Reset XframeI only if critical error
4518                                          */
4519                                         if (val64 &
4520                                                 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4521                                                 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4522                                                                 goto reset;
4523                                         }
4524                         } else
4525                                 sw_stat->single_ecc_errs++;
4526                 }
4527         }
4528         return;
4529
4530 reset:
4531         netif_stop_queue(dev);
4532         schedule_work(&sp->rst_timer_task);
4533         sw_stat->soft_reset_cnt++;
4534         return;
4535 }
4536
4537 /**
4538  *  s2io_isr - ISR handler of the device .
4539  *  @irq: the irq of the device.
4540  *  @dev_id: a void pointer to the dev structure of the NIC.
4541  *  Description:  This function is the ISR handler of the device. It
4542  *  identifies the reason for the interrupt and calls the relevant
4543  *  service routines. As a contingency measure, this ISR allocates the
4544  *  recv buffers, if their numbers are below the panic value which is
4545  *  presently set to 25% of the original number of rcv buffers allocated.
4546  *  Return value:
4547  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4548  *   IRQ_NONE: will be returned if interrupt is not from our device
4549  */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        int i;
        u64 reason = 0;
        struct mac_info *mac_control;
        struct config_param *config;

        /* Pretend we handled any irq's from a disconnected card */
        if (pci_channel_offline(sp->pdev))
                return IRQ_NONE;

        /* Card is down (reset/close in progress): not our interrupt. */
        if (!is_s2io_card_up(sp))
                return IRQ_NONE;

        mac_control = &sp->mac_control;
        config = &sp->config;

        /*
         * Identify the cause for interrupt and call the appropriate
         * interrupt handler. Causes for the interrupt could be;
         * 1. Rx of packet.
         * 2. Tx complete.
         * 3. Link down.
         */
        reason = readq(&bar0->general_int_status);

        if (unlikely(reason == S2IO_MINUS_ONE) ) {
                /*
                 * A read of all 1s typically means the device is no longer
                 * responding on the bus (e.g. surprise removal).
                 * Nothing much can be done. Get out.
                 */
                return IRQ_HANDLED;
        }

        if (reason & (GEN_INTR_RXTRAFFIC |
                GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
        {
                /* Mask all interrupts while this one is being serviced. */
                writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

                if (config->napi) {
                        if (reason & GEN_INTR_RXTRAFFIC) {
                                /*
                                 * Hand Rx to the NAPI poller; Rx stays masked
                                 * until the poll routine finishes.  If a poll
                                 * is already scheduled, just ack the event.
                                 */
                                if (likely(netif_rx_schedule_prep(dev,
                                                        &sp->napi))) {
                                        __netif_rx_schedule(dev, &sp->napi);
                                        writeq(S2IO_MINUS_ONE,
                                               &bar0->rx_traffic_mask);
                                } else
                                        writeq(S2IO_MINUS_ONE,
                                               &bar0->rx_traffic_int);
                        }
                } else {
                        /*
                         * rx_traffic_int reg is an R1 register, writing all 1's
                         * will ensure that the actual interrupt causing bit
                         * get's cleared and hence a read can be avoided.
                         */
                        if (reason & GEN_INTR_RXTRAFFIC)
                                writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

                        for (i = 0; i < config->rx_ring_num; i++)
                                rx_intr_handler(&mac_control->rings[i]);
                }

                /*
                 * tx_traffic_int reg is an R1 register, writing all 1's
                 * will ensure that the actual interrupt causing bit get's
                 * cleared and hence a read can be avoided.
                 */
                if (reason & GEN_INTR_TXTRAFFIC)
                        writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

                for (i = 0; i < config->tx_fifo_num; i++)
                        tx_intr_handler(&mac_control->fifos[i]);

                if (reason & GEN_INTR_TXPIC)
                        s2io_txpic_intr_handle(sp);

                /*
                 * Reallocate the buffers from the interrupt handler itself.
                 * (In NAPI mode the poll routine refills instead.)
                 */
                if (!config->napi) {
                        for (i = 0; i < config->rx_ring_num; i++)
                                s2io_chk_rx_buffers(sp, i);
                }
                /*
                 * Restore the saved interrupt mask; the readback ensures the
                 * posted write reaches the device before we return.
                 */
                writeq(sp->general_int_mask, &bar0->general_int_mask);
                readl(&bar0->general_int_status);

                return IRQ_HANDLED;

        }
        else if (!reason) {
                /* The interrupt was not raised by us */
                return IRQ_NONE;
        }

        return IRQ_HANDLED;
}
4647
4648 /**
4649  * s2io_updt_stats -
4650  */
4651 static void s2io_updt_stats(struct s2io_nic *sp)
4652 {
4653         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4654         u64 val64;
4655         int cnt = 0;
4656
4657         if (is_s2io_card_up(sp)) {
4658                 /* Apprx 30us on a 133 MHz bus */
4659                 val64 = SET_UPDT_CLICKS(10) |
4660                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4661                 writeq(val64, &bar0->stat_cfg);
4662                 do {
4663                         udelay(100);
4664                         val64 = readq(&bar0->stat_cfg);
4665                         if (!(val64 & s2BIT(0)))
4666                                 break;
4667                         cnt++;
4668                         if (cnt == 5)
4669                                 break; /* Updt failed */
4670                 } while(1);
4671         }
4672 }
4673
4674 /**
4675  *  s2io_get_stats - Updates the device statistics structure.
4676  *  @dev : pointer to the device structure.
4677  *  Description:
4678  *  This function updates the device statistics structure in the s2io_nic
4679  *  structure and returns a pointer to the same.
4680  *  Return value:
4681  *  pointer to the updated net_device_stats structure.
4682  */
4683
4684 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4685 {
4686         struct s2io_nic *sp = dev->priv;
4687         struct mac_info *mac_control;
4688         struct config_param *config;
4689
4690
4691         mac_control = &sp->mac_control;
4692         config = &sp->config;
4693
4694         /* Configure Stats for immediate updt */
4695         s2io_updt_stats(sp);
4696
4697         sp->stats.tx_packets =
4698                 le32_to_cpu(mac_control->stats_info->tmac_frms);
4699         sp->stats.tx_errors =
4700                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4701         sp->stats.rx_errors =
4702                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4703         sp->stats.multicast =
4704                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4705         sp->stats.rx_length_errors =
4706                 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4707
4708         return (&sp->stats);
4709 }
4710
4711 /**
4712  *  s2io_set_multicast - entry point for multicast address enable/disable.
4713  *  @dev : pointer to the device structure
4714  *  Description:
4715  *  This function is a driver entry point which gets called by the kernel
4716  *  whenever multicast addresses must be enabled/disabled. This also gets
4717  *  called to set/reset promiscuous mode. Depending on the device flag, we
4718  *  determine, if multicast address must be enabled or if promiscuous mode
4719  *  is to be disabled etc.
4720  *  Return value:
4721  *  void.
4722  */
4723
static void s2io_set_multicast(struct net_device *dev)
{
        int i, j, prev_cnt;
        struct dev_mc_list *mclist;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        /* multi_mac/mask select the hardware's catch-all multicast entry;
         * dis_addr (all 1s) is written to disable a CAM slot. */
        u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
            0xfeffffffffffULL;
        u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
        void __iomem *add;

        if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
                /*  Enable all Multicast addresses */
                writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
                       &bar0->rmac_addr_data0_mem);
                writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
                       &bar0->rmac_addr_data1_mem);
                val64 = RMAC_ADDR_CMD_MEM_WE |
                    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                    RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
                writeq(val64, &bar0->rmac_addr_cmd_mem);
                /* Wait till command completes */
                wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET);

                sp->m_cast_flg = 1;
                sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
        } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
                /*  Disable all Multicast addresses */
                /* NOTE(review): this branch tests IFF_ALLMULTI *set* while
                 * m_cast_flg is set, yet disables all-multi; it looks like
                 * the intended condition is !(dev->flags & IFF_ALLMULTI).
                 * Verify against mainline before changing. */
                writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
                       &bar0->rmac_addr_data0_mem);
                writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
                       &bar0->rmac_addr_data1_mem);
                val64 = RMAC_ADDR_CMD_MEM_WE |
                    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                    RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
                writeq(val64, &bar0->rmac_addr_cmd_mem);
                /* Wait till command completes */
                wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET);

                sp->m_cast_flg = 0;
                sp->all_multi_pos = 0;
        }

        if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
                /*  Put the NIC into promiscuous mode */
                /* mac_cfg is key-protected: each 32-bit half must be
                 * unlocked by writing the key before being written. */
                add = &bar0->mac_cfg;
                val64 = readq(&bar0->mac_cfg);
                val64 |= MAC_CFG_RMAC_PROM_ENABLE;

                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) val64, add);
                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) (val64 >> 32), (add + 4));

                /* In promiscuous mode, stop stripping VLAN tags unless the
                 * user forced stripping on via the module parameter. */
                if (vlan_tag_strip != 1) {
                        val64 = readq(&bar0->rx_pa_cfg);
                        val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
                        writeq(val64, &bar0->rx_pa_cfg);
                        vlan_strip_flag = 0;
                }

                val64 = readq(&bar0->mac_cfg);
                sp->promisc_flg = 1;
                DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
                          dev->name);
        } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
                /*  Remove the NIC from promiscuous mode */
                add = &bar0->mac_cfg;
                val64 = readq(&bar0->mac_cfg);
                val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) val64, add);
                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) (val64 >> 32), (add + 4));

                /* Re-enable VLAN stripping unless disabled by the user. */
                if (vlan_tag_strip != 0) {
                        val64 = readq(&bar0->rx_pa_cfg);
                        val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
                        writeq(val64, &bar0->rx_pa_cfg);
                        vlan_strip_flag = 1;
                }

                val64 = readq(&bar0->mac_cfg);
                sp->promisc_flg = 0;
                DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
                          dev->name);
        }

        /*  Update individual M_CAST address list */
        if ((!sp->m_cast_flg) && dev->mc_count) {
                if (dev->mc_count >
                    (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
                        DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
                                  dev->name);
                        DBG_PRINT(ERR_DBG, "can be added, please enable ");
                        DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
                        return;
                }

                prev_cnt = sp->mc_addr_count;
                sp->mc_addr_count = dev->mc_count;

                /* Clear out the previous list of Mc in the H/W. */
                for (i = 0; i < prev_cnt; i++) {
                        /* Writing the all-ones address disables the slot. */
                        writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
                               &bar0->rmac_addr_data0_mem);
                        writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
                                &bar0->rmac_addr_data1_mem);
                        val64 = RMAC_ADDR_CMD_MEM_WE |
                            RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                            RMAC_ADDR_CMD_MEM_OFFSET
                            (MAC_MC_ADDR_START_OFFSET + i);
                        writeq(val64, &bar0->rmac_addr_cmd_mem);

                        /* Wait for command completes */
                        if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET)) {
                                DBG_PRINT(ERR_DBG, "%s: Adding ",
                                          dev->name);
                                DBG_PRINT(ERR_DBG, "Multicasts failed\n");
                                return;
                        }
                }

                /* Create the new Rx filter list and update the same in H/W. */
                for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
                     i++, mclist = mclist->next) {
                        memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
                               ETH_ALEN);
                        mac_addr = 0;
                        /* Pack the 6-byte address, big-endian, into a u64. */
                        for (j = 0; j < ETH_ALEN; j++) {
                                mac_addr |= mclist->dmi_addr[j];
                                mac_addr <<= 8;
                        }
                        mac_addr >>= 8;
                        writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
                               &bar0->rmac_addr_data0_mem);
                        writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
                                &bar0->rmac_addr_data1_mem);
                        val64 = RMAC_ADDR_CMD_MEM_WE |
                            RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                            RMAC_ADDR_CMD_MEM_OFFSET
                            (i + MAC_MC_ADDR_START_OFFSET);
                        writeq(val64, &bar0->rmac_addr_cmd_mem);

                        /* Wait for command completes */
                        if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET)) {
                                DBG_PRINT(ERR_DBG, "%s: Adding ",
                                          dev->name);
                                DBG_PRINT(ERR_DBG, "Multicasts failed\n");
                                return;
                        }
                }
        }
}
4887
4888 /* add unicast MAC address to CAM */
4889 static int do_s2io_add_unicast(struct s2io_nic *sp, u64 addr, int off)
4890 {
4891         u64 val64;
4892         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4893
4894         writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
4895                 &bar0->rmac_addr_data0_mem);
4896
4897         val64 =
4898                 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4899                 RMAC_ADDR_CMD_MEM_OFFSET(off);
4900         writeq(val64, &bar0->rmac_addr_cmd_mem);
4901
4902         /* Wait till command completes */
4903         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4904                 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4905                 S2IO_BIT_RESET)) {
4906                 DBG_PRINT(INFO_DBG, "add_mac_addr failed\n");
4907                 return FAILURE;
4908         }
4909         return SUCCESS;
4910 }
4911
4912 /**
4913  * s2io_set_mac_addr driver entry point
4914  */
4915 static int s2io_set_mac_addr(struct net_device *dev, void *p)
4916 {
4917         struct sockaddr *addr = p;
4918
4919         if (!is_valid_ether_addr(addr->sa_data))
4920                 return -EINVAL;
4921
4922         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4923
4924         /* store the MAC address in CAM */
4925         return (do_s2io_prog_unicast(dev, dev->dev_addr));
4926 }
4927
4928 /**
4929  *  do_s2io_prog_unicast - Programs the Xframe mac address
4930  *  @dev : pointer to the device structure.
4931  *  @addr: a uchar pointer to the new mac address which is to be set.
4932  *  Description : This procedure will program the Xframe to receive
4933  *  frames with new Mac Address
4934  *  Return value: SUCCESS on success and an appropriate (-)ve integer
4935  *  as defined in errno.h file on failure.
4936  */
4937 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
4938 {
4939         struct s2io_nic *sp = dev->priv;
4940         register u64 mac_addr = 0, perm_addr = 0;
4941         int i;
4942
4943         /*
4944         * Set the new MAC address as the new unicast filter and reflect this
4945         * change on the device address registered with the OS. It will be
4946         * at offset 0.
4947         */
4948         for (i = 0; i < ETH_ALEN; i++) {
4949                 mac_addr <<= 8;
4950                 mac_addr |= addr[i];
4951                 perm_addr <<= 8;
4952                 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
4953         }
4954
4955         /* check if the dev_addr is different than perm_addr */
4956         if (mac_addr == perm_addr)
4957                 return SUCCESS;
4958
4959         /* Update the internal structure with this new mac address */
4960         do_s2io_copy_mac_addr(sp, 0, mac_addr);
4961         return (do_s2io_add_unicast(sp, mac_addr, 0));
4962 }
4963
4964 /**
4965  * s2io_ethtool_sset - Sets different link parameters.
4966  * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
4967  * @info: pointer to the structure with parameters given by ethtool to set
4968  * link information.
4969  * Description:
4970  * The function sets different link parameters provided by the user onto
4971  * the NIC.
4972  * Return value:
4973  * 0 on success.
4974 */
4975
4976 static int s2io_ethtool_sset(struct net_device *dev,
4977                              struct ethtool_cmd *info)
4978 {
4979         struct s2io_nic *sp = dev->priv;
4980         if ((info->autoneg == AUTONEG_ENABLE) ||
4981             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4982                 return -EINVAL;
4983         else {
4984                 s2io_close(sp->dev);
4985                 s2io_open(sp->dev);
4986         }
4987
4988         return 0;
4989 }
4990
4991 /**
4992  * s2io_ethtool_gset - Return link specific information.
4993  * @sp : private member of the device structure, pointer to the
4994  *      s2io_nic structure.
4995  * @info : pointer to the structure with parameters given by ethtool
4996  * to return link information.
4997  * Description:
4998  * Returns link specific information like speed, duplex etc.. to ethtool.
4999  * Return value :
5000  * return 0 on success.
5001  */
5002
5003 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5004 {
5005         struct s2io_nic *sp = dev->priv;
5006         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5007         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5008         info->port = PORT_FIBRE;
5009
5010         /* info->transceiver */
5011         info->transceiver = XCVR_EXTERNAL;
5012
5013         if (netif_carrier_ok(sp->dev)) {
5014                 info->speed = 10000;
5015                 info->duplex = DUPLEX_FULL;
5016         } else {
5017                 info->speed = -1;
5018                 info->duplex = -1;
5019         }
5020
5021         info->autoneg = AUTONEG_DISABLE;
5022         return 0;
5023 }
5024
5025 /**
5026  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5027  * @sp : private member of the device structure, which is a pointer to the
5028  * s2io_nic structure.
5029  * @info : pointer to the structure with parameters given by ethtool to
5030  * return driver information.
5031  * Description:
5032  * Returns driver specific information like name, version etc.. to ethtool.
5033  * Return value:
5034  *  void
5035  */
5036
5037 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5038                                   struct ethtool_drvinfo *info)
5039 {
5040         struct s2io_nic *sp = dev->priv;
5041
5042         strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5043         strncpy(info->version, s2io_driver_version, sizeof(info->version));
5044         strncpy(info->fw_version, "", sizeof(info->fw_version));
5045         strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5046         info->regdump_len = XENA_REG_SPACE;
5047         info->eedump_len = XENA_EEPROM_SPACE;
5048 }
5049
5050 /**
5051  *  s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
5052  *  @sp: private member of the device structure, which is a pointer to the
5053  *  s2io_nic structure.
5054  *  @regs : pointer to the structure with parameters given by ethtool for
5055  *  dumping the registers.
5056  *  @reg_space: The input argument into which all the registers are dumped.
5057  *  Description:
5058  *  Dumps the entire register space of xFrame NIC into the user given
5059  *  buffer area.
5060  * Return value :
5061  * void .
5062 */
5063
5064 static void s2io_ethtool_gregs(struct net_device *dev,
5065                                struct ethtool_regs *regs, void *space)
5066 {
5067         int i;
5068         u64 reg;
5069         u8 *reg_space = (u8 *) space;
5070         struct s2io_nic *sp = dev->priv;
5071
5072         regs->len = XENA_REG_SPACE;
5073         regs->version = sp->pdev->subsystem_device;
5074
5075         for (i = 0; i < regs->len; i += 8) {
5076                 reg = readq(sp->bar0 + i);
5077                 memcpy((reg_space + i), &reg, 8);
5078         }
5079 }
5080
5081 /**
5082  *  s2io_phy_id  - timer function that alternates adapter LED.
5083  *  @data : address of the private member of the device structure, which
5084  *  is a pointer to the s2io_nic structure, provided as an u32.
5085  * Description: This is actually the timer function that alternates the
5086  * adapter LED bit of the adapter control bit to set/reset every time on
 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
5088  *  once every second.
5089 */
5090 static void s2io_phy_id(unsigned long data)
5091 {
5092         struct s2io_nic *sp = (struct s2io_nic *) data;
5093         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5094         u64 val64 = 0;
5095         u16 subid;
5096
5097         subid = sp->pdev->subsystem_device;
5098         if ((sp->device_type == XFRAME_II_DEVICE) ||
5099                    ((subid & 0xFF) >= 0x07)) {
5100                 val64 = readq(&bar0->gpio_control);
5101                 val64 ^= GPIO_CTRL_GPIO_0;
5102                 writeq(val64, &bar0->gpio_control);
5103         } else {
5104                 val64 = readq(&bar0->adapter_control);
5105                 val64 ^= ADAPTER_LED_ON;
5106                 writeq(val64, &bar0->adapter_control);
5107         }
5108
5109         mod_timer(&sp->id_timer, jiffies + HZ / 2);
5110 }
5111
5112 /**
5113  * s2io_ethtool_idnic - To physically identify the nic on the system.
5114  * @sp : private member of the device structure, which is a pointer to the
5115  * s2io_nic structure.
5116  * @id : pointer to the structure with identification parameters given by
5117  * ethtool.
5118  * Description: Used to physically identify the NIC on the system.
5119  * The Link LED will blink for a time specified by the user for
5120  * identification.
5121  * NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if its link is up.
5123  * Return value:
5124  * int , returns 0 on success
5125  */
5126
static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
{
        u64 val64 = 0, last_gpio_ctrl_val;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u16 subid;

        subid = sp->pdev->subsystem_device;
        /* Remember the GPIO state so it can be restored afterwards on
         * cards whose link indicator shares these bits. */
        last_gpio_ctrl_val = readq(&bar0->gpio_control);
        /* Early-revision Xframe I cards can only blink the LED while
         * the adapter is enabled (i.e. the link is up). */
        if ((sp->device_type == XFRAME_I_DEVICE) &&
                ((subid & 0xFF) < 0x07)) {
                val64 = readq(&bar0->adapter_control);
                if (!(val64 & ADAPTER_CNTL_EN)) {
                        printk(KERN_ERR
                               "Adapter Link down, cannot blink LED\n");
                        return -EFAULT;
                }
        }
        /* Lazily set up the blink timer on first use; s2io_phy_id()
         * toggles the LED and re-arms itself every HZ/2. */
        if (sp->id_timer.function == NULL) {
                init_timer(&sp->id_timer);
                sp->id_timer.function = s2io_phy_id;
                sp->id_timer.data = (unsigned long) sp;
        }
        mod_timer(&sp->id_timer, jiffies);
        /* Blink for the user-requested number of seconds, or for the
         * default flicker time when 0 was passed. */
        if (data)
                msleep_interruptible(data * HZ);
        else
                msleep_interruptible(MAX_FLICKER_TIME);
        del_timer_sync(&sp->id_timer);

        /* Restore the original GPIO state on affected cards. */
        if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
                writeq(last_gpio_ctrl_val, &bar0->gpio_control);
                last_gpio_ctrl_val = readq(&bar0->gpio_control);
        }

        return 0;
}
5164
5165 static void s2io_ethtool_gringparam(struct net_device *dev,
5166                                     struct ethtool_ringparam *ering)
5167 {
5168         struct s2io_nic *sp = dev->priv;
5169         int i,tx_desc_count=0,rx_desc_count=0;
5170
5171         if (sp->rxd_mode == RXD_MODE_1)
5172                 ering->rx_max_pending = MAX_RX_DESC_1;
5173         else if (sp->rxd_mode == RXD_MODE_3B)
5174                 ering->rx_max_pending = MAX_RX_DESC_2;
5175
5176         ering->tx_max_pending = MAX_TX_DESC;
5177         for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5178                 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5179
5180         DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5181         ering->tx_pending = tx_desc_count;
5182         rx_desc_count = 0;
5183         for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5184                 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5185
5186         ering->rx_pending = rx_desc_count;
5187
5188         ering->rx_mini_max_pending = 0;
5189         ering->rx_mini_pending = 0;
5190         if(sp->rxd_mode == RXD_MODE_1)
5191                 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5192         else if (sp->rxd_mode == RXD_MODE_3B)
5193                 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5194         ering->rx_jumbo_pending = rx_desc_count;
5195 }
5196
5197 /**
5198  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5199  * @sp : private member of the device structure, which is a pointer to the
5200  *      s2io_nic structure.
5201  * @ep : pointer to the structure with pause parameters given by ethtool.
5202  * Description:
5203  * Returns the Pause frame generation and reception capability of the NIC.
5204  * Return value:
5205  *  void
5206  */
5207 static void s2io_ethtool_getpause_data(struct net_device *dev,
5208                                        struct ethtool_pauseparam *ep)
5209 {
5210         u64 val64;
5211         struct s2io_nic *sp = dev->priv;
5212         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5213
5214         val64 = readq(&bar0->rmac_pause_cfg);
5215         if (val64 & RMAC_PAUSE_GEN_ENABLE)
5216                 ep->tx_pause = TRUE;
5217         if (val64 & RMAC_PAUSE_RX_ENABLE)
5218                 ep->rx_pause = TRUE;
5219         ep->autoneg = FALSE;
5220 }
5221
5222 /**
5223  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5224  * @sp : private member of the device structure, which is a pointer to the
5225  *      s2io_nic structure.
5226  * @ep : pointer to the structure with pause parameters given by ethtool.
5227  * Description:
5228  * It can be used to set or reset Pause frame generation or reception
5229  * support of the NIC.
5230  * Return value:
5231  * int, returns 0 on Success
5232  */
5233
5234 static int s2io_ethtool_setpause_data(struct net_device *dev,
5235                                struct ethtool_pauseparam *ep)
5236 {
5237         u64 val64;
5238         struct s2io_nic *sp = dev->priv;
5239         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5240
5241         val64 = readq(&bar0->rmac_pause_cfg);
5242         if (ep->tx_pause)
5243                 val64 |= RMAC_PAUSE_GEN_ENABLE;
5244         else
5245                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5246         if (ep->rx_pause)
5247                 val64 |= RMAC_PAUSE_RX_ENABLE;
5248         else
5249                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5250         writeq(val64, &bar0->rmac_pause_cfg);
5251         return 0;
5252 }
5253
5254 /**
5255  * read_eeprom - reads 4 bytes of data from user given offset.
5256  * @sp : private member of the device structure, which is a pointer to the
5257  *      s2io_nic structure.
5258  * @off : offset at which the data must be written
5259  * @data : Its an output parameter where the data read at the given
5260  *      offset is stored.
5261  * Description:
5262  * Will read 4 bytes of data from the user given offset and return the
5263  * read data.
5264  * NOTE: Will allow to read only part of the EEPROM visible through the
5265  *   I2C bus.
5266  * Return value:
5267  *  -1 on failure and 0 on success.
5268  */
5269
#define S2IO_DEV_ID             5
static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
{
        int ret = -1;
        u32 exit_cnt = 0;
        u64 val64;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;

        /* Xframe I: the EEPROM sits behind the I2C controller. Kick off
         * a read transaction and poll for its completion. */
        if (sp->device_type == XFRAME_I_DEVICE) {
                val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
                    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
                    I2C_CONTROL_CNTL_START;
                SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

                /* Poll up to 5 times (50 ms apart) for transaction end. */
                while (exit_cnt < 5) {
                        val64 = readq(&bar0->i2c_control);
                        if (I2C_CONTROL_CNTL_END(val64)) {
                                *data = I2C_CONTROL_GET_DATA(val64);
                                ret = 0;
                                break;
                        }
                        msleep(50);
                        exit_cnt++;
                }
        }

        /* Xframe II: the EEPROM is reached through the SPI controller
         * instead. Program the command, then raise the request bit. */
        if (sp->device_type == XFRAME_II_DEVICE) {
                val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
                        SPI_CONTROL_BYTECNT(0x3) |
                        SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
                SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
                val64 |= SPI_CONTROL_REQ;
                SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
                while (exit_cnt < 5) {
                        val64 = readq(&bar0->spi_control);
                        if (val64 & SPI_CONTROL_NACK) {
                                /* Device rejected the request; any
                                 * non-zero return signals failure. */
                                ret = 1;
                                break;
                        } else if (val64 & SPI_CONTROL_DONE) {
                                *data = readq(&bar0->spi_data);
                                /* Only 3 bytes were requested. */
                                *data &= 0xffffff;
                                ret = 0;
                                break;
                        }
                        msleep(50);
                        exit_cnt++;
                }
        }
        return ret;
}
5320
5321 /**
5322  *  write_eeprom - actually writes the relevant part of the data value.
5323  *  @sp : private member of the device structure, which is a pointer to the
5324  *       s2io_nic structure.
5325  *  @off : offset at which the data must be written
5326  *  @data : The data that is to be written
5327  *  @cnt : Number of bytes of the data that are actually to be written into
5328  *  the Eeprom. (max of 3)
5329  * Description:
5330  *  Actually writes the relevant part of the data value into the Eeprom
5331  *  through the I2C bus.
5332  * Return value:
5333  *  0 on success, -1 on failure.
5334  */
5335
static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
{
        int exit_cnt = 0, ret = -1;
        u64 val64;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;

        /* Xframe I: program the EEPROM through the I2C controller; a
         * NACK at the end of the transaction means the write was
         * refused (e.g. write-protected area). */
        if (sp->device_type == XFRAME_I_DEVICE) {
                val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
                    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
                    I2C_CONTROL_CNTL_START;
                SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

                /* Poll up to 5 times (50 ms apart) for transaction end. */
                while (exit_cnt < 5) {
                        val64 = readq(&bar0->i2c_control);
                        if (I2C_CONTROL_CNTL_END(val64)) {
                                if (!(val64 & I2C_CONTROL_NACK))
                                        ret = 0;
                                break;
                        }
                        msleep(50);
                        exit_cnt++;
                }
        }

        /* Xframe II: write through the SPI controller. A byte count of
         * 8 is encoded as 0 in the BYTECNT field. */
        if (sp->device_type == XFRAME_II_DEVICE) {
                int write_cnt = (cnt == 8) ? 0 : cnt;
                writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);

                val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
                        SPI_CONTROL_BYTECNT(write_cnt) |
                        SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
                SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
                /* The request bit is raised in a second write to start
                 * the already-programmed command. */
                val64 |= SPI_CONTROL_REQ;
                SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
                while (exit_cnt < 5) {
                        val64 = readq(&bar0->spi_control);
                        if (val64 & SPI_CONTROL_NACK) {
                                ret = 1;
                                break;
                        } else if (val64 & SPI_CONTROL_DONE) {
                                ret = 0;
                                break;
                        }
                        msleep(50);
                        exit_cnt++;
                }
        }
        return ret;
}
5385 static void s2io_vpd_read(struct s2io_nic *nic)
5386 {
5387         u8 *vpd_data;
5388         u8 data;
5389         int i=0, cnt, fail = 0;
5390         int vpd_addr = 0x80;
5391
5392         if (nic->device_type == XFRAME_II_DEVICE) {
5393                 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5394                 vpd_addr = 0x80;
5395         }
5396         else {
5397                 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5398                 vpd_addr = 0x50;
5399         }
5400         strcpy(nic->serial_num, "NOT AVAILABLE");
5401
5402         vpd_data = kmalloc(256, GFP_KERNEL);
5403         if (!vpd_data) {
5404                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5405                 return;
5406         }
5407         nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5408
5409         for (i = 0; i < 256; i +=4 ) {
5410                 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5411                 pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5412                 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5413                 for (cnt = 0; cnt <5; cnt++) {
5414                         msleep(2);
5415                         pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5416                         if (data == 0x80)
5417                                 break;
5418                 }
5419                 if (cnt >= 5) {
5420                         DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5421                         fail = 1;
5422                         break;
5423                 }
5424                 pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5425                                       (u32 *)&vpd_data[i]);
5426         }
5427
5428         if(!fail) {
5429                 /* read serial number of adapter */
5430                 for (cnt = 0; cnt < 256; cnt++) {
5431                 if ((vpd_data[cnt] == 'S') &&
5432                         (vpd_data[cnt+1] == 'N') &&
5433                         (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5434                                 memset(nic->serial_num, 0, VPD_STRING_LEN);
5435                                 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5436                                         vpd_data[cnt+2]);
5437                                 break;
5438                         }
5439                 }
5440         }
5441
5442         if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5443                 memset(nic->product_name, 0, vpd_data[1]);
5444                 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5445         }
5446         kfree(vpd_data);
5447         nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5448 }
5449
5450 /**
5451  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
5453  *  @eeprom : pointer to the user level structure provided by ethtool,
5454  *  containing all relevant information.
5455  *  @data_buf : user defined value to be written into Eeprom.
5456  *  Description: Reads the values stored in the Eeprom at given offset
5457  *  for a given length. Stores these values int the input argument data
5458  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5459  *  Return value:
5460  *  int  0 on success
5461  */
5462
5463 static int s2io_ethtool_geeprom(struct net_device *dev,
5464                          struct ethtool_eeprom *eeprom, u8 * data_buf)
5465 {
5466         u32 i, valid;
5467         u64 data;
5468         struct s2io_nic *sp = dev->priv;
5469
5470         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5471
5472         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5473                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5474
5475         for (i = 0; i < eeprom->len; i += 4) {
5476                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5477                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5478                         return -EFAULT;
5479                 }
5480                 valid = INV(data);
5481                 memcpy((data_buf + i), &valid, 4);
5482         }
5483         return 0;
5484 }
5485
5486 /**
5487  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5488  *  @sp : private member of the device structure, which is a pointer to the
5489  *  s2io_nic structure.
5490  *  @eeprom : pointer to the user level structure provided by ethtool,
5491  *  containing all relevant information.
 *  @data_buf : user defined value to be written into Eeprom.
5493  *  Description:
5494  *  Tries to write the user provided value in the Eeprom, at the offset
5495  *  given by the user.
5496  *  Return value:
5497  *  0 on success, -EFAULT on failure.
5498  */
5499
5500 static int s2io_ethtool_seeprom(struct net_device *dev,
5501                                 struct ethtool_eeprom *eeprom,
5502                                 u8 * data_buf)
5503 {
5504         int len = eeprom->len, cnt = 0;
5505         u64 valid = 0, data;
5506         struct s2io_nic *sp = dev->priv;
5507
5508         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5509                 DBG_PRINT(ERR_DBG,
5510                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5511                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5512                           eeprom->magic);
5513                 return -EFAULT;
5514         }
5515
5516         while (len) {
5517                 data = (u32) data_buf[cnt] & 0x000000FF;
5518                 if (data) {
5519                         valid = (u32) (data << 24);
5520                 } else
5521                         valid = data;
5522
5523                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5524                         DBG_PRINT(ERR_DBG,
5525                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5526                         DBG_PRINT(ERR_DBG,
5527                                   "write into the specified offset\n");
5528                         return -EFAULT;
5529                 }
5530                 cnt++;
5531                 len--;
5532         }
5533
5534         return 0;
5535 }
5536
5537 /**
5538  * s2io_register_test - reads and writes into all clock domains.
5539  * @sp : private member of the device structure, which is a pointer to the
5540  * s2io_nic structure.
5541  * @data : variable that returns the result of each of the test conducted b
5542  * by the driver.
5543  * Description:
5544  * Read and write into all clock domains. The NIC has 3 clock domains,
5545  * see that registers in all the three regions are accessible.
5546  * Return value:
5547  * 0 on success.
5548  */
5549
5550 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5551 {
5552         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5553         u64 val64 = 0, exp_val;
5554         int fail = 0;
5555
5556         val64 = readq(&bar0->pif_rd_swapper_fb);
5557         if (val64 != 0x123456789abcdefULL) {
5558                 fail = 1;
5559                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5560         }
5561
5562         val64 = readq(&bar0->rmac_pause_cfg);
5563         if (val64 != 0xc000ffff00000000ULL) {
5564                 fail = 1;
5565                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5566         }
5567
5568         val64 = readq(&bar0->rx_queue_cfg);
5569         if (sp->device_type == XFRAME_II_DEVICE)
5570                 exp_val = 0x0404040404040404ULL;
5571         else
5572                 exp_val = 0x0808080808080808ULL;
5573         if (val64 != exp_val) {
5574                 fail = 1;
5575                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5576         }
5577
5578         val64 = readq(&bar0->xgxs_efifo_cfg);
5579         if (val64 != 0x000000001923141EULL) {
5580                 fail = 1;
5581                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5582         }
5583
5584         val64 = 0x5A5A5A5A5A5A5A5AULL;
5585         writeq(val64, &bar0->xmsi_data);
5586         val64 = readq(&bar0->xmsi_data);
5587         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5588                 fail = 1;
5589                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5590         }
5591
5592         val64 = 0xA5A5A5A5A5A5A5A5ULL;
5593         writeq(val64, &bar0->xmsi_data);
5594         val64 = readq(&bar0->xmsi_data);
5595         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5596                 fail = 1;
5597                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5598         }
5599
5600         *data = fail;
5601         return fail;
5602 }
5603
5604 /**
5605  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5606  * @sp : private member of the device structure, which is a pointer to the
5607  * s2io_nic structure.
5608  * @data:variable that returns the result of each of the test conducted by
5609  * the driver.
5610  * Description:
5611  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5612  * register.
5613  * Return value:
5614  * 0 on success.
5615  */
5616
5617 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5618 {
5619         int fail = 0;
5620         u64 ret_data, org_4F0, org_7F0;
5621         u8 saved_4F0 = 0, saved_7F0 = 0;
5622         struct net_device *dev = sp->dev;
5623
5624         /* Test Write Error at offset 0 */
5625         /* Note that SPI interface allows write access to all areas
5626          * of EEPROM. Hence doing all negative testing only for Xframe I.
5627          */
5628         if (sp->device_type == XFRAME_I_DEVICE)
5629                 if (!write_eeprom(sp, 0, 0, 3))
5630                         fail = 1;
5631
5632         /* Save current values at offsets 0x4F0 and 0x7F0 */
5633         if (!read_eeprom(sp, 0x4F0, &org_4F0))
5634                 saved_4F0 = 1;
5635         if (!read_eeprom(sp, 0x7F0, &org_7F0))
5636                 saved_7F0 = 1;
5637
5638         /* Test Write at offset 4f0 */
5639         if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5640                 fail = 1;
5641         if (read_eeprom(sp, 0x4F0, &ret_data))
5642                 fail = 1;
5643
5644         if (ret_data != 0x012345) {
5645                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5646                         "Data written %llx Data read %llx\n",
5647                         dev->name, (unsigned long long)0x12345,
5648                         (unsigned long long)ret_data);
5649                 fail = 1;
5650         }
5651
5652         /* Reset the EEPROM data go FFFF */
5653         write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5654
5655         /* Test Write Request Error at offset 0x7c */
5656         if (sp->device_type == XFRAME_I_DEVICE)
5657                 if (!write_eeprom(sp, 0x07C, 0, 3))
5658                         fail = 1;
5659
5660         /* Test Write Request at offset 0x7f0 */
5661         if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5662                 fail = 1;
5663         if (read_eeprom(sp, 0x7F0, &ret_data))
5664                 fail = 1;
5665
5666         if (ret_data != 0x012345) {
5667                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5668                         "Data written %llx Data read %llx\n",
5669                         dev->name, (unsigned long long)0x12345,
5670                         (unsigned long long)ret_data);
5671                 fail = 1;
5672         }
5673
5674         /* Reset the EEPROM data go FFFF */
5675         write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5676
5677         if (sp->device_type == XFRAME_I_DEVICE) {
5678                 /* Test Write Error at offset 0x80 */
5679                 if (!write_eeprom(sp, 0x080, 0, 3))
5680                         fail = 1;
5681
5682                 /* Test Write Error at offset 0xfc */
5683                 if (!write_eeprom(sp, 0x0FC, 0, 3))
5684                         fail = 1;
5685
5686                 /* Test Write Error at offset 0x100 */
5687                 if (!write_eeprom(sp, 0x100, 0, 3))
5688                         fail = 1;
5689
5690                 /* Test Write Error at offset 4ec */
5691                 if (!write_eeprom(sp, 0x4EC, 0, 3))
5692                         fail = 1;
5693         }
5694
5695         /* Restore values at offsets 0x4F0 and 0x7F0 */
5696         if (saved_4F0)
5697                 write_eeprom(sp, 0x4F0, org_4F0, 3);
5698         if (saved_7F0)
5699                 write_eeprom(sp, 0x7F0, org_7F0, 3);
5700
5701         *data = fail;
5702         return fail;
5703 }
5704
5705 /**
5706  * s2io_bist_test - invokes the MemBist test of the card .
5707  * @sp : private member of the device structure, which is a pointer to the
5708  * s2io_nic structure.
5709  * @data:variable that returns the result of each of the test conducted by
5710  * the driver.
5711  * Description:
5712  * This invokes the MemBist test of the card. We give around
5713  * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
5715  * Return value:
5716  * 0 on success and -1 on failure.
5717  */
5718
5719 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5720 {
5721         u8 bist = 0;
5722         int cnt = 0, ret = -1;
5723
5724         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5725         bist |= PCI_BIST_START;
5726         pci_write_config_word(sp->pdev, PCI_BIST, bist);
5727
5728         while (cnt < 20) {
5729                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5730                 if (!(bist & PCI_BIST_START)) {
5731                         *data = (bist & PCI_BIST_CODE_MASK);
5732                         ret = 0;
5733                         break;
5734                 }
5735                 msleep(100);
5736                 cnt++;
5737         }
5738
5739         return ret;
5740 }
5741
5742 /**
 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
5745  * s2io_nic structure.
5746  * @data: variable that returns the result of each of the test conducted by
5747  * the driver.
5748  * Description:
5749  * The function verifies the link state of the NIC and updates the input
5750  * argument 'data' appropriately.
5751  * Return value:
5752  * 0 on success.
5753  */
5754
5755 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5756 {
5757         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5758         u64 val64;
5759
5760         val64 = readq(&bar0->adapter_status);
5761         if(!(LINK_IS_UP(val64)))
5762                 *data = 1;
5763         else
5764                 *data = 0;
5765
5766         return *data;
5767 }
5768
5769 /**
5770  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5771  * @sp - private member of the device structure, which is a pointer to the
5772  * s2io_nic structure.
5773  * @data - variable that returns the result of each of the test
5774  * conducted by the driver.
5775  * Description:
5776  *  This is one of the offline test that tests the read and write
5777  *  access to the RldRam chip on the NIC.
5778  * Return value:
5779  *  0 on success.
5780  */
5781
static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
{
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64;
        int cnt, iteration = 0, test_fail = 0;

        /* Disable ECC so test patterns are not corrected on read-back. */
        val64 = readq(&bar0->adapter_control);
        val64 &= ~ADAPTER_ECC_EN;
        writeq(val64, &bar0->adapter_control);

        /* Put the RLDRAM controller into test mode. */
        val64 = readq(&bar0->mc_rldram_test_ctrl);
        val64 |= MC_RLDRAM_TEST_MODE;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

        val64 = readq(&bar0->mc_rldram_mrs);
        val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

        val64 |= MC_RLDRAM_MRS_ENABLE;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

        /* Two passes: the second inverts the data patterns so every bit
         * is exercised both set and clear. */
        while (iteration < 2) {
                val64 = 0x55555555aaaa0000ULL;
                if (iteration == 1) {
                        val64 ^= 0xFFFFFFFFFFFF0000ULL;
                }
                writeq(val64, &bar0->mc_rldram_test_d0);

                val64 = 0xaaaa5a5555550000ULL;
                if (iteration == 1) {
                        val64 ^= 0xFFFFFFFFFFFF0000ULL;
                }
                writeq(val64, &bar0->mc_rldram_test_d1);

                val64 = 0x55aaaaaaaa5a0000ULL;
                if (iteration == 1) {
                        val64 ^= 0xFFFFFFFFFFFF0000ULL;
                }
                writeq(val64, &bar0->mc_rldram_test_d2);

                /* Address/length descriptor for the test transfer. */
                val64 = (u64) (0x0000003ffffe0100ULL);
                writeq(val64, &bar0->mc_rldram_test_add);

                /* Kick off the write pass and poll for completion. */
                val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
                        MC_RLDRAM_TEST_GO;
                SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

                for (cnt = 0; cnt < 5; cnt++) {
                        val64 = readq(&bar0->mc_rldram_test_ctrl);
                        if (val64 & MC_RLDRAM_TEST_DONE)
                                break;
                        msleep(200);
                }

                /* Timed out waiting for the write pass; abort the test. */
                if (cnt == 5)
                        break;

                /* Kick off the read/compare pass and poll for completion. */
                val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
                SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

                for (cnt = 0; cnt < 5; cnt++) {
                        val64 = readq(&bar0->mc_rldram_test_ctrl);
                        if (val64 & MC_RLDRAM_TEST_DONE)
                                break;
                        msleep(500);
                }

                if (cnt == 5)
                        break;

                /* Hardware sets PASS if read-back matched the patterns. */
                val64 = readq(&bar0->mc_rldram_test_ctrl);
                if (!(val64 & MC_RLDRAM_TEST_PASS))
                        test_fail = 1;

                iteration++;
        }

        *data = test_fail;

        /* Bring the adapter out of test mode */
        SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

        return test_fail;
}
5866
5867 /**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of card.
5869  *  @sp : private member of the device structure, which is a pointer to the
5870  *  s2io_nic structure.
5871  *  @ethtest : pointer to a ethtool command specific structure that will be
5872  *  returned to the user.
5873  *  @data : variable that returns the result of each of the test
5874  * conducted by the driver.
5875  * Description:
5876  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
5877  *  the health of the card.
5878  * Return value:
5879  *  void
5880  */
5881
5882 static void s2io_ethtool_test(struct net_device *dev,
5883                               struct ethtool_test *ethtest,
5884                               uint64_t * data)
5885 {
5886         struct s2io_nic *sp = dev->priv;
5887         int orig_state = netif_running(sp->dev);
5888
5889         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5890                 /* Offline Tests. */
5891                 if (orig_state)
5892                         s2io_close(sp->dev);
5893
5894                 if (s2io_register_test(sp, &data[0]))
5895                         ethtest->flags |= ETH_TEST_FL_FAILED;
5896
5897                 s2io_reset(sp);
5898
5899                 if (s2io_rldram_test(sp, &data[3]))
5900                         ethtest->flags |= ETH_TEST_FL_FAILED;
5901
5902                 s2io_reset(sp);
5903
5904                 if (s2io_eeprom_test(sp, &data[1]))
5905                         ethtest->flags |= ETH_TEST_FL_FAILED;
5906
5907                 if (s2io_bist_test(sp, &data[4]))
5908                         ethtest->flags |= ETH_TEST_FL_FAILED;
5909
5910                 if (orig_state)
5911                         s2io_open(sp->dev);
5912
5913                 data[2] = 0;
5914         } else {
5915                 /* Online Tests. */
5916                 if (!orig_state) {
5917                         DBG_PRINT(ERR_DBG,
5918                                   "%s: is not up, cannot run test\n",
5919                                   dev->name);
5920                         data[0] = -1;
5921                         data[1] = -1;
5922                         data[2] = -1;
5923                         data[3] = -1;
5924                         data[4] = -1;
5925                 }
5926
5927                 if (s2io_link_test(sp, &data[2]))
5928                         ethtest->flags |= ETH_TEST_FL_FAILED;
5929
5930                 data[0] = 0;
5931                 data[1] = 0;
5932                 data[3] = 0;
5933                 data[4] = 0;
5934         }
5935 }
5936
/* Fill tmp_stats[] with all driver statistics for ethtool -S.
 *
 * NOTE: the order of assignments below MUST match the order of the
 * string tables (ethtool_xena_stats_keys, ethtool_enhanced_stats_keys,
 * ethtool_driver_stats_keys) emitted by s2io_ethtool_get_strings(),
 * since ethtool pairs names and values purely by index.
 *
 * Many hardware counters are split into a 32-bit base register and a
 * 32-bit overflow register; those pairs are recombined here into a
 * single 64-bit value as (oflow << 32) | base.
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats,
				   u64 * tmp_stats)
{
	int i = 0, k;
	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Refresh the DMA'd hardware statistics block before reading it. */
	s2io_updt_stats(sp);

	/* TMAC (transmit MAC) hardware counters. */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
		le32_to_cpu(stat_info->tmac_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_bcst_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_any_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_vld_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_icmp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_rst_tcp);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_udp);

	/* RMAC (receive MAC) hardware counters. */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_bcst_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
		<< 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
		 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_discarded_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
		 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_usized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_osized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_frag_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_jabber_frms);
	/* Received frame size histogram. */
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_ip);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_icmp);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_udp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_err_drp_udp);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
	/* Per-receive-queue frame and full counters. */
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_pause_cnt);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_accepted_ip);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
	/* PCI/PCI-X bus transaction counters. */
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);

	/* Enhanced statistics exist only for Hercules */
	if(sp->device_type == XFRAME_II_DEVICE) {
		tmp_stats[i++] =
				le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
		tmp_stats[i++] =
				le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
		tmp_stats[i++] =
				le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
	}

	/* Software-maintained (driver) statistics; these live in host
	 * memory and need no endianness conversion.
	 */
	tmp_stats[i++] = 0;
	tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
	tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
	tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
	tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
	for (k = 0; k < MAX_RX_RINGS; k++)
		tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
	/* XPAK transceiver alarm/warning counters. */
	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
	/* LRO (large receive offload) aggregation statistics. */
	tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
	tmp_stats[i++] = stat_info->sw_stat.sending_both;
	tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
	tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
	if (stat_info->sw_stat.num_aggregations) {
		u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
		int count = 0;
		/*
		 * Since 64-bit divide does not work on all platforms,
		 * do repeated subtraction.
		 */
		while (tmp >= stat_info->sw_stat.num_aggregations) {
			tmp -= stat_info->sw_stat.num_aggregations;
			count++;
		}
		/* count == average number of packets per aggregation. */
		tmp_stats[i++] = count;
	}
	else
		tmp_stats[i++] = 0;
	tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
	tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
	tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
	tmp_stats[i++] = stat_info->sw_stat.mem_freed;
	tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
	tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
	tmp_stats[i++] = stat_info->sw_stat.link_up_time;
	tmp_stats[i++] = stat_info->sw_stat.link_down_time;

	/* Transmit-side error counters. */
	tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;

	/* Receive-side and per-hardware-block error counters. */
	tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
}
6212
6213 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6214 {
6215         return (XENA_REG_SPACE);
6216 }
6217
6218
6219 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6220 {
6221         struct s2io_nic *sp = dev->priv;
6222
6223         return (sp->rx_csum);
6224 }
6225
6226 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6227 {
6228         struct s2io_nic *sp = dev->priv;
6229
6230         if (data)
6231                 sp->rx_csum = 1;
6232         else
6233                 sp->rx_csum = 0;
6234
6235         return 0;
6236 }
6237
6238 static int s2io_get_eeprom_len(struct net_device *dev)
6239 {
6240         return (XENA_EEPROM_SPACE);
6241 }
6242
6243 static int s2io_get_sset_count(struct net_device *dev, int sset)
6244 {
6245         struct s2io_nic *sp = dev->priv;
6246
6247         switch (sset) {
6248         case ETH_SS_TEST:
6249                 return S2IO_TEST_LEN;
6250         case ETH_SS_STATS:
6251                 switch(sp->device_type) {
6252                 case XFRAME_I_DEVICE:
6253                         return XFRAME_I_STAT_LEN;
6254                 case XFRAME_II_DEVICE:
6255                         return XFRAME_II_STAT_LEN;
6256                 default:
6257                         return 0;
6258                 }
6259         default:
6260                 return -EOPNOTSUPP;
6261         }
6262 }
6263
6264 static void s2io_ethtool_get_strings(struct net_device *dev,
6265                                      u32 stringset, u8 * data)
6266 {
6267         int stat_size = 0;
6268         struct s2io_nic *sp = dev->priv;
6269
6270         switch (stringset) {
6271         case ETH_SS_TEST:
6272                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6273                 break;
6274         case ETH_SS_STATS:
6275                 stat_size = sizeof(ethtool_xena_stats_keys);
6276                 memcpy(data, &ethtool_xena_stats_keys,stat_size);
6277                 if(sp->device_type == XFRAME_II_DEVICE) {
6278                         memcpy(data + stat_size,
6279                                 &ethtool_enhanced_stats_keys,
6280                                 sizeof(ethtool_enhanced_stats_keys));
6281                         stat_size += sizeof(ethtool_enhanced_stats_keys);
6282                 }
6283
6284                 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6285                         sizeof(ethtool_driver_stats_keys));
6286         }
6287 }
6288
6289 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6290 {
6291         if (data)
6292                 dev->features |= NETIF_F_IP_CSUM;
6293         else
6294                 dev->features &= ~NETIF_F_IP_CSUM;
6295
6296         return 0;
6297 }
6298
6299 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6300 {
6301         return (dev->features & NETIF_F_TSO) != 0;
6302 }
6303 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6304 {
6305         if (data)
6306                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6307         else
6308                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6309
6310         return 0;
6311 }
6312
/* ethtool entry points for the driver; hooked up via SET_ETHTOOL_OPS
 * at probe time. Operations not listed here fall back to the kernel's
 * defaults or are reported as unsupported.
 */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_tso = s2io_ethtool_op_get_tso,
	.set_tso = s2io_ethtool_op_set_tso,
	.set_ufo = ethtool_op_set_ufo,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.phys_id = s2io_ethtool_idnic,
	.get_ethtool_stats = s2io_get_ethtool_stats,
	.get_sset_count = s2io_get_sset_count,
};
6339
6340 /**
6341  *  s2io_ioctl - Entry point for the Ioctl
6342  *  @dev :  Device pointer.
6343  *  @ifr :  An IOCTL specefic structure, that can contain a pointer to
6344  *  a proprietary structure used to pass information to the driver.
6345  *  @cmd :  This is used to distinguish between the different commands that
6346  *  can be passed to the IOCTL functions.
6347  *  Description:
6348  *  Currently there are no special functionality supported in IOCTL, hence
6349  *  function always return EOPNOTSUPPORTED
6350  */
6351
6352 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6353 {
6354         return -EOPNOTSUPP;
6355 }
6356
6357 /**
6358  *  s2io_change_mtu - entry point to change MTU size for the device.
6359  *   @dev : device pointer.
6360  *   @new_mtu : the new MTU size for the device.
6361  *   Description: A driver entry point to change MTU size for the device.
6362  *   Before changing the MTU the device must be stopped.
6363  *  Return value:
6364  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6365  *   file on failure.
6366  */
6367
6368 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6369 {
6370         struct s2io_nic *sp = dev->priv;
6371         int ret = 0;
6372
6373         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6374                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6375                           dev->name);
6376                 return -EPERM;
6377         }
6378
6379         dev->mtu = new_mtu;
6380         if (netif_running(dev)) {
6381                 s2io_card_down(sp);
6382                 netif_stop_queue(dev);
6383                 ret = s2io_card_up(sp);
6384                 if (ret) {
6385                         DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6386                                   __FUNCTION__);
6387                         return ret;
6388                 }
6389                 if (netif_queue_stopped(dev))
6390                         netif_wake_queue(dev);
6391         } else { /* Device is down */
6392                 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6393                 u64 val64 = new_mtu;
6394
6395                 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6396         }
6397
6398         return ret;
6399 }
6400
6401 /**
6402  *  s2io_tasklet - Bottom half of the ISR.
6403  *  @dev_adr : address of the device structure in dma_addr_t format.
6404  *  Description:
6405  *  This is the tasklet or the bottom half of the ISR. This is
6406  *  an extension of the ISR which is scheduled by the scheduler to be run
6407  *  when the load on the CPU is low. All low priority tasks of the ISR can
6408  *  be pushed into the tasklet. For now the tasklet is used only to
6409  *  replenish the Rx buffers in the Rx buffer descriptors.
6410  *  Return value:
6411  *  void.
6412  */
6413
6414 static void s2io_tasklet(unsigned long dev_addr)
6415 {
6416         struct net_device *dev = (struct net_device *) dev_addr;
6417         struct s2io_nic *sp = dev->priv;
6418         int i, ret;
6419         struct mac_info *mac_control;
6420         struct config_param *config;
6421
6422         mac_control = &sp->mac_control;
6423         config = &sp->config;
6424
6425         if (!TASKLET_IN_USE) {
6426                 for (i = 0; i < config->rx_ring_num; i++) {
6427                         ret = fill_rx_buffers(sp, i);
6428                         if (ret == -ENOMEM) {
6429                                 DBG_PRINT(INFO_DBG, "%s: Out of ",
6430                                           dev->name);
6431                                 DBG_PRINT(INFO_DBG, "memory in tasklet\n");
6432                                 break;
6433                         } else if (ret == -EFILL) {
6434                                 DBG_PRINT(INFO_DBG,
6435                                           "%s: Rx Ring %d is full\n",
6436                                           dev->name, i);
6437                                 break;
6438                         }
6439                 }
6440                 clear_bit(0, (&sp->tasklet_status));
6441         }
6442 }
6443
6444 /**
6445  * s2io_set_link - Set the LInk status
6446  * @data: long pointer to device private structue
6447  * Description: Sets the link status for the adapter
6448  */
6449
6450 static void s2io_set_link(struct work_struct *work)
6451 {
6452         struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
6453         struct net_device *dev = nic->dev;
6454         struct XENA_dev_config __iomem *bar0 = nic->bar0;
6455         register u64 val64;
6456         u16 subid;
6457
6458         rtnl_lock();
6459
6460         if (!netif_running(dev))
6461                 goto out_unlock;
6462
6463         if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6464                 /* The card is being reset, no point doing anything */
6465                 goto out_unlock;
6466         }
6467
6468         subid = nic->pdev->subsystem_device;
6469         if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6470                 /*
6471                  * Allow a small delay for the NICs self initiated
6472                  * cleanup to complete.
6473                  */
6474                 msleep(100);
6475         }
6476
6477         val64 = readq(&bar0->adapter_status);
6478         if (LINK_IS_UP(val64)) {
6479                 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6480                         if (verify_xena_quiescence(nic)) {
6481                                 val64 = readq(&bar0->adapter_control);
6482                                 val64 |= ADAPTER_CNTL_EN;
6483                                 writeq(val64, &bar0->adapter_control);
6484                                 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6485                                         nic->device_type, subid)) {
6486                                         val64 = readq(&bar0->gpio_control);
6487                                         val64 |= GPIO_CTRL_GPIO_0;
6488                                         writeq(val64, &bar0->gpio_control);
6489                                         val64 = readq(&bar0->gpio_control);
6490                                 } else {
6491                                         val64 |= ADAPTER_LED_ON;
6492                                         writeq(val64, &bar0->adapter_control);
6493                                 }
6494                                 nic->device_enabled_once = TRUE;
6495                         } else {
6496                                 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6497                                 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6498                                 netif_stop_queue(dev);
6499                         }
6500                 }
6501                 val64 = readq(&bar0->adapter_control);
6502                 val64 |= ADAPTER_LED_ON;
6503                 writeq(val64, &bar0->adapter_control);
6504                 s2io_link(nic, LINK_UP);
6505         } else {
6506                 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6507                                                       subid)) {
6508                         val64 = readq(&bar0->gpio_control);
6509                         val64 &= ~GPIO_CTRL_GPIO_0;
6510                         writeq(val64, &bar0->gpio_control);
6511                         val64 = readq(&bar0->gpio_control);
6512                 }
6513                 /* turn off LED */
6514                 val64 = readq(&bar0->adapter_control);
6515                 val64 = val64 &(~ADAPTER_LED_ON);
6516                 writeq(val64, &bar0->adapter_control);
6517                 s2io_link(nic, LINK_DOWN);
6518         }
6519         clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6520
6521 out_unlock:
6522         rtnl_unlock();
6523 }
6524
/*
 * set_rxd_buffer_pointer - re-arm one Rx descriptor with DMA-mapped buffers.
 * @sp: device private structure
 * @rxdp: the Rx descriptor to replenish
 * @ba: per-descriptor bounce-buffer addresses (used only in 2-buffer mode;
 *      may be stale/NULL in 1-buffer mode)
 * @skb: in/out skb pointer; once allocated it is reused for every
 *       subsequent descriptor whose Host_Control is NULL, since these
 *       frames will never be processed — only ownership is returned to HW
 * @temp0: in/out cached DMA address of buffer 0 (for the reuse above)
 * @temp1: in/out cached DMA address of buffer 1 (dummy, 2-buffer mode)
 * @temp2: in/out cached DMA address of buffer 2 (payload, 2-buffer mode)
 * @size: skb allocation size, ring-mode dependent (chosen by caller)
 *
 * Helper for rxd_owner_bit_reset(): guarantees the descriptor carries
 * valid buffer pointers before its ownership bit is flipped back to the
 * NIC. Returns 0 on success, -ENOMEM on skb-allocation or PCI-mapping
 * failure (mappings made earlier in the same call are unwound first).
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				struct buffAdd *ba,
				struct sk_buff **skb, u64 *temp0, u64 *temp1,
				u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frames are not going to be processed,
			 * using same mapped address for the Rxd
			 * buffer pointer
			 */
			rxdp1->Buffer0_ptr = *temp0;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated
				+= (*skb)->truesize;
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, (*skb)->data,
					size - NET_IP_ALIGN,
					PCI_DMA_FROMDEVICE);
			/* 0 or DMA_ERROR_CODE signals a failed mapping */
			if( (rxdp1->Buffer0_ptr == 0) ||
				(rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
		if (*skb) {
			/* Reuse the mappings cached from an earlier call */
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated
				+= (*skb)->truesize;
			/* Buffer 2 receives the payload (MTU + 4 bytes) */
			rxdp3->Buffer2_ptr = *temp2 =
				pci_map_single(sp->pdev, (*skb)->data,
					       dev->mtu + 4,
					       PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer2_ptr == 0) ||
				(rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
				goto memalloc_failed;
			}
			rxdp3->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
						PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer0_ptr == 0) ||
				(rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
				/* Unwind the Buffer2 mapping before bailing */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			rxdp3->Buffer1_ptr = *temp1 =
				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer1_ptr == 0) ||
				(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
				/* Unwind both earlier mappings */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer0_ptr,
					BUF0_LEN, PCI_DMA_FROMDEVICE);
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
		}
	}
	return 0;
	memalloc_failed:
		/* Account the mapping failure and drop the fresh skb */
		stats->pci_map_fail_cnt++;
		stats->mem_freed += (*skb)->truesize;
		dev_kfree_skb(*skb);
		return -ENOMEM;
}
6632
6633 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6634                                 int size)
6635 {
6636         struct net_device *dev = sp->dev;
6637         if (sp->rxd_mode == RXD_MODE_1) {
6638                 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6639         } else if (sp->rxd_mode == RXD_MODE_3B) {
6640                 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6641                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6642                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6643         }
6644 }
6645
6646 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6647 {
6648         int i, j, k, blk_cnt = 0, size;
6649         struct mac_info * mac_control = &sp->mac_control;
6650         struct config_param *config = &sp->config;
6651         struct net_device *dev = sp->dev;
6652         struct RxD_t *rxdp = NULL;
6653         struct sk_buff *skb = NULL;
6654         struct buffAdd *ba = NULL;
6655         u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6656
6657         /* Calculate the size based on ring mode */
6658         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6659                 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6660         if (sp->rxd_mode == RXD_MODE_1)
6661                 size += NET_IP_ALIGN;
6662         else if (sp->rxd_mode == RXD_MODE_3B)
6663                 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6664
6665         for (i = 0; i < config->rx_ring_num; i++) {
6666                 blk_cnt = config->rx_cfg[i].num_rxd /
6667                         (rxd_count[sp->rxd_mode] +1);
6668
6669                 for (j = 0; j < blk_cnt; j++) {
6670                         for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6671                                 rxdp = mac_control->rings[i].
6672                                         rx_blocks[j].rxds[k].virt_addr;
6673                                 if(sp->rxd_mode == RXD_MODE_3B)
6674                                         ba = &mac_control->rings[i].ba[j][k];
6675                                 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6676                                                        &skb,(u64 *)&temp0_64,
6677                                                        (u64 *)&temp1_64,
6678                                                        (u64 *)&temp2_64,
6679                                                         size) == ENOMEM) {
6680                                         return 0;
6681                                 }
6682
6683                                 set_rxd_buffer_size(sp, rxdp, size);
6684                                 wmb();
6685                                 /* flip the Ownership bit to Hardware */
6686                                 rxdp->Control_1 |= RXD_OWN_XENA;
6687                         }
6688                 }
6689         }
6690         return 0;
6691
6692 }
6693
/*
 * s2io_add_isr - register the driver's interrupt handler(s).
 * @sp: device private structure
 *
 * If MSI-X is configured, tries to enable it and registers one vector
 * per fifo (TX) / ring (RX), falling back to legacy INTA on any
 * failure. For each vector, a zero MSI-X address or data word is
 * reported via DBG_PRINT. Returns 0 on success, -1 if the final INTA
 * request_irq() fails.
 */
static int s2io_add_isr(struct s2io_nic * sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		/* MSI-X enable failed: retry with legacy interrupts */
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/* Store the values of the MSIX table in the struct s2io_nic structure */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_tx_cnt=0,msix_rx_cnt=0;

		/* Entry 0 is the alarm vector; data vectors start at 1 */
		for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
			if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
				sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_fifo_handle, 0, sp->desc[i],
						  sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_tx_cnt++;
				}
			} else {
				sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_ring_handle, 0, sp->desc[i],
						  sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_rx_cnt++;
				}
			}
			if (err) {
				/* One vector failed: unwind every MSI-X
				 * registration made so far and fall back
				 * to INTA below.
				 */
				remove_msix_isr(sp);
				DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
					  "failed\n", dev->name, i);
				DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n",
						 dev->name);
				sp->config.intr_type = INTA;
				break;
			}
			sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
		}
		if (!err) {
			printk(KERN_INFO "MSI-X-TX %d entries enabled\n",
				msix_tx_cnt);
			printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
				msix_rx_cnt);
		}
	}
	/* Reached either by configuration or by MSI-X fallback above */
	if (sp->config.intr_type == INTA) {
		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
				sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
6781 static void s2io_rem_isr(struct s2io_nic * sp)
6782 {
6783         if (sp->config.intr_type == MSI_X)
6784                 remove_msix_isr(sp);
6785         else
6786                 remove_inta_isr(sp);
6787 }
6788
/*
 * do_s2io_card_down - bring the adapter down.
 * @sp: device private structure
 * @do_io: when non-zero, actually stop/quiesce/reset the hardware;
 *         zero skips all register access (e.g. when the device is
 *         already inaccessible).
 *
 * Serializes with the link-state task via __S2IO_STATE_LINK_TASK,
 * disables napi, stops traffic, removes the ISR, kills the tasklet,
 * polls for quiescence (up to ~500 ms) and frees all Tx/Rx buffers
 * under their locks.
 */
static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	unsigned long flags;
	register u64 val64 = 0;
	struct config_param *config;
	config = &sp->config;

	if (!is_s2io_card_up(sp))
		return;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
		msleep(50);
	}
	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* Disable napi */
	if (config->napi)
		napi_disable(&sp->napi);

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* Kill tasklet. */
	tasklet_kill(&sp->task);

	/* Check if the device is Quiescent and then Reset the NIC */
	while(do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this point we
		 * are just setting the ownership bit of rxd in Each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if(verify_pcc_quiescent(sp, sp->device_enabled_once))
			break;
		}

		msleep(50);
		cnt++;
		/* Give up after 10 tries (~500 ms) and reset anyway */
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG,
				  "s2io_close:Device not Quiescent ");
			DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
				  (unsigned long long) val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	spin_lock_irqsave(&sp->tx_lock, flags);
	/* Free all Tx buffers */
	free_tx_buffers(sp);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	/* Free all Rx buffers */
	spin_lock_irqsave(&sp->rx_lock, flags);
	free_rx_buffers(sp);
	spin_unlock_irqrestore(&sp->rx_lock, flags);

	/* Allow the link-state task to run again */
	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
6863
/* Bring the card down with full hardware access (stop + reset the NIC). */
static void s2io_card_down(struct s2io_nic * sp)
{
	do_s2io_card_down(sp, 1);
}
6868
/*
 * s2io_card_up - initialize the hardware and bring the interface up.
 * @sp: device private structure
 *
 * Sequence: program H/W registers, fill the Rx rings, enable napi,
 * restore receive mode, start the NIC, register the ISR, arm the alarm
 * timer/tasklet and finally enable interrupts. On any failure the NIC
 * is reset and Rx buffers are released. Returns 0 on success or a
 * negative errno.
 */
static int s2io_card_up(struct s2io_nic * sp)
{
	int i, ret = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	ret = init_nic(sp);
	if (ret != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		/* -EIO means the device is unreachable; skip the reset */
		if (ret != -EIO)
			s2io_reset(sp);
		return ret;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		if ((ret = fill_rx_buffers(sp, i))) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  atomic_read(&sp->rx_bufs_left[i]));
	}

	/* Initialise napi */
	if (config->napi)
		napi_enable(&sp->napi);

	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos= 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (sp->lro) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use(if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		if (sp->config.intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Arm the alarm handler to fire every HZ/2 ticks */
	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	/* Enable tasklet for the device */
	tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);

	/*  Enable select interrupts */
	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
	if (sp->config.intr_type != INTA)
		en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
	else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}

	set_bit(__S2IO_STATE_CARD_UP, &sp->state);
	return 0;
}
6964
6965 /**
6966  * s2io_restart_nic - Resets the NIC.
6967  * @data : long pointer to the device private structure
6968  * Description:
6969  * This function is scheduled to be run by the s2io_tx_watchdog
6970  * function after 0.5 secs to reset the NIC. The idea is to reduce
6971  * the run time of the watch dog routine which is run holding a
6972  * spin lock.
6973  */
6974
6975 static void s2io_restart_nic(struct work_struct *work)
6976 {
6977         struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6978         struct net_device *dev = sp->dev;
6979
6980         rtnl_lock();
6981
6982         if (!netif_running(dev))
6983                 goto out_unlock;
6984
6985         s2io_card_down(sp);
6986         if (s2io_card_up(sp)) {
6987                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6988                           dev->name);
6989         }
6990         netif_wake_queue(dev);
6991         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6992                   dev->name);
6993 out_unlock:
6994         rtnl_unlock();
6995 }
6996
6997 /**
6998  *  s2io_tx_watchdog - Watchdog for transmit side.
6999  *  @dev : Pointer to net device structure
7000  *  Description:
7001  *  This function is triggered if the Tx Queue is stopped
7002  *  for a pre-defined amount of time when the Interface is still up.
7003  *  If the Interface is jammed in such a situation, the hardware is
7004  *  reset (by s2io_close) and restarted again (by s2io_open) to
7005  *  overcome any problem that might have been caused in the hardware.
7006  *  Return value:
7007  *  void
7008  */
7009
7010 static void s2io_tx_watchdog(struct net_device *dev)
7011 {
7012         struct s2io_nic *sp = dev->priv;
7013
7014         if (netif_carrier_ok(dev)) {
7015                 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
7016                 schedule_work(&sp->rst_timer_task);
7017                 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
7018         }
7019 }
7020
/**
 *   rx_osm_handler - To perform some OS related operations on SKB.
 *   @ring_data : the ring from which this RxD was extracted.
 *   @rxdp : the receive descriptor describing the packet.
 *   Description:
 *   This function is called by the Rx interrupt service routine to perform
 *   some OS related operations on the SKB before passing it to the upper
 *   layers. It mainly checks if the checksum is OK, if so adds it to the
 *   SKBs cksum variable, increments the Rx packet count and passes the SKB
 *   to the upper layer. If the checksum is wrong, it increments the Rx
 *   packet error count, frees the SKB and returns error.
 *   Return value:
 *   SUCCESS on success and -1 on failure.
 */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
	struct s2io_nic *sp = ring_data->nic;
	struct net_device *dev = (struct net_device *) sp->dev;
	/* Host_Control stashes the skb pointer set when the RxD was armed */
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long) rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
	struct lro *lro;
	u8 err_mask;

	skb->dev = dev;

	if (err) {
		/* Check for parity error */
		if (err & 0x1) {
			sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
		}
		/* The transfer code sits in the upper bits of Control_1 */
		err_mask = err >> 48;
		/* Bump the per-transfer-code error counter */
		switch(err_mask) {
			case 1:
				sp->mac_control.stats_info->sw_stat.
				rx_parity_err_cnt++;
			break;

			case 2:
				sp->mac_control.stats_info->sw_stat.
				rx_abort_cnt++;
			break;

			case 3:
				sp->mac_control.stats_info->sw_stat.
				rx_parity_abort_cnt++;
			break;

			case 4:
				sp->mac_control.stats_info->sw_stat.
				rx_rda_fail_cnt++;
			break;

			case 5:
				sp->mac_control.stats_info->sw_stat.
				rx_unkn_prot_cnt++;
			break;

			case 6:
				sp->mac_control.stats_info->sw_stat.
				rx_fcs_err_cnt++;
			break;

			case 7:
				sp->mac_control.stats_info->sw_stat.
				rx_buf_size_err_cnt++;
			break;

			case 8:
				sp->mac_control.stats_info->sw_stat.
				rx_rxd_corrupt_cnt++;
			break;

			case 15:
				sp->mac_control.stats_info->sw_stat.
				rx_unkn_err_cnt++;
			break;
		}
		/*
		* Drop the packet if bad transfer code. Exception being
		* 0x5, which could be due to unsupported IPv6 extension header.
		* In this case, we let stack handle the packet.
		* Note that in this case, since checksum will be incorrect,
		* stack will validate the same.
		*/
		if (err_mask != 0x5) {
			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
				dev->name, err_mask);
			sp->stats.rx_crc_errors++;
			sp->mac_control.stats_info->sw_stat.mem_freed
				+= skb->truesize;
			dev_kfree_skb(skb);
			atomic_dec(&sp->rx_bufs_left[ring_no]);
			rxdp->Host_Control = 0;
			return 0;
		}
	}

	/* Updating statistics */
	sp->stats.rx_packets++;
	rxdp->Host_Control = 0;
	if (sp->rxd_mode == RXD_MODE_1) {
		/* Single-buffer mode: the frame is entirely in buffer 0 */
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		sp->stats.rx_bytes += len;
		skb_put(skb, len);

	} else if (sp->rxd_mode == RXD_MODE_3B) {
		/* Two-buffer mode: copy the header from the bounce buffer
		 * (ba_0) in front of the DMA'd payload already in the skb.
		 */
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
		unsigned char *buff = skb_push(skb, buf0_len);

		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
		sp->stats.rx_bytes += buf0_len + buf2_len;
		memcpy(buff, ba->ba_0, buf0_len);
		skb_put(skb, buf2_len);
	}

	/* Checksum offload / LRO only for non-fragmented TCP/UDP frames
	 * when rx_csum is enabled.
	 */
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
	    (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
	    (sp->rx_csum)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (sp->lro) {
				u32 tcp_len;
				u8 *tcp;
				int ret = 0;

				/* Dispatch on the LRO verdict for this skb */
				ret = s2io_club_tcp_session(skb->data, &tcp,
							    &tcp_len, &lro,
							    rxdp, sp);
				switch (ret) {
					case 3: /* Begin anew */
						lro->parent = skb;
						goto aggregate;
					case 1: /* Aggregate */
					{
						lro_append_pkt(sp, lro,
							skb, tcp_len);
						goto aggregate;
					}
					case 4: /* Flush session */
					{
						lro_append_pkt(sp, lro,
							skb, tcp_len);
						queue_rx_frame(lro->parent);
						clear_lro_session(lro);
						sp->mac_control.stats_info->
						    sw_stat.flush_max_pkts++;
						goto aggregate;
					}
					case 2: /* Flush both */
						lro->parent->data_len =
							lro->frags_len;
						sp->mac_control.stats_info->
						     sw_stat.sending_both++;
						queue_rx_frame(lro->parent);
						clear_lro_session(lro);
						goto send_up;
					case 0: /* sessions exceeded */
					case -1: /* non-TCP or not
						  * L2 aggregatable
						  */
					case 5: /*
						 * First pkt in session not
						 * L3/L4 aggregatable
						 */
						break;
					default:
						DBG_PRINT(ERR_DBG,
							"%s: Samadhana!!\n",
							 __FUNCTION__);
						BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}
	sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
	if (!sp->lro) {
		skb->protocol = eth_type_trans(skb, dev);
		if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
			vlan_strip_flag)) {
			/* Queueing the vlan frame to the upper layer */
			if (napi)
				vlan_hwaccel_receive_skb(skb, sp->vlgrp,
					RXD_GET_VLAN_TAG(rxdp->Control_2));
			else
				vlan_hwaccel_rx(skb, sp->vlgrp,
					RXD_GET_VLAN_TAG(rxdp->Control_2));
		} else {
			if (napi)
				netif_receive_skb(skb);
			else
				netif_rx(skb);
		}
	} else {
send_up:
		queue_rx_frame(skb);
	}
	dev->last_rx = jiffies;
aggregate:
	atomic_dec(&sp->rx_bufs_left[ring_no]);
	return SUCCESS;
}
7247
7248 /**
7249  *  s2io_link - stops/starts the Tx queue.
7250  *  @sp : private member of the device structure, which is a pointer to the
7251  *  s2io_nic structure.
 *  @link : indicates whether link is UP/DOWN.
7253  *  Description:
7254  *  This function stops/starts the Tx queue depending on whether the link
 *  status of the NIC is down or up. This is called by the Alarm
7256  *  interrupt handler whenever a link change interrupt comes up.
7257  *  Return value:
7258  *  void.
7259  */
7260
7261 static void s2io_link(struct s2io_nic * sp, int link)
7262 {
7263         struct net_device *dev = (struct net_device *) sp->dev;
7264
7265         if (link != sp->last_link_state) {
7266                 if (link == LINK_DOWN) {
7267                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7268                         netif_carrier_off(dev);
7269                         if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7270                         sp->mac_control.stats_info->sw_stat.link_up_time =
7271                                 jiffies - sp->start_time;
7272                         sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7273                 } else {
7274                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7275                         if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7276                         sp->mac_control.stats_info->sw_stat.link_down_time =
7277                                 jiffies - sp->start_time;
7278                         sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7279                         netif_carrier_on(dev);
7280                 }
7281         }
7282         sp->last_link_state = link;
7283         sp->start_time = jiffies;
7284 }
7285
7286 /**
7287  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7288  *  @sp : private member of the device structure, which is a pointer to the
7289  *  s2io_nic structure.
7290  *  Description:
7291  *  This function initializes a few of the PCI and PCI-X configuration registers
7292  *  with recommended values.
7293  *  Return value:
7294  *  void
7295  */
7296
7297 static void s2io_init_pci(struct s2io_nic * sp)
7298 {
7299         u16 pci_cmd = 0, pcix_cmd = 0;
7300
7301         /* Enable Data Parity Error Recovery in PCI-X command register. */
7302         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7303                              &(pcix_cmd));
7304         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7305                               (pcix_cmd | 1));
7306         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7307                              &(pcix_cmd));
7308
7309         /* Set the PErr Response bit in PCI command register. */
7310         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7311         pci_write_config_word(sp->pdev, PCI_COMMAND,
7312                               (pci_cmd | PCI_COMMAND_PARITY));
7313         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7314 }
7315
7316 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
7317 {
7318         if ( tx_fifo_num > 8) {
7319                 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
7320                          "supported\n");
7321                 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
7322                 tx_fifo_num = 8;
7323         }
7324         if ( rx_ring_num > 8) {
7325                 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
7326                          "supported\n");
7327                 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
7328                 rx_ring_num = 8;
7329         }
7330         if (*dev_intr_type != INTA)
7331                 napi = 0;
7332
7333         if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7334                 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7335                           "Defaulting to INTA\n");
7336                 *dev_intr_type = INTA;
7337         }
7338
7339         if ((*dev_intr_type == MSI_X) &&
7340                         ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7341                         (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7342                 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7343                                         "Defaulting to INTA\n");
7344                 *dev_intr_type = INTA;
7345         }
7346
7347         if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7348                 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7349                 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7350                 rx_ring_mode = 1;
7351         }
7352         return SUCCESS;
7353 }
7354
7355 /**
7356  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7357  * or Traffic class respectively.
 * @nic: device private variable
7359  * Description: The function configures the receive steering to
7360  * desired receive ring.
7361  * Return Value:  SUCCESS on success and
7362  * '-1' on failure (endian settings incorrect).
7363  */
7364 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7365 {
7366         struct XENA_dev_config __iomem *bar0 = nic->bar0;
7367         register u64 val64 = 0;
7368
7369         if (ds_codepoint > 63)
7370                 return FAILURE;
7371
7372         val64 = RTS_DS_MEM_DATA(ring);
7373         writeq(val64, &bar0->rts_ds_mem_data);
7374
7375         val64 = RTS_DS_MEM_CTRL_WE |
7376                 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7377                 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7378
7379         writeq(val64, &bar0->rts_ds_mem_ctrl);
7380
7381         return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7382                                 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7383                                 S2IO_BIT_RESET);
7384 }
7385
7386 /**
7387  *  s2io_init_nic - Initialization of the adapter .
7388  *  @pdev : structure containing the PCI related information of the device.
7389  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7390  *  Description:
 *  The function initializes an adapter identified by the pci_dev structure.
7392  *  All OS related initialization including memory and device structure and
 *  initialization of the device private variable is done. Also the swapper
7394  *  control register is initialized to enable read and write into the I/O
7395  *  registers of the device.
7396  *  Return value:
7397  *  returns 0 on success and negative on failure.
7398  */
7399
static int __devinit
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
        struct s2io_nic *sp;
        struct net_device *dev;
        int i, j, ret;
        int dma_flag = FALSE;
        u32 mac_up, mac_down;
        u64 val64 = 0, tmp64 = 0;
        struct XENA_dev_config __iomem *bar0 = NULL;
        u16 subid;
        struct mac_info *mac_control;
        struct config_param *config;
        int mode;
        u8 dev_intr_type = intr_type;
        DECLARE_MAC_BUF(mac);

        /* Validate/normalize the module parameters before touching HW. */
        if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
                return ret;

        if ((ret = pci_enable_device(pdev))) {
                DBG_PRINT(ERR_DBG,
                          "s2io_init_nic: pci_enable_device failed\n");
                return ret;
        }

        /* Prefer 64-bit DMA; fall back to 32-bit, otherwise give up. */
        if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
                DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
                dma_flag = TRUE;
                if (pci_set_consistent_dma_mask
                    (pdev, DMA_64BIT_MASK)) {
                        DBG_PRINT(ERR_DBG,
                                  "Unable to obtain 64bit DMA for \
                                        consistent allocations\n");
                        pci_disable_device(pdev);
                        return -ENOMEM;
                }
        } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
                DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
        } else {
                pci_disable_device(pdev);
                return -ENOMEM;
        }
        if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
                DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
                pci_disable_device(pdev);
                return -ENODEV;
        }

        dev = alloc_etherdev(sizeof(struct s2io_nic));
        if (dev == NULL) {
                DBG_PRINT(ERR_DBG, "Device allocation failed\n");
                pci_disable_device(pdev);
                pci_release_regions(pdev);
                return -ENODEV;
        }

        pci_set_master(pdev);
        pci_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);

        /*  Private member variable initialized to s2io NIC structure */
        sp = dev->priv;
        memset(sp, 0, sizeof(struct s2io_nic));
        sp->dev = dev;
        sp->pdev = pdev;
        sp->high_dma_flag = dma_flag;
        sp->device_enabled_once = FALSE;
        if (rx_ring_mode == 1)
                sp->rxd_mode = RXD_MODE_1;
        if (rx_ring_mode == 2)
                sp->rxd_mode = RXD_MODE_3B;

        sp->config.intr_type = dev_intr_type;

        /* Xframe II hardware is identified by the Herc PCI device IDs. */
        if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
                (pdev->device == PCI_DEVICE_ID_HERC_UNI))
                sp->device_type = XFRAME_II_DEVICE;
        else
                sp->device_type = XFRAME_I_DEVICE;

        sp->lro = lro_enable;

        /* Initialize some PCI/PCI-X fields of the NIC. */
        s2io_init_pci(sp);

        /*
         * Setting the device configuration parameters.
         * Most of these parameters can be specified by the user during
         * module insertion as they are module loadable parameters. If
         * these parameters are not specified during load time, they
         * are initialized with default values.
         */
        mac_control = &sp->mac_control;
        config = &sp->config;

        config->napi = napi;

        /* Tx side parameters. */
        config->tx_fifo_num = tx_fifo_num;
        for (i = 0; i < MAX_TX_FIFOS; i++) {
                config->tx_cfg[i].fifo_len = tx_fifo_len[i];
                config->tx_cfg[i].fifo_priority = i;
        }

        /* mapping the QoS priority to the configured fifos */
        for (i = 0; i < MAX_TX_FIFOS; i++)
                config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];

        config->tx_intr_type = TXD_INT_TYPE_UTILZ;
        for (i = 0; i < config->tx_fifo_num; i++) {
                config->tx_cfg[i].f_no_snoop =
                    (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
                /* Short fifos use per-list Tx interrupts instead. */
                if (config->tx_cfg[i].fifo_len < 65) {
                        config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
                        break;
                }
        }
        /* + 2 because one Txd for skb->data and one Txd for UFO */
        config->max_txds = MAX_SKB_FRAGS + 2;

        /* Rx side parameters. */
        config->rx_ring_num = rx_ring_num;
        for (i = 0; i < MAX_RX_RINGS; i++) {
                config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
                    (rxd_count[sp->rxd_mode] + 1);
                config->rx_cfg[i].ring_priority = i;
        }

        for (i = 0; i < rx_ring_num; i++) {
                config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
                config->rx_cfg[i].f_no_snoop =
                    (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
        }

        /*  Setting Mac Control parameters */
        mac_control->rmac_pause_time = rmac_pause_time;
        mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
        mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;


        /* Initialize Ring buffer parameters. */
        for (i = 0; i < config->rx_ring_num; i++)
                atomic_set(&sp->rx_bufs_left[i], 0);

        /*  initialize the shared memory used by the NIC and the host */
        if (init_shared_mem(sp)) {
                DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
                          dev->name);
                ret = -ENOMEM;
                goto mem_alloc_failed;
        }

        /* BAR0: device configuration registers. */
        sp->bar0 = ioremap(pci_resource_start(pdev, 0),
                                     pci_resource_len(pdev, 0));
        if (!sp->bar0) {
                DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
                          dev->name);
                ret = -ENOMEM;
                goto bar0_remap_failed;
        }

        /* BAR1: Tx FIFO doorbell area. */
        sp->bar1 = ioremap(pci_resource_start(pdev, 2),
                                     pci_resource_len(pdev, 2));
        if (!sp->bar1) {
                DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
                          dev->name);
                ret = -ENOMEM;
                goto bar1_remap_failed;
        }

        dev->irq = pdev->irq;
        dev->base_addr = (unsigned long) sp->bar0;

        /* Initializing the BAR1 address as the start of the FIFO pointer. */
        for (j = 0; j < MAX_TX_FIFOS; j++) {
                mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
                    (sp->bar1 + (j * 0x00020000));
        }

        /*  Driver entry points */
        dev->open = &s2io_open;
        dev->stop = &s2io_close;
        dev->hard_start_xmit = &s2io_xmit;
        dev->get_stats = &s2io_get_stats;
        dev->set_multicast_list = &s2io_set_multicast;
        dev->do_ioctl = &s2io_ioctl;
        dev->set_mac_address = &s2io_set_mac_addr;
        dev->change_mtu = &s2io_change_mtu;
        SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
        dev->vlan_rx_register = s2io_vlan_rx_register;

        /*
         * will use eth_mac_addr() for  dev->set_mac_address
         * mac address will be set every time dev->open() is called
         */
        netif_napi_add(dev, &sp->napi, s2io_poll, 32);

#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = s2io_netpoll;
#endif

        dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
        if (sp->high_dma_flag == TRUE)
                dev->features |= NETIF_F_HIGHDMA;
        dev->features |= NETIF_F_TSO;
        dev->features |= NETIF_F_TSO6;
        /* UFO is only offered on Xframe II hardware. */
        if ((sp->device_type & XFRAME_II_DEVICE) && (ufo))  {
                dev->features |= NETIF_F_UFO;
                dev->features |= NETIF_F_HW_CSUM;
        }

        dev->tx_timeout = &s2io_tx_watchdog;
        dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
        INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
        INIT_WORK(&sp->set_link_task, s2io_set_link);

        pci_save_state(sp->pdev);

        /* Setting swapper control on the NIC, for proper reset operation */
        if (s2io_set_swapper(sp)) {
                DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
                          dev->name);
                ret = -EAGAIN;
                goto set_swap_failed;
        }

        /* Verify if the Herc works on the slot its placed into */
        if (sp->device_type & XFRAME_II_DEVICE) {
                mode = s2io_verify_pci_mode(sp);
                if (mode < 0) {
                        DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
                        DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
                        ret = -EBADSLT;
                        goto set_swap_failed;
                }
        }

        /* Not needed for Herc */
        if (sp->device_type & XFRAME_I_DEVICE) {
                /*
                 * Fix for all "FFs" MAC address problems observed on
                 * Alpha platforms
                 */
                fix_mac_address(sp);
                s2io_reset(sp);
        }

        /*
         * MAC address initialization.
         * For now only one mac address will be read and used.
         */
        bar0 = sp->bar0;
        val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
            RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
        writeq(val64, &bar0->rmac_addr_cmd_mem);
        wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
        tmp64 = readq(&bar0->rmac_addr_data0_mem);
        mac_down = (u32) tmp64;
        mac_up = (u32) (tmp64 >> 32);

        /* Unpack the six MAC-address bytes from the 64-bit register. */
        sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
        sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
        sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
        sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
        sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
        sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

        /*  Set the factory defined MAC address initially   */
        dev->addr_len = ETH_ALEN;
        memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
        memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);

         /* Store the values of the MSIX table in the s2io_nic structure */
        store_xmsi_data(sp);
        /* reset Nic and bring it to known state */
        s2io_reset(sp);

        /*
         * Initialize the tasklet status and link state flags
         * and the card state parameter
         */
        sp->tasklet_status = 0;
        sp->state = 0;

        /* Initialize spinlocks */
        spin_lock_init(&sp->tx_lock);

        if (!napi)
                spin_lock_init(&sp->put_lock);
        spin_lock_init(&sp->rx_lock);

        /*
         * SXE-002: Configure link and activity LED to init state
         * on driver load.
         */
        subid = sp->pdev->subsystem_device;
        if ((subid & 0xFF) >= 0x07) {
                val64 = readq(&bar0->gpio_control);
                val64 |= 0x0000800000000000ULL;
                writeq(val64, &bar0->gpio_control);
                val64 = 0x0411040400000000ULL;
                writeq(val64, (void __iomem *) bar0 + 0x2700);
                val64 = readq(&bar0->gpio_control);
        }

        sp->rx_csum = 1;        /* Rx chksum verify enabled by default */

        if (register_netdev(dev)) {
                DBG_PRINT(ERR_DBG, "Device registration failed\n");
                ret = -ENODEV;
                goto register_failed;
        }
        s2io_vpd_read(sp);
        DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
        DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
                  sp->product_name, pdev->revision);
        DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
                  s2io_driver_version);
        DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
                  dev->name, print_mac(mac, dev->dev_addr));
        DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
        if (sp->device_type & XFRAME_II_DEVICE) {
                mode = s2io_print_pci_mode(sp);
                if (mode < 0) {
                        DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
                        ret = -EBADSLT;
                        unregister_netdev(dev);
                        goto set_swap_failed;
                }
        }
        switch(sp->rxd_mode) {
                case RXD_MODE_1:
                    DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
                                                dev->name);
                    break;
                case RXD_MODE_3B:
                    DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
                                                dev->name);
                    break;
        }

        if (napi)
                DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
        switch(sp->config.intr_type) {
                case INTA:
                    DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
                    break;
                case MSI_X:
                    DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
                    break;
        }
        if (sp->lro)
                DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
                          dev->name);
        if (ufo)
                DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
                                        " enabled\n", dev->name);
        /* Initialize device name */
        sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);

        /*
         * Make Link state as off at this point, when the Link change
         * interrupt comes the state will be automatically changed to
         * the right state.
         */
        netif_carrier_off(dev);

        return 0;

        /* Error unwind: later labels fall through to earlier cleanup. */
      register_failed:
      set_swap_failed:
        iounmap(sp->bar1);
      bar1_remap_failed:
        iounmap(sp->bar0);
      bar0_remap_failed:
      mem_alloc_failed:
        free_shared_mem(sp);
        pci_disable_device(pdev);
        pci_release_regions(pdev);
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);

        return ret;
}
7787
7788 /**
7789  * s2io_rem_nic - Free the PCI device
7790  * @pdev: structure containing the PCI related information of the device.
7791  * Description: This function is called by the Pci subsystem to release a
7792  * PCI device and free up all resource held up by the device. This could
7793  * be in response to a Hot plug event or when the driver is to be removed
7794  * from memory.
7795  */
7796
7797 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7798 {
7799         struct net_device *dev =
7800             (struct net_device *) pci_get_drvdata(pdev);
7801         struct s2io_nic *sp;
7802
7803         if (dev == NULL) {
7804                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7805                 return;
7806         }
7807
7808         flush_scheduled_work();
7809
7810         sp = dev->priv;
7811         unregister_netdev(dev);
7812
7813         free_shared_mem(sp);
7814         iounmap(sp->bar0);
7815         iounmap(sp->bar1);
7816         pci_release_regions(pdev);
7817         pci_set_drvdata(pdev, NULL);
7818         free_netdev(dev);
7819         pci_disable_device(pdev);
7820 }
7821
7822 /**
7823  * s2io_starter - Entry point for the driver
 * Description: This function is the entry point for the driver. It registers
 * the driver with the PCI subsystem.
7826  */
7827
static int __init s2io_starter(void)
{
        /* Register the driver with the PCI core. */
        return pci_register_driver(&s2io_driver);
}
7832
7833 /**
7834  * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
7836  */
7837
static __exit void s2io_closer(void)
{
        /* Unregister from the PCI core; per-device teardown runs via
         * s2io_rem_nic(). */
        pci_unregister_driver(&s2io_driver);
        DBG_PRINT(INIT_DBG, "cleanup done\n");
}
7843
/* Module load/unload entry points. */
module_init(s2io_starter);
module_exit(s2io_closer);
7846
7847 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7848                 struct tcphdr **tcp, struct RxD_t *rxdp)
7849 {
7850         int ip_off;
7851         u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7852
7853         if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7854                 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7855                           __FUNCTION__);
7856                 return -1;
7857         }
7858
7859         /* TODO:
7860          * By default the VLAN field in the MAC is stripped by the card, if this
7861          * feature is turned off in rx_pa_cfg register, then the ip_off field
7862          * has to be shifted by a further 2 bytes
7863          */
7864         switch (l2_type) {
7865                 case 0: /* DIX type */
7866                 case 4: /* DIX type with VLAN */
7867                         ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7868                         break;
7869                 /* LLC, SNAP etc are considered non-mergeable */
7870                 default:
7871                         return -1;
7872         }
7873
7874         *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7875         ip_len = (u8)((*ip)->ihl);
7876         ip_len <<= 2;
7877         *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7878
7879         return 0;
7880 }
7881
7882 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7883                                   struct tcphdr *tcp)
7884 {
7885         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7886         if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7887            (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7888                 return -1;
7889         return 0;
7890 }
7891
7892 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7893 {
7894         return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
7895 }
7896
7897 static void initiate_new_session(struct lro *lro, u8 *l2h,
7898                      struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7899 {
7900         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7901         lro->l2h = l2h;
7902         lro->iph = ip;
7903         lro->tcph = tcp;
7904         lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7905         lro->tcp_ack = ntohl(tcp->ack_seq);
7906         lro->sg_num = 1;
7907         lro->total_len = ntohs(ip->tot_len);
7908         lro->frags_len = 0;
7909         /*
7910          * check if we saw TCP timestamp. Other consistency checks have
7911          * already been done.
7912          */
7913         if (tcp->doff == 8) {
7914                 u32 *ptr;
7915                 ptr = (u32 *)(tcp+1);
7916                 lro->saw_ts = 1;
7917                 lro->cur_tsval = *(ptr+1);
7918                 lro->cur_tsecr = *(ptr+2);
7919         }
7920         lro->in_use = 1;
7921 }
7922
7923 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7924 {
7925         struct iphdr *ip = lro->iph;
7926         struct tcphdr *tcp = lro->tcph;
7927         __sum16 nchk;
7928         struct stat_block *statinfo = sp->mac_control.stats_info;
7929         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7930
7931         /* Update L3 header */
7932         ip->tot_len = htons(lro->total_len);
7933         ip->check = 0;
7934         nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7935         ip->check = nchk;
7936
7937         /* Update L4 header */
7938         tcp->ack_seq = lro->tcp_ack;
7939         tcp->window = lro->window;
7940
7941         /* Update tsecr field if this session has timestamps enabled */
7942         if (lro->saw_ts) {
7943                 u32 *ptr = (u32 *)(tcp + 1);
7944                 *(ptr+2) = lro->cur_tsecr;
7945         }
7946
7947         /* Update counters required for calculation of
7948          * average no. of packets aggregated.
7949          */
7950         statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7951         statinfo->sw_stat.num_aggregations++;
7952 }
7953
7954 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7955                 struct tcphdr *tcp, u32 l4_pyld)
7956 {
7957         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7958         lro->total_len += l4_pyld;
7959         lro->frags_len += l4_pyld;
7960         lro->tcp_next_seq += l4_pyld;
7961         lro->sg_num++;
7962
7963         /* Update ack seq no. and window ad(from this pkt) in LRO object */
7964         lro->tcp_ack = tcp->ack_seq;
7965         lro->window = tcp->window;
7966
7967         if (lro->saw_ts) {
7968                 u32 *ptr;
7969                 /* Update tsecr and tsval from this packet */
7970                 ptr = (u32 *) (tcp + 1);
7971                 lro->cur_tsval = *(ptr + 1);
7972                 lro->cur_tsecr = *(ptr + 2);
7973         }
7974 }
7975
7976 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7977                                     struct tcphdr *tcp, u32 tcp_pyld_len)
7978 {
7979         u8 *ptr;
7980
7981         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7982
7983         if (!tcp_pyld_len) {
7984                 /* Runt frame or a pure ack */
7985                 return -1;
7986         }
7987
7988         if (ip->ihl != 5) /* IP has options */
7989                 return -1;
7990
7991         /* If we see CE codepoint in IP header, packet is not mergeable */
7992         if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7993                 return -1;
7994
7995         /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7996         if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
7997                                     tcp->ece || tcp->cwr || !tcp->ack) {
7998                 /*
7999                  * Currently recognize only the ack control word and
8000                  * any other control field being set would result in
8001                  * flushing the LRO session
8002                  */
8003                 return -1;
8004         }
8005
8006         /*
8007          * Allow only one TCP timestamp option. Don't aggregate if
8008          * any other options are detected.
8009          */
8010         if (tcp->doff != 5 && tcp->doff != 8)
8011                 return -1;
8012
8013         if (tcp->doff == 8) {
8014                 ptr = (u8 *)(tcp + 1);
8015                 while (*ptr == TCPOPT_NOP)
8016                         ptr++;
8017                 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8018                         return -1;
8019
8020                 /* Ensure timestamp value increases monotonically */
8021                 if (l_lro)
8022                         if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
8023                                 return -1;
8024
8025                 /* timestamp echo reply should be non-zero */
8026                 if (*((u32 *)(ptr+6)) == 0)
8027                         return -1;
8028         }
8029
8030         return 0;
8031 }
8032
/*
 * Classify a received frame against the NIC's LRO session table and
 * either aggregate it, start a new session, or request a flush.
 *
 * Return codes consumed by the rx path:
 *   0 - no session matched and none free; hand the frame up unmerged
 *   1 - frame aggregated into an existing session
 *   2 - out-of-order or non-mergeable frame; flush session and frame
 *   3 - a new LRO session was begun with this frame
 *   4 - aggregated and session hit max size; flush the session
 *   5 - frame is not L3/L4 aggregatable; send it up without a session
 */
static int
s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
		      struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;

	/* Bail out early unless the L2 framing permits LRO at all */
	if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
					 rxdp))) {
		DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
			  ip->saddr, ip->daddr);
	} else {
		return ret;
	}

	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* Look for an in-use session matching this frame's socket pair */
	for (i=0; i<MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &sp->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			/* Out-of-order segment: flush session and frame */
			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
					  "0x%x, actual 0x%x\n", __FUNCTION__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				sp->mac_control.stats_info->
				   sw_stat.outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
			return 5;
		}

		/* Claim the first free session slot, if any */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &sp->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
			  __FUNCTION__);
		*lro = NULL;
		return ret;
	}

	/* Act on the classification decided above */
	switch (ret) {
		case 3:
			initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
			break;
		case 2:
			update_L3L4_header(sp, *lro);
			break;
		case 1:
			aggregate_new_rx(*lro, ip, tcph, *tcp_len);
			/* Session is full; rewrite headers and flush it */
			if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
				update_L3L4_header(sp, *lro);
				ret = 4; /* Flush the LRO */
			}
			break;
		default:
			DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
				__FUNCTION__);
			break;
	}

	return ret;
}
8128
8129 static void clear_lro_session(struct lro *lro)
8130 {
8131         static u16 lro_struct_size = sizeof(struct lro);
8132
8133         memset(lro, 0, lro_struct_size);
8134 }
8135
8136 static void queue_rx_frame(struct sk_buff *skb)
8137 {
8138         struct net_device *dev = skb->dev;
8139
8140         skb->protocol = eth_type_trans(skb, dev);
8141         if (napi)
8142                 netif_receive_skb(skb);
8143         else
8144                 netif_rx(skb);
8145 }
8146
8147 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8148                            struct sk_buff *skb,
8149                            u32 tcp_len)
8150 {
8151         struct sk_buff *first = lro->parent;
8152
8153         first->len += tcp_len;
8154         first->data_len = lro->frags_len;
8155         skb_pull(skb, (skb->len - tcp_len));
8156         if (skb_shinfo(first)->frag_list)
8157                 lro->last_frag->next = skb;
8158         else
8159                 skb_shinfo(first)->frag_list = skb;
8160         first->truesize += skb->truesize;
8161         lro->last_frag = skb;
8162         sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8163         return;
8164 }
8165
8166 /**
8167  * s2io_io_error_detected - called when PCI error is detected
8168  * @pdev: Pointer to PCI device
8169  * @state: The current pci connection state
8170  *
8171  * This function is called after a PCI bus error affecting
8172  * this device has been detected.
8173  */
8174 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8175                                                pci_channel_state_t state)
8176 {
8177         struct net_device *netdev = pci_get_drvdata(pdev);
8178         struct s2io_nic *sp = netdev->priv;
8179
8180         netif_device_detach(netdev);
8181
8182         if (netif_running(netdev)) {
8183                 /* Bring down the card, while avoiding PCI I/O */
8184                 do_s2io_card_down(sp, 0);
8185         }
8186         pci_disable_device(pdev);
8187
8188         return PCI_ERS_RESULT_NEED_RESET;
8189 }
8190
8191 /**
8192  * s2io_io_slot_reset - called after the pci bus has been reset.
8193  * @pdev: Pointer to PCI device
8194  *
8195  * Restart the card from scratch, as if from a cold-boot.
8196  * At this point, the card has exprienced a hard reset,
8197  * followed by fixups by BIOS, and has its config space
8198  * set up identically to what it was at cold boot.
8199  */
8200 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8201 {
8202         struct net_device *netdev = pci_get_drvdata(pdev);
8203         struct s2io_nic *sp = netdev->priv;
8204
8205         if (pci_enable_device(pdev)) {
8206                 printk(KERN_ERR "s2io: "
8207                        "Cannot re-enable PCI device after reset.\n");
8208                 return PCI_ERS_RESULT_DISCONNECT;
8209         }
8210
8211         pci_set_master(pdev);
8212         s2io_reset(sp);
8213
8214         return PCI_ERS_RESULT_RECOVERED;
8215 }
8216
8217 /**
8218  * s2io_io_resume - called when traffic can start flowing again.
8219  * @pdev: Pointer to PCI device
8220  *
8221  * This callback is called when the error recovery driver tells
8222  * us that its OK to resume normal operation.
8223  */
8224 static void s2io_io_resume(struct pci_dev *pdev)
8225 {
8226         struct net_device *netdev = pci_get_drvdata(pdev);
8227         struct s2io_nic *sp = netdev->priv;
8228
8229         if (netif_running(netdev)) {
8230                 if (s2io_card_up(sp)) {
8231                         printk(KERN_ERR "s2io: "
8232                                "Can't bring device back up after reset.\n");
8233                         return;
8234                 }
8235
8236                 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8237                         s2io_card_down(sp);
8238                         printk(KERN_ERR "s2io: "
8239                                "Can't resetore mac addr after reset.\n");
8240                         return;
8241                 }
8242         }
8243
8244         netif_device_attach(netdev);
8245         netif_wake_queue(netdev);
8246 }