S2io: Change kmalloc+memset to k[zc]alloc
[pandora-kernel.git] / drivers / net / s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2007 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regaring the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explaination of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2.
36  * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
41  * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  * napi: This parameter used to enable/disable NAPI (polling Rx)
46  *     Possible values '1' for enable and '0' for disable. Default is '1'
47  * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48  *      Possible values '1' for enable and '0' for disable. Default is '0'
49  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50  *                 Possible values '1' for enable , '0' for disable.
51  *                 Default is '2' - which means disable in promisc mode
52  *                 and enable in non-promiscuous mode.
53  ************************************************************************/
54
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
73 #include <linux/ip.h>
74 #include <linux/tcp.h>
75 #include <net/tcp.h>
76
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
79 #include <asm/io.h>
80 #include <asm/div64.h>
81 #include <asm/irq.h>
82
83 /* local include */
84 #include "s2io.h"
85 #include "s2io-regs.h"
86
87 #define DRV_VERSION "2.0.26.2"
88
89 /* S2io Driver name & version. */
90 static char s2io_driver_name[] = "Neterion";
91 static char s2io_driver_version[] = DRV_VERSION;
92
93 static int rxd_size[2] = {32,48};
94 static int rxd_count[2] = {127,85};
95
96 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
97 {
98         int ret;
99
100         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
102
103         return ret;
104 }
105
106 /*
107  * Cards with following subsystem_id have a link state indication
108  * problem, 600B, 600C, 600D, 640B, 640C and 640D.
109  * macro below identifies these cards given the subsystem_id.
110  */
/*
 * Evaluates to 1 for Xframe-I cards whose subsystem id falls in
 * 600B..600D or 640B..640D, 0 otherwise.  Arguments and the whole
 * expansion are parenthesized so the macro composes safely inside
 * larger expressions (e.g. under '!' or arithmetic operators).
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
        (((dev_type) == XFRAME_I_DEVICE) ?              \
                (((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
                 (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)
115
/* True when neither the remote nor the local RMAC fault bit is set in
 * the adapter-status register value passed in.
 */
#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
                                      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically claims the tasklet; non-zero means it was already in use.
 * NOTE(review): expands 'sp' from the caller's scope - only usable where
 * a 'struct s2io_nic *sp' is visible.
 */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/* Rx buffer replenish urgency levels returned by rx_buffer_level(). */
#define PANIC   1
#define LOW     2
121 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
122 {
123         struct mac_info *mac_control;
124
125         mac_control = &sp->mac_control;
126         if (rxb_size <= rxd_count[sp->rxd_mode])
127                 return PANIC;
128         else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
129                 return  LOW;
130         return 0;
131 }
132
/* Returns non-zero when the __S2IO_STATE_CARD_UP bit is set in sp->state,
 * i.e. the adapter has been brought up.
 */
static inline int is_s2io_card_up(const struct s2io_nic * sp)
{
        return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}
137
138 /* Ethtool related variables and Macros. */
/* Names of the ethtool self-test cases; a tab separates the test name
 * from its offline/online mode annotation.
 */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
        "Register test\t(offline)",
        "Eeprom test\t(offline)",
        "Link test\t(online)",
        "RLDRAM test\t(offline)",
        "BIST Test\t(offline)"
};
146
/* Ethtool statistic names for the hardware counters common to both
 * adapter classes.  NOTE(review): the order is assumed to match the
 * order the stat values are copied out elsewhere in this file - keep
 * the two in sync when editing.
 */
static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
        {"tmac_frms"},
        {"tmac_data_octets"},
        {"tmac_drop_frms"},
        {"tmac_mcst_frms"},
        {"tmac_bcst_frms"},
        {"tmac_pause_ctrl_frms"},
        {"tmac_ttl_octets"},
        {"tmac_ucst_frms"},
        {"tmac_nucst_frms"},
        {"tmac_any_err_frms"},
        {"tmac_ttl_less_fb_octets"},
        {"tmac_vld_ip_octets"},
        {"tmac_vld_ip"},
        {"tmac_drop_ip"},
        {"tmac_icmp"},
        {"tmac_rst_tcp"},
        {"tmac_tcp"},
        {"tmac_udp"},
        {"rmac_vld_frms"},
        {"rmac_data_octets"},
        {"rmac_fcs_err_frms"},
        {"rmac_drop_frms"},
        {"rmac_vld_mcst_frms"},
        {"rmac_vld_bcst_frms"},
        {"rmac_in_rng_len_err_frms"},
        {"rmac_out_rng_len_err_frms"},
        {"rmac_long_frms"},
        {"rmac_pause_ctrl_frms"},
        {"rmac_unsup_ctrl_frms"},
        {"rmac_ttl_octets"},
        {"rmac_accepted_ucst_frms"},
        {"rmac_accepted_nucst_frms"},
        {"rmac_discarded_frms"},
        {"rmac_drop_events"},
        {"rmac_ttl_less_fb_octets"},
        {"rmac_ttl_frms"},
        {"rmac_usized_frms"},
        {"rmac_osized_frms"},
        {"rmac_frag_frms"},
        {"rmac_jabber_frms"},
        {"rmac_ttl_64_frms"},
        {"rmac_ttl_65_127_frms"},
        {"rmac_ttl_128_255_frms"},
        {"rmac_ttl_256_511_frms"},
        {"rmac_ttl_512_1023_frms"},
        {"rmac_ttl_1024_1518_frms"},
        {"rmac_ip"},
        {"rmac_ip_octets"},
        {"rmac_hdr_err_ip"},
        {"rmac_drop_ip"},
        {"rmac_icmp"},
        {"rmac_tcp"},
        {"rmac_udp"},
        {"rmac_err_drp_udp"},
        {"rmac_xgmii_err_sym"},
        {"rmac_frms_q0"},
        {"rmac_frms_q1"},
        {"rmac_frms_q2"},
        {"rmac_frms_q3"},
        {"rmac_frms_q4"},
        {"rmac_frms_q5"},
        {"rmac_frms_q6"},
        {"rmac_frms_q7"},
        {"rmac_full_q0"},
        {"rmac_full_q1"},
        {"rmac_full_q2"},
        {"rmac_full_q3"},
        {"rmac_full_q4"},
        {"rmac_full_q5"},
        {"rmac_full_q6"},
        {"rmac_full_q7"},
        {"rmac_pause_cnt"},
        {"rmac_xgmii_data_err_cnt"},
        {"rmac_xgmii_ctrl_err_cnt"},
        {"rmac_accepted_ip"},
        {"rmac_err_tcp"},
        {"rd_req_cnt"},
        {"new_rd_req_cnt"},
        {"new_rd_req_rtry_cnt"},
        {"rd_rtry_cnt"},
        {"wr_rtry_rd_ack_cnt"},
        {"wr_req_cnt"},
        {"new_wr_req_cnt"},
        {"new_wr_req_rtry_cnt"},
        {"wr_rtry_cnt"},
        {"wr_disc_cnt"},
        {"rd_rtry_wr_ack_cnt"},
        {"txp_wr_cnt"},
        {"txd_rd_cnt"},
        {"txd_wr_cnt"},
        {"rxd_rd_cnt"},
        {"rxd_wr_cnt"},
        {"txf_rd_cnt"},
        {"rxf_wr_cnt"}
};
243
/* Additional ethtool statistic names reported only by adapters that
 * implement the enhanced hardware counters.
 */
static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
        {"rmac_ttl_1519_4095_frms"},
        {"rmac_ttl_4096_8191_frms"},
        {"rmac_ttl_8192_max_frms"},
        {"rmac_ttl_gt_max_frms"},
        {"rmac_osized_alt_frms"},
        {"rmac_jabber_alt_frms"},
        {"rmac_gt_max_alt_frms"},
        {"rmac_vlan_frms"},
        {"rmac_len_discard"},
        {"rmac_fcs_discard"},
        {"rmac_pf_discard"},
        {"rmac_da_discard"},
        {"rmac_red_discard"},
        {"rmac_rts_discard"},
        {"rmac_ingm_full_discard"},
        {"link_fault_cnt"}
};
262
263 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
264         {"\n DRIVER STATISTICS"},
265         {"single_bit_ecc_errs"},
266         {"double_bit_ecc_errs"},
267         {"parity_err_cnt"},
268         {"serious_err_cnt"},
269         {"soft_reset_cnt"},
270         {"fifo_full_cnt"},
271         {"ring_0_full_cnt"},
272         {"ring_1_full_cnt"},
273         {"ring_2_full_cnt"},
274         {"ring_3_full_cnt"},
275         {"ring_4_full_cnt"},
276         {"ring_5_full_cnt"},
277         {"ring_6_full_cnt"},
278         {"ring_7_full_cnt"},
279         ("alarm_transceiver_temp_high"),
280         ("alarm_transceiver_temp_low"),
281         ("alarm_laser_bias_current_high"),
282         ("alarm_laser_bias_current_low"),
283         ("alarm_laser_output_power_high"),
284         ("alarm_laser_output_power_low"),
285         ("warn_transceiver_temp_high"),
286         ("warn_transceiver_temp_low"),
287         ("warn_laser_bias_current_high"),
288         ("warn_laser_bias_current_low"),
289         ("warn_laser_output_power_high"),
290         ("warn_laser_output_power_low"),
291         ("lro_aggregated_pkts"),
292         ("lro_flush_both_count"),
293         ("lro_out_of_sequence_pkts"),
294         ("lro_flush_due_to_max_pkts"),
295         ("lro_avg_aggr_pkts"),
296         ("mem_alloc_fail_cnt"),
297         ("pci_map_fail_cnt"),
298         ("watchdog_timer_cnt"),
299         ("mem_allocated"),
300         ("mem_freed"),
301         ("link_up_cnt"),
302         ("link_down_cnt"),
303         ("link_up_time"),
304         ("link_down_time"),
305         ("tx_tcode_buf_abort_cnt"),
306         ("tx_tcode_desc_abort_cnt"),
307         ("tx_tcode_parity_err_cnt"),
308         ("tx_tcode_link_loss_cnt"),
309         ("tx_tcode_list_proc_err_cnt"),
310         ("rx_tcode_parity_err_cnt"),
311         ("rx_tcode_abort_cnt"),
312         ("rx_tcode_parity_abort_cnt"),
313         ("rx_tcode_rda_fail_cnt"),
314         ("rx_tcode_unkn_prot_cnt"),
315         ("rx_tcode_fcs_err_cnt"),
316         ("rx_tcode_buf_size_err_cnt"),
317         ("rx_tcode_rxd_corrupt_cnt"),
318         ("rx_tcode_unkn_err_cnt"),
319         {"tda_err_cnt"},
320         {"pfc_err_cnt"},
321         {"pcc_err_cnt"},
322         {"tti_err_cnt"},
323         {"tpa_err_cnt"},
324         {"sm_err_cnt"},
325         {"lso_err_cnt"},
326         {"mac_tmac_err_cnt"},
327         {"mac_rmac_err_cnt"},
328         {"xgxs_txgxs_err_cnt"},
329         {"xgxs_rxgxs_err_cnt"},
330         {"rc_err_cnt"},
331         {"prc_pcix_err_cnt"},
332         {"rpa_err_cnt"},
333         {"rda_err_cnt"},
334         {"rti_err_cnt"},
335         {"mc_err_cnt"}
336 };
337
/* Element counts / byte lengths of the ethtool string tables above.
 * Each expansion is fully parenthesized so the macros associate
 * correctly when embedded in larger expressions (e.g. divisions).
 */
#define S2IO_XENA_STAT_LEN (sizeof(ethtool_xena_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_ENHANCED_STAT_LEN (sizeof(ethtool_enhanced_stats_keys) / \
                                        ETH_GSTRING_LEN)
#define S2IO_DRIVER_STAT_LEN (sizeof(ethtool_driver_stats_keys) / ETH_GSTRING_LEN)

#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN   (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN        (S2IO_TEST_LEN * ETH_GSTRING_LEN)
351
/*
 * Initialize @timer to invoke @handle(@arg) and arm it to fire @exp
 * jiffies from now.  Wrapped in do { } while (0) so the macro expands
 * to a single statement and is safe inside unbraced if/else bodies;
 * call sites still end with a semicolon as before.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)                \
        do {                                                    \
                init_timer(&timer);                             \
                timer.function = handle;                        \
                timer.data = (unsigned long) arg;               \
                mod_timer(&timer, (jiffies + exp));             \
        } while (0)
358 /* Add the vlan */
359 static void s2io_vlan_rx_register(struct net_device *dev,
360                                         struct vlan_group *grp)
361 {
362         struct s2io_nic *nic = dev->priv;
363         unsigned long flags;
364
365         spin_lock_irqsave(&nic->tx_lock, flags);
366         nic->vlgrp = grp;
367         spin_unlock_irqrestore(&nic->tx_lock, flags);
368 }
369
/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
/* NOTE(review): appears to mirror the hardware strip setting; confirm
 * against the code that programs RX_PA_CFG before relying on it.
 */
static int vlan_strip_flag;
372
373 /*
374  * Constants to be programmed into the Xena's registers, to configure
375  * the XAUI.
376  */
377
/* Terminator for the DTX configuration sequences below. */
#define END_SIGN        0x0
/* DTX register program for 'herc' class adapters: alternating
 * set-address / write-data command pairs, ended by END_SIGN.
 */
static const u64 herc_act_dtx_cfg[] = {
        /* Set address */
        0x8000051536750000ULL, 0x80000515367500E0ULL,
        /* Write data */
        0x8000051536750004ULL, 0x80000515367500E4ULL,
        /* Set address */
        0x80010515003F0000ULL, 0x80010515003F00E0ULL,
        /* Write data */
        0x80010515003F0004ULL, 0x80010515003F00E4ULL,
        /* Set address */
        0x801205150D440000ULL, 0x801205150D4400E0ULL,
        /* Write data */
        0x801205150D440004ULL, 0x801205150D4400E4ULL,
        /* Set address */
        0x80020515F2100000ULL, 0x80020515F21000E0ULL,
        /* Write data */
        0x80020515F2100004ULL, 0x80020515F21000E4ULL,
        /* Done */
        END_SIGN
};
399
/* DTX register program for 'xena' class adapters, in the same
 * set-address / write-data pair format as herc_act_dtx_cfg above.
 */
static const u64 xena_dtx_cfg[] = {
        /* Set address */
        0x8000051500000000ULL, 0x80000515000000E0ULL,
        /* Write data */
        0x80000515D9350004ULL, 0x80000515D93500E4ULL,
        /* Set address */
        0x8001051500000000ULL, 0x80010515000000E0ULL,
        /* Write data */
        0x80010515001E0004ULL, 0x80010515001E00E4ULL,
        /* Set address */
        0x8002051500000000ULL, 0x80020515000000E0ULL,
        /* Write data */
        0x80020515F2100004ULL, 0x80020515F21000E4ULL,
        END_SIGN
};
415
416 /*
417  * Constants for Fixing the MacAddress problem seen mostly on
418  * Alpha machines.
419  */
static const u64 fix_mac[] = {
        0x0060000000000000ULL, 0x0060600000000000ULL,
        0x0040600000000000ULL, 0x0000600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0000600000000000ULL,
        0x0040600000000000ULL, 0x0060600000000000ULL,
        /* sequence terminator */
        END_SIGN
};
437
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, 1);
S2IO_PARM_INT(rx_ring_num, 1);


S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(bimodal, 0);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */
S2IO_PARM_INT(lro, 0);
/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-FIFO Tx lengths and per-ring Rx sizes; the array parameters
 * below let each element be overridden at module load time.
 */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
483
484 /*
485  * S2IO device table.
486  * This table lists all the devices that this driver supports.
487  */
488 static struct pci_device_id s2io_tbl[] __devinitdata = {
489         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
490          PCI_ANY_ID, PCI_ANY_ID},
491         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
492          PCI_ANY_ID, PCI_ANY_ID},
493         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
494          PCI_ANY_ID, PCI_ANY_ID},
495         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
496          PCI_ANY_ID, PCI_ANY_ID},
497         {0,}
498 };
499
MODULE_DEVICE_TABLE(pci, s2io_tbl);

/* PCI error-recovery callbacks (AER): detect, slot reset, resume. */
static struct pci_error_handlers s2io_err_handler = {
        .error_detected = s2io_io_error_detected,
        .slot_reset = s2io_io_slot_reset,
        .resume = s2io_io_resume,
};
507
/* PCI driver registration block; probe/remove are defined later in
 * this file.
 */
static struct pci_driver s2io_driver = {
      .name = "S2IO",
      .id_table = s2io_tbl,
      .probe = s2io_init_nic,
      .remove = __devexit_p(s2io_rem_nic),
      .err_handler = &s2io_err_handler,
};
515
/* A simplifier macro used both by init and free shared_mem Fns():
 * number of whole pages needed to hold 'len' items at 'per_each' items
 * per page (ceiling division).  Arguments are parenthesized so
 * expression arguments (e.g. 'a + b') expand with correct precedence.
 */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
518
519 /**
520  * init_shared_mem - Allocation and Initialization of Memory
521  * @nic: Device private variable.
522  * Description: The function allocates all the memory areas shared
523  * between the NIC and the driver. This includes Tx descriptors,
524  * Rx descriptors and the statistics block.
525  */
526
527 static int init_shared_mem(struct s2io_nic *nic)
528 {
529         u32 size;
530         void *tmp_v_addr, *tmp_v_addr_next;
531         dma_addr_t tmp_p_addr, tmp_p_addr_next;
532         struct RxD_block *pre_rxd_blk = NULL;
533         int i, j, blk_cnt;
534         int lst_size, lst_per_page;
535         struct net_device *dev = nic->dev;
536         unsigned long tmp;
537         struct buffAdd *ba;
538
539         struct mac_info *mac_control;
540         struct config_param *config;
541         unsigned long long mem_allocated = 0;
542
543         mac_control = &nic->mac_control;
544         config = &nic->config;
545
546
547         /* Allocation and initialization of TXDLs in FIOFs */
548         size = 0;
549         for (i = 0; i < config->tx_fifo_num; i++) {
550                 size += config->tx_cfg[i].fifo_len;
551         }
552         if (size > MAX_AVAILABLE_TXDS) {
553                 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
554                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
555                 return -EINVAL;
556         }
557
558         lst_size = (sizeof(struct TxD) * config->max_txds);
559         lst_per_page = PAGE_SIZE / lst_size;
560
561         for (i = 0; i < config->tx_fifo_num; i++) {
562                 int fifo_len = config->tx_cfg[i].fifo_len;
563                 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
564                 mac_control->fifos[i].list_info = kzalloc(list_holder_size,
565                                                           GFP_KERNEL);
566                 if (!mac_control->fifos[i].list_info) {
567                         DBG_PRINT(INFO_DBG,
568                                   "Malloc failed for list_info\n");
569                         return -ENOMEM;
570                 }
571                 mem_allocated += list_holder_size;
572         }
573         for (i = 0; i < config->tx_fifo_num; i++) {
574                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
575                                                 lst_per_page);
576                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
577                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
578                     config->tx_cfg[i].fifo_len - 1;
579                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
580                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
581                     config->tx_cfg[i].fifo_len - 1;
582                 mac_control->fifos[i].fifo_no = i;
583                 mac_control->fifos[i].nic = nic;
584                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
585
586                 for (j = 0; j < page_num; j++) {
587                         int k = 0;
588                         dma_addr_t tmp_p;
589                         void *tmp_v;
590                         tmp_v = pci_alloc_consistent(nic->pdev,
591                                                      PAGE_SIZE, &tmp_p);
592                         if (!tmp_v) {
593                                 DBG_PRINT(INFO_DBG,
594                                           "pci_alloc_consistent ");
595                                 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
596                                 return -ENOMEM;
597                         }
598                         /* If we got a zero DMA address(can happen on
599                          * certain platforms like PPC), reallocate.
600                          * Store virtual address of page we don't want,
601                          * to be freed later.
602                          */
603                         if (!tmp_p) {
604                                 mac_control->zerodma_virt_addr = tmp_v;
605                                 DBG_PRINT(INIT_DBG,
606                                 "%s: Zero DMA address for TxDL. ", dev->name);
607                                 DBG_PRINT(INIT_DBG,
608                                 "Virtual address %p\n", tmp_v);
609                                 tmp_v = pci_alloc_consistent(nic->pdev,
610                                                      PAGE_SIZE, &tmp_p);
611                                 if (!tmp_v) {
612                                         DBG_PRINT(INFO_DBG,
613                                           "pci_alloc_consistent ");
614                                         DBG_PRINT(INFO_DBG, "failed for TxDL\n");
615                                         return -ENOMEM;
616                                 }
617                                 mem_allocated += PAGE_SIZE;
618                         }
619                         while (k < lst_per_page) {
620                                 int l = (j * lst_per_page) + k;
621                                 if (l == config->tx_cfg[i].fifo_len)
622                                         break;
623                                 mac_control->fifos[i].list_info[l].list_virt_addr =
624                                     tmp_v + (k * lst_size);
625                                 mac_control->fifos[i].list_info[l].list_phy_addr =
626                                     tmp_p + (k * lst_size);
627                                 k++;
628                         }
629                 }
630         }
631
632         nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
633         if (!nic->ufo_in_band_v)
634                 return -ENOMEM;
635          mem_allocated += (size * sizeof(u64));
636
637         /* Allocation and initialization of RXDs in Rings */
638         size = 0;
639         for (i = 0; i < config->rx_ring_num; i++) {
640                 if (config->rx_cfg[i].num_rxd %
641                     (rxd_count[nic->rxd_mode] + 1)) {
642                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
643                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
644                                   i);
645                         DBG_PRINT(ERR_DBG, "RxDs per Block");
646                         return FAILURE;
647                 }
648                 size += config->rx_cfg[i].num_rxd;
649                 mac_control->rings[i].block_count =
650                         config->rx_cfg[i].num_rxd /
651                         (rxd_count[nic->rxd_mode] + 1 );
652                 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
653                         mac_control->rings[i].block_count;
654         }
655         if (nic->rxd_mode == RXD_MODE_1)
656                 size = (size * (sizeof(struct RxD1)));
657         else
658                 size = (size * (sizeof(struct RxD3)));
659
660         for (i = 0; i < config->rx_ring_num; i++) {
661                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
662                 mac_control->rings[i].rx_curr_get_info.offset = 0;
663                 mac_control->rings[i].rx_curr_get_info.ring_len =
664                     config->rx_cfg[i].num_rxd - 1;
665                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
666                 mac_control->rings[i].rx_curr_put_info.offset = 0;
667                 mac_control->rings[i].rx_curr_put_info.ring_len =
668                     config->rx_cfg[i].num_rxd - 1;
669                 mac_control->rings[i].nic = nic;
670                 mac_control->rings[i].ring_no = i;
671
672                 blk_cnt = config->rx_cfg[i].num_rxd /
673                                 (rxd_count[nic->rxd_mode] + 1);
674                 /*  Allocating all the Rx blocks */
675                 for (j = 0; j < blk_cnt; j++) {
676                         struct rx_block_info *rx_blocks;
677                         int l;
678
679                         rx_blocks = &mac_control->rings[i].rx_blocks[j];
680                         size = SIZE_OF_BLOCK; //size is always page size
681                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
682                                                           &tmp_p_addr);
683                         if (tmp_v_addr == NULL) {
684                                 /*
685                                  * In case of failure, free_shared_mem()
686                                  * is called, which should free any
687                                  * memory that was alloced till the
688                                  * failure happened.
689                                  */
690                                 rx_blocks->block_virt_addr = tmp_v_addr;
691                                 return -ENOMEM;
692                         }
693                         mem_allocated += size;
694                         memset(tmp_v_addr, 0, size);
695                         rx_blocks->block_virt_addr = tmp_v_addr;
696                         rx_blocks->block_dma_addr = tmp_p_addr;
697                         rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
698                                                   rxd_count[nic->rxd_mode],
699                                                   GFP_KERNEL);
700                         if (!rx_blocks->rxds)
701                                 return -ENOMEM;
702                         mem_allocated += 
703                         (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
704                         for (l=0; l<rxd_count[nic->rxd_mode];l++) {
705                                 rx_blocks->rxds[l].virt_addr =
706                                         rx_blocks->block_virt_addr +
707                                         (rxd_size[nic->rxd_mode] * l);
708                                 rx_blocks->rxds[l].dma_addr =
709                                         rx_blocks->block_dma_addr +
710                                         (rxd_size[nic->rxd_mode] * l);
711                         }
712                 }
713                 /* Interlinking all Rx Blocks */
714                 for (j = 0; j < blk_cnt; j++) {
715                         tmp_v_addr =
716                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
717                         tmp_v_addr_next =
718                                 mac_control->rings[i].rx_blocks[(j + 1) %
719                                               blk_cnt].block_virt_addr;
720                         tmp_p_addr =
721                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
722                         tmp_p_addr_next =
723                                 mac_control->rings[i].rx_blocks[(j + 1) %
724                                               blk_cnt].block_dma_addr;
725
726                         pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
727                         pre_rxd_blk->reserved_2_pNext_RxD_block =
728                             (unsigned long) tmp_v_addr_next;
729                         pre_rxd_blk->pNext_RxD_Blk_physical =
730                             (u64) tmp_p_addr_next;
731                 }
732         }
733         if (nic->rxd_mode == RXD_MODE_3B) {
734                 /*
735                  * Allocation of Storages for buffer addresses in 2BUFF mode
736                  * and the buffers as well.
737                  */
738                 for (i = 0; i < config->rx_ring_num; i++) {
739                         blk_cnt = config->rx_cfg[i].num_rxd /
740                            (rxd_count[nic->rxd_mode]+ 1);
741                         mac_control->rings[i].ba =
742                                 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
743                                      GFP_KERNEL);
744                         if (!mac_control->rings[i].ba)
745                                 return -ENOMEM;
746                         mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
747                         for (j = 0; j < blk_cnt; j++) {
748                                 int k = 0;
749                                 mac_control->rings[i].ba[j] =
750                                         kmalloc((sizeof(struct buffAdd) *
751                                                 (rxd_count[nic->rxd_mode] + 1)),
752                                                 GFP_KERNEL);
753                                 if (!mac_control->rings[i].ba[j])
754                                         return -ENOMEM;
755                                 mem_allocated += (sizeof(struct buffAdd) *  \
756                                         (rxd_count[nic->rxd_mode] + 1));
757                                 while (k != rxd_count[nic->rxd_mode]) {
758                                         ba = &mac_control->rings[i].ba[j][k];
759
760                                         ba->ba_0_org = (void *) kmalloc
761                                             (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
762                                         if (!ba->ba_0_org)
763                                                 return -ENOMEM;
764                                         mem_allocated += 
765                                                 (BUF0_LEN + ALIGN_SIZE);
766                                         tmp = (unsigned long)ba->ba_0_org;
767                                         tmp += ALIGN_SIZE;
768                                         tmp &= ~((unsigned long) ALIGN_SIZE);
769                                         ba->ba_0 = (void *) tmp;
770
771                                         ba->ba_1_org = (void *) kmalloc
772                                             (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
773                                         if (!ba->ba_1_org)
774                                                 return -ENOMEM;
775                                         mem_allocated 
776                                                 += (BUF1_LEN + ALIGN_SIZE);
777                                         tmp = (unsigned long) ba->ba_1_org;
778                                         tmp += ALIGN_SIZE;
779                                         tmp &= ~((unsigned long) ALIGN_SIZE);
780                                         ba->ba_1 = (void *) tmp;
781                                         k++;
782                                 }
783                         }
784                 }
785         }
786
787         /* Allocation and initialization of Statistics block */
788         size = sizeof(struct stat_block);
789         mac_control->stats_mem = pci_alloc_consistent
790             (nic->pdev, size, &mac_control->stats_mem_phy);
791
792         if (!mac_control->stats_mem) {
793                 /*
794                  * In case of failure, free_shared_mem() is called, which
795                  * should free any memory that was alloced till the
796                  * failure happened.
797                  */
798                 return -ENOMEM;
799         }
800         mem_allocated += size;
801         mac_control->stats_mem_sz = size;
802
803         tmp_v_addr = mac_control->stats_mem;
804         mac_control->stats_info = (struct stat_block *) tmp_v_addr;
805         memset(tmp_v_addr, 0, size);
806         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
807                   (unsigned long long) tmp_p_addr);
808         mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
809         return SUCCESS;
810 }
811
812 /**
813  * free_shared_mem - Free the allocated Memory
814  * @nic:  Device private variable.
815  * Description: This function is to free all memory locations allocated by
816  * the init_shared_mem() function and return it to the kernel.
817  */
818
819 static void free_shared_mem(struct s2io_nic *nic)
820 {
821         int i, j, blk_cnt, size;
822         u32 ufo_size = 0;
823         void *tmp_v_addr;
824         dma_addr_t tmp_p_addr;
825         struct mac_info *mac_control;
826         struct config_param *config;
827         int lst_size, lst_per_page;
828         struct net_device *dev;
829         int page_num = 0;
830
831         if (!nic)
832                 return;
833
834         dev = nic->dev;
835
836         mac_control = &nic->mac_control;
837         config = &nic->config;
838
839         lst_size = (sizeof(struct TxD) * config->max_txds);
840         lst_per_page = PAGE_SIZE / lst_size;
841
842         for (i = 0; i < config->tx_fifo_num; i++) {
843                 ufo_size += config->tx_cfg[i].fifo_len;
844                 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
845                                                         lst_per_page);
846                 for (j = 0; j < page_num; j++) {
847                         int mem_blks = (j * lst_per_page);
848                         if (!mac_control->fifos[i].list_info)
849                                 return;
850                         if (!mac_control->fifos[i].list_info[mem_blks].
851                                  list_virt_addr)
852                                 break;
853                         pci_free_consistent(nic->pdev, PAGE_SIZE,
854                                             mac_control->fifos[i].
855                                             list_info[mem_blks].
856                                             list_virt_addr,
857                                             mac_control->fifos[i].
858                                             list_info[mem_blks].
859                                             list_phy_addr);
860                         nic->mac_control.stats_info->sw_stat.mem_freed 
861                                                 += PAGE_SIZE;
862                 }
863                 /* If we got a zero DMA address during allocation,
864                  * free the page now
865                  */
866                 if (mac_control->zerodma_virt_addr) {
867                         pci_free_consistent(nic->pdev, PAGE_SIZE,
868                                             mac_control->zerodma_virt_addr,
869                                             (dma_addr_t)0);
870                         DBG_PRINT(INIT_DBG,
871                                 "%s: Freeing TxDL with zero DMA addr. ",
872                                 dev->name);
873                         DBG_PRINT(INIT_DBG, "Virtual address %p\n",
874                                 mac_control->zerodma_virt_addr);
875                         nic->mac_control.stats_info->sw_stat.mem_freed 
876                                                 += PAGE_SIZE;
877                 }
878                 kfree(mac_control->fifos[i].list_info);
879                 nic->mac_control.stats_info->sw_stat.mem_freed += 
880                 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
881         }
882
883         size = SIZE_OF_BLOCK;
884         for (i = 0; i < config->rx_ring_num; i++) {
885                 blk_cnt = mac_control->rings[i].block_count;
886                 for (j = 0; j < blk_cnt; j++) {
887                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
888                                 block_virt_addr;
889                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
890                                 block_dma_addr;
891                         if (tmp_v_addr == NULL)
892                                 break;
893                         pci_free_consistent(nic->pdev, size,
894                                             tmp_v_addr, tmp_p_addr);
895                         nic->mac_control.stats_info->sw_stat.mem_freed += size;
896                         kfree(mac_control->rings[i].rx_blocks[j].rxds);
897                         nic->mac_control.stats_info->sw_stat.mem_freed += 
898                         ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
899                 }
900         }
901
902         if (nic->rxd_mode == RXD_MODE_3B) {
903                 /* Freeing buffer storage addresses in 2BUFF mode. */
904                 for (i = 0; i < config->rx_ring_num; i++) {
905                         blk_cnt = config->rx_cfg[i].num_rxd /
906                             (rxd_count[nic->rxd_mode] + 1);
907                         for (j = 0; j < blk_cnt; j++) {
908                                 int k = 0;
909                                 if (!mac_control->rings[i].ba[j])
910                                         continue;
911                                 while (k != rxd_count[nic->rxd_mode]) {
912                                         struct buffAdd *ba =
913                                                 &mac_control->rings[i].ba[j][k];
914                                         kfree(ba->ba_0_org);
915                                         nic->mac_control.stats_info->sw_stat.\
916                                         mem_freed += (BUF0_LEN + ALIGN_SIZE);
917                                         kfree(ba->ba_1_org);
918                                         nic->mac_control.stats_info->sw_stat.\
919                                         mem_freed += (BUF1_LEN + ALIGN_SIZE);
920                                         k++;
921                                 }
922                                 kfree(mac_control->rings[i].ba[j]);
923                                 nic->mac_control.stats_info->sw_stat.mem_freed +=
924                                         (sizeof(struct buffAdd) *
925                                         (rxd_count[nic->rxd_mode] + 1));
926                         }
927                         kfree(mac_control->rings[i].ba);
928                         nic->mac_control.stats_info->sw_stat.mem_freed += 
929                         (sizeof(struct buffAdd *) * blk_cnt);
930                 }
931         }
932
933         if (mac_control->stats_mem) {
934                 pci_free_consistent(nic->pdev,
935                                     mac_control->stats_mem_sz,
936                                     mac_control->stats_mem,
937                                     mac_control->stats_mem_phy);
938                 nic->mac_control.stats_info->sw_stat.mem_freed += 
939                         mac_control->stats_mem_sz;
940         }
941         if (nic->ufo_in_band_v) {
942                 kfree(nic->ufo_in_band_v);
943                 nic->mac_control.stats_info->sw_stat.mem_freed 
944                         += (ufo_size * sizeof(u64));
945         }
946 }
947
948 /**
949  * s2io_verify_pci_mode -
950  */
951
952 static int s2io_verify_pci_mode(struct s2io_nic *nic)
953 {
954         struct XENA_dev_config __iomem *bar0 = nic->bar0;
955         register u64 val64 = 0;
956         int     mode;
957
958         val64 = readq(&bar0->pci_mode);
959         mode = (u8)GET_PCI_MODE(val64);
960
961         if ( val64 & PCI_MODE_UNKNOWN_MODE)
962                 return -1;      /* Unknown PCI mode */
963         return mode;
964 }
965
966 #define NEC_VENID   0x1033
967 #define NEC_DEVID   0x0125
968 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
969 {
970         struct pci_dev *tdev = NULL;
971         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
972                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
973                         if (tdev->bus == s2io_pdev->bus->parent)
974                                 pci_dev_put(tdev);
975                                 return 1;
976                 }
977         }
978         return 0;
979 }
980
/* Bus clock in MHz, indexed by the mode value from GET_PCI_MODE()
 * (see s2io_print_pci_mode(); M2 modes report the doubled data rate). */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
982 /**
983  * s2io_print_pci_mode -
984  */
985 static int s2io_print_pci_mode(struct s2io_nic *nic)
986 {
987         struct XENA_dev_config __iomem *bar0 = nic->bar0;
988         register u64 val64 = 0;
989         int     mode;
990         struct config_param *config = &nic->config;
991
992         val64 = readq(&bar0->pci_mode);
993         mode = (u8)GET_PCI_MODE(val64);
994
995         if ( val64 & PCI_MODE_UNKNOWN_MODE)
996                 return -1;      /* Unknown PCI mode */
997
998         config->bus_speed = bus_speed[mode];
999
1000         if (s2io_on_nec_bridge(nic->pdev)) {
1001                 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1002                                                         nic->dev->name);
1003                 return mode;
1004         }
1005
1006         if (val64 & PCI_MODE_32_BITS) {
1007                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1008         } else {
1009                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1010         }
1011
1012         switch(mode) {
1013                 case PCI_MODE_PCI_33:
1014                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1015                         break;
1016                 case PCI_MODE_PCI_66:
1017                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1018                         break;
1019                 case PCI_MODE_PCIX_M1_66:
1020                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1021                         break;
1022                 case PCI_MODE_PCIX_M1_100:
1023                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1024                         break;
1025                 case PCI_MODE_PCIX_M1_133:
1026                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1027                         break;
1028                 case PCI_MODE_PCIX_M2_66:
1029                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1030                         break;
1031                 case PCI_MODE_PCIX_M2_100:
1032                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1033                         break;
1034                 case PCI_MODE_PCIX_M2_133:
1035                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1036                         break;
1037                 default:
1038                         return -1;      /* Unsupported bus speed */
1039         }
1040
1041         return mode;
1042 }
1043
1044 /**
1045  *  init_nic - Initialization of hardware
1046  *  @nic: device peivate variable
1047  *  Description: The function sequentially configures every block
1048  *  of the H/W from their reset values.
1049  *  Return Value:  SUCCESS on success and
1050  *  '-1' on failure (endian settings incorrect).
1051  */
1052
1053 static int init_nic(struct s2io_nic *nic)
1054 {
1055         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1056         struct net_device *dev = nic->dev;
1057         register u64 val64 = 0;
1058         void __iomem *add;
1059         u32 time;
1060         int i, j;
1061         struct mac_info *mac_control;
1062         struct config_param *config;
1063         int dtx_cnt = 0;
1064         unsigned long long mem_share;
1065         int mem_size;
1066
1067         mac_control = &nic->mac_control;
1068         config = &nic->config;
1069
1070         /* to set the swapper controle on the card */
1071         if(s2io_set_swapper(nic)) {
1072                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1073                 return -1;
1074         }
1075
1076         /*
1077          * Herc requires EOI to be removed from reset before XGXS, so..
1078          */
1079         if (nic->device_type & XFRAME_II_DEVICE) {
1080                 val64 = 0xA500000000ULL;
1081                 writeq(val64, &bar0->sw_reset);
1082                 msleep(500);
1083                 val64 = readq(&bar0->sw_reset);
1084         }
1085
1086         /* Remove XGXS from reset state */
1087         val64 = 0;
1088         writeq(val64, &bar0->sw_reset);
1089         msleep(500);
1090         val64 = readq(&bar0->sw_reset);
1091
1092         /*  Enable Receiving broadcasts */
1093         add = &bar0->mac_cfg;
1094         val64 = readq(&bar0->mac_cfg);
1095         val64 |= MAC_RMAC_BCAST_ENABLE;
1096         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1097         writel((u32) val64, add);
1098         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1099         writel((u32) (val64 >> 32), (add + 4));
1100
1101         /* Read registers in all blocks */
1102         val64 = readq(&bar0->mac_int_mask);
1103         val64 = readq(&bar0->mc_int_mask);
1104         val64 = readq(&bar0->xgxs_int_mask);
1105
1106         /*  Set MTU */
1107         val64 = dev->mtu;
1108         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1109
1110         if (nic->device_type & XFRAME_II_DEVICE) {
1111                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1112                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1113                                           &bar0->dtx_control, UF);
1114                         if (dtx_cnt & 0x1)
1115                                 msleep(1); /* Necessary!! */
1116                         dtx_cnt++;
1117                 }
1118         } else {
1119                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1120                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1121                                           &bar0->dtx_control, UF);
1122                         val64 = readq(&bar0->dtx_control);
1123                         dtx_cnt++;
1124                 }
1125         }
1126
1127         /*  Tx DMA Initialization */
1128         val64 = 0;
1129         writeq(val64, &bar0->tx_fifo_partition_0);
1130         writeq(val64, &bar0->tx_fifo_partition_1);
1131         writeq(val64, &bar0->tx_fifo_partition_2);
1132         writeq(val64, &bar0->tx_fifo_partition_3);
1133
1134
1135         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1136                 val64 |=
1137                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1138                          13) | vBIT(config->tx_cfg[i].fifo_priority,
1139                                     ((i * 32) + 5), 3);
1140
1141                 if (i == (config->tx_fifo_num - 1)) {
1142                         if (i % 2 == 0)
1143                                 i++;
1144                 }
1145
1146                 switch (i) {
1147                 case 1:
1148                         writeq(val64, &bar0->tx_fifo_partition_0);
1149                         val64 = 0;
1150                         break;
1151                 case 3:
1152                         writeq(val64, &bar0->tx_fifo_partition_1);
1153                         val64 = 0;
1154                         break;
1155                 case 5:
1156                         writeq(val64, &bar0->tx_fifo_partition_2);
1157                         val64 = 0;
1158                         break;
1159                 case 7:
1160                         writeq(val64, &bar0->tx_fifo_partition_3);
1161                         break;
1162                 }
1163         }
1164
1165         /*
1166          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1167          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1168          */
1169         if ((nic->device_type == XFRAME_I_DEVICE) &&
1170                 (nic->pdev->revision < 4))
1171                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1172
1173         val64 = readq(&bar0->tx_fifo_partition_0);
1174         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1175                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1176
1177         /*
1178          * Initialization of Tx_PA_CONFIG register to ignore packet
1179          * integrity checking.
1180          */
1181         val64 = readq(&bar0->tx_pa_cfg);
1182         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1183             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1184         writeq(val64, &bar0->tx_pa_cfg);
1185
1186         /* Rx DMA intialization. */
1187         val64 = 0;
1188         for (i = 0; i < config->rx_ring_num; i++) {
1189                 val64 |=
1190                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1191                          3);
1192         }
1193         writeq(val64, &bar0->rx_queue_priority);
1194
1195         /*
1196          * Allocating equal share of memory to all the
1197          * configured Rings.
1198          */
1199         val64 = 0;
1200         if (nic->device_type & XFRAME_II_DEVICE)
1201                 mem_size = 32;
1202         else
1203                 mem_size = 64;
1204
1205         for (i = 0; i < config->rx_ring_num; i++) {
1206                 switch (i) {
1207                 case 0:
1208                         mem_share = (mem_size / config->rx_ring_num +
1209                                      mem_size % config->rx_ring_num);
1210                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1211                         continue;
1212                 case 1:
1213                         mem_share = (mem_size / config->rx_ring_num);
1214                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1215                         continue;
1216                 case 2:
1217                         mem_share = (mem_size / config->rx_ring_num);
1218                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1219                         continue;
1220                 case 3:
1221                         mem_share = (mem_size / config->rx_ring_num);
1222                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1223                         continue;
1224                 case 4:
1225                         mem_share = (mem_size / config->rx_ring_num);
1226                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1227                         continue;
1228                 case 5:
1229                         mem_share = (mem_size / config->rx_ring_num);
1230                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1231                         continue;
1232                 case 6:
1233                         mem_share = (mem_size / config->rx_ring_num);
1234                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1235                         continue;
1236                 case 7:
1237                         mem_share = (mem_size / config->rx_ring_num);
1238                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1239                         continue;
1240                 }
1241         }
1242         writeq(val64, &bar0->rx_queue_cfg);
1243
1244         /*
1245          * Filling Tx round robin registers
1246          * as per the number of FIFOs
1247          */
1248         switch (config->tx_fifo_num) {
1249         case 1:
1250                 val64 = 0x0000000000000000ULL;
1251                 writeq(val64, &bar0->tx_w_round_robin_0);
1252                 writeq(val64, &bar0->tx_w_round_robin_1);
1253                 writeq(val64, &bar0->tx_w_round_robin_2);
1254                 writeq(val64, &bar0->tx_w_round_robin_3);
1255                 writeq(val64, &bar0->tx_w_round_robin_4);
1256                 break;
1257         case 2:
1258                 val64 = 0x0000010000010000ULL;
1259                 writeq(val64, &bar0->tx_w_round_robin_0);
1260                 val64 = 0x0100000100000100ULL;
1261                 writeq(val64, &bar0->tx_w_round_robin_1);
1262                 val64 = 0x0001000001000001ULL;
1263                 writeq(val64, &bar0->tx_w_round_robin_2);
1264                 val64 = 0x0000010000010000ULL;
1265                 writeq(val64, &bar0->tx_w_round_robin_3);
1266                 val64 = 0x0100000000000000ULL;
1267                 writeq(val64, &bar0->tx_w_round_robin_4);
1268                 break;
1269         case 3:
1270                 val64 = 0x0001000102000001ULL;
1271                 writeq(val64, &bar0->tx_w_round_robin_0);
1272                 val64 = 0x0001020000010001ULL;
1273                 writeq(val64, &bar0->tx_w_round_robin_1);
1274                 val64 = 0x0200000100010200ULL;
1275                 writeq(val64, &bar0->tx_w_round_robin_2);
1276                 val64 = 0x0001000102000001ULL;
1277                 writeq(val64, &bar0->tx_w_round_robin_3);
1278                 val64 = 0x0001020000000000ULL;
1279                 writeq(val64, &bar0->tx_w_round_robin_4);
1280                 break;
1281         case 4:
1282                 val64 = 0x0001020300010200ULL;
1283                 writeq(val64, &bar0->tx_w_round_robin_0);
1284                 val64 = 0x0100000102030001ULL;
1285                 writeq(val64, &bar0->tx_w_round_robin_1);
1286                 val64 = 0x0200010000010203ULL;
1287                 writeq(val64, &bar0->tx_w_round_robin_2);
1288                 val64 = 0x0001020001000001ULL;
1289                 writeq(val64, &bar0->tx_w_round_robin_3);
1290                 val64 = 0x0203000100000000ULL;
1291                 writeq(val64, &bar0->tx_w_round_robin_4);
1292                 break;
1293         case 5:
1294                 val64 = 0x0001000203000102ULL;
1295                 writeq(val64, &bar0->tx_w_round_robin_0);
1296                 val64 = 0x0001020001030004ULL;
1297                 writeq(val64, &bar0->tx_w_round_robin_1);
1298                 val64 = 0x0001000203000102ULL;
1299                 writeq(val64, &bar0->tx_w_round_robin_2);
1300                 val64 = 0x0001020001030004ULL;
1301                 writeq(val64, &bar0->tx_w_round_robin_3);
1302                 val64 = 0x0001000000000000ULL;
1303                 writeq(val64, &bar0->tx_w_round_robin_4);
1304                 break;
1305         case 6:
1306                 val64 = 0x0001020304000102ULL;
1307                 writeq(val64, &bar0->tx_w_round_robin_0);
1308                 val64 = 0x0304050001020001ULL;
1309                 writeq(val64, &bar0->tx_w_round_robin_1);
1310                 val64 = 0x0203000100000102ULL;
1311                 writeq(val64, &bar0->tx_w_round_robin_2);
1312                 val64 = 0x0304000102030405ULL;
1313                 writeq(val64, &bar0->tx_w_round_robin_3);
1314                 val64 = 0x0001000200000000ULL;
1315                 writeq(val64, &bar0->tx_w_round_robin_4);
1316                 break;
1317         case 7:
1318                 val64 = 0x0001020001020300ULL;
1319                 writeq(val64, &bar0->tx_w_round_robin_0);
1320                 val64 = 0x0102030400010203ULL;
1321                 writeq(val64, &bar0->tx_w_round_robin_1);
1322                 val64 = 0x0405060001020001ULL;
1323                 writeq(val64, &bar0->tx_w_round_robin_2);
1324                 val64 = 0x0304050000010200ULL;
1325                 writeq(val64, &bar0->tx_w_round_robin_3);
1326                 val64 = 0x0102030000000000ULL;
1327                 writeq(val64, &bar0->tx_w_round_robin_4);
1328                 break;
1329         case 8:
1330                 val64 = 0x0001020300040105ULL;
1331                 writeq(val64, &bar0->tx_w_round_robin_0);
1332                 val64 = 0x0200030106000204ULL;
1333                 writeq(val64, &bar0->tx_w_round_robin_1);
1334                 val64 = 0x0103000502010007ULL;
1335                 writeq(val64, &bar0->tx_w_round_robin_2);
1336                 val64 = 0x0304010002060500ULL;
1337                 writeq(val64, &bar0->tx_w_round_robin_3);
1338                 val64 = 0x0103020400000000ULL;
1339                 writeq(val64, &bar0->tx_w_round_robin_4);
1340                 break;
1341         }
1342
1343         /* Enable all configured Tx FIFO partitions */
1344         val64 = readq(&bar0->tx_fifo_partition_0);
1345         val64 |= (TX_FIFO_PARTITION_EN);
1346         writeq(val64, &bar0->tx_fifo_partition_0);
1347
1348         /* Filling the Rx round robin registers as per the
1349          * number of Rings and steering based on QoS.
1350          */
1351         switch (config->rx_ring_num) {
1352         case 1:
1353                 val64 = 0x8080808080808080ULL;
1354                 writeq(val64, &bar0->rts_qos_steering);
1355                 break;
1356         case 2:
1357                 val64 = 0x0000010000010000ULL;
1358                 writeq(val64, &bar0->rx_w_round_robin_0);
1359                 val64 = 0x0100000100000100ULL;
1360                 writeq(val64, &bar0->rx_w_round_robin_1);
1361                 val64 = 0x0001000001000001ULL;
1362                 writeq(val64, &bar0->rx_w_round_robin_2);
1363                 val64 = 0x0000010000010000ULL;
1364                 writeq(val64, &bar0->rx_w_round_robin_3);
1365                 val64 = 0x0100000000000000ULL;
1366                 writeq(val64, &bar0->rx_w_round_robin_4);
1367
1368                 val64 = 0x8080808040404040ULL;
1369                 writeq(val64, &bar0->rts_qos_steering);
1370                 break;
1371         case 3:
1372                 val64 = 0x0001000102000001ULL;
1373                 writeq(val64, &bar0->rx_w_round_robin_0);
1374                 val64 = 0x0001020000010001ULL;
1375                 writeq(val64, &bar0->rx_w_round_robin_1);
1376                 val64 = 0x0200000100010200ULL;
1377                 writeq(val64, &bar0->rx_w_round_robin_2);
1378                 val64 = 0x0001000102000001ULL;
1379                 writeq(val64, &bar0->rx_w_round_robin_3);
1380                 val64 = 0x0001020000000000ULL;
1381                 writeq(val64, &bar0->rx_w_round_robin_4);
1382
1383                 val64 = 0x8080804040402020ULL;
1384                 writeq(val64, &bar0->rts_qos_steering);
1385                 break;
1386         case 4:
1387                 val64 = 0x0001020300010200ULL;
1388                 writeq(val64, &bar0->rx_w_round_robin_0);
1389                 val64 = 0x0100000102030001ULL;
1390                 writeq(val64, &bar0->rx_w_round_robin_1);
1391                 val64 = 0x0200010000010203ULL;
1392                 writeq(val64, &bar0->rx_w_round_robin_2);
1393                 val64 = 0x0001020001000001ULL;
1394                 writeq(val64, &bar0->rx_w_round_robin_3);
1395                 val64 = 0x0203000100000000ULL;
1396                 writeq(val64, &bar0->rx_w_round_robin_4);
1397
1398                 val64 = 0x8080404020201010ULL;
1399                 writeq(val64, &bar0->rts_qos_steering);
1400                 break;
1401         case 5:
1402                 val64 = 0x0001000203000102ULL;
1403                 writeq(val64, &bar0->rx_w_round_robin_0);
1404                 val64 = 0x0001020001030004ULL;
1405                 writeq(val64, &bar0->rx_w_round_robin_1);
1406                 val64 = 0x0001000203000102ULL;
1407                 writeq(val64, &bar0->rx_w_round_robin_2);
1408                 val64 = 0x0001020001030004ULL;
1409                 writeq(val64, &bar0->rx_w_round_robin_3);
1410                 val64 = 0x0001000000000000ULL;
1411                 writeq(val64, &bar0->rx_w_round_robin_4);
1412
1413                 val64 = 0x8080404020201008ULL;
1414                 writeq(val64, &bar0->rts_qos_steering);
1415                 break;
1416         case 6:
1417                 val64 = 0x0001020304000102ULL;
1418                 writeq(val64, &bar0->rx_w_round_robin_0);
1419                 val64 = 0x0304050001020001ULL;
1420                 writeq(val64, &bar0->rx_w_round_robin_1);
1421                 val64 = 0x0203000100000102ULL;
1422                 writeq(val64, &bar0->rx_w_round_robin_2);
1423                 val64 = 0x0304000102030405ULL;
1424                 writeq(val64, &bar0->rx_w_round_robin_3);
1425                 val64 = 0x0001000200000000ULL;
1426                 writeq(val64, &bar0->rx_w_round_robin_4);
1427
1428                 val64 = 0x8080404020100804ULL;
1429                 writeq(val64, &bar0->rts_qos_steering);
1430                 break;
1431         case 7:
1432                 val64 = 0x0001020001020300ULL;
1433                 writeq(val64, &bar0->rx_w_round_robin_0);
1434                 val64 = 0x0102030400010203ULL;
1435                 writeq(val64, &bar0->rx_w_round_robin_1);
1436                 val64 = 0x0405060001020001ULL;
1437                 writeq(val64, &bar0->rx_w_round_robin_2);
1438                 val64 = 0x0304050000010200ULL;
1439                 writeq(val64, &bar0->rx_w_round_robin_3);
1440                 val64 = 0x0102030000000000ULL;
1441                 writeq(val64, &bar0->rx_w_round_robin_4);
1442
1443                 val64 = 0x8080402010080402ULL;
1444                 writeq(val64, &bar0->rts_qos_steering);
1445                 break;
1446         case 8:
1447                 val64 = 0x0001020300040105ULL;
1448                 writeq(val64, &bar0->rx_w_round_robin_0);
1449                 val64 = 0x0200030106000204ULL;
1450                 writeq(val64, &bar0->rx_w_round_robin_1);
1451                 val64 = 0x0103000502010007ULL;
1452                 writeq(val64, &bar0->rx_w_round_robin_2);
1453                 val64 = 0x0304010002060500ULL;
1454                 writeq(val64, &bar0->rx_w_round_robin_3);
1455                 val64 = 0x0103020400000000ULL;
1456                 writeq(val64, &bar0->rx_w_round_robin_4);
1457
1458                 val64 = 0x8040201008040201ULL;
1459                 writeq(val64, &bar0->rts_qos_steering);
1460                 break;
1461         }
1462
1463         /* UDP Fix */
1464         val64 = 0;
1465         for (i = 0; i < 8; i++)
1466                 writeq(val64, &bar0->rts_frm_len_n[i]);
1467
1468         /* Set the default rts frame length for the rings configured */
1469         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1470         for (i = 0 ; i < config->rx_ring_num ; i++)
1471                 writeq(val64, &bar0->rts_frm_len_n[i]);
1472
1473         /* Set the frame length for the configured rings
1474          * desired by the user
1475          */
1476         for (i = 0; i < config->rx_ring_num; i++) {
1477                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1478                  * specified frame length steering.
1479                  * If the user provides the frame length then program
1480                  * the rts_frm_len register for those values or else
1481                  * leave it as it is.
1482                  */
1483                 if (rts_frm_len[i] != 0) {
1484                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1485                                 &bar0->rts_frm_len_n[i]);
1486                 }
1487         }
1488         
1489         /* Disable differentiated services steering logic */
1490         for (i = 0; i < 64; i++) {
1491                 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1492                         DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1493                                 dev->name);
1494                         DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1495                         return FAILURE;
1496                 }
1497         }
1498
1499         /* Program statistics memory */
1500         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1501
1502         if (nic->device_type == XFRAME_II_DEVICE) {
1503                 val64 = STAT_BC(0x320);
1504                 writeq(val64, &bar0->stat_byte_cnt);
1505         }
1506
1507         /*
1508          * Initializing the sampling rate for the device to calculate the
1509          * bandwidth utilization.
1510          */
1511         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1512             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1513         writeq(val64, &bar0->mac_link_util);
1514
1515
1516         /*
1517          * Initializing the Transmit and Receive Traffic Interrupt
1518          * Scheme.
1519          */
1520         /*
1521          * TTI Initialization. Default Tx timer gets us about
1522          * 250 interrupts per sec. Continuous interrupts are enabled
1523          * by default.
1524          */
1525         if (nic->device_type == XFRAME_II_DEVICE) {
1526                 int count = (nic->config.bus_speed * 125)/2;
1527                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1528         } else {
1529
1530                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1531         }
1532         val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1533             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1534             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1535                 if (use_continuous_tx_intrs)
1536                         val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1537         writeq(val64, &bar0->tti_data1_mem);
1538
1539         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1540             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1541             TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1542         writeq(val64, &bar0->tti_data2_mem);
1543
1544         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1545         writeq(val64, &bar0->tti_command_mem);
1546
1547         /*
1548          * Once the operation completes, the Strobe bit of the command
1549          * register will be reset. We poll for this particular condition
1550          * We wait for a maximum of 500ms for the operation to complete,
1551          * if it's not complete by then we return error.
1552          */
1553         time = 0;
1554         while (TRUE) {
1555                 val64 = readq(&bar0->tti_command_mem);
1556                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1557                         break;
1558                 }
1559                 if (time > 10) {
1560                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1561                                   dev->name);
1562                         return -1;
1563                 }
1564                 msleep(50);
1565                 time++;
1566         }
1567
1568         if (nic->config.bimodal) {
1569                 int k = 0;
1570                 for (k = 0; k < config->rx_ring_num; k++) {
1571                         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1572                         val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1573                         writeq(val64, &bar0->tti_command_mem);
1574
1575                 /*
1576                  * Once the operation completes, the Strobe bit of the command
1577                  * register will be reset. We poll for this particular condition
1578                  * We wait for a maximum of 500ms for the operation to complete,
1579                  * if it's not complete by then we return error.
1580                 */
1581                         time = 0;
1582                         while (TRUE) {
1583                                 val64 = readq(&bar0->tti_command_mem);
1584                                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1585                                         break;
1586                                 }
1587                                 if (time > 10) {
1588                                         DBG_PRINT(ERR_DBG,
1589                                                 "%s: TTI init Failed\n",
1590                                         dev->name);
1591                                         return -1;
1592                                 }
1593                                 time++;
1594                                 msleep(50);
1595                         }
1596                 }
1597         } else {
1598
1599                 /* RTI Initialization */
1600                 if (nic->device_type == XFRAME_II_DEVICE) {
1601                         /*
1602                          * Programmed to generate Apprx 500 Intrs per
1603                          * second
1604                          */
1605                         int count = (nic->config.bus_speed * 125)/4;
1606                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1607                 } else {
1608                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1609                 }
1610                 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1611                     RTI_DATA1_MEM_RX_URNG_B(0x10) |
1612                     RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1613
1614                 writeq(val64, &bar0->rti_data1_mem);
1615
1616                 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1617                     RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1618                 if (nic->config.intr_type == MSI_X)
1619                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1620                                 RTI_DATA2_MEM_RX_UFC_D(0x40));
1621                 else
1622                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1623                                 RTI_DATA2_MEM_RX_UFC_D(0x80));
1624                 writeq(val64, &bar0->rti_data2_mem);
1625
1626                 for (i = 0; i < config->rx_ring_num; i++) {
1627                         val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1628                                         | RTI_CMD_MEM_OFFSET(i);
1629                         writeq(val64, &bar0->rti_command_mem);
1630
1631                         /*
1632                          * Once the operation completes, the Strobe bit of the
1633                          * command register will be reset. We poll for this
1634                          * particular condition. We wait for a maximum of 500ms
1635                          * for the operation to complete, if it's not complete
1636                          * by then we return error.
1637                          */
1638                         time = 0;
1639                         while (TRUE) {
1640                                 val64 = readq(&bar0->rti_command_mem);
1641                                 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1642                                         break;
1643                                 }
1644                                 if (time > 10) {
1645                                         DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1646                                                   dev->name);
1647                                         return -1;
1648                                 }
1649                                 time++;
1650                                 msleep(50);
1651                         }
1652                 }
1653         }
1654
1655         /*
1656          * Initializing proper values as Pause threshold into all
1657          * the 8 Queues on Rx side.
1658          */
1659         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1660         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1661
1662         /* Disable RMAC PAD STRIPPING */
1663         add = &bar0->mac_cfg;
1664         val64 = readq(&bar0->mac_cfg);
1665         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1666         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1667         writel((u32) (val64), add);
1668         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1669         writel((u32) (val64 >> 32), (add + 4));
1670         val64 = readq(&bar0->mac_cfg);
1671
1672         /* Enable FCS stripping by adapter */
1673         add = &bar0->mac_cfg;
1674         val64 = readq(&bar0->mac_cfg);
1675         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1676         if (nic->device_type == XFRAME_II_DEVICE)
1677                 writeq(val64, &bar0->mac_cfg);
1678         else {
1679                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1680                 writel((u32) (val64), add);
1681                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1682                 writel((u32) (val64 >> 32), (add + 4));
1683         }
1684
1685         /*
1686          * Set the time value to be inserted in the pause frame
1687          * generated by xena.
1688          */
1689         val64 = readq(&bar0->rmac_pause_cfg);
1690         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1691         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1692         writeq(val64, &bar0->rmac_pause_cfg);
1693
1694         /*
1695          * Set the Threshold Limit for Generating the pause frame
1696          * If the amount of data in any Queue exceeds ratio of
1697          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1698          * pause frame is generated
1699          */
1700         val64 = 0;
1701         for (i = 0; i < 4; i++) {
1702                 val64 |=
1703                     (((u64) 0xFF00 | nic->mac_control.
1704                       mc_pause_threshold_q0q3)
1705                      << (i * 2 * 8));
1706         }
1707         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1708
1709         val64 = 0;
1710         for (i = 0; i < 4; i++) {
1711                 val64 |=
1712                     (((u64) 0xFF00 | nic->mac_control.
1713                       mc_pause_threshold_q4q7)
1714                      << (i * 2 * 8));
1715         }
1716         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1717
1718         /*
1719          * TxDMA will stop Read request if the number of read split has
1720          * exceeded the limit pointed by shared_splits
1721          */
1722         val64 = readq(&bar0->pic_control);
1723         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1724         writeq(val64, &bar0->pic_control);
1725
1726         if (nic->config.bus_speed == 266) {
1727                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1728                 writeq(0x0, &bar0->read_retry_delay);
1729                 writeq(0x0, &bar0->write_retry_delay);
1730         }
1731
1732         /*
1733          * Programming the Herc to split every write transaction
1734          * that does not start on an ADB to reduce disconnects.
1735          */
1736         if (nic->device_type == XFRAME_II_DEVICE) {
1737                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1738                         MISC_LINK_STABILITY_PRD(3);
1739                 writeq(val64, &bar0->misc_control);
1740                 val64 = readq(&bar0->pic_control2);
1741                 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1742                 writeq(val64, &bar0->pic_control2);
1743         }
1744         if (strstr(nic->product_name, "CX4")) {
1745                 val64 = TMAC_AVG_IPG(0x17);
1746                 writeq(val64, &bar0->tmac_avg_ipg);
1747         }
1748
1749         return SUCCESS;
1750 }
1751 #define LINK_UP_DOWN_INTERRUPT          1
1752 #define MAC_RMAC_ERR_TIMER              2
1753
1754 static int s2io_link_fault_indication(struct s2io_nic *nic)
1755 {
1756         if (nic->config.intr_type != INTA)
1757                 return MAC_RMAC_ERR_TIMER;
1758         if (nic->device_type == XFRAME_II_DEVICE)
1759                 return LINK_UP_DOWN_INTERRUPT;
1760         else
1761                 return MAC_RMAC_ERR_TIMER;
1762 }
1763
1764 /**
1765  *  do_s2io_write_bits -  update alarm bits in alarm register
1766  *  @value: alarm bits
1767  *  @flag: interrupt status
1768  *  @addr: address value
1769  *  Description: update alarm bits in alarm register
1770  *  Return Value:
1771  *  NONE.
1772  */
1773 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1774 {
1775         u64 temp64;
1776
1777         temp64 = readq(addr);
1778
1779         if(flag == ENABLE_INTRS)
1780                 temp64 &= ~((u64) value);
1781         else
1782                 temp64 |= ((u64) value);
1783         writeq(temp64, addr);
1784 }
1785
/**
 *  en_dis_err_alarms - enable or disable error alarm interrupt sources
 *  @nic: device private variable
 *  @mask: bitmap of alarm blocks to act on (TX_DMA_INTR, TX_MAC_INTR,
 *         TX_XGXS_INTR, RX_DMA_INTR, RX_MAC_INTR, RX_XGXS_INTR, MC_INTR)
 *  @flag: ENABLE_INTRS or DISABLE_INTRS, forwarded to do_s2io_write_bits()
 *  Description: for each selected block, updates that block's alarm mask
 *  registers and accumulates the matching top-level bits into
 *  nic->general_int_mask. The accumulated mask is currently forced to 0
 *  at the end (alarm interrupts not yet enabled — see trailing comment).
 *  Return Value: NONE.
 */
void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;

	/* Tx DMA block: TDA/PFC/PCC/TTI/LSO/TPA/SM alarm sources */
	if (mask & TX_DMA_INTR) {

		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				TXDMA_PCC_INT | TXDMA_TTI_INT |
				TXDMA_LSO_INT | TXDMA_TPA_INT |
				TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				&bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				PCC_N_SERR | PCC_6_COF_OV_ERR |
				PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);

	}

	/* Tx MAC block: TMAC buffer/state-machine/ECC alarms */
	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				&bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				flag, &bar0->mac_tmac_err_mask);
	}

	/* Tx XGXS (serdes) block alarms */
	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				&bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				flag, &bar0->xgxs_txgxs_err_mask);
	}

	/* Rx DMA block: RC/PRC/RPA/RDA/RTI alarm sources */
	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				&bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				&bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
				flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				flag, &bar0->rti_err_mask);
	}

	/* Rx MAC block: RMAC overruns, ECC and link-state-change alarms */
	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				&bar0->mac_int_mask);
		do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				RMAC_DOUBLE_ECC_ERR |
				RMAC_LINK_STATE_CHANGE_INT,
				flag, &bar0->mac_rmac_err_mask);
	}

	/* Rx XGXS (serdes) block alarms */
	if (mask & RX_XGXS_INTR)
	{
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				&bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				&bar0->xgxs_rxgxs_err_mask);
	}

	/* Memory controller: state machine, ECC and PLL-lock alarms */
	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				&bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}
1908 /**
1909  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1910  *  @nic: device private variable,
1911  *  @mask: A mask indicating which Intr block must be modified and,
1912  *  @flag: A flag indicating whether to enable or disable the Intrs.
1913  *  Description: This function will either disable or enable the interrupts
1914  *  depending on the flag argument. The mask argument can be used to
1915  *  enable/disable any Intr block.
1916  *  Return Value: NONE.
1917  */
1918
1919 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1920 {
1921         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1922         register u64 temp64 = 0, intr_mask = 0;
1923
1924         intr_mask = nic->general_int_mask;
1925
1926         /*  Top level interrupt classification */
1927         /*  PIC Interrupts */
1928         if (mask & TX_PIC_INTR) {
1929                 /*  Enable PIC Intrs in the general intr mask register */
1930                 intr_mask |= TXPIC_INT_M;
1931                 if (flag == ENABLE_INTRS) {
1932                         /*
1933                          * If Hercules adapter enable GPIO otherwise
1934                          * disable all PCIX, Flash, MDIO, IIC and GPIO
1935                          * interrupts for now.
1936                          * TODO
1937                          */
1938                         if (s2io_link_fault_indication(nic) ==
1939                                         LINK_UP_DOWN_INTERRUPT ) {
1940                                 do_s2io_write_bits(PIC_INT_GPIO, flag,
1941                                                 &bar0->pic_int_mask);
1942                                 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
1943                                                 &bar0->gpio_int_mask);
1944                         } else
1945                                 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1946                 } else if (flag == DISABLE_INTRS) {
1947                         /*
1948                          * Disable PIC Intrs in the general
1949                          * intr mask register
1950                          */
1951                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1952                 }
1953         }
1954
1955         /*  Tx traffic interrupts */
1956         if (mask & TX_TRAFFIC_INTR) {
1957                 intr_mask |= TXTRAFFIC_INT_M;
1958                 if (flag == ENABLE_INTRS) {
1959                         /*
1960                          * Enable all the Tx side interrupts
1961                          * writing 0 Enables all 64 TX interrupt levels
1962                          */
1963                         writeq(0x0, &bar0->tx_traffic_mask);
1964                 } else if (flag == DISABLE_INTRS) {
1965                         /*
1966                          * Disable Tx Traffic Intrs in the general intr mask
1967                          * register.
1968                          */
1969                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1970                 }
1971         }
1972
1973         /*  Rx traffic interrupts */
1974         if (mask & RX_TRAFFIC_INTR) {
1975                 intr_mask |= RXTRAFFIC_INT_M;
1976                 if (flag == ENABLE_INTRS) {
1977                         /* writing 0 Enables all 8 RX interrupt levels */
1978                         writeq(0x0, &bar0->rx_traffic_mask);
1979                 } else if (flag == DISABLE_INTRS) {
1980                         /*
1981                          * Disable Rx Traffic Intrs in the general intr mask
1982                          * register.
1983                          */
1984                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1985                 }
1986         }
1987
1988         temp64 = readq(&bar0->general_int_mask);
1989         if (flag == ENABLE_INTRS)
1990                 temp64 &= ~((u64) intr_mask);
1991         else
1992                 temp64 = DISABLE_ALL_INTRS;
1993         writeq(temp64, &bar0->general_int_mask);
1994
1995         nic->general_int_mask = readq(&bar0->general_int_mask);
1996 }
1997
1998 /**
1999  *  verify_pcc_quiescent- Checks for PCC quiescent state
2000  *  Return: 1 If PCC is quiescence
2001  *          0 If PCC is not quiescence
2002  */
2003 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2004 {
2005         int ret = 0, herc;
2006         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2007         u64 val64 = readq(&bar0->adapter_status);
2008         
2009         herc = (sp->device_type == XFRAME_II_DEVICE);
2010
2011         if (flag == FALSE) {
2012                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2013                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2014                                 ret = 1;
2015                 } else {
2016                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2017                                 ret = 1;
2018                 }
2019         } else {
2020                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2021                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2022                              ADAPTER_STATUS_RMAC_PCC_IDLE))
2023                                 ret = 1;
2024                 } else {
2025                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2026                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2027                                 ret = 1;
2028                 }
2029         }
2030
2031         return ret;
2032 }
2033 /**
2034  *  verify_xena_quiescence - Checks whether the H/W is ready
2035  *  Description: Returns whether the H/W is ready to go or not. Depending
2036  *  on whether adapter enable bit was written or not the comparison
2037  *  differs and the calling function passes the input argument flag to
2038  *  indicate this.
2039  *  Return: 1 If xena is quiescence
2040  *          0 If Xena is not quiescence
2041  */
2042
2043 static int verify_xena_quiescence(struct s2io_nic *sp)
2044 {
2045         int  mode;
2046         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2047         u64 val64 = readq(&bar0->adapter_status);
2048         mode = s2io_verify_pci_mode(sp);
2049
2050         if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2051                 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2052                 return 0;
2053         }
2054         if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2055         DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2056                 return 0;
2057         }
2058         if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2059                 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2060                 return 0;
2061         }
2062         if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2063                 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2064                 return 0;
2065         }
2066         if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2067                 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2068                 return 0;
2069         }
2070         if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2071                 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2072                 return 0;
2073         }
2074         if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2075                 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2076                 return 0;
2077         }
2078         if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2079                 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2080                 return 0;
2081         }
2082
2083         /*
2084          * In PCI 33 mode, the P_PLL is not used, and therefore,
2085          * the the P_PLL_LOCK bit in the adapter_status register will
2086          * not be asserted.
2087          */
2088         if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2089                 sp->device_type == XFRAME_II_DEVICE && mode !=
2090                 PCI_MODE_PCI_33) {
2091                 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2092                 return 0;
2093         }
2094         if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2095                         ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2096                 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2097                 return 0;
2098         }
2099         return 1;
2100 }
2101
2102 /**
2103  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2104  * @sp: Pointer to device specifc structure
2105  * Description :
2106  * New procedure to clear mac address reading  problems on Alpha platforms
2107  *
2108  */
2109
2110 static void fix_mac_address(struct s2io_nic * sp)
2111 {
2112         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2113         u64 val64;
2114         int i = 0;
2115
2116         while (fix_mac[i] != END_SIGN) {
2117                 writeq(fix_mac[i++], &bar0->gpio_control);
2118                 udelay(10);
2119                 val64 = readq(&bar0->gpio_control);
2120         }
2121 }
2122
2123 /**
2124  *  start_nic - Turns the device on
2125  *  @nic : device private variable.
2126  *  Description:
2127  *  This function actually turns the device on. Before this  function is
2128  *  called,all Registers are configured from their reset states
2129  *  and shared memory is allocated but the NIC is still quiescent. On
2130  *  calling this function, the device interrupts are cleared and the NIC is
2131  *  literally switched on by writing into the adapter control register.
2132  *  Return Value:
2133  *  SUCCESS on success and -1 on failure.
2134  */
2135
static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Point the PRC of ring i at the first Rx block's DMA address. */
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->config.bimodal)
			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
		/* Ring mode 3 is used for all multi-buffer RxD modes. */
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Honour the vlan_tag_strip module parameter. */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	/* NOTE(review): the code CLEARS ADAPTER_ECC_EN here while the
	 * comment above says "Enabling" - presumably the bit has
	 * disable semantics in hardware; confirm against the Xframe
	 * register specification. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initially on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
2243 /**
2244  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2245  */
2246 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2247                                         TxD *txdlp, int get_off)
2248 {
2249         struct s2io_nic *nic = fifo_data->nic;
2250         struct sk_buff *skb;
2251         struct TxD *txds;
2252         u16 j, frg_cnt;
2253
2254         txds = txdlp;
2255         if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2256                 pci_unmap_single(nic->pdev, (dma_addr_t)
2257                         txds->Buffer_Pointer, sizeof(u64),
2258                         PCI_DMA_TODEVICE);
2259                 txds++;
2260         }
2261
2262         skb = (struct sk_buff *) ((unsigned long)
2263                         txds->Host_Control);
2264         if (!skb) {
2265                 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2266                 return NULL;
2267         }
2268         pci_unmap_single(nic->pdev, (dma_addr_t)
2269                          txds->Buffer_Pointer,
2270                          skb->len - skb->data_len,
2271                          PCI_DMA_TODEVICE);
2272         frg_cnt = skb_shinfo(skb)->nr_frags;
2273         if (frg_cnt) {
2274                 txds++;
2275                 for (j = 0; j < frg_cnt; j++, txds++) {
2276                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2277                         if (!txds->Buffer_Pointer)
2278                                 break;
2279                         pci_unmap_page(nic->pdev, (dma_addr_t)
2280                                         txds->Buffer_Pointer,
2281                                        frag->size, PCI_DMA_TODEVICE);
2282                 }
2283         }
2284         memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2285         return(skb);
2286 }
2287
2288 /**
2289  *  free_tx_buffers - Free all queued Tx buffers
2290  *  @nic : device private variable.
2291  *  Description:
2292  *  Free all queued Tx buffers.
2293  *  Return Value: void
2294 */
2295
2296 static void free_tx_buffers(struct s2io_nic *nic)
2297 {
2298         struct net_device *dev = nic->dev;
2299         struct sk_buff *skb;
2300         struct TxD *txdp;
2301         int i, j;
2302         struct mac_info *mac_control;
2303         struct config_param *config;
2304         int cnt = 0;
2305
2306         mac_control = &nic->mac_control;
2307         config = &nic->config;
2308
2309         for (i = 0; i < config->tx_fifo_num; i++) {
2310                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2311                         txdp = (struct TxD *) \
2312                         mac_control->fifos[i].list_info[j].list_virt_addr;
2313                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2314                         if (skb) {
2315                                 nic->mac_control.stats_info->sw_stat.mem_freed 
2316                                         += skb->truesize;
2317                                 dev_kfree_skb(skb);
2318                                 cnt++;
2319                         }
2320                 }
2321                 DBG_PRINT(INTR_DBG,
2322                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2323                           dev->name, cnt, i);
2324                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2325                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2326         }
2327 }
2328
2329 /**
2330  *   stop_nic -  To stop the nic
 *   @nic : device private variable.
2332  *   Description:
2333  *   This function does exactly the opposite of what the start_nic()
2334  *   function does. This function is called to stop the device.
2335  *   Return Value:
2336  *   void.
2337  */
2338
2339 static void stop_nic(struct s2io_nic *nic)
2340 {
2341         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2342         register u64 val64 = 0;
2343         u16 interruptible;
2344         struct mac_info *mac_control;
2345         struct config_param *config;
2346
2347         mac_control = &nic->mac_control;
2348         config = &nic->config;
2349
2350         /*  Disable all interrupts */
2351         en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2352         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2353         interruptible |= TX_PIC_INTR;
2354         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2355
2356         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2357         val64 = readq(&bar0->adapter_control);
2358         val64 &= ~(ADAPTER_CNTL_EN);
2359         writeq(val64, &bar0->adapter_control);
2360 }
2361
2362 /**
2363  *  fill_rx_buffers - Allocates the Rx side skbs
2364  *  @nic:  device private variable
2365  *  @ring_no: ring number
2366  *  Description:
2367  *  The function allocates Rx side skbs and puts the physical
2368  *  address of these buffers into the RxD buffer pointers, so that the NIC
2369  *  can DMA the received frame into these locations.
2370  *  The NIC supports 3 receive modes, viz
2371  *  1. single buffer,
2372  *  2. three buffer and
2373  *  3. Five buffer modes.
2374  *  Each mode defines how many fragments the received frame will be split
2375  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2376  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2377  *  is split into 3 fragments. As of now only single buffer mode is
2378  *  supported.
2379  *   Return Value:
2380  *  SUCCESS on success or an appropriate -ve value on failure.
2381  */
2382
static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, off1, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	struct mac_info *mac_control;
	struct config_param *config;
	u64 tmp;
	struct buffAdd *ba;
	unsigned long flags;
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *stats = &nic->mac_control.stats_info->sw_stat;

	mac_control = &nic->mac_control;
	config = &nic->config;
	/* Number of RxDs that still need a buffer posted on this ring. */
	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
	    atomic_read(&nic->rx_bufs_left[ring_no]);

	block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
	off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
	while (alloc_tab < alloc_cnt) {
		block_no = mac_control->rings[ring_no].rx_curr_put_info.
		    block_index;
		off = mac_control->rings[ring_no].rx_curr_put_info.offset;

		rxdp = mac_control->rings[ring_no].
				rx_blocks[block_no].rxds[off].virt_addr;

		/* Put pointer has caught up with get on a descriptor that
		 * still holds an skb: the ring is effectively full. */
		if ((block_no == block_no1) && (off == off1) &&
					(rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				  dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		/* End of a block reached: advance to the next block,
		 * wrapping around the ring when necessary. */
		if (off && (off == rxd_count[nic->rxd_mode])) {
			mac_control->rings[ring_no].rx_curr_put_info.
			    block_index++;
			if (mac_control->rings[ring_no].rx_curr_put_info.
			    block_index == mac_control->rings[ring_no].
					block_count)
				mac_control->rings[ring_no].rx_curr_put_info.
					block_index = 0;
			block_no = mac_control->rings[ring_no].
					rx_curr_put_info.block_index;
			if (off == rxd_count[nic->rxd_mode])
				off = 0;
			mac_control->rings[ring_no].rx_curr_put_info.
				offset = off;
			rxdp = mac_control->rings[ring_no].
				rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  dev->name, rxdp);
		}
		/* put_pos is also read by the Rx interrupt handler; in
		 * non-NAPI mode the two paths can race, so protect the
		 * update with put_lock. Under NAPI they are serialized. */
		if(!napi) {
			spin_lock_irqsave(&nic->put_lock, flags);
			mac_control->rings[ring_no].put_pos =
			(block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
			spin_unlock_irqrestore(&nic->put_lock, flags);
		} else {
			mac_control->rings[ring_no].put_pos =
			(block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
		}
		/* Descriptor still owned by the NIC (3B-mode marker bit
		 * set): nothing more to fill right now. */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
			((nic->rxd_mode == RXD_MODE_3B) &&
				(rxdp->Control_2 & BIT(0)))) {
			mac_control->rings[ring_no].rx_curr_put_info.
					offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (nic->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if(!skb) {
			DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
			DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
			/* Hand any batched descriptors over to the NIC
			 * before bailing out, so they are not stranded. */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			nic->mac_control.stats_info->sw_stat. \
				mem_alloc_fail_cnt++;
			return -ENOMEM ;
		}
		nic->mac_control.stats_info->sw_stat.mem_allocated
			+= skb->truesize;
		if (nic->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1*)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr = pci_map_single
			    (nic->pdev, skb->data, size - NET_IP_ALIGN,
				PCI_DMA_FROMDEVICE);
			if( (rxdp1->Buffer0_ptr == 0) ||
				(rxdp1->Buffer0_ptr ==
				DMA_ERROR_CODE))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);

		} else if (nic->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3*)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &mac_control->rings[ring_no].ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Round skb->data up to the next alignment
			 * boundary for the payload DMA mapping. */
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			/* Map Buffer0 on first use, otherwise just sync
			 * the existing mapping back to the device. */
			if (!(rxdp3->Buffer0_ptr))
				rxdp3->Buffer0_ptr =
				   pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
					   PCI_DMA_FROMDEVICE);
			else
				pci_dma_sync_single_for_device(nic->pdev,
				(dma_addr_t) rxdp3->Buffer0_ptr,
				    BUF0_LEN, PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer0_ptr == 0) ||
				(rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
				goto pci_map_failed;

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (nic->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single
				(nic->pdev, skb->data, dev->mtu + 4,
						PCI_DMA_FROMDEVICE);

				if( (rxdp3->Buffer2_ptr == 0) ||
					(rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
					goto pci_map_failed;

				rxdp3->Buffer1_ptr =
						pci_map_single(nic->pdev,
						ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);
				if( (rxdp3->Buffer1_ptr == 0) ||
					(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
					/* Undo the Buffer2 mapping before
					 * reporting the failure. */
					pci_unmap_single
						(nic->pdev,
						(dma_addr_t)rxdp3->Buffer2_ptr,
						dev->mtu + 4,
						PCI_DMA_FROMDEVICE);
					goto pci_map_failed;
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
								(dev->mtu + 4);
			}
			rxdp->Control_2 |= BIT(0);
		}
		rxdp->Host_Control = (unsigned long) (skb);
		/* Hand ownership to the NIC immediately for most RxDs;
		 * every (1 << rxsync_frequency)-th one is instead batched
		 * via first_rxdp and flipped after a write barrier. */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (rxd_count[nic->rxd_mode] + 1))
			off = 0;
		mac_control->rings[ring_no].rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		atomic_inc(&nic->rx_bufs_left[ring_no]);
		alloc_tab++;
	}

      end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
pci_map_failed:
	stats->pci_map_fail_cnt++;
	stats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2606
/* Free every skb and DMA mapping in one Rx block of a ring. */
static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
	struct net_device *dev = sp->dev;
	int j;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	struct mac_info *mac_control;
	struct buffAdd *ba;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;

	mac_control = &sp->mac_control;
	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
		rxdp = mac_control->rings[ring_no].
				rx_blocks[blk].rxds[j].virt_addr;
		skb = (struct sk_buff *)
			((unsigned long) rxdp->Host_Control);
		if (!skb) {
			/* No buffer posted at this descriptor. */
			continue;
		}
		if (sp->rxd_mode == RXD_MODE_1) {
			rxdp1 = (struct RxD1*)rxdp;
			/* Single-buffer mode: one mapping covering the
			 * full frame (MTU plus L2/LLC/SNAP headers). */
			pci_unmap_single(sp->pdev, (dma_addr_t)
				rxdp1->Buffer0_ptr,
				dev->mtu +
				HEADER_ETHERNET_II_802_3_SIZE
				+ HEADER_802_2_SIZE +
				HEADER_SNAP_SIZE,
				PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD1));
		} else if(sp->rxd_mode == RXD_MODE_3B) {
			rxdp3 = (struct RxD3*)rxdp;
			/* NOTE(review): ba is looked up but not used in
			 * this branch - possibly leftover; confirm. */
			ba = &mac_control->rings[ring_no].
				ba[blk][j];
			/* Two-buffer mode: unmap the two header buffers
			 * and the payload buffer individually. */
			pci_unmap_single(sp->pdev, (dma_addr_t)
				rxdp3->Buffer0_ptr,
				BUF0_LEN,
				PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				rxdp3->Buffer1_ptr,
				BUF1_LEN,
				PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				rxdp3->Buffer2_ptr,
				dev->mtu + 4,
				PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD3));
		}
		sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
		dev_kfree_skb(skb);
		/* Keep the per-ring outstanding-buffer count in sync. */
		atomic_dec(&sp->rx_bufs_left[ring_no]);
	}
}
2660
2661 /**
2662  *  free_rx_buffers - Frees all Rx buffers
2663  *  @sp: device private variable.
2664  *  Description:
2665  *  This function will free all Rx buffers allocated by host.
2666  *  Return Value:
2667  *  NONE.
2668  */
2669
2670 static void free_rx_buffers(struct s2io_nic *sp)
2671 {
2672         struct net_device *dev = sp->dev;
2673         int i, blk = 0, buf_cnt = 0;
2674         struct mac_info *mac_control;
2675         struct config_param *config;
2676
2677         mac_control = &sp->mac_control;
2678         config = &sp->config;
2679
2680         for (i = 0; i < config->rx_ring_num; i++) {
2681                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2682                         free_rxd_blk(sp,i,blk);
2683
2684                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2685                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2686                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2687                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2688                 atomic_set(&sp->rx_bufs_left[i], 0);
2689                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2690                           dev->name, buf_cnt, i);
2691         }
2692 }
2693
2694 /**
2695  * s2io_poll - Rx interrupt handler for NAPI support
2696  * @napi : pointer to the napi structure.
2697  * @budget : The number of packets that were budgeted to be processed
 * during one pass through the "Poll" function.
2699  * Description:
 * Comes into the picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in an interrupt context;
 * it will also process only a given number of packets.
2703  * Return value:
2704  * 0 on success and 1 if there are No Rx packets to be processed.
2705  */
2706
static int s2io_poll(struct napi_struct *napi, int budget)
{
	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
	struct net_device *dev = nic->dev;
	int pkt_cnt = 0, org_pkts_to_process;
	struct mac_info *mac_control;
	struct config_param *config;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int i;

	/* Nothing to do while the card is down. */
	if (!is_s2io_card_up(nic))
		return 0;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* rx_intr_handler() decrements pkts_to_process as it delivers
	 * frames; the difference from the budget is what we processed. */
	nic->pkts_to_process = budget;
	org_pkts_to_process = nic->pkts_to_process;

	/* Acknowledge pending Rx traffic interrupts; the readl flushes
	 * the posted PCI write. */
	writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
	readl(&bar0->rx_traffic_int);

	for (i = 0; i < config->rx_ring_num; i++) {
		rx_intr_handler(&mac_control->rings[i]);
		pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
		if (!nic->pkts_to_process) {
			/* Quota for the current iteration has been met */
			goto no_rx;
		}
	}

	/* All rings drained within budget: leave NAPI polling mode. */
	netif_rx_complete(dev, napi);

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	/* Re enable the Rx interrupts. */
	writeq(0x0, &bar0->rx_traffic_mask);
	readl(&bar0->rx_traffic_mask);
	return pkt_cnt;

no_rx:
	/* Budget exhausted: replenish buffers but keep Rx interrupts
	 * masked; NAPI will schedule another poll. */
	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	return pkt_cnt;
}
2762
2763 #ifdef CONFIG_NET_POLL_CONTROLLER
2764 /**
2765  * s2io_netpoll - netpoll event handler entry point
2766  * @dev : pointer to the device structure.
2767  * Description:
2768  *      This function will be called by upper layer to check for events on the
2769  * interface in situations where interrupts are disabled. It is used for
2770  * specific in-kernel networking tasks, such as remote consoles and kernel
2771  * debugging over the network (example netdump in RedHat).
2772  */
2773 static void s2io_netpoll(struct net_device *dev)
2774 {
2775         struct s2io_nic *nic = dev->priv;
2776         struct mac_info *mac_control;
2777         struct config_param *config;
2778         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2779         u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2780         int i;
2781
2782         if (pci_channel_offline(nic->pdev))
2783                 return;
2784
2785         disable_irq(dev->irq);
2786
2787         mac_control = &nic->mac_control;
2788         config = &nic->config;
2789
2790         writeq(val64, &bar0->rx_traffic_int);
2791         writeq(val64, &bar0->tx_traffic_int);
2792
2793         /* we need to free up the transmitted skbufs or else netpoll will
2794          * run out of skbs and will fail and eventually netpoll application such
2795          * as netdump will fail.
2796          */
2797         for (i = 0; i < config->tx_fifo_num; i++)
2798                 tx_intr_handler(&mac_control->fifos[i]);
2799
2800         /* check for received packet and indicate up to network */
2801         for (i = 0; i < config->rx_ring_num; i++)
2802                 rx_intr_handler(&mac_control->rings[i]);
2803
2804         for (i = 0; i < config->rx_ring_num; i++) {
2805                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2806                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2807                         DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2808                         break;
2809                 }
2810         }
2811         enable_irq(dev->irq);
2812         return;
2813 }
2814 #endif
2815
2816 /**
2817  *  rx_intr_handler - Rx interrupt handler
2818  *  @nic: device private variable.
2819  *  Description:
2820  *  If the interrupt is because of a received frame or if the
2821  *  receive ring contains fresh as yet un-processed frames,this function is
2822  *  called. It picks out the RxD at which place the last Rx processing had
2823  *  stopped and sends the skb to the OSM's Rx handler and then increments
2824  *  the offset.
2825  *  Return Value:
2826  *  NONE.
2827  */
static void rx_intr_handler(struct ring_info *ring_data)
{
        struct s2io_nic *nic = ring_data->nic;
        struct net_device *dev = (struct net_device *) nic->dev;
        int get_block, put_block, put_offset;
        struct rx_curr_get_info get_info, put_info;
        struct RxD_t *rxdp;
        struct sk_buff *skb;
        int pkt_cnt = 0;        /* descriptors handed up this invocation */
        int i;
        struct RxD1* rxdp1;
        struct RxD3* rxdp3;

        /* Serialize against other consumers of this receive ring. */
        spin_lock(&nic->rx_lock);

        /* Snapshot the get (consumer) and put (producer) ring positions. */
        get_info = ring_data->rx_curr_get_info;
        get_block = get_info.block_index;
        memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
        put_block = put_info.block_index;
        rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
        /*
         * Outside NAPI the fill side presumably updates put_pos
         * concurrently, so the read is serialized with put_lock;
         * under NAPI a plain read is used -- TODO confirm the
         * refill path holds put_lock in non-NAPI mode only.
         */
        if (!napi) {
                spin_lock(&nic->put_lock);
                put_offset = ring_data->put_pos;
                spin_unlock(&nic->put_lock);
        } else
                put_offset = ring_data->put_pos;

        /* Walk descriptors the adapter has marked as DMA-complete. */
        while (RXD_IS_UP2DT(rxdp)) {
                /*
                 * If we are next to the put index then it's a
                 * FIFO full condition: stop before colliding
                 * with the producer.
                 */
                if ((get_block == put_block) &&
                    (get_info.offset + 1) == put_info.offset) {
                        DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
                        break;
                }
                /* Host_Control carries the skb pointer stashed at fill time. */
                skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
                if (skb == NULL) {
                        DBG_PRINT(ERR_DBG, "%s: The skb is ",
                                  dev->name);
                        DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
                        spin_unlock(&nic->rx_lock);
                        return;
                }
                /* Undo the DMA mapping set up when the buffer was posted. */
                if (nic->rxd_mode == RXD_MODE_1) {
                        /* One-buffer mode: single mapping covers the frame. */
                        rxdp1 = (struct RxD1*)rxdp;
                        pci_unmap_single(nic->pdev, (dma_addr_t)
                                rxdp1->Buffer0_ptr,
                                dev->mtu +
                                HEADER_ETHERNET_II_802_3_SIZE +
                                HEADER_802_2_SIZE +
                                HEADER_SNAP_SIZE,
                                PCI_DMA_FROMDEVICE);
                } else if (nic->rxd_mode == RXD_MODE_3B) {
                        /* Buffer0 (headers) is synced, Buffer2 (payload) unmapped. */
                        rxdp3 = (struct RxD3*)rxdp;
                        pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
                                rxdp3->Buffer0_ptr,
                                BUF0_LEN, PCI_DMA_FROMDEVICE);
                        pci_unmap_single(nic->pdev, (dma_addr_t)
                                rxdp3->Buffer2_ptr,
                                dev->mtu + 4,
                                PCI_DMA_FROMDEVICE);
                }
                prefetch(skb->data);
                /* Hand the completed frame to the OS-specific Rx path. */
                rx_osm_handler(ring_data, rxdp);
                get_info.offset++;
                ring_data->rx_curr_get_info.offset = get_info.offset;
                rxdp = ring_data->rx_blocks[get_block].
                                rxds[get_info.offset].virt_addr;
                /* End of block reached: wrap to the next block (ring of blocks). */
                if (get_info.offset == rxd_count[nic->rxd_mode]) {
                        get_info.offset = 0;
                        ring_data->rx_curr_get_info.offset = get_info.offset;
                        get_block++;
                        if (get_block == ring_data->block_count)
                                get_block = 0;
                        ring_data->rx_curr_get_info.block_index = get_block;
                        rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
                }

                /* Honour the NAPI budget and the optional per-call cap. */
                nic->pkts_to_process -= 1;
                if ((napi) && (!nic->pkts_to_process))
                        break;
                pkt_cnt++;
                if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
                        break;
        }
        if (nic->lro) {
                /* Clear all LRO sessions before exiting */
                for (i=0; i<MAX_LRO_SESSIONS; i++) {
                        struct lro *lro = &nic->lro0_n[i];
                        if (lro->in_use) {
                                /* Flush the aggregated frame before releasing the session. */
                                update_L3L4_header(nic, lro);
                                queue_rx_frame(lro->parent);
                                clear_lro_session(lro);
                        }
                }
        }

        spin_unlock(&nic->rx_lock);
}
2929
2930 /**
2931  *  tx_intr_handler - Transmit interrupt handler
2932  *  @nic : device private variable
2933  *  Description:
2934  *  If an interrupt was raised to indicate DMA complete of the
2935  *  Tx packet, this function is called. It identifies the last TxD
2936  *  whose buffer was freed and frees all skbs whose data have already
2937  *  DMA'ed into the NICs internal memory.
2938  *  Return Value:
2939  *  NONE
2940  */
2941
static void tx_intr_handler(struct fifo_info *fifo_data)
{
        struct s2io_nic *nic = fifo_data->nic;
        struct net_device *dev = (struct net_device *) nic->dev;
        struct tx_curr_get_info get_info, put_info;
        struct sk_buff *skb;
        struct TxD *txdlp;
        u8 err_mask;

        /* Snapshot consumer/producer positions for this Tx FIFO. */
        get_info = fifo_data->tx_curr_get_info;
        memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
        txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
            list_virt_addr;
        /*
         * Reclaim every descriptor list the NIC has finished DMA'ing:
         * stop when the NIC still owns the TxD, when we catch up with
         * the producer, or when Host_Control (the skb pointer) is clear.
         */
        while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
               (get_info.offset != put_info.offset) &&
               (txdlp->Host_Control)) {
                /* Check for TxD errors */
                if (txdlp->Control_1 & TXD_T_CODE) {
                        unsigned long long err;
                        err = txdlp->Control_1 & TXD_T_CODE;
                        if (err & 0x1) {
                                nic->mac_control.stats_info->sw_stat.
                                                parity_err_cnt++;
                        }

                        /* update t_code statistics */
                        err_mask = err >> 48;
                        switch(err_mask) {
                                case 2:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_buf_abort_cnt++;
                                break;

                                case 3:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_desc_abort_cnt++;
                                break;

                                case 7:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_parity_err_cnt++;
                                break;

                                case 10:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_link_loss_cnt++;
                                break;

                                case 15:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_list_proc_err_cnt++;
                                break;
                        }
                }

                /* Unmap the TxDL buffers and recover the owning skb. */
                skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
                if (skb == NULL) {
                        /*
                         * NOTE(review): this early return skips the
                         * netif_wake_queue() below; if the queue was
                         * stopped it stays stopped -- confirm intended.
                         */
                        DBG_PRINT(ERR_DBG, "%s: Null skb ",
                        __FUNCTION__);
                        DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
                        return;
                }

                /* Updating the statistics block */
                nic->stats.tx_bytes += skb->len;
                nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
                dev_kfree_skb_irq(skb);

                /* Advance the get pointer, wrapping at fifo_len + 1 entries. */
                get_info.offset++;
                if (get_info.offset == get_info.fifo_len + 1)
                        get_info.offset = 0;
                txdlp = (struct TxD *) fifo_data->list_info
                    [get_info.offset].list_virt_addr;
                fifo_data->tx_curr_get_info.offset =
                    get_info.offset;
        }

        /* Descriptors were freed, so the queue may be restarted. */
        spin_lock(&nic->tx_lock);
        if (netif_queue_stopped(dev))
                netif_wake_queue(dev);
        spin_unlock(&nic->tx_lock);
}
3024
3025 /**
3026  *  s2io_mdio_write - Function to write in to MDIO registers
3027  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3028  *  @addr     : address value
3029  *  @value    : data value
3030  *  @dev      : pointer to net_device structure
3031  *  Description:
3032  *  This function is used to write values to the MDIO registers
3033  *  NONE
3034  */
3035 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3036 {
3037         u64 val64 = 0x0;
3038         struct s2io_nic *sp = dev->priv;
3039         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3040
3041         //address transaction
3042         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3043                         | MDIO_MMD_DEV_ADDR(mmd_type)
3044                         | MDIO_MMS_PRT_ADDR(0x0);
3045         writeq(val64, &bar0->mdio_control);
3046         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3047         writeq(val64, &bar0->mdio_control);
3048         udelay(100);
3049
3050         //Data transaction
3051         val64 = 0x0;
3052         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3053                         | MDIO_MMD_DEV_ADDR(mmd_type)
3054                         | MDIO_MMS_PRT_ADDR(0x0)
3055                         | MDIO_MDIO_DATA(value)
3056                         | MDIO_OP(MDIO_OP_WRITE_TRANS);
3057         writeq(val64, &bar0->mdio_control);
3058         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3059         writeq(val64, &bar0->mdio_control);
3060         udelay(100);
3061
3062         val64 = 0x0;
3063         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3064         | MDIO_MMD_DEV_ADDR(mmd_type)
3065         | MDIO_MMS_PRT_ADDR(0x0)
3066         | MDIO_OP(MDIO_OP_READ_TRANS);
3067         writeq(val64, &bar0->mdio_control);
3068         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3069         writeq(val64, &bar0->mdio_control);
3070         udelay(100);
3071
3072 }
3073
/**
 *  s2io_mdio_read - Function to read from MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to read values from the MDIO registers
 *  NONE
 */
3083 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3084 {
3085         u64 val64 = 0x0;
3086         u64 rval64 = 0x0;
3087         struct s2io_nic *sp = dev->priv;
3088         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3089
3090         /* address transaction */
3091         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3092                         | MDIO_MMD_DEV_ADDR(mmd_type)
3093                         | MDIO_MMS_PRT_ADDR(0x0);
3094         writeq(val64, &bar0->mdio_control);
3095         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3096         writeq(val64, &bar0->mdio_control);
3097         udelay(100);
3098
3099         /* Data transaction */
3100         val64 = 0x0;
3101         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3102                         | MDIO_MMD_DEV_ADDR(mmd_type)
3103                         | MDIO_MMS_PRT_ADDR(0x0)
3104                         | MDIO_OP(MDIO_OP_READ_TRANS);
3105         writeq(val64, &bar0->mdio_control);
3106         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3107         writeq(val64, &bar0->mdio_control);
3108         udelay(100);
3109
3110         /* Read the value from regs */
3111         rval64 = readq(&bar0->mdio_control);
3112         rval64 = rval64 & 0xFFFF0000;
3113         rval64 = rval64 >> 16;
3114         return rval64;
3115 }
3116 /**
3117  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
 *  @counter      : counter value to be updated
3119  *  @flag         : flag to indicate the status
3120  *  @type         : counter type
3121  *  Description:
3122  *  This function is to check the status of the xpak counters value
3123  *  NONE
3124  */
3125
3126 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3127 {
3128         u64 mask = 0x3;
3129         u64 val64;
3130         int i;
3131         for(i = 0; i <index; i++)
3132                 mask = mask << 0x2;
3133
3134         if(flag > 0)
3135         {
3136                 *counter = *counter + 1;
3137                 val64 = *regs_stat & mask;
3138                 val64 = val64 >> (index * 0x2);
3139                 val64 = val64 + 1;
3140                 if(val64 == 3)
3141                 {
3142                         switch(type)
3143                         {
3144                         case 1:
3145                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3146                                           "service. Excessive temperatures may "
3147                                           "result in premature transceiver "
3148                                           "failure \n");
3149                         break;
3150                         case 2:
3151                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3152                                           "service Excessive bias currents may "
3153                                           "indicate imminent laser diode "
3154                                           "failure \n");
3155                         break;
3156                         case 3:
3157                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3158                                           "service Excessive laser output "
3159                                           "power may saturate far-end "
3160                                           "receiver\n");
3161                         break;
3162                         default:
3163                                 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3164                                           "type \n");
3165                         }
3166                         val64 = 0x0;
3167                 }
3168                 val64 = val64 << (index * 0x2);
3169                 *regs_stat = (*regs_stat & (~mask)) | (val64);
3170
3171         } else {
3172                 *regs_stat = *regs_stat & (~mask);
3173         }
3174 }
3175
3176 /**
3177  *  s2io_updt_xpak_counter - Function to update the xpak counters
3178  *  @dev         : pointer to net_device struct
3179  *  Description:
 *  This function is to update the status of the xpak counters value
3181  *  NONE
3182  */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
        u16 flag  = 0x0;
        u16 type  = 0x0;
        u16 val16 = 0x0;
        u64 val64 = 0x0;
        u64 addr  = 0x0;

        struct s2io_nic *sp = dev->priv;
        struct stat_block *stat_info = sp->mac_control.stats_info;

        /* Check the communication with the MDIO slave */
        addr = 0x0000;
        val64 = 0x0;
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
        /* All-ones or all-zeros means the slave did not respond. */
        if((val64 == 0xFFFF) || (val64 == 0x0000))
        {
                DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
                          "Returned %llx\n", (unsigned long long)val64);
                return;
        }

        /* Check for the expected value of 2040 at PMA address 0x0000 */
        if(val64 != 0x2040)
        {
                DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
                DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
                          (unsigned long long)val64);
                return;
        }

        /* Loading the DOM register to MDIO register */
        /* val16 is 0 here: the write triggers the load, value unused. */
        addr = 0xA100;
        s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

        /* Reading the Alarm flags */
        addr = 0xA070;
        val64 = 0x0;
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

        /* Bit 7: transceiver temperature high alarm (2-bit state tracked). */
        flag = CHECKBIT(val64, 0x7);
        type = 1;
        s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
                                &stat_info->xpak_stat.xpak_regs_stat,
                                0x0, flag, type);

        if(CHECKBIT(val64, 0x6))
                stat_info->xpak_stat.alarm_transceiver_temp_low++;

        /* Bit 3: laser bias current high alarm. */
        flag = CHECKBIT(val64, 0x3);
        type = 2;
        s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
                                &stat_info->xpak_stat.xpak_regs_stat,
                                0x2, flag, type);

        if(CHECKBIT(val64, 0x2))
                stat_info->xpak_stat.alarm_laser_bias_current_low++;

        /* Bit 1: laser output power high alarm. */
        flag = CHECKBIT(val64, 0x1);
        type = 3;
        s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
                                &stat_info->xpak_stat.xpak_regs_stat,
                                0x4, flag, type);

        if(CHECKBIT(val64, 0x0))
                stat_info->xpak_stat.alarm_laser_output_power_low++;

        /* Reading the Warning flags */
        addr = 0xA074;
        val64 = 0x0;
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

        /* Warnings are simple counters, no consecutive-hit tracking. */
        if(CHECKBIT(val64, 0x7))
                stat_info->xpak_stat.warn_transceiver_temp_high++;

        if(CHECKBIT(val64, 0x6))
                stat_info->xpak_stat.warn_transceiver_temp_low++;

        if(CHECKBIT(val64, 0x3))
                stat_info->xpak_stat.warn_laser_bias_current_high++;

        if(CHECKBIT(val64, 0x2))
                stat_info->xpak_stat.warn_laser_bias_current_low++;

        if(CHECKBIT(val64, 0x1))
                stat_info->xpak_stat.warn_laser_output_power_high++;

        if(CHECKBIT(val64, 0x0))
                stat_info->xpak_stat.warn_laser_output_power_low++;
}
3274
/**
 *  wait_for_cmd_complete - waits for a command to complete.
 *  @addr : address of the register to poll.
 *  @busy_bit : bit mask identifying the busy/done bit in that register.
 *  @bit_state : S2IO_BIT_RESET or S2IO_BIT_SET - which state of the bit
 *  signals completion.
 *  Description: Function that waits for a command to Write into RMAC
 *  ADDR DATA registers to be completed and returns either success or
 *  error depending on whether the command was complete or not.
 *  Return value:
 *   SUCCESS on success and FAILURE on failure.
 */
3285
3286 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3287                                 int bit_state)
3288 {
3289         int ret = FAILURE, cnt = 0, delay = 1;
3290         u64 val64;
3291
3292         if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3293                 return FAILURE;
3294
3295         do {
3296                 val64 = readq(addr);
3297                 if (bit_state == S2IO_BIT_RESET) {
3298                         if (!(val64 & busy_bit)) {
3299                                 ret = SUCCESS;
3300                                 break;
3301                         }
3302                 } else {
3303                         if (!(val64 & busy_bit)) {
3304                                 ret = SUCCESS;
3305                                 break;
3306                         }
3307                 }
3308
3309                 if(in_interrupt())
3310                         mdelay(delay);
3311                 else
3312                         msleep(delay);
3313
3314                 if (++cnt >= 10)
3315                         delay = 50;
3316         } while (cnt < 20);
3317         return ret;
3318 }
3319 /*
3320  * check_pci_device_id - Checks if the device id is supported
3321  * @id : device id
3322  * Description: Function to check if the pci device id is supported by driver.
3323  * Return value: Actual device id if supported else PCI_ANY_ID
3324  */
3325 static u16 check_pci_device_id(u16 id)
3326 {
3327         switch (id) {
3328         case PCI_DEVICE_ID_HERC_WIN:
3329         case PCI_DEVICE_ID_HERC_UNI:
3330                 return XFRAME_II_DEVICE;
3331         case PCI_DEVICE_ID_S2IO_UNI:
3332         case PCI_DEVICE_ID_S2IO_WIN:
3333                 return XFRAME_I_DEVICE;
3334         default:
3335                 return PCI_ANY_ID;
3336         }
3337 }
3338
3339 /**
3340  *  s2io_reset - Resets the card.
3341  *  @sp : private member of the device structure.
3342  *  Description: Function to Reset the card. This function then also
3343  *  restores the previously saved PCI configuration space registers as
3344  *  the card reset also resets the configuration space.
3345  *  Return value:
3346  *  void.
3347  */
3348
static void s2io_reset(struct s2io_nic * sp)
{
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64;
        u16 subid, pci_cmd;
        int i;
        u16 val16;
        /* Software stats that must survive the stats-block wipe below. */
        unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
        unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;

        DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
                        __FUNCTION__, sp->dev->name);

        /* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
        pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

        /* Trigger the full software reset of the adapter. */
        val64 = SW_RESET_ALL;
        writeq(val64, &bar0->sw_reset);
        /* CX4 boards need extra settle time after reset. */
        if (strstr(sp->product_name, "CX4")) {
                msleep(750);
        }
        msleep(250);
        /* Retry until config space reads back a recognised device id. */
        for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

                /* Restore the PCI state saved during initialization. */
                pci_restore_state(sp->pdev);
                pci_read_config_word(sp->pdev, 0x2, &val16);
                if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
                        break;
                msleep(200);
        }

        if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
                DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
        }

        /* Put back the PCI-X command register saved before the reset. */
        pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

        s2io_init_pci(sp);

        /* Set swapper to enable I/O register access */
        s2io_set_swapper(sp);

        /* Restore the MSIX table entries from local variables */
        restore_xmsi_data(sp);

        /* Clear certain PCI/PCI-X fields after reset */
        if (sp->device_type == XFRAME_II_DEVICE) {
                /* Clear "detected parity error" bit */
                pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

                /* Clearing PCIX Ecc status register */
                pci_write_config_dword(sp->pdev, 0x68, 0x7C);

                /* Clearing PCI_STATUS error reflected here */
                writeq(BIT(62), &bar0->txpic_int_reg);
        }

        /* Reset device statistics maintained by OS */
        memset(&sp->stats, 0, sizeof (struct net_device_stats));

        up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
        down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
        up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
        down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
        reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
        mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
        mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
        watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
        /* save link up/down time/cnt, reset/memory/watchdog cnt */
        memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
        /* restore link up/down time/cnt, reset/memory/watchdog cnt */
        sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
        sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
        sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
        sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
        sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
        sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
        sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
        sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;

        /* SXE-002: Configure link and activity LED to turn it off */
        subid = sp->pdev->subsystem_device;
        if (((subid & 0xFF) >= 0x07) &&
            (sp->device_type == XFRAME_I_DEVICE)) {
                val64 = readq(&bar0->gpio_control);
                val64 |= 0x0000800000000000ULL;
                writeq(val64, &bar0->gpio_control);
                val64 = 0x0411040400000000ULL;
                writeq(val64, (void __iomem *)bar0 + 0x2700);
        }

        /*
         * Clear spurious ECC interrupts that would have occured on
         * XFRAME II cards after reset.
         */
        if (sp->device_type == XFRAME_II_DEVICE) {
                /* Write-one-to-clear: echo the latched error bits back. */
                val64 = readq(&bar0->pcc_err_reg);
                writeq(val64, &bar0->pcc_err_reg);
        }

        /* restore the previously assigned mac address */
        s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);

        sp->device_enabled_once = FALSE;
}
3455
3456 /**
3457  *  s2io_set_swapper - to set the swapper controle on the card
3458  *  @sp : private member of the device structure,
3459  *  pointer to the s2io_nic structure.
3460  *  Description: Function to set the swapper control on the card
3461  *  correctly depending on the 'endianness' of the system.
3462  *  Return value:
3463  *  SUCCESS on success and FAILURE on failure.
3464  */
3465
static int s2io_set_swapper(struct s2io_nic * sp)
{
        struct net_device *dev = sp->dev;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64, valt, valr;

        /*
         * Set proper endian settings and verify the same by reading
         * the PIF Feed-back register.
         */

        val64 = readq(&bar0->pif_rd_swapper_fb);
        if (val64 != 0x0123456789ABCDEFULL) {
                /*
                 * Feedback pattern wrong: probe the four read-path
                 * swapper configurations until one reads back correctly.
                 */
                int i = 0;
                u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
                                0x8100008181000081ULL,  /* FE=1, SE=0 */
                                0x4200004242000042ULL,  /* FE=0, SE=1 */
                                0};                     /* FE=0, SE=0 */

                while(i<4) {
                        writeq(value[i], &bar0->swapper_ctrl);
                        val64 = readq(&bar0->pif_rd_swapper_fb);
                        if (val64 == 0x0123456789ABCDEFULL)
                                break;
                        i++;
                }
                if (i == 4) {
                        DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
                                dev->name);
                        DBG_PRINT(ERR_DBG, "feedback read %llx\n",
                                (unsigned long long) val64);
                        return FAILURE;
                }
                valr = value[i];
        } else {
                /* Reads already work: keep the current swapper setting. */
                valr = readq(&bar0->swapper_ctrl);
        }

        /* Verify the write path with a write/read-back of a known pattern. */
        valt = 0x0123456789ABCDEFULL;
        writeq(valt, &bar0->xmsi_address);
        val64 = readq(&bar0->xmsi_address);

        if(val64 != valt) {
                /*
                 * Write path wrong: probe the four write-path swapper
                 * configurations (OR'ed with the working read setting).
                 */
                int i = 0;
                u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
                                0x0081810000818100ULL,  /* FE=1, SE=0 */
                                0x0042420000424200ULL,  /* FE=0, SE=1 */
                                0};                     /* FE=0, SE=0 */

                while(i<4) {
                        writeq((value[i] | valr), &bar0->swapper_ctrl);
                        writeq(valt, &bar0->xmsi_address);
                        val64 = readq(&bar0->xmsi_address);
                        if(val64 == valt)
                                break;
                        i++;
                }
                if(i == 4) {
                        unsigned long long x = val64;
                        DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
                        DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
                        return FAILURE;
                }
        }
        /* Keep only the discovered PIF swapper bits, then add our own. */
        val64 = readq(&bar0->swapper_ctrl);
        val64 &= 0xFFFF000000000000ULL;

#ifdef  __BIG_ENDIAN
        /*
         * The device by default set to a big endian format, so a
         * big endian driver need not set anything.
         */
        val64 |= (SWAPPER_CTRL_TXP_FE |
                 SWAPPER_CTRL_TXP_SE |
                 SWAPPER_CTRL_TXD_R_FE |
                 SWAPPER_CTRL_TXD_W_FE |
                 SWAPPER_CTRL_TXF_R_FE |
                 SWAPPER_CTRL_RXD_R_FE |
                 SWAPPER_CTRL_RXD_W_FE |
                 SWAPPER_CTRL_RXF_W_FE |
                 SWAPPER_CTRL_XMSI_FE |
                 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
        if (sp->config.intr_type == INTA)
                val64 |= SWAPPER_CTRL_XMSI_SE;
        writeq(val64, &bar0->swapper_ctrl);
#else
        /*
         * Initially we enable all bits to make it accessible by the
         * driver, then we selectively enable only those bits that
         * we want to set.
         */
        val64 |= (SWAPPER_CTRL_TXP_FE |
                 SWAPPER_CTRL_TXP_SE |
                 SWAPPER_CTRL_TXD_R_FE |
                 SWAPPER_CTRL_TXD_R_SE |
                 SWAPPER_CTRL_TXD_W_FE |
                 SWAPPER_CTRL_TXD_W_SE |
                 SWAPPER_CTRL_TXF_R_FE |
                 SWAPPER_CTRL_RXD_R_FE |
                 SWAPPER_CTRL_RXD_R_SE |
                 SWAPPER_CTRL_RXD_W_FE |
                 SWAPPER_CTRL_RXD_W_SE |
                 SWAPPER_CTRL_RXF_W_FE |
                 SWAPPER_CTRL_XMSI_FE |
                 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
        if (sp->config.intr_type == INTA)
                val64 |= SWAPPER_CTRL_XMSI_SE;
        writeq(val64, &bar0->swapper_ctrl);
#endif
        val64 = readq(&bar0->swapper_ctrl);

        /*
         * Verifying if endian settings are accurate by reading a
         * feedback register.
         */
        val64 = readq(&bar0->pif_rd_swapper_fb);
        if (val64 != 0x0123456789ABCDEFULL) {
                /* Endian settings are incorrect, calls for another dekko. */
                DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
                          dev->name);
                DBG_PRINT(ERR_DBG, "feedback read %llx\n",
                          (unsigned long long) val64);
                return FAILURE;
        }

        return SUCCESS;
}
3593
3594 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3595 {
3596         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3597         u64 val64;
3598         int ret = 0, cnt = 0;
3599
3600         do {
3601                 val64 = readq(&bar0->xmsi_access);
3602                 if (!(val64 & BIT(15)))
3603                         break;
3604                 mdelay(1);
3605                 cnt++;
3606         } while(cnt < 5);
3607         if (cnt == 5) {
3608                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3609                 ret = 1;
3610         }
3611
3612         return ret;
3613 }
3614
3615 static void restore_xmsi_data(struct s2io_nic *nic)
3616 {
3617         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3618         u64 val64;
3619         int i;
3620
3621         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3622                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3623                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3624                 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3625                 writeq(val64, &bar0->xmsi_access);
3626                 if (wait_for_msix_trans(nic, i)) {
3627                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3628                         continue;
3629                 }
3630         }
3631 }
3632
3633 static void store_xmsi_data(struct s2io_nic *nic)
3634 {
3635         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3636         u64 val64, addr, data;
3637         int i;
3638
3639         /* Store and display */
3640         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3641                 val64 = (BIT(15) | vBIT(i, 26, 6));
3642                 writeq(val64, &bar0->xmsi_access);
3643                 if (wait_for_msix_trans(nic, i)) {
3644                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3645                         continue;
3646                 }
3647                 addr = readq(&bar0->xmsi_address);
3648                 data = readq(&bar0->xmsi_data);
3649                 if (addr && data) {
3650                         nic->msix_info[i].addr = addr;
3651                         nic->msix_info[i].data = data;
3652                 }
3653         }
3654 }
3655
/*
 * s2io_enable_msi_x - allocate, program and enable the NIC's MSI-X vectors.
 * @nic: device private structure.
 *
 * Allocates the PCI msix_entry array and the driver's parallel
 * s2io_msix_entry bookkeeping array, steers each Tx FIFO's interrupt and
 * each Rx ring's interrupt to its own vector through the tx_mat/rx_mat
 * registers (vector 0 is left for alarms/general interrupts), and then
 * calls pci_enable_msix().
 *
 * Returns 0 on success and -ENOMEM if an allocation fails or MSI-X
 * could not be enabled with at least tx_fifo_num + rx_ring_num + 1
 * vectors.
 */
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        u64 tx_mat, rx_mat;
        u16 msi_control; /* Temp variable */
        int ret, i, j, msix_indx = 1;

        nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry),
                               GFP_KERNEL);
        if (!nic->entries) {
                DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
                        __FUNCTION__);
                nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
                return -ENOMEM;
        }
        nic->mac_control.stats_info->sw_stat.mem_allocated 
                += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));

        nic->s2io_entries =
                kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry),
                                   GFP_KERNEL);
        if (!nic->s2io_entries) {
                DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", 
                        __FUNCTION__);
                nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
                /* Roll back the first allocation and its stat. */
                kfree(nic->entries);
                nic->mac_control.stats_info->sw_stat.mem_freed 
                        += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
                return -ENOMEM;
        }
         nic->mac_control.stats_info->sw_stat.mem_allocated 
                += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));

        /* kcalloc() zeroed both tables; set the vector indices. */
        for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
                nic->entries[i].entry = i;
                nic->s2io_entries[i].entry = i;
                nic->s2io_entries[i].arg = NULL;
                nic->s2io_entries[i].in_use = 0;
        }

        /* Steer each Tx FIFO's interrupt to its own vector (1..n). */
        tx_mat = readq(&bar0->tx_mat0_n[0]);
        for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
                tx_mat |= TX_MAT_SET(i, msix_indx);
                nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
                nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
                nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
        }
        writeq(tx_mat, &bar0->tx_mat0_n[0]);

        if (!nic->config.bimodal) {
                /* Normal mode: Rx rings are steered through rx_mat. */
                rx_mat = readq(&bar0->rx_mat);
                for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
                        rx_mat |= RX_MAT_SET(j, msix_indx);
                        nic->s2io_entries[msix_indx].arg 
                                = &nic->mac_control.rings[j];
                        nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
                        nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
                }
                writeq(rx_mat, &bar0->rx_mat);
        } else {
                /*
                 * Bimodal mode: ring interrupts go through tx_mat0_n[7].
                 * NOTE(review): TX_MAT_SET(i, ...) reuses 'i', which the
                 * FIFO loop above left at tx_fifo_num, for every ring;
                 * the parallel rx path above indexes with 'j'.  This
                 * looks like it should be TX_MAT_SET(j, msix_indx) -
                 * confirm against the Xframe register spec.
                 */
                tx_mat = readq(&bar0->tx_mat0_n[7]);
                for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
                        tx_mat |= TX_MAT_SET(i, msix_indx);
                        nic->s2io_entries[msix_indx].arg 
                                = &nic->mac_control.rings[j];
                        nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
                        nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
                }
                writeq(tx_mat, &bar0->tx_mat0_n[7]);
        }

        nic->avail_msix_vectors = 0;
        ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
        /* We fail init if error or we get less vectors than min required */
        if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
                /* Positive return = vectors available; retry with that many. */
                nic->avail_msix_vectors = ret;
                ret = pci_enable_msix(nic->pdev, nic->entries, ret);
        }
        if (ret) {
                DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
                kfree(nic->entries);
                nic->mac_control.stats_info->sw_stat.mem_freed 
                        += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
                kfree(nic->s2io_entries);
                nic->mac_control.stats_info->sw_stat.mem_freed 
                += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
                nic->entries = NULL;
                nic->s2io_entries = NULL;
                nic->avail_msix_vectors = 0;
                return -ENOMEM;
        }
        if (!nic->avail_msix_vectors)
                nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;

        /*
         * To enable MSI-X, MSI also needs to be enabled, due to a bug
         * in the herc NIC. (Temp change, needs to be removed later)
         */
        pci_read_config_word(nic->pdev, 0x42, &msi_control);
        msi_control |= 0x1; /* Enable MSI */
        pci_write_config_word(nic->pdev, 0x42, msi_control);

        return 0;
}
3760
/*
 * Handle the software interrupt fired during the MSI(X) test: record
 * that the interrupt arrived and wake the waiter in s2io_test_msi().
 */
static irqreturn_t __devinit s2io_test_intr(int irq, void *dev_id)
{
        struct s2io_nic *sp = dev_id;

        /* Set the flag before waking so the waiter observes it. */
        sp->msi_detected = 1;
        wake_up(&sp->msi_wait);

        return IRQ_HANDLED;
}
3771
/*
 * Test the interrupt path by forcing a software IRQ: a one-shot
 * scheduled-timer interrupt is steered to MSI-X vector 1 and the
 * function waits for s2io_test_intr() to observe it.
 *
 * Returns 0 if the test interrupt arrived, -EOPNOTSUPP on timeout
 * (caller falls back to INTA), or the request_irq() error code.
 */
static int __devinit s2io_test_msi(struct s2io_nic *sp)
{
        struct pci_dev *pdev = sp->pdev;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        int err;
        u64 val64, saved64;

        err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
                        sp->name, sp);
        if (err) {
                DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
                       sp->dev->name, pci_name(pdev), pdev->irq);
                return err;
        }

        init_waitqueue_head (&sp->msi_wait);
        sp->msi_detected = 0;

        /* Arm a one-shot timer interrupt routed to MSI 1. */
        saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
        val64 |= SCHED_INT_CTRL_ONE_SHOT;
        val64 |= SCHED_INT_CTRL_TIMER_EN;
        val64 |= SCHED_INT_CTRL_INT2MSI(1);
        writeq(val64, &bar0->scheduled_int_ctrl);

        /* Give the interrupt up to 100 ms to arrive. */
        wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

        if (!sp->msi_detected) {
                /* MSI(X) test failed, go back to INTx mode */
                DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated"
                        "using MSI(X) during test\n", sp->dev->name,
                        pci_name(pdev));

                err = -EOPNOTSUPP;
        }

        free_irq(sp->entries[1].vector, sp);

        /* Restore the interrupt control register to its saved value. */
        writeq(saved64, &bar0->scheduled_int_ctrl);

        return err;
}
3814 /* ********************************************************* *
3815  * Functions defined below concern the OS part of the driver *
3816  * ********************************************************* */
3817
/**
 *  s2io_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver. It mainly calls a
 *  function to allocate Rx buffers and inserts them into the buffer
 *  descriptors and then enables the Rx part of the NIC. If MSI-X was
 *  requested it is enabled, verified with a test interrupt, and rolled
 *  back (re-enabled later in add_isr()); on failure the driver falls
 *  back to INTA.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *   file on failure.
 */

static int s2io_open(struct net_device *dev)
{
        struct s2io_nic *sp = dev->priv;
        int err = 0;

        /*
         * Make sure you have link off by default every time
         * Nic is initialized
         */
        netif_carrier_off(dev);
        sp->last_link_state = 0;

        napi_enable(&sp->napi);

        if (sp->config.intr_type == MSI_X) {
                int ret = s2io_enable_msi_x(sp);

                if (!ret) {
                        u16 msi_control;

                        /* Verify an interrupt actually arrives over MSI-X. */
                        ret = s2io_test_msi(sp);

                        /* rollback MSI-X, will re-enable during add_isr() */
                        kfree(sp->entries);
                        sp->mac_control.stats_info->sw_stat.mem_freed +=
                                (MAX_REQUESTED_MSI_X *
                                sizeof(struct msix_entry));
                        kfree(sp->s2io_entries);
                        sp->mac_control.stats_info->sw_stat.mem_freed +=
                                (MAX_REQUESTED_MSI_X *
                                sizeof(struct s2io_msix_entry));
                        sp->entries = NULL;
                        sp->s2io_entries = NULL;

                        /* Undo the MSI enable done in s2io_enable_msi_x(). */
                        pci_read_config_word(sp->pdev, 0x42, &msi_control);
                        msi_control &= 0xFFFE; /* Disable MSI */
                        pci_write_config_word(sp->pdev, 0x42, msi_control);

                        pci_disable_msix(sp->pdev);

                }
                if (ret) {

                        DBG_PRINT(ERR_DBG,
                          "%s: MSI-X requested but failed to enable\n",
                          dev->name);
                        /* Fall back to legacy INTA interrupts. */
                        sp->config.intr_type = INTA;
                }
        }

        /* NAPI doesn't work well with MSI(X) */
         if (sp->config.intr_type != INTA) {
                if(sp->config.napi)
                        sp->config.napi = 0;
        }

        /* Initialize H/W and enable interrupts */
        err = s2io_card_up(sp);
        if (err) {
                DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
                          dev->name);
                goto hw_init_failed;
        }

        if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
                DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
                s2io_card_down(sp);
                err = -ENODEV;
                goto hw_init_failed;
        }

        netif_start_queue(dev);
        return 0;

hw_init_failed:
        napi_disable(&sp->napi);
        /* Free MSI-X tables left over if bring-up failed mid-way. */
        if (sp->config.intr_type == MSI_X) {
                if (sp->entries) {
                        kfree(sp->entries);
                        sp->mac_control.stats_info->sw_stat.mem_freed 
                        += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
                }
                if (sp->s2io_entries) {
                        kfree(sp->s2io_entries);
                        sp->mac_control.stats_info->sw_stat.mem_freed 
                        += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
                }
        }
        return err;
}
3920
3921 /**
3922  *  s2io_close -close entry point of the driver
3923  *  @dev : device pointer.
3924  *  Description:
3925  *  This is the stop entry point of the driver. It needs to undo exactly
3926  *  whatever was done by the open entry point,thus it's usually referred to
3927  *  as the close function.Among other things this function mainly stops the
3928  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3929  *  Return value:
3930  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3931  *  file on failure.
3932  */
3933
3934 static int s2io_close(struct net_device *dev)
3935 {
3936         struct s2io_nic *sp = dev->priv;
3937
3938         netif_stop_queue(dev);
3939         napi_disable(&sp->napi);
3940         /* Reset card, kill tasklet and free Tx and Rx buffers. */
3941         s2io_card_down(sp);
3942
3943         return 0;
3944 }
3945
/**
 *  s2io_xmit - Tx entry point of the driver
 *  @skb : the socket buffer containing the Tx data.
 *  @dev : device pointer.
 *  Description :
 *  This function is the Tx entry point of the driver. S2IO NIC supports
 *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
 *  NOTE: when the device cannot queue the pkt, just the trans_start
 *  variable will not be updated.
 *  Return value:
 *  0 on success & 1 on failure.
 */

static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct s2io_nic *sp = dev->priv;
        u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
        register u64 val64;
        struct TxD *txdp;
        struct TxFIFO_element __iomem *tx_fifo;
        unsigned long flags;
        u16 vlan_tag = 0;
        int vlan_priority = 0;
        struct mac_info *mac_control;
        struct config_param *config;
        int offload_type;
        struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

        mac_control = &sp->mac_control;
        config = &sp->config;

        DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

        /* Zero-length skbs are dropped, not queued. */
        if (unlikely(skb->len <= 0)) {
                DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
                dev_kfree_skb_any(skb);
                return 0;
}

        spin_lock_irqsave(&sp->tx_lock, flags);
        if (!is_s2io_card_up(sp)) {
                DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
                          dev->name);
                spin_unlock_irqrestore(&sp->tx_lock, flags);
                dev_kfree_skb(skb);
                return 0;
        }

        queue = 0;
        /* Get Fifo number to Transmit based on vlan priority */
        if (sp->vlgrp && vlan_tx_tag_present(skb)) {
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_priority = vlan_tag >> 13;
                queue = config->fifo_mapping[vlan_priority];
        }

        put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
        get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
        txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
                list_virt_addr;

        queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
        /* Avoid "put" pointer going beyond "get" pointer */
        if (txdp->Host_Control ||
                   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
                DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
                netif_stop_queue(dev);
                dev_kfree_skb(skb);
                spin_unlock_irqrestore(&sp->tx_lock, flags);
                return 0;
        }

        /* Program offload assists (LSO / checksum) into the first TxD. */
        offload_type = s2io_offload_type(skb);
        if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
                txdp->Control_1 |= TXD_TCP_LSO_EN;
                txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
        }
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                txdp->Control_2 |=
                    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
                     TXD_TX_CKO_UDP_EN);
        }
        txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
        txdp->Control_1 |= TXD_LIST_OWN_XENA;
        txdp->Control_2 |= config->tx_intr_type;

        if (sp->vlgrp && vlan_tx_tag_present(skb)) {
                txdp->Control_2 |= TXD_VLAN_ENABLE;
                txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
        }

        /* Length of the linear (non-paged) part of the skb. */
        frg_len = skb->len - skb->data_len;
        if (offload_type == SKB_GSO_UDP) {
                int ufo_size;

                /* UFO: TxD 0 carries an 8-byte in-band fragmentation id. */
                ufo_size = s2io_udp_mss(skb);
                ufo_size &= ~7;
                txdp->Control_1 |= TXD_UFO_EN;
                txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
                txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
                sp->ufo_in_band_v[put_off] =
                                (u64)skb_shinfo(skb)->ip6_frag_id;
#else
                sp->ufo_in_band_v[put_off] =
                                (u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
                /*
                 * NOTE(review): the frag id is stored at index put_off,
                 * but both Host_Control and the DMA mapping below use
                 * the base of ufo_in_band_v rather than
                 * &ufo_in_band_v[put_off] - verify which element the
                 * hardware is meant to see.
                 */
                txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
                txdp->Buffer_Pointer = pci_map_single(sp->pdev,
                                        sp->ufo_in_band_v,
                                        sizeof(u64), PCI_DMA_TODEVICE);
                if((txdp->Buffer_Pointer == 0) ||
                        (txdp->Buffer_Pointer == DMA_ERROR_CODE))
                        goto pci_map_failed;
                txdp++;
        }

        /* Map the linear part of the skb into the (next) descriptor. */
        txdp->Buffer_Pointer = pci_map_single
            (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
        if((txdp->Buffer_Pointer == 0) ||
                (txdp->Buffer_Pointer == DMA_ERROR_CODE))
                goto pci_map_failed;

        txdp->Host_Control = (unsigned long) skb;
        txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
        if (offload_type == SKB_GSO_UDP)
                txdp->Control_1 |= TXD_UFO_EN;

        frg_cnt = skb_shinfo(skb)->nr_frags;
        /* For fragmented SKB. */
        for (i = 0; i < frg_cnt; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                /* A '0' length fragment will be ignored */
                if (!frag->size)
                        continue;
                txdp++;
                txdp->Buffer_Pointer = (u64) pci_map_page
                    (sp->pdev, frag->page, frag->page_offset,
                     frag->size, PCI_DMA_TODEVICE);
                txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
                if (offload_type == SKB_GSO_UDP)
                        txdp->Control_1 |= TXD_UFO_EN;
        }
        txdp->Control_1 |= TXD_GATHER_CODE_LAST;

        if (offload_type == SKB_GSO_UDP)
                frg_cnt++; /* as Txd0 was used for inband header */

        /* Hand the descriptor list to the FIFO and kick the doorbell. */
        tx_fifo = mac_control->tx_FIFO_start[queue];
        val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
        writeq(val64, &tx_fifo->TxDL_Pointer);

        val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
                 TX_FIFO_LAST_LIST);
        if (offload_type)
                val64 |= TX_FIFO_SPECIAL_FUNC;

        writeq(val64, &tx_fifo->List_Control);

        mmiowb();

        /* Advance the put pointer, wrapping at the end of the ring. */
        put_off++;
        if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
                put_off = 0;
        mac_control->fifos[queue].tx_curr_put_info.offset = put_off;

        /* Avoid "put" pointer going beyond "get" pointer */
        if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
                sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
                DBG_PRINT(TX_DBG,
                          "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
                          put_off, get_off);
                netif_stop_queue(dev);
        }
        mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
        dev->trans_start = jiffies;
        spin_unlock_irqrestore(&sp->tx_lock, flags);

        return 0;
pci_map_failed:
        /* DMA mapping failed: count it, stop the queue and drop the skb. */
        stats->pci_map_fail_cnt++;
        netif_stop_queue(dev);
        stats->mem_freed += skb->truesize;
        dev_kfree_skb(skb);
        spin_unlock_irqrestore(&sp->tx_lock, flags);
        return 0;
}
4133
4134 static void
4135 s2io_alarm_handle(unsigned long data)
4136 {
4137         struct s2io_nic *sp = (struct s2io_nic *)data;
4138         struct net_device *dev = sp->dev;
4139
4140         s2io_handle_errors(dev);
4141         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4142 }
4143
/*
 * s2io_chk_rx_buffers - replenish Rx buffers for a ring if running low.
 * @sp: device private structure
 * @rng_n: ring number to check
 *
 * Non-LRO mode: refill synchronously when the ring reaches the PANIC
 * watermark (skipped when TASKLET_IN_USE reports the refill slot is
 * already claimed), or schedule the refill tasklet at the LOW
 * watermark.  LRO mode: always refill directly.
 *
 * Returns 0, or -1 when the PANIC-level refill ran out of memory.
 */
static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
{
        int rxb_size, level;

        if (!sp->lro) {
                rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
                level = rx_buffer_level(sp, rxb_size, rng_n);

                /*
                 * NOTE(review): TASKLET_IN_USE appears to claim bit 0 of
                 * sp->tasklet_status (it is cleared below on both
                 * paths) - test-and-set semantics assumed, confirm at
                 * the macro definition.
                 */
                if ((level == PANIC) && (!TASKLET_IN_USE)) {
                        int ret;
                        DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
                        DBG_PRINT(INTR_DBG, "PANIC levels\n");
                        if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
                                DBG_PRINT(INFO_DBG, "Out of memory in %s",
                                          __FUNCTION__);
                                clear_bit(0, (&sp->tasklet_status));
                                return -1;
                        }
                        clear_bit(0, (&sp->tasklet_status));
                } else if (level == LOW)
                        tasklet_schedule(&sp->task);

        } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
                        DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
                        DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
        }
        return 0;
}
4172
4173 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4174 {
4175         struct ring_info *ring = (struct ring_info *)dev_id;
4176         struct s2io_nic *sp = ring->nic;
4177
4178         if (!is_s2io_card_up(sp))
4179                 return IRQ_HANDLED;
4180
4181         rx_intr_handler(ring);
4182         s2io_chk_rx_buffers(sp, ring->ring_no);
4183
4184         return IRQ_HANDLED;
4185 }
4186
4187 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4188 {
4189         struct fifo_info *fifo = (struct fifo_info *)dev_id;
4190         struct s2io_nic *sp = fifo->nic;
4191
4192         if (!is_s2io_card_up(sp))
4193                 return IRQ_HANDLED;
4194
4195         tx_intr_handler(fifo);
4196         return IRQ_HANDLED;
4197 }
/*
 * s2io_txpic_intr_handle - service TxPIC/GPIO interrupts, mainly link
 * state changes reported through the GPIO interrupt register.
 * @sp: device private structure
 *
 * On link-up: enables the adapter, turns the LED on and swaps the
 * GPIO masks so only link-down fires next.  On link-down: reports the
 * link loss, swaps the masks back and turns the LED off.  If both
 * up and down are latched simultaneously, clears both and unmasks
 * both so the state gets re-evaluated.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64;

        val64 = readq(&bar0->pic_int_status);
        if (val64 & PIC_INT_GPIO) {
                val64 = readq(&bar0->gpio_int_reg);
                if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
                    (val64 & GPIO_INT_REG_LINK_UP)) {
                        /*
                         * This is unstable state so clear both up/down
                         * interrupt and adapter to re-evaluate the link state.
                         */
                        val64 |=  GPIO_INT_REG_LINK_DOWN;
                        val64 |= GPIO_INT_REG_LINK_UP;
                        writeq(val64, &bar0->gpio_int_reg);
                        val64 = readq(&bar0->gpio_int_mask);
                        val64 &= ~(GPIO_INT_MASK_LINK_UP |
                                   GPIO_INT_MASK_LINK_DOWN);
                        writeq(val64, &bar0->gpio_int_mask);
                }
                else if (val64 & GPIO_INT_REG_LINK_UP) {
                        val64 = readq(&bar0->adapter_status);
                                /* Enable Adapter */
                        val64 = readq(&bar0->adapter_control);
                        val64 |= ADAPTER_CNTL_EN;
                        writeq(val64, &bar0->adapter_control);
                        val64 |= ADAPTER_LED_ON;
                        writeq(val64, &bar0->adapter_control);
                        if (!sp->device_enabled_once)
                                sp->device_enabled_once = 1;

                        s2io_link(sp, LINK_UP);
                        /*
                         * unmask link down interrupt and mask link-up
                         * intr
                         */
                        val64 = readq(&bar0->gpio_int_mask);
                        val64 &= ~GPIO_INT_MASK_LINK_DOWN;
                        val64 |= GPIO_INT_MASK_LINK_UP;
                        writeq(val64, &bar0->gpio_int_mask);

                }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
                        val64 = readq(&bar0->adapter_status);
                        s2io_link(sp, LINK_DOWN);
                        /* Link is down so unmask link up interrupt */
                        val64 = readq(&bar0->gpio_int_mask);
                        val64 &= ~GPIO_INT_MASK_LINK_UP;
                        val64 |= GPIO_INT_MASK_LINK_DOWN;
                        writeq(val64, &bar0->gpio_int_mask);

                        /* turn off LED */
                        val64 = readq(&bar0->adapter_control);
                        val64 = val64 &(~ADAPTER_LED_ON);
                        writeq(val64, &bar0->adapter_control);
                }
        }
        /*
         * Final read-back; result is discarded - presumably here to
         * flush the posted mask writes. NOTE(review): confirm intent.
         */
        val64 = readq(&bar0->gpio_int_mask);
}
4258
4259 /**
4260  *  do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4261  *  @value: alarm bits
4262  *  @addr: address value
4263  *  @cnt: counter variable
4264  *  Description: Check for alarm and increment the counter
4265  *  Return Value:
4266  *  1 - if alarm bit set
4267  *  0 - if alarm bit is not set
4268  */
4269 int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4270                           unsigned long long *cnt)
4271 {
4272         u64 val64;
4273         val64 = readq(addr);
4274         if ( val64 & value ) {
4275                 writeq(val64, addr);
4276                 (*cnt)++;
4277                 return 1;
4278         }
4279         return 0;
4280
4281 }
4282
4283 /**
4284  *  s2io_handle_errors - Xframe error indication handler
4285  *  @nic: device private variable
4286  *  Description: Handle alarms such as loss of link, single or
4287  *  double ECC errors, critical and serious errors.
4288  *  Return Value:
4289  *  NONE
4290  */
4291 static void s2io_handle_errors(void * dev_id)
4292 {
4293         struct net_device *dev = (struct net_device *) dev_id;
4294         struct s2io_nic *sp = dev->priv;
4295         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4296         u64 temp64 = 0,val64=0;
4297         int i = 0;
4298
4299         struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4300         struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4301
4302         if (!is_s2io_card_up(sp))
4303                 return;
4304
4305         if (pci_channel_offline(sp->pdev))
4306                 return;
4307
4308         memset(&sw_stat->ring_full_cnt, 0,
4309                 sizeof(sw_stat->ring_full_cnt));
4310
4311         /* Handling the XPAK counters update */
4312         if(stats->xpak_timer_count < 72000) {
4313                 /* waiting for an hour */
4314                 stats->xpak_timer_count++;
4315         } else {
4316                 s2io_updt_xpak_counter(dev);
4317                 /* reset the count to zero */
4318                 stats->xpak_timer_count = 0;
4319         }
4320
4321         /* Handling link status change error Intr */
4322         if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4323                 val64 = readq(&bar0->mac_rmac_err_reg);
4324                 writeq(val64, &bar0->mac_rmac_err_reg);
4325                 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4326                         schedule_work(&sp->set_link_task);
4327         }
4328
4329         /* In case of a serious error, the device will be Reset. */
4330         if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4331                                 &sw_stat->serious_err_cnt))
4332                 goto reset;
4333
4334         /* Check for data parity error */
4335         if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4336                                 &sw_stat->parity_err_cnt))
4337                 goto reset;
4338
4339         /* Check for ring full counter */
4340         if (sp->device_type == XFRAME_II_DEVICE) {
4341                 val64 = readq(&bar0->ring_bump_counter1);
4342                 for (i=0; i<4; i++) {
4343                         temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4344                         temp64 >>= 64 - ((i+1)*16);
4345                         sw_stat->ring_full_cnt[i] += temp64;
4346                 }
4347
4348                 val64 = readq(&bar0->ring_bump_counter2);
4349                 for (i=0; i<4; i++) {
4350                         temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4351                         temp64 >>= 64 - ((i+1)*16);
4352                          sw_stat->ring_full_cnt[i+4] += temp64;
4353                 }
4354         }
4355
4356         val64 = readq(&bar0->txdma_int_status);
4357         /*check for pfc_err*/
4358         if (val64 & TXDMA_PFC_INT) {
4359                 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4360                                 PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4361                                 PFC_PCIX_ERR, &bar0->pfc_err_reg,
4362                                 &sw_stat->pfc_err_cnt))
4363                         goto reset;
4364                 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4365                                 &sw_stat->pfc_err_cnt);
4366         }
4367
4368         /*check for tda_err*/
4369         if (val64 & TXDMA_TDA_INT) {
4370                 if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4371                                 TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4372                                 &sw_stat->tda_err_cnt))
4373                         goto reset;
4374                 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4375                                 &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4376         }
4377         /*check for pcc_err*/
4378         if (val64 & TXDMA_PCC_INT) {
4379                 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4380                                 | PCC_N_SERR | PCC_6_COF_OV_ERR
4381                                 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4382                                 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4383                                 | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4384                                 &sw_stat->pcc_err_cnt))
4385                         goto reset;
4386                 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4387                                 &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4388         }
4389
4390         /*check for tti_err*/
4391         if (val64 & TXDMA_TTI_INT) {
4392                 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4393                                 &sw_stat->tti_err_cnt))
4394                         goto reset;
4395                 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4396                                 &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4397         }
4398
4399         /*check for lso_err*/
4400         if (val64 & TXDMA_LSO_INT) {
4401                 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4402                                 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4403                                 &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4404                         goto reset;
4405                 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4406                                 &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4407         }
4408
4409         /*check for tpa_err*/
4410         if (val64 & TXDMA_TPA_INT) {
4411                 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4412                         &sw_stat->tpa_err_cnt))
4413                         goto reset;
4414                 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4415                         &sw_stat->tpa_err_cnt);
4416         }
4417
4418         /*check for sm_err*/
4419         if (val64 & TXDMA_SM_INT) {
4420                 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4421                         &sw_stat->sm_err_cnt))
4422                         goto reset;
4423         }
4424
4425         val64 = readq(&bar0->mac_int_status);
4426         if (val64 & MAC_INT_STATUS_TMAC_INT) {
4427                 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4428                                 &bar0->mac_tmac_err_reg,
4429                                 &sw_stat->mac_tmac_err_cnt))
4430                         goto reset;
4431                 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4432                                 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4433                                 &bar0->mac_tmac_err_reg,
4434                                 &sw_stat->mac_tmac_err_cnt);
4435         }
4436
4437         val64 = readq(&bar0->xgxs_int_status);
4438         if (val64 & XGXS_INT_STATUS_TXGXS) {
4439                 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4440                                 &bar0->xgxs_txgxs_err_reg,
4441                                 &sw_stat->xgxs_txgxs_err_cnt))
4442                         goto reset;
4443                 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4444                                 &bar0->xgxs_txgxs_err_reg,
4445                                 &sw_stat->xgxs_txgxs_err_cnt);
4446         }
4447
4448         val64 = readq(&bar0->rxdma_int_status);
4449         if (val64 & RXDMA_INT_RC_INT_M) {
4450                 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4451                                 | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4452                                 &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4453                         goto reset;
4454                 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4455                                 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4456                                 &sw_stat->rc_err_cnt);
4457                 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4458                                 | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4459                                 &sw_stat->prc_pcix_err_cnt))
4460                         goto reset;
4461                 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4462                                 | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4463                                 &sw_stat->prc_pcix_err_cnt);
4464         }
4465
4466         if (val64 & RXDMA_INT_RPA_INT_M) {
4467                 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4468                                 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4469                         goto reset;
4470                 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4471                                 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4472         }
4473
4474         if (val64 & RXDMA_INT_RDA_INT_M) {
4475                 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4476                                 | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4477                                 | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4478                                 &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4479                         goto reset;
4480                 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4481                                 | RDA_MISC_ERR | RDA_PCIX_ERR,
4482                                 &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4483         }
4484
4485         if (val64 & RXDMA_INT_RTI_INT_M) {
4486                 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4487                                 &sw_stat->rti_err_cnt))
4488                         goto reset;
4489                 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4490                                 &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
4491         }
4492
4493         val64 = readq(&bar0->mac_int_status);
4494         if (val64 & MAC_INT_STATUS_RMAC_INT) {
4495                 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4496                                 &bar0->mac_rmac_err_reg,
4497                                 &sw_stat->mac_rmac_err_cnt))
4498                         goto reset;
4499                 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4500                                 RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4501                                 &sw_stat->mac_rmac_err_cnt);
4502         }
4503
4504         val64 = readq(&bar0->xgxs_int_status);
4505         if (val64 & XGXS_INT_STATUS_RXGXS) {
4506                 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4507                                 &bar0->xgxs_rxgxs_err_reg,
4508                                 &sw_stat->xgxs_rxgxs_err_cnt))
4509                         goto reset;
4510         }
4511
4512         val64 = readq(&bar0->mc_int_status);
4513         if(val64 & MC_INT_STATUS_MC_INT) {
4514                 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4515                                 &sw_stat->mc_err_cnt))
4516                         goto reset;
4517
4518                 /* Handling Ecc errors */
4519                 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4520                         writeq(val64, &bar0->mc_err_reg);
4521                         if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4522                                 sw_stat->double_ecc_errs++;
4523                                 if (sp->device_type != XFRAME_II_DEVICE) {
4524                                         /*
4525                                          * Reset XframeI only if critical error
4526                                          */
4527                                         if (val64 &
4528                                                 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4529                                                 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4530                                                                 goto reset;
4531                                         }
4532                         } else
4533                                 sw_stat->single_ecc_errs++;
4534                 }
4535         }
4536         return;
4537
4538 reset:
4539         netif_stop_queue(dev);
4540         schedule_work(&sp->rst_timer_task);
4541         sw_stat->soft_reset_cnt++;
4542         return;
4543 }
4544
/**
 *  s2io_isr - ISR handler of the device .
 *  @irq: the irq of the device.
 *  @dev_id: a void pointer to the dev structure of the NIC.
 *  Description:  This function is the ISR handler of the device. It
 *  identifies the reason for the interrupt and calls the relevant
 *  service routines. As a contingency measure, this ISR allocates the
 *  recv buffers, if their numbers are below the panic value which is
 *  presently set to 25% of the original number of rcv buffers allocated.
 *  Return value:
 *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
 *   IRQ_NONE: will be returned if interrupt is not from our device
 */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	/* Pretend we handled any irq's from a disconnected card */
	if (pci_channel_offline(sp->pdev))
		return IRQ_NONE;

	if (!is_s2io_card_up(sp))
		return IRQ_NONE;

	mac_control = &sp->mac_control;
	config = &sp->config;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 */
	reason = readq(&bar0->general_int_status);

	/* NOTE(review): all 1's presumably means a dead/removed adapter */
	if (unlikely(reason == S2IO_MINUS_ONE) ) {
		/* Nothing much can be done. Get out */
		return IRQ_HANDLED;
	}

	if (reason & (GEN_INTR_RXTRAFFIC |
		GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
	{
		/* Mask all interrupts while servicing this one */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (config->napi) {
			if (reason & GEN_INTR_RXTRAFFIC) {
				if (likely(netif_rx_schedule_prep(dev,
							&sp->napi))) {
					__netif_rx_schedule(dev, &sp->napi);
					writeq(S2IO_MINUS_ONE,
					       &bar0->rx_traffic_mask);
				} else
					writeq(S2IO_MINUS_ONE,
					       &bar0->rx_traffic_int);
			}
		} else {
			/*
			 * rx_traffic_int reg is an R1 register, writing all 1's
			 * will ensure that the actual interrupt causing bit
			 * get's cleared and hence a read can be avoided.
			 */
			if (reason & GEN_INTR_RXTRAFFIC)
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

			for (i = 0; i < config->rx_ring_num; i++)
				rx_intr_handler(&mac_control->rings[i]);
		}

		/*
		 * tx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit get's
		 * cleared and hence a read can be avoided.
		 */
		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&mac_control->fifos[i]);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		/*
		 * Reallocate the buffers from the interrupt handler itself.
		 */
		if (!config->napi) {
			for (i = 0; i < config->rx_ring_num; i++)
				s2io_chk_rx_buffers(sp, i);
		}
		/* Restore the interrupt mask; readl flushes the posted write */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);

		return IRQ_HANDLED;

	}
	else if (!reason) {
		/* The interrupt was not raised by us */
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
4655
4656 /**
4657  * s2io_updt_stats -
4658  */
4659 static void s2io_updt_stats(struct s2io_nic *sp)
4660 {
4661         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4662         u64 val64;
4663         int cnt = 0;
4664
4665         if (is_s2io_card_up(sp)) {
4666                 /* Apprx 30us on a 133 MHz bus */
4667                 val64 = SET_UPDT_CLICKS(10) |
4668                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4669                 writeq(val64, &bar0->stat_cfg);
4670                 do {
4671                         udelay(100);
4672                         val64 = readq(&bar0->stat_cfg);
4673                         if (!(val64 & BIT(0)))
4674                                 break;
4675                         cnt++;
4676                         if (cnt == 5)
4677                                 break; /* Updt failed */
4678                 } while(1);
4679         } 
4680 }
4681
4682 /**
4683  *  s2io_get_stats - Updates the device statistics structure.
4684  *  @dev : pointer to the device structure.
4685  *  Description:
4686  *  This function updates the device statistics structure in the s2io_nic
4687  *  structure and returns a pointer to the same.
4688  *  Return value:
4689  *  pointer to the updated net_device_stats structure.
4690  */
4691
4692 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4693 {
4694         struct s2io_nic *sp = dev->priv;
4695         struct mac_info *mac_control;
4696         struct config_param *config;
4697
4698
4699         mac_control = &sp->mac_control;
4700         config = &sp->config;
4701
4702         /* Configure Stats for immediate updt */
4703         s2io_updt_stats(sp);
4704
4705         sp->stats.tx_packets =
4706                 le32_to_cpu(mac_control->stats_info->tmac_frms);
4707         sp->stats.tx_errors =
4708                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4709         sp->stats.rx_errors =
4710                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4711         sp->stats.multicast =
4712                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4713         sp->stats.rx_length_errors =
4714                 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4715
4716         return (&sp->stats);
4717 }
4718
/**
 *  s2io_set_multicast - entry point for multicast address enable/disable.
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled. This also gets
 *  called to set/reset promiscuous mode. Depending on the device flag, we
 *  determine, if multicast address must be enabled or if promiscuous mode
 *  is to be disabled etc.
 *  Return value:
 *  void.
 */

static void s2io_set_multicast(struct net_device *dev)
{
	int i, j, prev_cnt;
	struct dev_mc_list *mclist;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
	    0xfeffffffffffULL;
	u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
	void __iomem *add;

	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
		/*  Enable all Multicast addresses */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
		    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		    RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET);

		sp->m_cast_flg = 1;
		sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
		/*  Disable all Multicast addresses */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
		    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		    RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET);

		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
		/*  Put the NIC into promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 |= MAC_CFG_RMAC_PROM_ENABLE;

		/* mac_cfg is key-protected: write the key before each half */
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		if (vlan_tag_strip != 1) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			vlan_strip_flag = 0;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 1;
		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
			  dev->name);
	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
		/*  Remove the NIC from promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		if (vlan_tag_strip != 0) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			vlan_strip_flag = 1;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 0;
		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
			  dev->name);
	}

	/*  Update individual M_CAST address list */
	if ((!sp->m_cast_flg) && dev->mc_count) {
		if (dev->mc_count >
		    (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
			DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "can be added, please enable ");
			DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
			return;
		}

		prev_cnt = sp->mc_addr_count;
		sp->mc_addr_count = dev->mc_count;

		/* Clear out the previous list of Mc in the H/W. */
		for (i = 0; i < prev_cnt; i++) {
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
				&bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			    RMAC_ADDR_CMD_MEM_OFFSET
			    (MAC_MC_ADDR_START_OFFSET + i);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG, "%s: Adding ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
				return;
			}
		}

		/* Create the new Rx filter list and update the same in H/W. */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
		     i++, mclist = mclist->next) {
			memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
			       ETH_ALEN);
			mac_addr = 0;
			/* Pack the six address bytes into a u64, MSB first */
			for (j = 0; j < ETH_ALEN; j++) {
				mac_addr |= mclist->dmi_addr[j];
				mac_addr <<= 8;
			}
			mac_addr >>= 8;
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
				&bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			    RMAC_ADDR_CMD_MEM_OFFSET
			    (i + MAC_MC_ADDR_START_OFFSET);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG, "%s: Adding ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
				return;
			}
		}
	}
}
4895
4896 /**
4897  *  s2io_set_mac_addr - Programs the Xframe mac address
4898  *  @dev : pointer to the device structure.
4899  *  @addr: a uchar pointer to the new mac address which is to be set.
4900  *  Description : This procedure will program the Xframe to receive
4901  *  frames with new Mac Address
4902  *  Return value: SUCCESS on success and an appropriate (-)ve integer
4903  *  as defined in errno.h file on failure.
4904  */
4905
4906 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4907 {
4908         struct s2io_nic *sp = dev->priv;
4909         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4910         register u64 val64, mac_addr = 0;
4911         int i;
4912         u64 old_mac_addr = 0;
4913
4914         /*
4915          * Set the new MAC address as the new unicast filter and reflect this
4916          * change on the device address registered with the OS. It will be
4917          * at offset 0.
4918          */
4919         for (i = 0; i < ETH_ALEN; i++) {
4920                 mac_addr <<= 8;
4921                 mac_addr |= addr[i];
4922                 old_mac_addr <<= 8;
4923                 old_mac_addr |= sp->def_mac_addr[0].mac_addr[i];
4924         }
4925
4926         if(0 == mac_addr)
4927                 return SUCCESS;
4928
4929         /* Update the internal structure with this new mac address */
4930         if(mac_addr != old_mac_addr) {
4931                 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
4932                 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr);
4933                 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8);
4934                 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16);
4935                 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24);
4936                 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32);
4937                 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40);
4938         }
4939
4940         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4941                &bar0->rmac_addr_data0_mem);
4942
4943         val64 =
4944             RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4945             RMAC_ADDR_CMD_MEM_OFFSET(0);
4946         writeq(val64, &bar0->rmac_addr_cmd_mem);
4947         /* Wait till command completes */
4948         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4949                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
4950                 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4951                 return FAILURE;
4952         }
4953
4954         return SUCCESS;
4955 }
4956
4957 /**
4958  * s2io_ethtool_sset - Sets different link parameters.
4959  * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
4960  * @info: pointer to the structure with parameters given by ethtool to set
4961  * link information.
4962  * Description:
4963  * The function sets different link parameters provided by the user onto
4964  * the NIC.
4965  * Return value:
4966  * 0 on success.
4967 */
4968
4969 static int s2io_ethtool_sset(struct net_device *dev,
4970                              struct ethtool_cmd *info)
4971 {
4972         struct s2io_nic *sp = dev->priv;
4973         if ((info->autoneg == AUTONEG_ENABLE) ||
4974             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4975                 return -EINVAL;
4976         else {
4977                 s2io_close(sp->dev);
4978                 s2io_open(sp->dev);
4979         }
4980
4981         return 0;
4982 }
4983
4984 /**
4985  * s2io_ethtol_gset - Return link specific information.
4986  * @sp : private member of the device structure, pointer to the
4987  *      s2io_nic structure.
4988  * @info : pointer to the structure with parameters given by ethtool
4989  * to return link information.
4990  * Description:
4991  * Returns link specific information like speed, duplex etc.. to ethtool.
4992  * Return value :
4993  * return 0 on success.
4994  */
4995
4996 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4997 {
4998         struct s2io_nic *sp = dev->priv;
4999         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5000         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5001         info->port = PORT_FIBRE;
5002         /* info->transceiver?? TODO */
5003
5004         if (netif_carrier_ok(sp->dev)) {
5005                 info->speed = 10000;
5006                 info->duplex = DUPLEX_FULL;
5007         } else {
5008                 info->speed = -1;
5009                 info->duplex = -1;
5010         }
5011
5012         info->autoneg = AUTONEG_DISABLE;
5013         return 0;
5014 }
5015
5016 /**
5017  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5018  * @sp : private member of the device structure, which is a pointer to the
5019  * s2io_nic structure.
5020  * @info : pointer to the structure with parameters given by ethtool to
5021  * return driver information.
5022  * Description:
5023  * Returns driver specefic information like name, version etc.. to ethtool.
5024  * Return value:
5025  *  void
5026  */
5027
5028 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5029                                   struct ethtool_drvinfo *info)
5030 {
5031         struct s2io_nic *sp = dev->priv;
5032
5033         strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5034         strncpy(info->version, s2io_driver_version, sizeof(info->version));
5035         strncpy(info->fw_version, "", sizeof(info->fw_version));
5036         strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5037         info->regdump_len = XENA_REG_SPACE;
5038         info->eedump_len = XENA_EEPROM_SPACE;
5039 }
5040
5041 /**
5042  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5043  *  @sp: private member of the device structure, which is a pointer to the
5044  *  s2io_nic structure.
5045  *  @regs : pointer to the structure with parameters given by ethtool for
5046  *  dumping the registers.
5047  *  @reg_space: The input argumnet into which all the registers are dumped.
5048  *  Description:
5049  *  Dumps the entire register space of xFrame NIC into the user given
5050  *  buffer area.
5051  * Return value :
5052  * void .
5053 */
5054
5055 static void s2io_ethtool_gregs(struct net_device *dev,
5056                                struct ethtool_regs *regs, void *space)
5057 {
5058         int i;
5059         u64 reg;
5060         u8 *reg_space = (u8 *) space;
5061         struct s2io_nic *sp = dev->priv;
5062
5063         regs->len = XENA_REG_SPACE;
5064         regs->version = sp->pdev->subsystem_device;
5065
5066         for (i = 0; i < regs->len; i += 8) {
5067                 reg = readq(sp->bar0 + i);
5068                 memcpy((reg_space + i), &reg, 8);
5069         }
5070 }
5071
5072 /**
5073  *  s2io_phy_id  - timer function that alternates adapter LED.
5074  *  @data : address of the private member of the device structure, which
5075  *  is a pointer to the s2io_nic structure, provided as an u32.
5076  * Description: This is actually the timer function that alternates the
5077  * adapter LED bit of the adapter control bit to set/reset every time on
5078  * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
5079  *  once every second.
5080 */
5081 static void s2io_phy_id(unsigned long data)
5082 {
5083         struct s2io_nic *sp = (struct s2io_nic *) data;
5084         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5085         u64 val64 = 0;
5086         u16 subid;
5087
5088         subid = sp->pdev->subsystem_device;
5089         if ((sp->device_type == XFRAME_II_DEVICE) ||
5090                    ((subid & 0xFF) >= 0x07)) {
5091                 val64 = readq(&bar0->gpio_control);
5092                 val64 ^= GPIO_CTRL_GPIO_0;
5093                 writeq(val64, &bar0->gpio_control);
5094         } else {
5095                 val64 = readq(&bar0->adapter_control);
5096                 val64 ^= ADAPTER_LED_ON;
5097                 writeq(val64, &bar0->adapter_control);
5098         }
5099
5100         mod_timer(&sp->id_timer, jiffies + HZ / 2);
5101 }
5102
5103 /**
5104  * s2io_ethtool_idnic - To physically identify the nic on the system.
5105  * @sp : private member of the device structure, which is a pointer to the
5106  * s2io_nic structure.
5107  * @id : pointer to the structure with identification parameters given by
5108  * ethtool.
5109  * Description: Used to physically identify the NIC on the system.
5110  * The Link LED will blink for a time specified by the user for
5111  * identification.
5112  * NOTE: The Link has to be Up to be able to blink the LED. Hence
5113  * identification is possible only if it's link is up.
5114  * Return value:
5115  * int , returns 0 on success
5116  */
5117
static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
{
	u64 val64 = 0, last_gpio_ctrl_val;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u16 subid;

	subid = sp->pdev->subsystem_device;
	/* Save the GPIO control state so it can be restored afterwards on
	 * cards with faulty link indicators (see restore below). */
	last_gpio_ctrl_val = readq(&bar0->gpio_control);
	/* Older Xframe I boards (subsystem id < 0x07) drive the LED via
	 * adapter_control, which only works while the adapter is enabled. */
	if ((sp->device_type == XFRAME_I_DEVICE) &&
		((subid & 0xFF) < 0x07)) {
		val64 = readq(&bar0->adapter_control);
		if (!(val64 & ADAPTER_CNTL_EN)) {
			printk(KERN_ERR
			       "Adapter Link down, cannot blink LED\n");
			return -EFAULT;
		}
	}
	/* Lazily set up the blink timer the first time this is called. */
	if (sp->id_timer.function == NULL) {
		init_timer(&sp->id_timer);
		sp->id_timer.function = s2io_phy_id;
		sp->id_timer.data = (unsigned long) sp;
	}
	mod_timer(&sp->id_timer, jiffies);
	/* Blink for the user-requested duration, or the default.
	 * NOTE(review): data * HZ is passed to msleep_interruptible(),
	 * which takes milliseconds — this matches seconds only when
	 * HZ == 1000; confirm intended units. */
	if (data)
		msleep_interruptible(data * HZ);
	else
		msleep_interruptible(MAX_FLICKER_TIME);
	del_timer_sync(&sp->id_timer);

	/* Restore the saved GPIO state on cards whose link LED would
	 * otherwise be left in the wrong state. */
	if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
		writeq(last_gpio_ctrl_val, &bar0->gpio_control);
		last_gpio_ctrl_val = readq(&bar0->gpio_control);
	}

	return 0;
}
5155
5156 static void s2io_ethtool_gringparam(struct net_device *dev,
5157                                     struct ethtool_ringparam *ering)
5158 {
5159         struct s2io_nic *sp = dev->priv;
5160         int i,tx_desc_count=0,rx_desc_count=0;
5161
5162         if (sp->rxd_mode == RXD_MODE_1)
5163                 ering->rx_max_pending = MAX_RX_DESC_1;
5164         else if (sp->rxd_mode == RXD_MODE_3B)
5165                 ering->rx_max_pending = MAX_RX_DESC_2;
5166
5167         ering->tx_max_pending = MAX_TX_DESC;
5168         for (i = 0 ; i < sp->config.tx_fifo_num ; i++) 
5169                 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5170         
5171         DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5172         ering->tx_pending = tx_desc_count;
5173         rx_desc_count = 0;
5174         for (i = 0 ; i < sp->config.rx_ring_num ; i++) 
5175                 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5176
5177         ering->rx_pending = rx_desc_count;
5178
5179         ering->rx_mini_max_pending = 0;
5180         ering->rx_mini_pending = 0;
5181         if(sp->rxd_mode == RXD_MODE_1)
5182                 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5183         else if (sp->rxd_mode == RXD_MODE_3B)
5184                 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5185         ering->rx_jumbo_pending = rx_desc_count;
5186 }
5187
5188 /**
5189  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5190  * @sp : private member of the device structure, which is a pointer to the
5191  *      s2io_nic structure.
5192  * @ep : pointer to the structure with pause parameters given by ethtool.
5193  * Description:
5194  * Returns the Pause frame generation and reception capability of the NIC.
5195  * Return value:
5196  *  void
5197  */
5198 static void s2io_ethtool_getpause_data(struct net_device *dev,
5199                                        struct ethtool_pauseparam *ep)
5200 {
5201         u64 val64;
5202         struct s2io_nic *sp = dev->priv;
5203         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5204
5205         val64 = readq(&bar0->rmac_pause_cfg);
5206         if (val64 & RMAC_PAUSE_GEN_ENABLE)
5207                 ep->tx_pause = TRUE;
5208         if (val64 & RMAC_PAUSE_RX_ENABLE)
5209                 ep->rx_pause = TRUE;
5210         ep->autoneg = FALSE;
5211 }
5212
5213 /**
5214  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5215  * @sp : private member of the device structure, which is a pointer to the
5216  *      s2io_nic structure.
5217  * @ep : pointer to the structure with pause parameters given by ethtool.
5218  * Description:
5219  * It can be used to set or reset Pause frame generation or reception
5220  * support of the NIC.
5221  * Return value:
5222  * int, returns 0 on Success
5223  */
5224
5225 static int s2io_ethtool_setpause_data(struct net_device *dev,
5226                                struct ethtool_pauseparam *ep)
5227 {
5228         u64 val64;
5229         struct s2io_nic *sp = dev->priv;
5230         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5231
5232         val64 = readq(&bar0->rmac_pause_cfg);
5233         if (ep->tx_pause)
5234                 val64 |= RMAC_PAUSE_GEN_ENABLE;
5235         else
5236                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5237         if (ep->rx_pause)
5238                 val64 |= RMAC_PAUSE_RX_ENABLE;
5239         else
5240                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5241         writeq(val64, &bar0->rmac_pause_cfg);
5242         return 0;
5243 }
5244
5245 /**
5246  * read_eeprom - reads 4 bytes of data from user given offset.
5247  * @sp : private member of the device structure, which is a pointer to the
5248  *      s2io_nic structure.
5249  * @off : offset at which the data must be written
5250  * @data : Its an output parameter where the data read at the given
5251  *      offset is stored.
5252  * Description:
5253  * Will read 4 bytes of data from the user given offset and return the
5254  * read data.
5255  * NOTE: Will allow to read only part of the EEPROM visible through the
5256  *   I2C bus.
5257  * Return value:
5258  *  -1 on failure and 0 on success.
5259  */
5260
5261 #define S2IO_DEV_ID             5
static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I: read through the I2C controller.  Kick off a 4-byte
	 * read and poll (up to 5 x 50ms) for the END bit. */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II: read through the SPI controller.  Program the command,
	 * then raise REQ with a second write, and poll for NACK or DONE. */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;	/* device rejected the request */
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff;	/* only 3 bytes are valid */
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	/* ret stays -1 on timeout; *data is untouched on any failure. */
	return ret;
}
5311
5312 /**
5313  *  write_eeprom - actually writes the relevant part of the data value.
5314  *  @sp : private member of the device structure, which is a pointer to the
5315  *       s2io_nic structure.
5316  *  @off : offset at which the data must be written
5317  *  @data : The data that is to be written
5318  *  @cnt : Number of bytes of the data that are actually to be written into
5319  *  the Eeprom. (max of 3)
5320  * Description:
5321  *  Actually writes the relevant part of the data value into the Eeprom
5322  *  through the I2C bus.
5323  * Return value:
5324  *  0 on success, -1 on failure.
5325  */
5326
static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I: write via the I2C controller and poll (up to 5 x 50ms)
	 * for the END bit; NACK on completion means the write failed. */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II: write via the SPI controller.  A byte count of 8 is
	 * encoded as 0 in the BYTECNT field.  Program data and command,
	 * raise REQ with a second write, then poll for NACK or DONE. */
	if (sp->device_type == XFRAME_II_DEVICE) {
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;	/* device rejected the request */
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	/* ret stays -1 on timeout. */
	return ret;
}
5376 static void s2io_vpd_read(struct s2io_nic *nic)
5377 {
5378         u8 *vpd_data;
5379         u8 data;
5380         int i=0, cnt, fail = 0;
5381         int vpd_addr = 0x80;
5382
5383         if (nic->device_type == XFRAME_II_DEVICE) {
5384                 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5385                 vpd_addr = 0x80;
5386         }
5387         else {
5388                 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5389                 vpd_addr = 0x50;
5390         }
5391         strcpy(nic->serial_num, "NOT AVAILABLE");
5392
5393         vpd_data = kmalloc(256, GFP_KERNEL);
5394         if (!vpd_data) {
5395                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5396                 return;
5397         }
5398         nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5399
5400         for (i = 0; i < 256; i +=4 ) {
5401                 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5402                 pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5403                 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5404                 for (cnt = 0; cnt <5; cnt++) {
5405                         msleep(2);
5406                         pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5407                         if (data == 0x80)
5408                                 break;
5409                 }
5410                 if (cnt >= 5) {
5411                         DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5412                         fail = 1;
5413                         break;
5414                 }
5415                 pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5416                                       (u32 *)&vpd_data[i]);
5417         }
5418
5419         if(!fail) {
5420                 /* read serial number of adapter */
5421                 for (cnt = 0; cnt < 256; cnt++) {
5422                 if ((vpd_data[cnt] == 'S') &&
5423                         (vpd_data[cnt+1] == 'N') &&
5424                         (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5425                                 memset(nic->serial_num, 0, VPD_STRING_LEN);
5426                                 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5427                                         vpd_data[cnt+2]);
5428                                 break;
5429                         }
5430                 }
5431         }
5432
5433         if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5434                 memset(nic->product_name, 0, vpd_data[1]);
5435                 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5436         }
5437         kfree(vpd_data);
5438         nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5439 }
5440
5441 /**
5442  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5443  *  @sp : private member of the device structure, which is a pointer to the *       s2io_nic structure.
5444  *  @eeprom : pointer to the user level structure provided by ethtool,
5445  *  containing all relevant information.
5446  *  @data_buf : user defined value to be written into Eeprom.
5447  *  Description: Reads the values stored in the Eeprom at given offset
5448  *  for a given length. Stores these values int the input argument data
5449  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5450  *  Return value:
5451  *  int  0 on success
5452  */
5453
5454 static int s2io_ethtool_geeprom(struct net_device *dev,
5455                          struct ethtool_eeprom *eeprom, u8 * data_buf)
5456 {
5457         u32 i, valid;
5458         u64 data;
5459         struct s2io_nic *sp = dev->priv;
5460
5461         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5462
5463         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5464                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5465
5466         for (i = 0; i < eeprom->len; i += 4) {
5467                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5468                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5469                         return -EFAULT;
5470                 }
5471                 valid = INV(data);
5472                 memcpy((data_buf + i), &valid, 4);
5473         }
5474         return 0;
5475 }
5476
5477 /**
5478  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5479  *  @sp : private member of the device structure, which is a pointer to the
5480  *  s2io_nic structure.
5481  *  @eeprom : pointer to the user level structure provided by ethtool,
5482  *  containing all relevant information.
5483  *  @data_buf ; user defined value to be written into Eeprom.
5484  *  Description:
5485  *  Tries to write the user provided value in the Eeprom, at the offset
5486  *  given by the user.
5487  *  Return value:
5488  *  0 on success, -EFAULT on failure.
5489  */
5490
static int s2io_ethtool_seeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom,
				u8 * data_buf)
{
	int len = eeprom->len, cnt = 0;
	u64 valid = 0, data;
	struct s2io_nic *sp = dev->priv;

	/* Reject writes unless the caller echoes back the magic built from
	 * this device's vendor/device IDs (guards against stray writes). */
	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
		DBG_PRINT(ERR_DBG,
			  "ETHTOOL_WRITE_EEPROM Err: Magic value ");
		DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
			  eeprom->magic);
		return -EFAULT;
	}

	/* Write one byte at a time.  A non-zero byte is shifted into the
	 * high byte of the 32-bit word handed to write_eeprom().
	 * NOTE(review): the zero-byte special case and the cnt=0 byte
	 * count appear tied to the controller's write format — confirm
	 * against the I2C/SPI write path before changing. */
	while (len) {
		data = (u32) data_buf[cnt] & 0x000000FF;
		if (data) {
			valid = (u32) (data << 24);
		} else
			valid = data;

		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
			DBG_PRINT(ERR_DBG,
				  "ETHTOOL_WRITE_EEPROM Err: Cannot ");
			DBG_PRINT(ERR_DBG,
				  "write into the specified offset\n");
			return -EFAULT;
		}
		cnt++;
		len--;
	}

	return 0;
}
5527
5528 /**
5529  * s2io_register_test - reads and writes into all clock domains.
5530  * @sp : private member of the device structure, which is a pointer to the
5531  * s2io_nic structure.
5532  * @data : variable that returns the result of each of the test conducted b
5533  * by the driver.
5534  * Description:
5535  * Read and write into all clock domains. The NIC has 3 clock domains,
5536  * see that registers in all the three regions are accessible.
5537  * Return value:
5538  * 0 on success.
5539  */
5540
static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, exp_val;
	int fail = 0;

	/* Level 1: the swapper feedback register must read back its
	 * fixed signature value. */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x123456789abcdefULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
	}

	/* Level 2: rmac_pause_cfg reset value. */
	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 != 0xc000ffff00000000ULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
	}

	/* Level 3: rx_queue_cfg reset value differs between Xframe I
	 * and Xframe II. */
	val64 = readq(&bar0->rx_queue_cfg);
	if (sp->device_type == XFRAME_II_DEVICE)
		exp_val = 0x0404040404040404ULL;
	else
		exp_val = 0x0808080808080808ULL;
	if (val64 != exp_val) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
	}

	/* Level 4: xgxs_efifo_cfg reset value. */
	val64 = readq(&bar0->xgxs_efifo_cfg);
	if (val64 != 0x000000001923141EULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
	}

	/* Write tests: xmsi_data must hold both alternating-bit
	 * patterns written to it. */
	val64 = 0x5A5A5A5A5A5A5A5AULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
	}

	val64 = 0xA5A5A5A5A5A5A5A5ULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
	}

	/* 0 = all sub-tests passed, 1 = at least one failed. */
	*data = fail;
	return fail;
}
5594
5595 /**
5596  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5597  * @sp : private member of the device structure, which is a pointer to the
5598  * s2io_nic structure.
5599  * @data:variable that returns the result of each of the test conducted by
5600  * the driver.
5601  * Description:
5602  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5603  * register.
5604  * Return value:
5605  * 0 on success.
5606  */
5607
5608 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5609 {
5610         int fail = 0;
5611         u64 ret_data, org_4F0, org_7F0;
5612         u8 saved_4F0 = 0, saved_7F0 = 0;
5613         struct net_device *dev = sp->dev;
5614
5615         /* Test Write Error at offset 0 */
5616         /* Note that SPI interface allows write access to all areas
5617          * of EEPROM. Hence doing all negative testing only for Xframe I.
5618          */
5619         if (sp->device_type == XFRAME_I_DEVICE)
5620                 if (!write_eeprom(sp, 0, 0, 3))
5621                         fail = 1;
5622
5623         /* Save current values at offsets 0x4F0 and 0x7F0 */
5624         if (!read_eeprom(sp, 0x4F0, &org_4F0))
5625                 saved_4F0 = 1;
5626         if (!read_eeprom(sp, 0x7F0, &org_7F0))
5627                 saved_7F0 = 1;
5628
5629         /* Test Write at offset 4f0 */
5630         if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5631                 fail = 1;
5632         if (read_eeprom(sp, 0x4F0, &ret_data))
5633                 fail = 1;
5634
5635         if (ret_data != 0x012345) {
5636                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5637                         "Data written %llx Data read %llx\n",
5638                         dev->name, (unsigned long long)0x12345,
5639                         (unsigned long long)ret_data);
5640                 fail = 1;
5641         }
5642
5643         /* Reset the EEPROM data go FFFF */
5644         write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5645
5646         /* Test Write Request Error at offset 0x7c */
5647         if (sp->device_type == XFRAME_I_DEVICE)
5648                 if (!write_eeprom(sp, 0x07C, 0, 3))
5649                         fail = 1;
5650
5651         /* Test Write Request at offset 0x7f0 */
5652         if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5653                 fail = 1;
5654         if (read_eeprom(sp, 0x7F0, &ret_data))
5655                 fail = 1;
5656
5657         if (ret_data != 0x012345) {
5658                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5659                         "Data written %llx Data read %llx\n",
5660                         dev->name, (unsigned long long)0x12345,
5661                         (unsigned long long)ret_data);
5662                 fail = 1;
5663         }
5664
5665         /* Reset the EEPROM data go FFFF */
5666         write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5667
5668         if (sp->device_type == XFRAME_I_DEVICE) {
5669                 /* Test Write Error at offset 0x80 */
5670                 if (!write_eeprom(sp, 0x080, 0, 3))
5671                         fail = 1;
5672
5673                 /* Test Write Error at offset 0xfc */
5674                 if (!write_eeprom(sp, 0x0FC, 0, 3))
5675                         fail = 1;
5676
5677                 /* Test Write Error at offset 0x100 */
5678                 if (!write_eeprom(sp, 0x100, 0, 3))
5679                         fail = 1;
5680
5681                 /* Test Write Error at offset 4ec */
5682                 if (!write_eeprom(sp, 0x4EC, 0, 3))
5683                         fail = 1;
5684         }
5685
5686         /* Restore values at offsets 0x4F0 and 0x7F0 */
5687         if (saved_4F0)
5688                 write_eeprom(sp, 0x4F0, org_4F0, 3);
5689         if (saved_7F0)
5690                 write_eeprom(sp, 0x7F0, org_7F0, 3);
5691
5692         *data = fail;
5693         return fail;
5694 }
5695
5696 /**
5697  * s2io_bist_test - invokes the MemBist test of the card .
5698  * @sp : private member of the device structure, which is a pointer to the
5699  * s2io_nic structure.
5700  * @data:variable that returns the result of each of the test conducted by
5701  * the driver.
5702  * Description:
5703  * This invokes the MemBist test of the card. We give around
5704  * 2 secs time for the Test to complete. If it's still not complete
5705  * within this peiod, we consider that the test failed.
5706  * Return value:
5707  * 0 on success and -1 on failure.
5708  */
5709
5710 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5711 {
5712         u8 bist = 0;
5713         int cnt = 0, ret = -1;
5714
5715         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5716         bist |= PCI_BIST_START;
5717         pci_write_config_word(sp->pdev, PCI_BIST, bist);
5718
5719         while (cnt < 20) {
5720                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5721                 if (!(bist & PCI_BIST_START)) {
5722                         *data = (bist & PCI_BIST_CODE_MASK);
5723                         ret = 0;
5724                         break;
5725                 }
5726                 msleep(100);
5727                 cnt++;
5728         }
5729
5730         return ret;
5731 }
5732
5733 /**
5734  * s2io-link_test - verifies the link state of the nic
5735  * @sp ; private member of the device structure, which is a pointer to the
5736  * s2io_nic structure.
5737  * @data: variable that returns the result of each of the test conducted by
5738  * the driver.
5739  * Description:
5740  * The function verifies the link state of the NIC and updates the input
5741  * argument 'data' appropriately.
5742  * Return value:
5743  * 0 on success.
5744  */
5745
5746 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5747 {
5748         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5749         u64 val64;
5750
5751         val64 = readq(&bar0->adapter_status);
5752         if(!(LINK_IS_UP(val64)))
5753                 *data = 1;
5754         else
5755                 *data = 0;
5756
5757         return *data;
5758 }
5759
5760 /**
5761  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5762  * @sp - private member of the device structure, which is a pointer to the
5763  * s2io_nic structure.
5764  * @data - variable that returns the result of each of the test
5765  * conducted by the driver.
5766  * Description:
5767  *  This is one of the offline test that tests the read and write
5768  *  access to the RldRam chip on the NIC.
5769  * Return value:
5770  *  0 on success.
5771  */
5772
static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC so the test patterns are not corrected/flagged. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the RLDRAM controller into test mode. */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: iteration 0 writes the base patterns, iteration 1
	 * writes them with the upper 48 bits inverted. */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d2);

		/* Target address for the test access. */
		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Kick off the write phase and poll (up to 1s) for DONE. */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		/* Timed out waiting for the write phase. */
		if (cnt == 5)
			break;

		/* Kick off the read-back phase and poll (up to 2.5s). */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		/* Timed out waiting for the read phase. */
		if (cnt == 5)
			break;

		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	/* NOTE(review): a timeout break above leaves test_fail at 0, so a
	 * hung test reports success — confirm whether that is intended. */
	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
5857
5858 /**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of card.
5860  *  @sp : private member of the device structure, which is a pointer to the
5861  *  s2io_nic structure.
5862  *  @ethtest : pointer to a ethtool command specific structure that will be
5863  *  returned to the user.
5864  *  @data : variable that returns the result of each of the test
5865  * conducted by the driver.
5866  * Description:
5867  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
5868  *  the health of the card.
5869  * Return value:
5870  *  void
5871  */
5872
5873 static void s2io_ethtool_test(struct net_device *dev,
5874                               struct ethtool_test *ethtest,
5875                               uint64_t * data)
5876 {
5877         struct s2io_nic *sp = dev->priv;
5878         int orig_state = netif_running(sp->dev);
5879
5880         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5881                 /* Offline Tests. */
5882                 if (orig_state)
5883                         s2io_close(sp->dev);
5884
5885                 if (s2io_register_test(sp, &data[0]))
5886                         ethtest->flags |= ETH_TEST_FL_FAILED;
5887
5888                 s2io_reset(sp);
5889
5890                 if (s2io_rldram_test(sp, &data[3]))
5891                         ethtest->flags |= ETH_TEST_FL_FAILED;
5892
5893                 s2io_reset(sp);
5894
5895                 if (s2io_eeprom_test(sp, &data[1]))
5896                         ethtest->flags |= ETH_TEST_FL_FAILED;
5897
5898                 if (s2io_bist_test(sp, &data[4]))
5899                         ethtest->flags |= ETH_TEST_FL_FAILED;
5900
5901                 if (orig_state)
5902                         s2io_open(sp->dev);
5903
5904                 data[2] = 0;
5905         } else {
5906                 /* Online Tests. */
5907                 if (!orig_state) {
5908                         DBG_PRINT(ERR_DBG,
5909                                   "%s: is not up, cannot run test\n",
5910                                   dev->name);
5911                         data[0] = -1;
5912                         data[1] = -1;
5913                         data[2] = -1;
5914                         data[3] = -1;
5915                         data[4] = -1;
5916                 }
5917
5918                 if (s2io_link_test(sp, &data[2]))
5919                         ethtest->flags |= ETH_TEST_FL_FAILED;
5920
5921                 data[0] = 0;
5922                 data[1] = 0;
5923                 data[3] = 0;
5924                 data[4] = 0;
5925         }
5926 }
5927
/*
 * s2io_get_ethtool_stats - ethtool callback that fills the statistics array.
 * @dev: device whose statistics are requested.
 * @estats: ethtool stats request descriptor (not consulted here).
 * @tmp_stats: output array.
 *
 * NOTE: entries are written via the running index 'i' and MUST stay in
 * exactly the order of the corresponding string tables
 * (ethtool_xena_stats_keys, then ethtool_enhanced_stats_keys on
 * Xframe II only, then ethtool_driver_stats_keys) — see
 * s2io_ethtool_get_strings().  Do not reorder these statements.
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats,
				   u64 * tmp_stats)
{
	int i = 0, k;
	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Refresh the DMA-able hardware statistics block before reading. */
	s2io_updt_stats(sp);
	/*
	 * TMAC (transmit MAC) counters.  Many hardware counters are split
	 * into a 32-bit value plus a 32-bit overflow register; each pair is
	 * recombined into one 64-bit value here.
	 */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
		le32_to_cpu(stat_info->tmac_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_bcst_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_any_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_vld_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_icmp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_rst_tcp);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_udp);
	/* RMAC (receive MAC) counters. */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_bcst_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
		<< 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
		 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_discarded_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
		 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_usized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_osized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_frag_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_jabber_frms);
	/* Received-frame size histogram. */
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_ip);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_icmp);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_udp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_err_drp_udp);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
	/* Per-receive-queue frame and queue-full counters. */
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_pause_cnt);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_accepted_ip);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
	/* PCI bus transaction counters. */
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);

	/* Enhanced statistics exist only for Hercules */
	if(sp->device_type == XFRAME_II_DEVICE) {
		tmp_stats[i++] =
				le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
		tmp_stats[i++] =
				le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
		tmp_stats[i++] =
				le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
	}

	/* Driver-maintained software statistics (host byte order). */
	tmp_stats[i++] = 0;
	tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
	tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
	tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
	tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
	for (k = 0; k < MAX_RX_RINGS; k++)
		tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
	/* XPAK transceiver alarm/warning indicators. */
	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
	tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
	tmp_stats[i++] = stat_info->sw_stat.sending_both;
	tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
	tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
	/* Average packets per LRO aggregation. */
	if (stat_info->sw_stat.num_aggregations) {
		u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
		int count = 0;
		/*
		 * Since 64-bit divide does not work on all platforms,
		 * do repeated subtraction.
		 */
		while (tmp >= stat_info->sw_stat.num_aggregations) {
			tmp -= stat_info->sw_stat.num_aggregations;
			count++;
		}
		tmp_stats[i++] = count;
	}
	else
		tmp_stats[i++] = 0;
	tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
	tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
	tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
	tmp_stats[i++] = stat_info->sw_stat.mem_freed;
	tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
	tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
	tmp_stats[i++] = stat_info->sw_stat.link_up_time;
	tmp_stats[i++] = stat_info->sw_stat.link_down_time;

	/* Transmit-side error counters. */
	tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;

	/* Receive-side and adapter-block error counters. */
	tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
}
6203
6204 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6205 {
6206         return (XENA_REG_SPACE);
6207 }
6208
6209
6210 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6211 {
6212         struct s2io_nic *sp = dev->priv;
6213
6214         return (sp->rx_csum);
6215 }
6216
6217 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6218 {
6219         struct s2io_nic *sp = dev->priv;
6220
6221         if (data)
6222                 sp->rx_csum = 1;
6223         else
6224                 sp->rx_csum = 0;
6225
6226         return 0;
6227 }
6228
6229 static int s2io_get_eeprom_len(struct net_device *dev)
6230 {
6231         return (XENA_EEPROM_SPACE);
6232 }
6233
6234 static int s2io_get_sset_count(struct net_device *dev, int sset)
6235 {
6236         struct s2io_nic *sp = dev->priv;
6237
6238         switch (sset) {
6239         case ETH_SS_TEST:
6240                 return S2IO_TEST_LEN;
6241         case ETH_SS_STATS:
6242                 switch(sp->device_type) {
6243                 case XFRAME_I_DEVICE:
6244                         return XFRAME_I_STAT_LEN;
6245                 case XFRAME_II_DEVICE:
6246                         return XFRAME_II_STAT_LEN;
6247                 default:
6248                         return 0;
6249                 }
6250         default:
6251                 return -EOPNOTSUPP;
6252         }
6253 }
6254
6255 static void s2io_ethtool_get_strings(struct net_device *dev,
6256                                      u32 stringset, u8 * data)
6257 {
6258         int stat_size = 0;
6259         struct s2io_nic *sp = dev->priv;
6260
6261         switch (stringset) {
6262         case ETH_SS_TEST:
6263                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6264                 break;
6265         case ETH_SS_STATS:
6266                 stat_size = sizeof(ethtool_xena_stats_keys);
6267                 memcpy(data, &ethtool_xena_stats_keys,stat_size);
6268                 if(sp->device_type == XFRAME_II_DEVICE) {
6269                         memcpy(data + stat_size,
6270                                 &ethtool_enhanced_stats_keys,
6271                                 sizeof(ethtool_enhanced_stats_keys));
6272                         stat_size += sizeof(ethtool_enhanced_stats_keys);
6273                 }
6274
6275                 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6276                         sizeof(ethtool_driver_stats_keys));
6277         }
6278 }
6279
6280 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6281 {
6282         if (data)
6283                 dev->features |= NETIF_F_IP_CSUM;
6284         else
6285                 dev->features &= ~NETIF_F_IP_CSUM;
6286
6287         return 0;
6288 }
6289
6290 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6291 {
6292         return (dev->features & NETIF_F_TSO) != 0;
6293 }
6294 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6295 {
6296         if (data)
6297                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6298         else
6299                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6300
6301         return 0;
6302 }
6303
/* ethtool operations table registered for every s2io net_device. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_tso = s2io_ethtool_op_get_tso,
	.set_tso = s2io_ethtool_op_set_tso,
	.set_ufo = ethtool_op_set_ufo,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.phys_id = s2io_ethtool_idnic,
	.get_ethtool_stats = s2io_get_ethtool_stats,
	.get_sset_count = s2io_get_sset_count,
};
6330
6331 /**
6332  *  s2io_ioctl - Entry point for the Ioctl
6333  *  @dev :  Device pointer.
 *  @ifr :  An IOCTL specific structure, that can contain a pointer to
6335  *  a proprietary structure used to pass information to the driver.
6336  *  @cmd :  This is used to distinguish between the different commands that
6337  *  can be passed to the IOCTL functions.
6338  *  Description:
 *  Currently there is no special functionality supported in IOCTL, hence
 *  this function always returns -EOPNOTSUPP.
6341  */
6342
static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	/* No private ioctls are implemented by this driver. */
	return -EOPNOTSUPP;
}
6347
6348 /**
6349  *  s2io_change_mtu - entry point to change MTU size for the device.
6350  *   @dev : device pointer.
6351  *   @new_mtu : the new MTU size for the device.
6352  *   Description: A driver entry point to change MTU size for the device.
6353  *   Before changing the MTU the device must be stopped.
6354  *  Return value:
6355  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6356  *   file on failure.
6357  */
6358
6359 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6360 {
6361         struct s2io_nic *sp = dev->priv;
6362
6363         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6364                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6365                           dev->name);
6366                 return -EPERM;
6367         }
6368
6369         dev->mtu = new_mtu;
6370         if (netif_running(dev)) {
6371                 s2io_card_down(sp);
6372                 netif_stop_queue(dev);
6373                 if (s2io_card_up(sp)) {
6374                         DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6375                                   __FUNCTION__);
6376                 }
6377                 if (netif_queue_stopped(dev))
6378                         netif_wake_queue(dev);
6379         } else { /* Device is down */
6380                 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6381                 u64 val64 = new_mtu;
6382
6383                 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6384         }
6385
6386         return 0;
6387 }
6388
6389 /**
6390  *  s2io_tasklet - Bottom half of the ISR.
 *  @dev_addr : address of the net_device structure, cast to unsigned long.
6392  *  Description:
6393  *  This is the tasklet or the bottom half of the ISR. This is
6394  *  an extension of the ISR which is scheduled by the scheduler to be run
6395  *  when the load on the CPU is low. All low priority tasks of the ISR can
6396  *  be pushed into the tasklet. For now the tasklet is used only to
6397  *  replenish the Rx buffers in the Rx buffer descriptors.
6398  *  Return value:
6399  *  void.
6400  */
6401
static void s2io_tasklet(unsigned long dev_addr)
{
	/* dev_addr is the net_device pointer smuggled through the
	 * tasklet's unsigned long data argument. */
	struct net_device *dev = (struct net_device *) dev_addr;
	struct s2io_nic *sp = dev->priv;
	int i, ret;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	/*
	 * TASKLET_IN_USE guards against re-entry; it appears to atomically
	 * claim bit 0 of sp->tasklet_status (released by the clear_bit
	 * below) — NOTE(review): confirm against the macro's definition.
	 */
	if (!TASKLET_IN_USE) {
		/* Replenish Rx buffers on every configured ring, stopping
		 * early on allocation failure or when a ring is full. */
		for (i = 0; i < config->rx_ring_num; i++) {
			ret = fill_rx_buffers(sp, i);
			if (ret == -ENOMEM) {
				DBG_PRINT(INFO_DBG, "%s: Out of ",
					  dev->name);
				DBG_PRINT(INFO_DBG, "memory in tasklet\n");
				break;
			} else if (ret == -EFILL) {
				DBG_PRINT(INFO_DBG,
					  "%s: Rx Ring %d is full\n",
					  dev->name, i);
				break;
			}
		}
		clear_bit(0, (&sp->tasklet_status));
	}
}
6431
6432 /**
 * s2io_set_link - Set the link status
 * @data: long pointer to device private structure
6435  * Description: Sets the link status for the adapter
6436  */
6437
static void s2io_set_link(struct work_struct *work)
{
	struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
	struct net_device *dev = nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64;
	u16 subid;

	/* Serialise against other configuration paths via the RTNL lock. */
	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
		/* The card is being reset, no point doing anything */
		goto out_unlock;
	}

	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NICs self initiated
		 * cleanup to complete.
		 */
		msleep(100);
	}

	val64 = readq(&bar0->adapter_status);
	if (LINK_IS_UP(val64)) {
		/* Enable the adapter (and its LED) if it is not yet
		 * enabled and has reached a quiescent state. */
		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
			if (verify_xena_quiescence(nic)) {
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				/* Some subsystems drive the link LED via
				 * GPIO 0 instead of the adapter-control
				 * LED bit. */
				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
					nic->device_type, subid)) {
					val64 = readq(&bar0->gpio_control);
					val64 |= GPIO_CTRL_GPIO_0;
					writeq(val64, &bar0->gpio_control);
					/* Read back to flush the posted write. */
					val64 = readq(&bar0->gpio_control);
				} else {
					val64 |= ADAPTER_LED_ON;
					writeq(val64, &bar0->adapter_control);
				}
				nic->device_enabled_once = TRUE;
			} else {
				DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
				DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
				netif_stop_queue(dev);
			}
		}
		val64 = readq(&bar0->adapter_control);
		val64 |= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_UP);
	} else {
		/* Link is down: turn the indicator off and report it. */
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
						      subid)) {
			val64 = readq(&bar0->gpio_control);
			val64 &= ~GPIO_CTRL_GPIO_0;
			writeq(val64, &bar0->gpio_control);
			val64 = readq(&bar0->gpio_control);
		}
		/* turn off LED */
		val64 = readq(&bar0->adapter_control);
		val64 = val64 &(~ADAPTER_LED_ON);
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_DOWN);
	}
	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));

out_unlock:
	rtnl_unlock();
}
6512
/*
 * set_rxd_buffer_pointer - (re)attach receive buffer(s) to a Rx descriptor.
 * @sp: device private structure
 * @rxdp: descriptor to fill
 * @ba: buffer-address bookkeeping for 3B mode (unused in 1-buffer mode)
 * @skb: in/out skb pointer; if non-NULL on entry the previously mapped
 *       addresses in *temp0..*temp2 are reused instead of allocating
 * @temp0, @temp1, @temp2: cached DMA addresses shared across consecutive
 *       descriptors whose Host_Control is still 0
 * @size: skb allocation size for this ring mode
 *
 * Only acts on descriptors with Host_Control == 0 (i.e. no skb attached).
 * Returns 0 on success, -ENOMEM on skb allocation or PCI mapping failure.
 * NOTE(review): on the memalloc_failed path the partially mapped buffers of
 * earlier descriptors are unwound by the callers of *temp0..*temp2 reuse —
 * the skb itself is freed here.
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				struct buffAdd *ba,
				struct sk_buff **skb, u64 *temp0, u64 *temp1,
				u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frame are not going to be processed,
			 * using same mapped address for the Rxd
			 * buffer pointer
			 */
			rxdp1->Buffer0_ptr = *temp0;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated 
				+= (*skb)->truesize;
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, (*skb)->data,
					size - NET_IP_ALIGN,
					PCI_DMA_FROMDEVICE);
			/* 0 and DMA_ERROR_CODE both indicate a failed map */
			if( (rxdp1->Buffer0_ptr == 0) ||
				(rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
		if (*skb) {
			/* Reuse the previously mapped addresses */
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated 
				+= (*skb)->truesize;
			/* Buffer-2 receives the frame payload */
			rxdp3->Buffer2_ptr = *temp2 =
				pci_map_single(sp->pdev, (*skb)->data,
					       dev->mtu + 4,
					       PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer2_ptr == 0) ||
				(rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
				goto memalloc_failed;
			}
			/* Buffer-0 receives the Ethernet header */
			rxdp3->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
						PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer0_ptr == 0) ||
				(rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
				/* Unwind the buffer-2 mapping before failing */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			rxdp3->Buffer1_ptr = *temp1 =
				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer1_ptr == 0) ||
				(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
				/* Unwind both earlier mappings before failing */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer0_ptr,
					BUF0_LEN, PCI_DMA_FROMDEVICE);
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
		}
	}
	return 0;
	memalloc_failed:
		stats->pci_map_fail_cnt++;
		stats->mem_freed += (*skb)->truesize;
		dev_kfree_skb(*skb);
		return -ENOMEM;
}
6620
6621 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6622                                 int size)
6623 {
6624         struct net_device *dev = sp->dev;
6625         if (sp->rxd_mode == RXD_MODE_1) {
6626                 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6627         } else if (sp->rxd_mode == RXD_MODE_3B) {
6628                 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6629                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6630                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6631         }
6632 }
6633
6634 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6635 {
6636         int i, j, k, blk_cnt = 0, size;
6637         struct mac_info * mac_control = &sp->mac_control;
6638         struct config_param *config = &sp->config;
6639         struct net_device *dev = sp->dev;
6640         struct RxD_t *rxdp = NULL;
6641         struct sk_buff *skb = NULL;
6642         struct buffAdd *ba = NULL;
6643         u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6644
6645         /* Calculate the size based on ring mode */
6646         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6647                 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6648         if (sp->rxd_mode == RXD_MODE_1)
6649                 size += NET_IP_ALIGN;
6650         else if (sp->rxd_mode == RXD_MODE_3B)
6651                 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6652
6653         for (i = 0; i < config->rx_ring_num; i++) {
6654                 blk_cnt = config->rx_cfg[i].num_rxd /
6655                         (rxd_count[sp->rxd_mode] +1);
6656
6657                 for (j = 0; j < blk_cnt; j++) {
6658                         for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6659                                 rxdp = mac_control->rings[i].
6660                                         rx_blocks[j].rxds[k].virt_addr;
6661                                 if(sp->rxd_mode == RXD_MODE_3B)
6662                                         ba = &mac_control->rings[i].ba[j][k];
6663                                 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6664                                                        &skb,(u64 *)&temp0_64,
6665                                                        (u64 *)&temp1_64,
6666                                                        (u64 *)&temp2_64,
6667                                                         size) == ENOMEM) {
6668                                         return 0;
6669                                 }
6670
6671                                 set_rxd_buffer_size(sp, rxdp, size);
6672                                 wmb();
6673                                 /* flip the Ownership bit to Hardware */
6674                                 rxdp->Control_1 |= RXD_OWN_XENA;
6675                         }
6676                 }
6677         }
6678         return 0;
6679
6680 }
6681
/*
 * s2io_add_isr - register the driver's interrupt handler(s).
 * @sp: device private structure
 *
 * Tries MSI-X first if configured (falling back to INTA if enabling
 * fails), saves the MSI-X table contents, then registers one IRQ per
 * MSI-X vector (TX fifo or RX ring handlers) or a single shared INTA
 * handler.  Returns 0 on success, -1 if any request_irq() fails.
 */
static int s2io_add_isr(struct s2io_nic * sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		/* MSI-X could not be enabled; fall back to legacy INTA */
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/* Store the values of the MSIX table in the struct s2io_nic structure */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_tx_cnt=0,msix_rx_cnt=0;

		/* entry 0 is the alarm vector; data vectors start at 1 and
		 * run while in_use == MSIX_FLG */
		for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
			if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
				sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_fifo_handle, 0, sp->desc[i],
						  sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_tx_cnt++;
				}
			} else {
				sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_ring_handle, 0, sp->desc[i],
						  sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_rx_cnt++;
				}
			}
			if (err) {
				/* NOTE(review): vectors registered before this
				 * failure are not freed here — caller is
				 * expected to invoke s2io_rem_isr() */
				DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
					  "failed\n", dev->name, i);
				DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
				return -1;
			}
			sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
		}
		printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
		printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
	}
	if (sp->config.intr_type == INTA) {
		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
				sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
/*
 * s2io_rem_isr - unregister the driver's interrupt handler(s).
 * @sp: device private structure
 *
 * For MSI-X: frees every successfully registered vector, releases the
 * entry tables, clears the MSI enable bit in PCI config space and
 * disables MSI-X.  For INTA: synchronizes and frees the single shared IRQ.
 */
static void s2io_rem_isr(struct s2io_nic * sp)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if (sp->config.intr_type == MSI_X) {
		int i;
		u16 msi_control;

		/* entry 0 is the alarm vector; data vectors start at 1 */
		for (i=1; (sp->s2io_entries[i].in_use ==
			MSIX_REGISTERED_SUCCESS); i++) {
			int vector = sp->entries[i].vector;
			void *arg = sp->s2io_entries[i].arg;

			/* wait for any in-flight handler before freeing */
			synchronize_irq(vector);
			free_irq(vector, arg);
		}

		kfree(sp->entries);
		stats->mem_freed +=
			(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
		kfree(sp->s2io_entries);
		stats->mem_freed +=
			(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
		sp->entries = NULL;
		sp->s2io_entries = NULL;

		/* clear the MSI enable bit in config space (offset 0x42) —
		 * NOTE(review): hard-coded offset, presumably the device's
		 * MSI control register; confirm against the PCI cap list */
		pci_read_config_word(sp->pdev, 0x42, &msi_control);
		msi_control &= 0xFFFE; /* Disable MSI */
		pci_write_config_word(sp->pdev, 0x42, msi_control);

		pci_disable_msix(sp->pdev);
	} else {
		synchronize_irq(sp->pdev->irq);
		free_irq(sp->pdev->irq, dev);
	}
}
6799
/*
 * do_s2io_card_down - bring the adapter down.
 * @sp: device private structure
 * @do_io: non-zero to also touch hardware (stop NIC, poll quiescence,
 *         reset); zero for a software-only teardown
 *
 * Stops the alarm timer, waits for any in-progress link task, removes
 * the ISR, kills the tasklet, waits (up to ~500ms) for the device to
 * become quiescent, resets it, and frees all Tx/Rx buffers.
 */
static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	unsigned long flags;
	register u64 val64 = 0;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
		msleep(50);
	}
	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* Kill tasklet. */
	tasklet_kill(&sp->task);

	/* Check if the device is Quiescent and then Reset the NIC */
	while(do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this point we
		 * are just setting the ownership bit of rxd in Each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if(verify_pcc_quiescent(sp, sp->device_enabled_once))
			break;
		}

		msleep(50);
		cnt++;
		/* give up after 10 polls (~500ms) and reset anyway */
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG,
				  "s2io_close:Device not Quiescent ");
			DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
				  (unsigned long long) val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	spin_lock_irqsave(&sp->tx_lock, flags);
	/* Free all Tx buffers */
	free_tx_buffers(sp);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	/* Free all Rx buffers */
	spin_lock_irqsave(&sp->rx_lock, flags);
	free_rx_buffers(sp);
	spin_unlock_irqrestore(&sp->rx_lock, flags);

	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
6865
/* Full card-down including hardware I/O (stop, quiescence poll, reset). */
static void s2io_card_down(struct s2io_nic * sp)
{
	do_s2io_card_down(sp, 1);
}
6870
/*
 * s2io_card_up - bring the adapter up.
 * @sp: device private structure
 *
 * Initializes the H/W registers, fills the Rx rings, restores the
 * receive mode, configures LRO limits, starts the NIC, registers the
 * ISR, arms the alarm timer and tasklet, and enables interrupts.
 * Returns 0 on success, -ENODEV/-ENOMEM on failure (hardware is reset
 * and Rx buffers freed on every failure path).
 */
static int s2io_card_up(struct s2io_nic * sp)
{
	int i, ret = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	if (init_nic(sp) != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		s2io_reset(sp);
		return -ENODEV;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		if ((ret = fill_rx_buffers(sp, i))) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  atomic_read(&sp->rx_bufs_left[i]));
	}
	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos= 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (sp->lro) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use(if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		/* release any vectors that did register before the failure */
		if (sp->config.intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* alarm handler fires every HZ/2 (0.5s) */
	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	/* Enable tasklet for the device */
	tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);

	/*  Enable select interrupts */
	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
	if (sp->config.intr_type != INTA)
		en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
	else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}

	set_bit(__S2IO_STATE_CARD_UP, &sp->state);
	return 0;
}
6959
6960 /**
6961  * s2io_restart_nic - Resets the NIC.
6962  * @data : long pointer to the device private structure
6963  * Description:
6964  * This function is scheduled to be run by the s2io_tx_watchdog
6965  * function after 0.5 secs to reset the NIC. The idea is to reduce
6966  * the run time of the watch dog routine which is run holding a
6967  * spin lock.
6968  */
6969
6970 static void s2io_restart_nic(struct work_struct *work)
6971 {
6972         struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6973         struct net_device *dev = sp->dev;
6974
6975         rtnl_lock();
6976
6977         if (!netif_running(dev))
6978                 goto out_unlock;
6979
6980         s2io_card_down(sp);
6981         if (s2io_card_up(sp)) {
6982                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6983                           dev->name);
6984         }
6985         netif_wake_queue(dev);
6986         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6987                   dev->name);
6988 out_unlock:
6989         rtnl_unlock();
6990 }
6991
6992 /**
6993  *  s2io_tx_watchdog - Watchdog for transmit side.
6994  *  @dev : Pointer to net device structure
6995  *  Description:
6996  *  This function is triggered if the Tx Queue is stopped
6997  *  for a pre-defined amount of time when the Interface is still up.
6998  *  If the Interface is jammed in such a situation, the hardware is
6999  *  reset (by s2io_close) and restarted again (by s2io_open) to
7000  *  overcome any problem that might have been caused in the hardware.
7001  *  Return value:
7002  *  void
7003  */
7004
7005 static void s2io_tx_watchdog(struct net_device *dev)
7006 {
7007         struct s2io_nic *sp = dev->priv;
7008
7009         if (netif_carrier_ok(dev)) {
7010                 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
7011                 schedule_work(&sp->rst_timer_task);
7012                 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
7013         }
7014 }
7015
7016 /**
7017  *   rx_osm_handler - To perform some OS related operations on SKB.
7018  *   @sp: private member of the device structure,pointer to s2io_nic structure.
7019  *   @skb : the socket buffer pointer.
7020  *   @len : length of the packet
7021  *   @cksum : FCS checksum of the frame.
7022  *   @ring_no : the ring from which this RxD was extracted.
7023  *   Description:
 *   This function is called by the Rx interrupt service routine to perform
7025  *   some OS related operations on the SKB before passing it to the upper
7026  *   layers. It mainly checks if the checksum is OK, if so adds it to the
7027  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
7028  *   to the upper layer. If the checksum is wrong, it increments the Rx
7029  *   packet error count, frees the SKB and returns error.
7030  *   Return value:
7031  *   SUCCESS on success and -1 on failure.
7032  */
7033 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7034 {
7035         struct s2io_nic *sp = ring_data->nic;
7036         struct net_device *dev = (struct net_device *) sp->dev;
7037         struct sk_buff *skb = (struct sk_buff *)
7038                 ((unsigned long) rxdp->Host_Control);
7039         int ring_no = ring_data->ring_no;
7040         u16 l3_csum, l4_csum;
7041         unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7042         struct lro *lro;
7043         u8 err_mask;
7044
7045         skb->dev = dev;
7046
7047         if (err) {
7048                 /* Check for parity error */
7049                 if (err & 0x1) {
7050                         sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
7051                 }
7052                 err_mask = err >> 48;
7053                 switch(err_mask) {
7054                         case 1:
7055                                 sp->mac_control.stats_info->sw_stat.
7056                                 rx_parity_err_cnt++;
7057                         break;
7058
7059                         case 2:
7060                                 sp->mac_control.stats_info->sw_stat.
7061                                 rx_abort_cnt++;
7062                         break;
7063
7064                         case 3:
7065                                 sp->mac_control.stats_info->sw_stat.
7066                                 rx_parity_abort_cnt++;
7067                         break;
7068
7069                         case 4:
7070                                 sp->mac_control.stats_info->sw_stat.
7071                                 rx_rda_fail_cnt++;
7072                         break;
7073
7074                         case 5:
7075                                 sp->mac_control.stats_info->sw_stat.
7076                                 rx_unkn_prot_cnt++;
7077                         break;
7078
7079                         case 6:
7080                                 sp->mac_control.stats_info->sw_stat.
7081                                 rx_fcs_err_cnt++;
7082                         break;
7083
7084                         case 7:
7085                                 sp->mac_control.stats_info->sw_stat.
7086                                 rx_buf_size_err_cnt++;
7087                         break;
7088
7089                         case 8:
7090                                 sp->mac_control.stats_info->sw_stat.
7091                                 rx_rxd_corrupt_cnt++;
7092                         break;
7093
7094                         case 15:
7095                                 sp->mac_control.stats_info->sw_stat.
7096                                 rx_unkn_err_cnt++;
7097                         break;
7098                 }
7099                 /*
7100                 * Drop the packet if bad transfer code. Exception being
7101                 * 0x5, which could be due to unsupported IPv6 extension header.
7102                 * In this case, we let stack handle the packet.
7103                 * Note that in this case, since checksum will be incorrect,
7104                 * stack will validate the same.
7105                 */
7106                 if (err_mask != 0x5) {
7107                         DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7108                                 dev->name, err_mask);
7109                         sp->stats.rx_crc_errors++;
7110                         sp->mac_control.stats_info->sw_stat.mem_freed 
7111                                 += skb->truesize;
7112                         dev_kfree_skb(skb);
7113                         atomic_dec(&sp->rx_bufs_left[ring_no]);
7114                         rxdp->Host_Control = 0;
7115                         return 0;
7116                 }
7117         }
7118
7119         /* Updating statistics */
7120         sp->stats.rx_packets++;
7121         rxdp->Host_Control = 0;
7122         if (sp->rxd_mode == RXD_MODE_1) {
7123                 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7124
7125                 sp->stats.rx_bytes += len;
7126                 skb_put(skb, len);
7127
7128         } else if (sp->rxd_mode == RXD_MODE_3B) {
7129                 int get_block = ring_data->rx_curr_get_info.block_index;
7130                 int get_off = ring_data->rx_curr_get_info.offset;
7131                 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7132                 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7133                 unsigned char *buff = skb_push(skb, buf0_len);
7134
7135                 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7136                 sp->stats.rx_bytes += buf0_len + buf2_len;
7137                 memcpy(buff, ba->ba_0, buf0_len);
7138                 skb_put(skb, buf2_len);
7139         }
7140
7141         if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
7142             (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7143             (sp->rx_csum)) {
7144                 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7145                 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7146                 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7147                         /*
7148                          * NIC verifies if the Checksum of the received
7149                          * frame is Ok or not and accordingly returns
7150                          * a flag in the RxD.
7151                          */
7152                         skb->ip_summed = CHECKSUM_UNNECESSARY;
7153                         if (sp->lro) {
7154                                 u32 tcp_len;
7155                                 u8 *tcp;
7156                                 int ret = 0;
7157
7158                                 ret = s2io_club_tcp_session(skb->data, &tcp,
7159                                                 &tcp_len, &lro, rxdp, sp);
7160                                 switch (ret) {
7161                                         case 3: /* Begin anew */
7162                                                 lro->parent = skb;
7163                                                 goto aggregate;
7164                                         case 1: /* Aggregate */
7165                                         {
7166                                                 lro_append_pkt(sp, lro,
7167                                                         skb, tcp_len);
7168                                                 goto aggregate;
7169                                         }
7170                                         case 4: /* Flush session */
7171                                         {
7172                                                 lro_append_pkt(sp, lro,
7173                                                         skb, tcp_len);
7174                                                 queue_rx_frame(lro->parent);
7175                                                 clear_lro_session(lro);
7176                                                 sp->mac_control.stats_info->
7177                                                     sw_stat.flush_max_pkts++;
7178                                                 goto aggregate;
7179                                         }
7180                                         case 2: /* Flush both */
7181                                                 lro->parent->data_len =
7182                                                         lro->frags_len;
7183                                                 sp->mac_control.stats_info->
7184                                                      sw_stat.sending_both++;
7185                                                 queue_rx_frame(lro->parent);
7186                                                 clear_lro_session(lro);
7187                                                 goto send_up;
7188                                         case 0: /* sessions exceeded */
7189                                         case -1: /* non-TCP or not
7190                                                   * L2 aggregatable
7191                                                   */
7192                                         case 5: /*
7193                                                  * First pkt in session not
7194                                                  * L3/L4 aggregatable
7195                                                  */
7196                                                 break;
7197                                         default:
7198                                                 DBG_PRINT(ERR_DBG,
7199                                                         "%s: Samadhana!!\n",
7200                                                          __FUNCTION__);
7201                                                 BUG();
7202                                 }
7203                         }
7204                 } else {
7205                         /*
7206                          * Packet with erroneous checksum, let the
7207                          * upper layers deal with it.
7208                          */
7209                         skb->ip_summed = CHECKSUM_NONE;
7210                 }
7211         } else {
7212                 skb->ip_summed = CHECKSUM_NONE;
7213         }
7214         sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
7215         if (!sp->lro) {
7216                 skb->protocol = eth_type_trans(skb, dev);
7217                 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
7218                         vlan_strip_flag)) {
7219                         /* Queueing the vlan frame to the upper layer */
7220                         if (napi)
7221                                 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
7222                                         RXD_GET_VLAN_TAG(rxdp->Control_2));
7223                         else
7224                                 vlan_hwaccel_rx(skb, sp->vlgrp,
7225                                         RXD_GET_VLAN_TAG(rxdp->Control_2));
7226                 } else {
7227                         if (napi)
7228                                 netif_receive_skb(skb);
7229                         else
7230                                 netif_rx(skb);
7231                 }
7232         } else {
7233 send_up:
7234                 queue_rx_frame(skb);
7235         }
7236         dev->last_rx = jiffies;
7237 aggregate:
7238         atomic_dec(&sp->rx_bufs_left[ring_no]);
7239         return SUCCESS;
7240 }
7241
7242 /**
7243  *  s2io_link - stops/starts the Tx queue.
7244  *  @sp : private member of the device structure, which is a pointer to the
7245  *  s2io_nic structure.
 *  @link : indicates whether link is UP/DOWN.
 *  Description:
 *  This function stops/starts the Tx queue depending on whether the link
 *  status of the NIC is down or up. This is called by the Alarm
7250  *  interrupt handler whenever a link change interrupt comes up.
7251  *  Return value:
7252  *  void.
7253  */
7254
7255 static void s2io_link(struct s2io_nic * sp, int link)
7256 {
7257         struct net_device *dev = (struct net_device *) sp->dev;
7258
7259         if (link != sp->last_link_state) {
7260                 if (link == LINK_DOWN) {
7261                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7262                         netif_carrier_off(dev);
7263                         if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7264                         sp->mac_control.stats_info->sw_stat.link_up_time = 
7265                                 jiffies - sp->start_time;
7266                         sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7267                 } else {
7268                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7269                         if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7270                         sp->mac_control.stats_info->sw_stat.link_down_time = 
7271                                 jiffies - sp->start_time;
7272                         sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7273                         netif_carrier_on(dev);
7274                 }
7275         }
7276         sp->last_link_state = link;
7277         sp->start_time = jiffies;
7278 }
7279
7280 /**
7281  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7282  *  @sp : private member of the device structure, which is a pointer to the
7283  *  s2io_nic structure.
7284  *  Description:
7285  *  This function initializes a few of the PCI and PCI-X configuration registers
7286  *  with recommended values.
7287  *  Return value:
7288  *  void
7289  */
7290
7291 static void s2io_init_pci(struct s2io_nic * sp)
7292 {
7293         u16 pci_cmd = 0, pcix_cmd = 0;
7294
7295         /* Enable Data Parity Error Recovery in PCI-X command register. */
7296         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7297                              &(pcix_cmd));
7298         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7299                               (pcix_cmd | 1));
7300         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7301                              &(pcix_cmd));
7302
7303         /* Set the PErr Response bit in PCI command register. */
7304         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7305         pci_write_config_word(sp->pdev, PCI_COMMAND,
7306                               (pci_cmd | PCI_COMMAND_PARITY));
7307         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7308 }
7309
7310 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
7311 {
7312         if ( tx_fifo_num > 8) {
7313                 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
7314                          "supported\n");
7315                 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
7316                 tx_fifo_num = 8;
7317         }
7318         if ( rx_ring_num > 8) {
7319                 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
7320                          "supported\n");
7321                 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
7322                 rx_ring_num = 8;
7323         }
7324         if (*dev_intr_type != INTA)
7325                 napi = 0;
7326
7327         if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7328                 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7329                           "Defaulting to INTA\n");
7330                 *dev_intr_type = INTA;
7331         }
7332
7333         if ((*dev_intr_type == MSI_X) &&
7334                         ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7335                         (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7336                 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7337                                         "Defaulting to INTA\n");
7338                 *dev_intr_type = INTA;
7339         }
7340
7341         if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7342                 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7343                 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7344                 rx_ring_mode = 1;
7345         }
7346         return SUCCESS;
7347 }
7348
7349 /**
7350  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7351  * or Traffic class respectively.
 * @nic: device private variable
7353  * Description: The function configures the receive steering to
7354  * desired receive ring.
7355  * Return Value:  SUCCESS on success and
7356  * '-1' on failure (endian settings incorrect).
7357  */
7358 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7359 {
7360         struct XENA_dev_config __iomem *bar0 = nic->bar0;
7361         register u64 val64 = 0;
7362
7363         if (ds_codepoint > 63)
7364                 return FAILURE;
7365
7366         val64 = RTS_DS_MEM_DATA(ring);
7367         writeq(val64, &bar0->rts_ds_mem_data);
7368
7369         val64 = RTS_DS_MEM_CTRL_WE |
7370                 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7371                 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7372
7373         writeq(val64, &bar0->rts_ds_mem_ctrl);
7374
7375         return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7376                                 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7377                                 S2IO_BIT_RESET);
7378 }
7379
7380 /**
7381  *  s2io_init_nic - Initialization of the adapter .
7382  *  @pdev : structure containing the PCI related information of the device.
7383  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7384  *  Description:
 *  The function initializes an adapter identified by the pci_dev structure.
 *  All OS related initialization including memory and device structure and
 *  initialization of the device private variable is done. Also the swapper
7388  *  control register is initialized to enable read and write into the I/O
7389  *  registers of the device.
7390  *  Return value:
7391  *  returns 0 on success and negative on failure.
7392  */
7393
static int __devinit
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
        struct s2io_nic *sp;
        struct net_device *dev;
        int i, j, ret;
        int dma_flag = FALSE;
        u32 mac_up, mac_down;
        u64 val64 = 0, tmp64 = 0;
        struct XENA_dev_config __iomem *bar0 = NULL;
        u16 subid;
        struct mac_info *mac_control;
        struct config_param *config;
        int mode;
        u8 dev_intr_type = intr_type;
        DECLARE_MAC_BUF(mac);

        /* Sanitize the module parameters before touching the hardware. */
        if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
                return ret;

        if ((ret = pci_enable_device(pdev))) {
                DBG_PRINT(ERR_DBG,
                          "s2io_init_nic: pci_enable_device failed\n");
                return ret;
        }

        /*
         * Prefer a 64-bit DMA mask; fall back to 32-bit, and give up if
         * neither can be satisfied.
         */
        if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
                DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
                dma_flag = TRUE;
                if (pci_set_consistent_dma_mask
                    (pdev, DMA_64BIT_MASK)) {
                        DBG_PRINT(ERR_DBG,
                                  "Unable to obtain 64bit DMA for \
                                        consistent allocations\n");
                        pci_disable_device(pdev);
                        return -ENOMEM;
                }
        } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
                DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
        } else {
                pci_disable_device(pdev);
                return -ENOMEM;
        }
        if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
                DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
                pci_disable_device(pdev);
                return -ENODEV;
        }

        dev = alloc_etherdev(sizeof(struct s2io_nic));
        if (dev == NULL) {
                DBG_PRINT(ERR_DBG, "Device allocation failed\n");
                pci_disable_device(pdev);
                pci_release_regions(pdev);
                return -ENODEV;
        }

        pci_set_master(pdev);
        pci_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);

        /*  Private member variable initialized to s2io NIC structure */
        sp = dev->priv;
        memset(sp, 0, sizeof(struct s2io_nic));
        sp->dev = dev;
        sp->pdev = pdev;
        sp->high_dma_flag = dma_flag;
        sp->device_enabled_once = FALSE;
        /* rx_ring_mode was validated to be 1 or 2 in s2io_verify_parm(). */
        if (rx_ring_mode == 1)
                sp->rxd_mode = RXD_MODE_1;
        if (rx_ring_mode == 2)
                sp->rxd_mode = RXD_MODE_3B;

        sp->config.intr_type = dev_intr_type;

        /* Herc PCI ids identify an Xframe II; everything else is Xframe I. */
        if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
                (pdev->device == PCI_DEVICE_ID_HERC_UNI))
                sp->device_type = XFRAME_II_DEVICE;
        else
                sp->device_type = XFRAME_I_DEVICE;

        sp->lro = lro;

        /* Initialize some PCI/PCI-X fields of the NIC. */
        s2io_init_pci(sp);

        /*
         * Setting the device configuration parameters.
         * Most of these parameters can be specified by the user during
         * module insertion as they are module loadable parameters. If
         * these parameters are not specified during load time, they
         * are initialized with default values.
         */
        mac_control = &sp->mac_control;
        config = &sp->config;

        config->napi = napi;

        /* Tx side parameters. */
        config->tx_fifo_num = tx_fifo_num;
        for (i = 0; i < MAX_TX_FIFOS; i++) {
                config->tx_cfg[i].fifo_len = tx_fifo_len[i];
                config->tx_cfg[i].fifo_priority = i;
        }

        /* mapping the QoS priority to the configured fifos */
        for (i = 0; i < MAX_TX_FIFOS; i++)
                config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];

        /* Per-list Tx interrupts are used if any fifo is shorter than 65. */
        config->tx_intr_type = TXD_INT_TYPE_UTILZ;
        for (i = 0; i < config->tx_fifo_num; i++) {
                config->tx_cfg[i].f_no_snoop =
                    (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
                if (config->tx_cfg[i].fifo_len < 65) {
                        config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
                        break;
                }
        }
        /* + 2 because one Txd for skb->data and one Txd for UFO */
        config->max_txds = MAX_SKB_FRAGS + 2;

        /* Rx side parameters. */
        config->rx_ring_num = rx_ring_num;
        for (i = 0; i < MAX_RX_RINGS; i++) {
                config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
                    (rxd_count[sp->rxd_mode] + 1);
                config->rx_cfg[i].ring_priority = i;
        }

        for (i = 0; i < rx_ring_num; i++) {
                config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
                config->rx_cfg[i].f_no_snoop =
                    (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
        }

        /*  Setting Mac Control parameters */
        mac_control->rmac_pause_time = rmac_pause_time;
        mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
        mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;


        /* Initialize Ring buffer parameters. */
        for (i = 0; i < config->rx_ring_num; i++)
                atomic_set(&sp->rx_bufs_left[i], 0);

        /*  initialize the shared memory used by the NIC and the host */
        if (init_shared_mem(sp)) {
                DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
                          dev->name);
                ret = -ENOMEM;
                goto mem_alloc_failed;
        }

        /* BAR0 holds the device control/status registers. */
        sp->bar0 = ioremap(pci_resource_start(pdev, 0),
                                     pci_resource_len(pdev, 0));
        if (!sp->bar0) {
                DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
                          dev->name);
                ret = -ENOMEM;
                goto bar0_remap_failed;
        }

        /* BAR2 holds the Tx FIFO doorbell area (see tx_FIFO_start below). */
        sp->bar1 = ioremap(pci_resource_start(pdev, 2),
                                     pci_resource_len(pdev, 2));
        if (!sp->bar1) {
                DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
                          dev->name);
                ret = -ENOMEM;
                goto bar1_remap_failed;
        }

        dev->irq = pdev->irq;
        dev->base_addr = (unsigned long) sp->bar0;

        /* Initializing the BAR1 address as the start of the FIFO pointer. */
        for (j = 0; j < MAX_TX_FIFOS; j++) {
                mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
                    (sp->bar1 + (j * 0x00020000));
        }

        /*  Driver entry points */
        dev->open = &s2io_open;
        dev->stop = &s2io_close;
        dev->hard_start_xmit = &s2io_xmit;
        dev->get_stats = &s2io_get_stats;
        dev->set_multicast_list = &s2io_set_multicast;
        dev->do_ioctl = &s2io_ioctl;
        dev->change_mtu = &s2io_change_mtu;
        SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
        dev->vlan_rx_register = s2io_vlan_rx_register;

        /*
         * will use eth_mac_addr() for  dev->set_mac_address
         * mac address will be set every time dev->open() is called
         */
        netif_napi_add(dev, &sp->napi, s2io_poll, 32);

#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = s2io_netpoll;
#endif

        dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
        if (sp->high_dma_flag == TRUE)
                dev->features |= NETIF_F_HIGHDMA;
        dev->features |= NETIF_F_TSO;
        dev->features |= NETIF_F_TSO6;
        /* UFO is only advertised on Xframe II and when the ufo parm is set. */
        if ((sp->device_type & XFRAME_II_DEVICE) && (ufo))  {
                dev->features |= NETIF_F_UFO;
                dev->features |= NETIF_F_HW_CSUM;
        }

        dev->tx_timeout = &s2io_tx_watchdog;
        dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
        INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
        INIT_WORK(&sp->set_link_task, s2io_set_link);

        pci_save_state(sp->pdev);

        /* Setting swapper control on the NIC, for proper reset operation */
        if (s2io_set_swapper(sp)) {
                DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
                          dev->name);
                ret = -EAGAIN;
                goto set_swap_failed;
        }

        /* Verify if the Herc works on the slot its placed into */
        if (sp->device_type & XFRAME_II_DEVICE) {
                mode = s2io_verify_pci_mode(sp);
                if (mode < 0) {
                        DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
                        DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
                        ret = -EBADSLT;
                        goto set_swap_failed;
                }
        }

        /* Not needed for Herc */
        if (sp->device_type & XFRAME_I_DEVICE) {
                /*
                 * Fix for all "FFs" MAC address problems observed on
                 * Alpha platforms
                 */
                fix_mac_address(sp);
                s2io_reset(sp);
        }

        /*
         * MAC address initialization.
         * For now only one mac address will be read and used.
         */
        bar0 = sp->bar0;
        val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
            RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
        writeq(val64, &bar0->rmac_addr_cmd_mem);
        wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
        tmp64 = readq(&bar0->rmac_addr_data0_mem);
        mac_down = (u32) tmp64;
        mac_up = (u32) (tmp64 >> 32);

        /* Unpack the six address bytes from the two 32-bit halves. */
        sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
        sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
        sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
        sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
        sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
        sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

        /*  Set the factory defined MAC address initially   */
        dev->addr_len = ETH_ALEN;
        memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);

         /* Store the values of the MSIX table in the s2io_nic structure */
        store_xmsi_data(sp);
        /* reset Nic and bring it to known state */
        s2io_reset(sp);

        /*
         * Initialize the tasklet status and link state flags
         * and the card state parameter
         */
        sp->tasklet_status = 0;
        sp->state = 0;

        /* Initialize spinlocks */
        spin_lock_init(&sp->tx_lock);

        /* put_lock is only taken on the non-NAPI receive path. */
        if (!napi)
                spin_lock_init(&sp->put_lock);
        spin_lock_init(&sp->rx_lock);

        /*
         * SXE-002: Configure link and activity LED to init state
         * on driver load.
         */
        subid = sp->pdev->subsystem_device;
        if ((subid & 0xFF) >= 0x07) {
                val64 = readq(&bar0->gpio_control);
                val64 |= 0x0000800000000000ULL;
                writeq(val64, &bar0->gpio_control);
                val64 = 0x0411040400000000ULL;
                writeq(val64, (void __iomem *) bar0 + 0x2700);
                val64 = readq(&bar0->gpio_control);
        }

        sp->rx_csum = 1;        /* Rx chksum verify enabled by default */

        if (register_netdev(dev)) {
                DBG_PRINT(ERR_DBG, "Device registration failed\n");
                ret = -ENODEV;
                goto register_failed;
        }
        s2io_vpd_read(sp);
        DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
        DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
                  sp->product_name, pdev->revision);
        DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
                  s2io_driver_version);
        DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
                  dev->name, print_mac(mac, dev->dev_addr));
        DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
        if (sp->device_type & XFRAME_II_DEVICE) {
                mode = s2io_print_pci_mode(sp);
                if (mode < 0) {
                        DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
                        ret = -EBADSLT;
                        unregister_netdev(dev);
                        goto set_swap_failed;
                }
        }
        switch(sp->rxd_mode) {
                case RXD_MODE_1:
                    DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
                                                dev->name);
                    break;
                case RXD_MODE_3B:
                    DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
                                                dev->name);
                    break;
        }

        if (napi)
                DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
        switch(sp->config.intr_type) {
                case INTA:
                    DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
                    break;
                case MSI_X:
                    DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
                    break;
        }
        if (sp->lro)
                DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
                          dev->name);
        if (ufo)
                DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
                                        " enabled\n", dev->name);
        /* Initialize device name */
        sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);

        /* Initialize bimodal Interrupts */
        sp->config.bimodal = bimodal;
        if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
                sp->config.bimodal = 0;
                DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
                        dev->name);
        }

        /*
         * Make Link state as off at this point, when the Link change
         * interrupt comes the state will be automatically changed to
         * the right state.
         */
        netif_carrier_off(dev);

        return 0;

        /* Error unwind: each label releases everything acquired before it. */
      register_failed:
      set_swap_failed:
        iounmap(sp->bar1);
      bar1_remap_failed:
        iounmap(sp->bar0);
      bar0_remap_failed:
      mem_alloc_failed:
        free_shared_mem(sp);
        pci_disable_device(pdev);
        pci_release_regions(pdev);
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);

        return ret;
}
7787
7788 /**
7789  * s2io_rem_nic - Free the PCI device
7790  * @pdev: structure containing the PCI related information of the device.
7791  * Description: This function is called by the Pci subsystem to release a
7792  * PCI device and free up all resource held up by the device. This could
7793  * be in response to a Hot plug event or when the driver is to be removed
7794  * from memory.
7795  */
7796
7797 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7798 {
7799         struct net_device *dev =
7800             (struct net_device *) pci_get_drvdata(pdev);
7801         struct s2io_nic *sp;
7802
7803         if (dev == NULL) {
7804                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7805                 return;
7806         }
7807
7808         flush_scheduled_work();
7809
7810         sp = dev->priv;
7811         unregister_netdev(dev);
7812
7813         free_shared_mem(sp);
7814         iounmap(sp->bar0);
7815         iounmap(sp->bar1);
7816         pci_release_regions(pdev);
7817         pci_set_drvdata(pdev, NULL);
7818         free_netdev(dev);
7819         pci_disable_device(pdev);
7820 }
7821
7822 /**
7823  * s2io_starter - Entry point for the driver
7824  * Description: This function is the entry point for the driver. It verifies
7825  * the module loadable parameters and initializes PCI configuration space.
7826  */
7827
7828 int __init s2io_starter(void)
7829 {
7830         return pci_register_driver(&s2io_driver);
7831 }
7832
7833 /**
7834  * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
7836  */
7837
7838 static __exit void s2io_closer(void)
7839 {
7840         pci_unregister_driver(&s2io_driver);
7841         DBG_PRINT(INIT_DBG, "cleanup done\n");
7842 }
7843
/* Register the module's entry and exit points with the kernel. */
module_init(s2io_starter);
module_exit(s2io_closer);
7846
7847 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7848                 struct tcphdr **tcp, struct RxD_t *rxdp)
7849 {
7850         int ip_off;
7851         u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7852
7853         if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7854                 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7855                           __FUNCTION__);
7856                 return -1;
7857         }
7858
7859         /* TODO:
7860          * By default the VLAN field in the MAC is stripped by the card, if this
7861          * feature is turned off in rx_pa_cfg register, then the ip_off field
7862          * has to be shifted by a further 2 bytes
7863          */
7864         switch (l2_type) {
7865                 case 0: /* DIX type */
7866                 case 4: /* DIX type with VLAN */
7867                         ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7868                         break;
7869                 /* LLC, SNAP etc are considered non-mergeable */
7870                 default:
7871                         return -1;
7872         }
7873
7874         *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7875         ip_len = (u8)((*ip)->ihl);
7876         ip_len <<= 2;
7877         *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7878
7879         return 0;
7880 }
7881
7882 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7883                                   struct tcphdr *tcp)
7884 {
7885         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7886         if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7887            (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7888                 return -1;
7889         return 0;
7890 }
7891
7892 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7893 {
7894         return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
7895 }
7896
7897 static void initiate_new_session(struct lro *lro, u8 *l2h,
7898                      struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7899 {
7900         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7901         lro->l2h = l2h;
7902         lro->iph = ip;
7903         lro->tcph = tcp;
7904         lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7905         lro->tcp_ack = ntohl(tcp->ack_seq);
7906         lro->sg_num = 1;
7907         lro->total_len = ntohs(ip->tot_len);
7908         lro->frags_len = 0;
7909         /*
7910          * check if we saw TCP timestamp. Other consistency checks have
7911          * already been done.
7912          */
7913         if (tcp->doff == 8) {
7914                 u32 *ptr;
7915                 ptr = (u32 *)(tcp+1);
7916                 lro->saw_ts = 1;
7917                 lro->cur_tsval = *(ptr+1);
7918                 lro->cur_tsecr = *(ptr+2);
7919         }
7920         lro->in_use = 1;
7921 }
7922
7923 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7924 {
7925         struct iphdr *ip = lro->iph;
7926         struct tcphdr *tcp = lro->tcph;
7927         __sum16 nchk;
7928         struct stat_block *statinfo = sp->mac_control.stats_info;
7929         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7930
7931         /* Update L3 header */
7932         ip->tot_len = htons(lro->total_len);
7933         ip->check = 0;
7934         nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7935         ip->check = nchk;
7936
7937         /* Update L4 header */
7938         tcp->ack_seq = lro->tcp_ack;
7939         tcp->window = lro->window;
7940
7941         /* Update tsecr field if this session has timestamps enabled */
7942         if (lro->saw_ts) {
7943                 u32 *ptr = (u32 *)(tcp + 1);
7944                 *(ptr+2) = lro->cur_tsecr;
7945         }
7946
7947         /* Update counters required for calculation of
7948          * average no. of packets aggregated.
7949          */
7950         statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7951         statinfo->sw_stat.num_aggregations++;
7952 }
7953
7954 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7955                 struct tcphdr *tcp, u32 l4_pyld)
7956 {
7957         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7958         lro->total_len += l4_pyld;
7959         lro->frags_len += l4_pyld;
7960         lro->tcp_next_seq += l4_pyld;
7961         lro->sg_num++;
7962
7963         /* Update ack seq no. and window ad(from this pkt) in LRO object */
7964         lro->tcp_ack = tcp->ack_seq;
7965         lro->window = tcp->window;
7966
7967         if (lro->saw_ts) {
7968                 u32 *ptr;
7969                 /* Update tsecr and tsval from this packet */
7970                 ptr = (u32 *) (tcp + 1);
7971                 lro->cur_tsval = *(ptr + 1);
7972                 lro->cur_tsecr = *(ptr + 2);
7973         }
7974 }
7975
7976 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7977                                     struct tcphdr *tcp, u32 tcp_pyld_len)
7978 {
7979         u8 *ptr;
7980
7981         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7982
7983         if (!tcp_pyld_len) {
7984                 /* Runt frame or a pure ack */
7985                 return -1;
7986         }
7987
7988         if (ip->ihl != 5) /* IP has options */
7989                 return -1;
7990
7991         /* If we see CE codepoint in IP header, packet is not mergeable */
7992         if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7993                 return -1;
7994
7995         /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7996         if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
7997                                     tcp->ece || tcp->cwr || !tcp->ack) {
7998                 /*
7999                  * Currently recognize only the ack control word and
8000                  * any other control field being set would result in
8001                  * flushing the LRO session
8002                  */
8003                 return -1;
8004         }
8005
8006         /*
8007          * Allow only one TCP timestamp option. Don't aggregate if
8008          * any other options are detected.
8009          */
8010         if (tcp->doff != 5 && tcp->doff != 8)
8011                 return -1;
8012
8013         if (tcp->doff == 8) {
8014                 ptr = (u8 *)(tcp + 1);
8015                 while (*ptr == TCPOPT_NOP)
8016                         ptr++;
8017                 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8018                         return -1;
8019
8020                 /* Ensure timestamp value increases monotonically */
8021                 if (l_lro)
8022                         if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
8023                                 return -1;
8024
8025                 /* timestamp echo reply should be non-zero */
8026                 if (*((u32 *)(ptr+6)) == 0)
8027                         return -1;
8028         }
8029
8030         return 0;
8031 }
8032
/*
 * s2io_club_tcp_session - match a received frame against the LRO sessions.
 *
 * On success *tcp/*tcp_len point at the TCP header and payload length and
 * *lro at the affected session (NULL when the table is full).  Returns:
 *	0 - all MAX_LRO_SESSIONS in use; send the packet up unaggregated
 *	1 - aggregated into an existing session
 *	2 - session flushed (out-of-order seq or not L3/L4 capable);
 *	    headers of the aggregate have been finalized
 *	3 - new session begun with this packet
 *	4 - aggregated AND session reached lro_max_aggr_per_sess; flush
 *	5 - packet itself not aggregatable; send it up, no session touched
 *	other - whatever non-zero value check_L2_lro_capable() (defined
 *	    elsewhere in this file) returned for a non-TCP/IP frame
 */
static int
s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
		      struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;

	/* L2 classification also locates the IP/TCP headers in buffer. */
	if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
					 rxdp))) {
		DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
			  ip->saddr, ip->daddr);
	} else {
		return ret;
	}

	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* First pass: look for an active session with a matching 4-tuple. */
	for (i=0; i<MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &sp->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			/* A gap in sequence space forces a flush. */
			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
					  "0x%x, actual 0x%x\n", __FUNCTION__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				sp->mac_control.stats_info->
				   sw_stat.outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
			return 5;
		}

		/* Second pass: claim a free session slot, if any. */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &sp->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
			  __FUNCTION__);
		*lro = NULL;
		return ret;
	}

	/* Act on the classification decided above. */
	switch (ret) {
		case 3:
			initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
			break;
		case 2:
			/* Finalize headers so the caller can flush. */
			update_L3L4_header(sp, *lro);
			break;
		case 1:
			aggregate_new_rx(*lro, ip, tcph, *tcp_len);
			if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
				update_L3L4_header(sp, *lro);
				ret = 4; /* Flush the LRO */
			}
			break;
		default:
			DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
				__FUNCTION__);
			break;
	}

	return ret;
}
8128
8129 static void clear_lro_session(struct lro *lro)
8130 {
8131         static u16 lro_struct_size = sizeof(struct lro);
8132
8133         memset(lro, 0, lro_struct_size);
8134 }
8135
8136 static void queue_rx_frame(struct sk_buff *skb)
8137 {
8138         struct net_device *dev = skb->dev;
8139
8140         skb->protocol = eth_type_trans(skb, dev);
8141         if (napi)
8142                 netif_receive_skb(skb);
8143         else
8144                 netif_rx(skb);
8145 }
8146
8147 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8148                            struct sk_buff *skb,
8149                            u32 tcp_len)
8150 {
8151         struct sk_buff *first = lro->parent;
8152
8153         first->len += tcp_len;
8154         first->data_len = lro->frags_len;
8155         skb_pull(skb, (skb->len - tcp_len));
8156         if (skb_shinfo(first)->frag_list)
8157                 lro->last_frag->next = skb;
8158         else
8159                 skb_shinfo(first)->frag_list = skb;
8160         first->truesize += skb->truesize;
8161         lro->last_frag = skb;
8162         sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8163         return;
8164 }
8165
8166 /**
8167  * s2io_io_error_detected - called when PCI error is detected
8168  * @pdev: Pointer to PCI device
8169  * @state: The current pci connection state
8170  *
8171  * This function is called after a PCI bus error affecting
8172  * this device has been detected.
8173  */
8174 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8175                                                pci_channel_state_t state)
8176 {
8177         struct net_device *netdev = pci_get_drvdata(pdev);
8178         struct s2io_nic *sp = netdev->priv;
8179
8180         netif_device_detach(netdev);
8181
8182         if (netif_running(netdev)) {
8183                 /* Bring down the card, while avoiding PCI I/O */
8184                 do_s2io_card_down(sp, 0);
8185         }
8186         pci_disable_device(pdev);
8187
8188         return PCI_ERS_RESULT_NEED_RESET;
8189 }
8190
8191 /**
8192  * s2io_io_slot_reset - called after the pci bus has been reset.
8193  * @pdev: Pointer to PCI device
8194  *
8195  * Restart the card from scratch, as if from a cold-boot.
8196  * At this point, the card has exprienced a hard reset,
8197  * followed by fixups by BIOS, and has its config space
8198  * set up identically to what it was at cold boot.
8199  */
8200 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8201 {
8202         struct net_device *netdev = pci_get_drvdata(pdev);
8203         struct s2io_nic *sp = netdev->priv;
8204
8205         if (pci_enable_device(pdev)) {
8206                 printk(KERN_ERR "s2io: "
8207                        "Cannot re-enable PCI device after reset.\n");
8208                 return PCI_ERS_RESULT_DISCONNECT;
8209         }
8210
8211         pci_set_master(pdev);
8212         s2io_reset(sp);
8213
8214         return PCI_ERS_RESULT_RECOVERED;
8215 }
8216
8217 /**
8218  * s2io_io_resume - called when traffic can start flowing again.
8219  * @pdev: Pointer to PCI device
8220  *
8221  * This callback is called when the error recovery driver tells
8222  * us that its OK to resume normal operation.
8223  */
8224 static void s2io_io_resume(struct pci_dev *pdev)
8225 {
8226         struct net_device *netdev = pci_get_drvdata(pdev);
8227         struct s2io_nic *sp = netdev->priv;
8228
8229         if (netif_running(netdev)) {
8230                 if (s2io_card_up(sp)) {
8231                         printk(KERN_ERR "s2io: "
8232                                "Can't bring device back up after reset.\n");
8233                         return;
8234                 }
8235
8236                 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8237                         s2io_card_down(sp);
8238                         printk(KERN_ERR "s2io: "
8239                                "Can't resetore mac addr after reset.\n");
8240                         return;
8241                 }
8242         }
8243
8244         netif_device_attach(netdev);
8245         netif_wake_queue(netdev);
8246 }