[PATCH] Add adm8211 802.11b wireless driver
[pandora-kernel.git] / drivers / net / s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2007 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2.
36  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '0(INTA)'
41  * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  * napi: This parameter used to enable/disable NAPI (polling Rx)
46  *     Possible values '1' for enable and '0' for disable. Default is '1'
47  * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48  *      Possible values '1' for enable and '0' for disable. Default is '0'
49  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50  *                 Possible values '1' for enable , '0' for disable.
51  *                 Default is '2' - which means disable in promisc mode
52  *                 and enable in non-promiscuous mode.
53  ************************************************************************/
54
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
73 #include <linux/ip.h>
74 #include <linux/tcp.h>
75 #include <net/tcp.h>
76
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
79 #include <asm/io.h>
80 #include <asm/div64.h>
81 #include <asm/irq.h>
82
83 /* local include */
84 #include "s2io.h"
85 #include "s2io-regs.h"
86
/* Driver version string, exported via MODULE_VERSION and ethtool. */
#define DRV_VERSION "2.0.25.1"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

/* Receive-descriptor geometry, indexed by the adapter's rxd_mode
 * (index 0 = 1-buffer mode, index 1 = 3B mode — see RXD_MODE_* uses below):
 *   rxd_size[]  - byte stride of one RxD inside a descriptor block
 *                 (used to compute per-RxD addresses in init_shared_mem()),
 *   rxd_count[] - number of RxDs held in one descriptor block
 *                 (blocks contain rxd_count + 1 descriptors' worth of space;
 *                 the extra slot links blocks together).
 */
static int rxd_size[2] = {32,48};
static int rxd_count[2] = {127,85};
95
96 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
97 {
98         int ret;
99
100         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
102
103         return ret;
104 }
105
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
	(dev_type == XFRAME_I_DEVICE) ?			\
		((((subid >= 0x600B) && (subid <= 0x600D)) || \
		 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0

/* Link is up only when neither a remote nor a local RMAC fault is set. */
#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically claims the tasklet; non-zero result means it was already busy.
 * NOTE(review): expands against a local variable 'sp' that must be in scope
 * at every use site. */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/* Rx buffer-level classifications returned by rx_buffer_level() below. */
#define PANIC	1
#define LOW	2
120 #define LOW     2
121 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
122 {
123         struct mac_info *mac_control;
124
125         mac_control = &sp->mac_control;
126         if (rxb_size <= rxd_count[sp->rxd_mode])
127                 return PANIC;
128         else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
129                 return  LOW;
130         return 0;
131 }
132
/* Ethtool related variables and Macros. */
/* Names of the ethtool self-tests reported by get_strings();
 * "(offline)"/"(online)" marks whether a test needs the link down. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};
141
/* Names of the hardware MAC/PCI statistics exported through ethtool on
 * all adapters (Xframe I and II).
 * NOTE(review): the entry order appears to mirror the adapter's hardware
 * statistics block read via DMA — confirm against struct stat_block
 * before adding, removing or reordering entries. */
static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"}
};
238
/* Extra hardware statistics reported only for Xframe II adapters —
 * appended after the common keys (see XFRAME_II_STAT_LEN below). */
static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"}
};
257
258 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
259         {"\n DRIVER STATISTICS"},
260         {"single_bit_ecc_errs"},
261         {"double_bit_ecc_errs"},
262         {"parity_err_cnt"},
263         {"serious_err_cnt"},
264         {"soft_reset_cnt"},
265         {"fifo_full_cnt"},
266         {"ring_full_cnt"},
267         ("alarm_transceiver_temp_high"),
268         ("alarm_transceiver_temp_low"),
269         ("alarm_laser_bias_current_high"),
270         ("alarm_laser_bias_current_low"),
271         ("alarm_laser_output_power_high"),
272         ("alarm_laser_output_power_low"),
273         ("warn_transceiver_temp_high"),
274         ("warn_transceiver_temp_low"),
275         ("warn_laser_bias_current_high"),
276         ("warn_laser_bias_current_low"),
277         ("warn_laser_output_power_high"),
278         ("warn_laser_output_power_low"),
279         ("lro_aggregated_pkts"),
280         ("lro_flush_both_count"),
281         ("lro_out_of_sequence_pkts"),
282         ("lro_flush_due_to_max_pkts"),
283         ("lro_avg_aggr_pkts"),
284         ("mem_alloc_fail_cnt"),
285         ("pci_map_fail_cnt"),
286         ("watchdog_timer_cnt"),
287         ("mem_allocated"),
288         ("mem_freed"),
289         ("link_up_cnt"),
290         ("link_down_cnt"),
291         ("link_up_time"),
292         ("link_down_time"),
293         ("tx_tcode_buf_abort_cnt"),
294         ("tx_tcode_desc_abort_cnt"),
295         ("tx_tcode_parity_err_cnt"),
296         ("tx_tcode_link_loss_cnt"),
297         ("tx_tcode_list_proc_err_cnt"),
298         ("rx_tcode_parity_err_cnt"),
299         ("rx_tcode_abort_cnt"),
300         ("rx_tcode_parity_abort_cnt"),
301         ("rx_tcode_rda_fail_cnt"),
302         ("rx_tcode_unkn_prot_cnt"),
303         ("rx_tcode_fcs_err_cnt"),
304         ("rx_tcode_buf_size_err_cnt"),
305         ("rx_tcode_rxd_corrupt_cnt"),
306         ("rx_tcode_unkn_err_cnt")
307 };
308
/* Entry counts for the ethtool string tables above, and the byte sizes
 * of the string sets each adapter type exports.
 *
 * Fix: every macro body is now fully parenthesized.  The originals were
 * bare expressions, so e.g. S2IO_STRINGS_LEN expanded to
 * "sizeof(s2io_gstrings) / ETH_GSTRING_LEN * ETH_GSTRING_LEN" and only
 * produced the intended value by a precedence/divisibility accident;
 * used inside a larger expression these macros could silently
 * miscompute. */
#define S2IO_XENA_STAT_LEN (sizeof(ethtool_xena_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_ENHANCED_STAT_LEN (sizeof(ethtool_enhanced_stats_keys) / \
					ETH_GSTRING_LEN)
#define S2IO_DRIVER_STAT_LEN (sizeof(ethtool_driver_stats_keys) / \
					ETH_GSTRING_LEN)

/* Xframe I exports the common + driver stats; Xframe II adds the
 * enhanced stats on top. */
#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
322
/* Initialise @timer with handler @handle and argument @arg, then arm it
 * to fire @exp jiffies from now.
 *
 * Fix: the original expanded to four bare statements, which breaks (or
 * silently changes meaning) when the macro is used as the body of an
 * unbraced if/else or loop.  It is now wrapped in the standard
 * do { } while (0) idiom so it behaves as a single statement; call
 * sites still terminate the invocation with ';' as before. */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)		\
	do {							\
		init_timer(&timer);				\
		timer.function = handle;			\
		timer.data = (unsigned long) arg;		\
		mod_timer(&timer, (jiffies + exp));		\
	} while (0)
328
329 /* Add the vlan */
330 static void s2io_vlan_rx_register(struct net_device *dev,
331                                         struct vlan_group *grp)
332 {
333         struct s2io_nic *nic = dev->priv;
334         unsigned long flags;
335
336         spin_lock_irqsave(&nic->tx_lock, flags);
337         nic->vlgrp = grp;
338         spin_unlock_irqrestore(&nic->tx_lock, flags);
339 }
340
/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
/* NOTE(review): mirrors the current hardware VLAN-strip state; readers and
 * writers live elsewhere in this file — confirm it is only touched under
 * appropriate locking when promiscuous mode toggles. */
static int vlan_strip_flag;
343
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

/* Terminator for the register-programming tables below; the writer loop
 * stops when it reads this value. */
#define END_SIGN	0x0

/* XAUI/serdes programming sequence for Xframe II (Herc).  Entries come in
 * set-address / write-data pairs, terminated by END_SIGN.
 * NOTE(review): the 64-bit values are opaque hardware constants; their
 * meaning is not derivable from this file — do not edit. */
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};

/* Equivalent XAUI programming sequence for Xframe I (Xena). */
static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};

/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.
 */
/* Register write sequence (END_SIGN terminated) applied as a workaround;
 * values are opaque hardware constants. */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};
408
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
/* Number of Tx FIFOs and Rx rings to configure (see header comment). */
S2IO_PARM_INT(tx_fifo_num, 1);
S2IO_PARM_INT(rx_ring_num, 1);


/* Rx descriptor mode for all rings: 1 (one-buffer) or 2 (see header). */
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(bimodal, 0);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 0);
/* Large receive offload feature */
S2IO_PARM_INT(lro, 0);
/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

/* NAPI (polled Rx) and UFO on/off switches; see header comment. */
S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
/* VLAN stripping: default NO_STRIP_IN_PROMISC, i.e. strip only when the
 * interface is not in promiscuous mode (documented as '2' above). */
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-FIFO Tx descriptor counts and per-ring Rx block counts / frame
 * length limits; overridable as module parameter arrays below. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
454
/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
/* Matches both Xframe I (S2IO_*) and Xframe II (HERC_*) device IDs for
 * any subsystem vendor/device. */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);
472
/* PCI error recovery callbacks (AER/EEH); the handler functions are
 * defined later in this file. */
static struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};
478
/* PCI driver registration: probe/remove entry points and error
 * handlers; registered against s2io_tbl above. */
static struct pci_driver s2io_driver = {
      .name = "S2IO",
      .id_table = s2io_tbl,
      .probe = s2io_init_nic,
      .remove = __devexit_p(s2io_rem_nic),
      .err_handler = &s2io_err_handler,
};
486
/* A simplifier macro used both by init and free shared_mem Fns(). */
/* Number of pages needed to hold @len items at @per_each items per page,
 * i.e. ceil(len / per_each).
 *
 * Fix: both arguments are now parenthesized in the expansion.  The
 * original left them bare, so an expression argument such as
 * TXD_MEM_PAGE_CNT(n, a + b) expanded with the wrong precedence and
 * miscomputed the page count. */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
489
490 /**
491  * init_shared_mem - Allocation and Initialization of Memory
492  * @nic: Device private variable.
493  * Description: The function allocates all the memory areas shared
494  * between the NIC and the driver. This includes Tx descriptors,
495  * Rx descriptors and the statistics block.
496  */
497
498 static int init_shared_mem(struct s2io_nic *nic)
499 {
500         u32 size;
501         void *tmp_v_addr, *tmp_v_addr_next;
502         dma_addr_t tmp_p_addr, tmp_p_addr_next;
503         struct RxD_block *pre_rxd_blk = NULL;
504         int i, j, blk_cnt;
505         int lst_size, lst_per_page;
506         struct net_device *dev = nic->dev;
507         unsigned long tmp;
508         struct buffAdd *ba;
509
510         struct mac_info *mac_control;
511         struct config_param *config;
512         unsigned long long mem_allocated = 0;
513
514         mac_control = &nic->mac_control;
515         config = &nic->config;
516
517
518         /* Allocation and initialization of TXDLs in FIOFs */
519         size = 0;
520         for (i = 0; i < config->tx_fifo_num; i++) {
521                 size += config->tx_cfg[i].fifo_len;
522         }
523         if (size > MAX_AVAILABLE_TXDS) {
524                 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
525                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
526                 return -EINVAL;
527         }
528
529         lst_size = (sizeof(struct TxD) * config->max_txds);
530         lst_per_page = PAGE_SIZE / lst_size;
531
532         for (i = 0; i < config->tx_fifo_num; i++) {
533                 int fifo_len = config->tx_cfg[i].fifo_len;
534                 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
535                 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
536                                                           GFP_KERNEL);
537                 if (!mac_control->fifos[i].list_info) {
538                         DBG_PRINT(INFO_DBG,
539                                   "Malloc failed for list_info\n");
540                         return -ENOMEM;
541                 }
542                 mem_allocated += list_holder_size;
543                 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
544         }
545         for (i = 0; i < config->tx_fifo_num; i++) {
546                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
547                                                 lst_per_page);
548                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
549                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
550                     config->tx_cfg[i].fifo_len - 1;
551                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
552                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
553                     config->tx_cfg[i].fifo_len - 1;
554                 mac_control->fifos[i].fifo_no = i;
555                 mac_control->fifos[i].nic = nic;
556                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
557
558                 for (j = 0; j < page_num; j++) {
559                         int k = 0;
560                         dma_addr_t tmp_p;
561                         void *tmp_v;
562                         tmp_v = pci_alloc_consistent(nic->pdev,
563                                                      PAGE_SIZE, &tmp_p);
564                         if (!tmp_v) {
565                                 DBG_PRINT(INFO_DBG,
566                                           "pci_alloc_consistent ");
567                                 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
568                                 return -ENOMEM;
569                         }
570                         /* If we got a zero DMA address(can happen on
571                          * certain platforms like PPC), reallocate.
572                          * Store virtual address of page we don't want,
573                          * to be freed later.
574                          */
575                         if (!tmp_p) {
576                                 mac_control->zerodma_virt_addr = tmp_v;
577                                 DBG_PRINT(INIT_DBG,
578                                 "%s: Zero DMA address for TxDL. ", dev->name);
579                                 DBG_PRINT(INIT_DBG,
580                                 "Virtual address %p\n", tmp_v);
581                                 tmp_v = pci_alloc_consistent(nic->pdev,
582                                                      PAGE_SIZE, &tmp_p);
583                                 if (!tmp_v) {
584                                         DBG_PRINT(INFO_DBG,
585                                           "pci_alloc_consistent ");
586                                         DBG_PRINT(INFO_DBG, "failed for TxDL\n");
587                                         return -ENOMEM;
588                                 }
589                                 mem_allocated += PAGE_SIZE;
590                         }
591                         while (k < lst_per_page) {
592                                 int l = (j * lst_per_page) + k;
593                                 if (l == config->tx_cfg[i].fifo_len)
594                                         break;
595                                 mac_control->fifos[i].list_info[l].list_virt_addr =
596                                     tmp_v + (k * lst_size);
597                                 mac_control->fifos[i].list_info[l].list_phy_addr =
598                                     tmp_p + (k * lst_size);
599                                 k++;
600                         }
601                 }
602         }
603
604         nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
605         if (!nic->ufo_in_band_v)
606                 return -ENOMEM;
607          mem_allocated += (size * sizeof(u64));
608
609         /* Allocation and initialization of RXDs in Rings */
610         size = 0;
611         for (i = 0; i < config->rx_ring_num; i++) {
612                 if (config->rx_cfg[i].num_rxd %
613                     (rxd_count[nic->rxd_mode] + 1)) {
614                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
615                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
616                                   i);
617                         DBG_PRINT(ERR_DBG, "RxDs per Block");
618                         return FAILURE;
619                 }
620                 size += config->rx_cfg[i].num_rxd;
621                 mac_control->rings[i].block_count =
622                         config->rx_cfg[i].num_rxd /
623                         (rxd_count[nic->rxd_mode] + 1 );
624                 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
625                         mac_control->rings[i].block_count;
626         }
627         if (nic->rxd_mode == RXD_MODE_1)
628                 size = (size * (sizeof(struct RxD1)));
629         else
630                 size = (size * (sizeof(struct RxD3)));
631
632         for (i = 0; i < config->rx_ring_num; i++) {
633                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
634                 mac_control->rings[i].rx_curr_get_info.offset = 0;
635                 mac_control->rings[i].rx_curr_get_info.ring_len =
636                     config->rx_cfg[i].num_rxd - 1;
637                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
638                 mac_control->rings[i].rx_curr_put_info.offset = 0;
639                 mac_control->rings[i].rx_curr_put_info.ring_len =
640                     config->rx_cfg[i].num_rxd - 1;
641                 mac_control->rings[i].nic = nic;
642                 mac_control->rings[i].ring_no = i;
643
644                 blk_cnt = config->rx_cfg[i].num_rxd /
645                                 (rxd_count[nic->rxd_mode] + 1);
646                 /*  Allocating all the Rx blocks */
647                 for (j = 0; j < blk_cnt; j++) {
648                         struct rx_block_info *rx_blocks;
649                         int l;
650
651                         rx_blocks = &mac_control->rings[i].rx_blocks[j];
652                         size = SIZE_OF_BLOCK; //size is always page size
653                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
654                                                           &tmp_p_addr);
655                         if (tmp_v_addr == NULL) {
656                                 /*
657                                  * In case of failure, free_shared_mem()
658                                  * is called, which should free any
659                                  * memory that was alloced till the
660                                  * failure happened.
661                                  */
662                                 rx_blocks->block_virt_addr = tmp_v_addr;
663                                 return -ENOMEM;
664                         }
665                         mem_allocated += size;
666                         memset(tmp_v_addr, 0, size);
667                         rx_blocks->block_virt_addr = tmp_v_addr;
668                         rx_blocks->block_dma_addr = tmp_p_addr;
669                         rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
670                                                   rxd_count[nic->rxd_mode],
671                                                   GFP_KERNEL);
672                         if (!rx_blocks->rxds)
673                                 return -ENOMEM;
674                         mem_allocated += 
675                         (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
676                         for (l=0; l<rxd_count[nic->rxd_mode];l++) {
677                                 rx_blocks->rxds[l].virt_addr =
678                                         rx_blocks->block_virt_addr +
679                                         (rxd_size[nic->rxd_mode] * l);
680                                 rx_blocks->rxds[l].dma_addr =
681                                         rx_blocks->block_dma_addr +
682                                         (rxd_size[nic->rxd_mode] * l);
683                         }
684                 }
685                 /* Interlinking all Rx Blocks */
686                 for (j = 0; j < blk_cnt; j++) {
687                         tmp_v_addr =
688                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
689                         tmp_v_addr_next =
690                                 mac_control->rings[i].rx_blocks[(j + 1) %
691                                               blk_cnt].block_virt_addr;
692                         tmp_p_addr =
693                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
694                         tmp_p_addr_next =
695                                 mac_control->rings[i].rx_blocks[(j + 1) %
696                                               blk_cnt].block_dma_addr;
697
698                         pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
699                         pre_rxd_blk->reserved_2_pNext_RxD_block =
700                             (unsigned long) tmp_v_addr_next;
701                         pre_rxd_blk->pNext_RxD_Blk_physical =
702                             (u64) tmp_p_addr_next;
703                 }
704         }
705         if (nic->rxd_mode == RXD_MODE_3B) {
706                 /*
707                  * Allocation of Storages for buffer addresses in 2BUFF mode
708                  * and the buffers as well.
709                  */
710                 for (i = 0; i < config->rx_ring_num; i++) {
711                         blk_cnt = config->rx_cfg[i].num_rxd /
712                            (rxd_count[nic->rxd_mode]+ 1);
713                         mac_control->rings[i].ba =
714                                 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
715                                      GFP_KERNEL);
716                         if (!mac_control->rings[i].ba)
717                                 return -ENOMEM;
718                         mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
719                         for (j = 0; j < blk_cnt; j++) {
720                                 int k = 0;
721                                 mac_control->rings[i].ba[j] =
722                                         kmalloc((sizeof(struct buffAdd) *
723                                                 (rxd_count[nic->rxd_mode] + 1)),
724                                                 GFP_KERNEL);
725                                 if (!mac_control->rings[i].ba[j])
726                                         return -ENOMEM;
727                                 mem_allocated += (sizeof(struct buffAdd) *  \
728                                         (rxd_count[nic->rxd_mode] + 1));
729                                 while (k != rxd_count[nic->rxd_mode]) {
730                                         ba = &mac_control->rings[i].ba[j][k];
731
732                                         ba->ba_0_org = (void *) kmalloc
733                                             (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
734                                         if (!ba->ba_0_org)
735                                                 return -ENOMEM;
736                                         mem_allocated += 
737                                                 (BUF0_LEN + ALIGN_SIZE);
738                                         tmp = (unsigned long)ba->ba_0_org;
739                                         tmp += ALIGN_SIZE;
740                                         tmp &= ~((unsigned long) ALIGN_SIZE);
741                                         ba->ba_0 = (void *) tmp;
742
743                                         ba->ba_1_org = (void *) kmalloc
744                                             (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
745                                         if (!ba->ba_1_org)
746                                                 return -ENOMEM;
747                                         mem_allocated 
748                                                 += (BUF1_LEN + ALIGN_SIZE);
749                                         tmp = (unsigned long) ba->ba_1_org;
750                                         tmp += ALIGN_SIZE;
751                                         tmp &= ~((unsigned long) ALIGN_SIZE);
752                                         ba->ba_1 = (void *) tmp;
753                                         k++;
754                                 }
755                         }
756                 }
757         }
758
759         /* Allocation and initialization of Statistics block */
760         size = sizeof(struct stat_block);
761         mac_control->stats_mem = pci_alloc_consistent
762             (nic->pdev, size, &mac_control->stats_mem_phy);
763
764         if (!mac_control->stats_mem) {
765                 /*
766                  * In case of failure, free_shared_mem() is called, which
767                  * should free any memory that was alloced till the
768                  * failure happened.
769                  */
770                 return -ENOMEM;
771         }
772         mem_allocated += size;
773         mac_control->stats_mem_sz = size;
774
775         tmp_v_addr = mac_control->stats_mem;
776         mac_control->stats_info = (struct stat_block *) tmp_v_addr;
777         memset(tmp_v_addr, 0, size);
778         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
779                   (unsigned long long) tmp_p_addr);
780         mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
781         return SUCCESS;
782 }
783
784 /**
785  * free_shared_mem - Free the allocated Memory
786  * @nic:  Device private variable.
787  * Description: This function is to free all memory locations allocated by
788  * the init_shared_mem() function and return it to the kernel.
789  */
790
static void free_shared_mem(struct s2io_nic *nic)
{
        int i, j, blk_cnt, size;
        u32 ufo_size = 0;
        void *tmp_v_addr;
        dma_addr_t tmp_p_addr;
        struct mac_info *mac_control;
        struct config_param *config;
        int lst_size, lst_per_page;
        struct net_device *dev;
        int page_num = 0;

        if (!nic)
                return;

        dev = nic->dev;

        mac_control = &nic->mac_control;
        config = &nic->config;

        /* TxD list geometry; must mirror the computation done at
         * allocation time in init_shared_mem() so the same number of
         * pages is released here.
         */
        lst_size = (sizeof(struct TxD) * config->max_txds);
        lst_per_page = PAGE_SIZE / lst_size;

        /* Free the DMA-coherent pages holding each Tx FIFO's TxD lists. */
        for (i = 0; i < config->tx_fifo_num; i++) {
                /* Accumulate fifo lengths; used at the bottom to account
                 * for the size of the ufo_in_band_v buffer.
                 */
                ufo_size += config->tx_cfg[i].fifo_len;
                page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
                                                        lst_per_page);
                for (j = 0; j < page_num; j++) {
                        int mem_blks = (j * lst_per_page);
                        /* No list_info array at all: allocation never got
                         * this far, so there is nothing left to free.
                         */
                        if (!mac_control->fifos[i].list_info)
                                return;
                        /* A NULL virtual address marks where allocation
                         * stopped for this FIFO.
                         */
                        if (!mac_control->fifos[i].list_info[mem_blks].
                                 list_virt_addr)
                                break;
                        pci_free_consistent(nic->pdev, PAGE_SIZE,
                                            mac_control->fifos[i].
                                            list_info[mem_blks].
                                            list_virt_addr,
                                            mac_control->fifos[i].
                                            list_info[mem_blks].
                                            list_phy_addr);
                        nic->mac_control.stats_info->sw_stat.mem_freed
                                                += PAGE_SIZE;
                }
                /* If we got a zero DMA address during allocation,
                 * free the page now
                 */
                if (mac_control->zerodma_virt_addr) {
                        pci_free_consistent(nic->pdev, PAGE_SIZE,
                                            mac_control->zerodma_virt_addr,
                                            (dma_addr_t)0);
                        DBG_PRINT(INIT_DBG,
                                "%s: Freeing TxDL with zero DMA addr. ",
                                dev->name);
                        DBG_PRINT(INIT_DBG, "Virtual address %p\n",
                                mac_control->zerodma_virt_addr);
                        nic->mac_control.stats_info->sw_stat.mem_freed
                                                += PAGE_SIZE;
                }
                kfree(mac_control->fifos[i].list_info);
                nic->mac_control.stats_info->sw_stat.mem_freed +=
                (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
        }

        /* Free the Rx block DMA memory and the per-block rxd bookkeeping. */
        size = SIZE_OF_BLOCK;
        for (i = 0; i < config->rx_ring_num; i++) {
                blk_cnt = mac_control->rings[i].block_count;
                for (j = 0; j < blk_cnt; j++) {
                        tmp_v_addr = mac_control->rings[i].rx_blocks[j].
                                block_virt_addr;
                        tmp_p_addr = mac_control->rings[i].rx_blocks[j].
                                block_dma_addr;
                        /* NULL marks the first never-allocated block. */
                        if (tmp_v_addr == NULL)
                                break;
                        pci_free_consistent(nic->pdev, size,
                                            tmp_v_addr, tmp_p_addr);
                        nic->mac_control.stats_info->sw_stat.mem_freed += size;
                        kfree(mac_control->rings[i].rx_blocks[j].rxds);
                        nic->mac_control.stats_info->sw_stat.mem_freed +=
                        ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
                }
        }

        if (nic->rxd_mode == RXD_MODE_3B) {
                /* Freeing buffer storage addresses in 2BUFF mode. */
                for (i = 0; i < config->rx_ring_num; i++) {
                        blk_cnt = config->rx_cfg[i].num_rxd /
                            (rxd_count[nic->rxd_mode] + 1);
                        for (j = 0; j < blk_cnt; j++) {
                                int k = 0;
                                if (!mac_control->rings[i].ba[j])
                                        continue;
                                while (k != rxd_count[nic->rxd_mode]) {
                                        struct buffAdd *ba =
                                                &mac_control->rings[i].ba[j][k];
                                        /* Free the unaligned originals;
                                         * ba_0/ba_1 are aligned pointers
                                         * inside these allocations.
                                         */
                                        kfree(ba->ba_0_org);
                                        nic->mac_control.stats_info->sw_stat.\
                                        mem_freed += (BUF0_LEN + ALIGN_SIZE);
                                        kfree(ba->ba_1_org);
                                        nic->mac_control.stats_info->sw_stat.\
                                        mem_freed += (BUF1_LEN + ALIGN_SIZE);
                                        k++;
                                }
                                kfree(mac_control->rings[i].ba[j]);
                                nic->mac_control.stats_info->sw_stat.mem_freed                          += (sizeof(struct buffAdd) * 
                                (rxd_count[nic->rxd_mode] + 1));
                        }
                        kfree(mac_control->rings[i].ba);
                        nic->mac_control.stats_info->sw_stat.mem_freed +=
                        (sizeof(struct buffAdd *) * blk_cnt);
                }
        }

        /* Release the statistics block last. */
        if (mac_control->stats_mem) {
                pci_free_consistent(nic->pdev,
                                    mac_control->stats_mem_sz,
                                    mac_control->stats_mem,
                                    mac_control->stats_mem_phy);
                /* NOTE(review): stats_info points into stats_mem (set up
                 * in init_shared_mem), so this update — and the one in
                 * the ufo block below — writes to just-freed DMA memory.
                 * Looks like a use-after-free; confirm and restructure.
                 */
                nic->mac_control.stats_info->sw_stat.mem_freed +=
                        mac_control->stats_mem_sz;
        }
        if (nic->ufo_in_band_v) {
                kfree(nic->ufo_in_band_v);
                nic->mac_control.stats_info->sw_stat.mem_freed
                        += (ufo_size * sizeof(u64));
        }
}
918
/**
 * s2io_verify_pci_mode - Check that the adapter reports a known PCI mode.
 */
922
923 static int s2io_verify_pci_mode(struct s2io_nic *nic)
924 {
925         struct XENA_dev_config __iomem *bar0 = nic->bar0;
926         register u64 val64 = 0;
927         int     mode;
928
929         val64 = readq(&bar0->pci_mode);
930         mode = (u8)GET_PCI_MODE(val64);
931
932         if ( val64 & PCI_MODE_UNKNOWN_MODE)
933                 return -1;      /* Unknown PCI mode */
934         return mode;
935 }
936
937 #define NEC_VENID   0x1033
938 #define NEC_DEVID   0x0125
939 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
940 {
941         struct pci_dev *tdev = NULL;
942         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
943                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
944                         if (tdev->bus == s2io_pdev->bus->parent)
945                                 pci_dev_put(tdev);
946                                 return 1;
947                 }
948         }
949         return 0;
950 }
951
/* Bus speed in MHz, indexed by the PCI mode value decoded via
 * GET_PCI_MODE(); see the PCI_MODE_* cases in s2io_print_pci_mode().
 */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
/**
 * s2io_print_pci_mode - Log the PCI bus width and speed the adapter is on.
 */
956 static int s2io_print_pci_mode(struct s2io_nic *nic)
957 {
958         struct XENA_dev_config __iomem *bar0 = nic->bar0;
959         register u64 val64 = 0;
960         int     mode;
961         struct config_param *config = &nic->config;
962
963         val64 = readq(&bar0->pci_mode);
964         mode = (u8)GET_PCI_MODE(val64);
965
966         if ( val64 & PCI_MODE_UNKNOWN_MODE)
967                 return -1;      /* Unknown PCI mode */
968
969         config->bus_speed = bus_speed[mode];
970
971         if (s2io_on_nec_bridge(nic->pdev)) {
972                 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
973                                                         nic->dev->name);
974                 return mode;
975         }
976
977         if (val64 & PCI_MODE_32_BITS) {
978                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
979         } else {
980                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
981         }
982
983         switch(mode) {
984                 case PCI_MODE_PCI_33:
985                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
986                         break;
987                 case PCI_MODE_PCI_66:
988                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
989                         break;
990                 case PCI_MODE_PCIX_M1_66:
991                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
992                         break;
993                 case PCI_MODE_PCIX_M1_100:
994                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
995                         break;
996                 case PCI_MODE_PCIX_M1_133:
997                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
998                         break;
999                 case PCI_MODE_PCIX_M2_66:
1000                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1001                         break;
1002                 case PCI_MODE_PCIX_M2_100:
1003                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1004                         break;
1005                 case PCI_MODE_PCIX_M2_133:
1006                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1007                         break;
1008                 default:
1009                         return -1;      /* Unsupported bus speed */
1010         }
1011
1012         return mode;
1013 }
1014
1015 /**
1016  *  init_nic - Initialization of hardware
1017  *  @nic: device private variable
1018  *  Description: The function sequentially configures every block
1019  *  of the H/W from their reset values.
1020  *  Return Value:  SUCCESS on success and
1021  *  '-1' on failure (endian settings incorrect).
1022  */
1023
1024 static int init_nic(struct s2io_nic *nic)
1025 {
1026         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1027         struct net_device *dev = nic->dev;
1028         register u64 val64 = 0;
1029         void __iomem *add;
1030         u32 time;
1031         int i, j;
1032         struct mac_info *mac_control;
1033         struct config_param *config;
1034         int dtx_cnt = 0;
1035         unsigned long long mem_share;
1036         int mem_size;
1037
1038         mac_control = &nic->mac_control;
1039         config = &nic->config;
1040
1041         /* to set the swapper controle on the card */
1042         if(s2io_set_swapper(nic)) {
1043                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1044                 return -1;
1045         }
1046
1047         /*
1048          * Herc requires EOI to be removed from reset before XGXS, so..
1049          */
1050         if (nic->device_type & XFRAME_II_DEVICE) {
1051                 val64 = 0xA500000000ULL;
1052                 writeq(val64, &bar0->sw_reset);
1053                 msleep(500);
1054                 val64 = readq(&bar0->sw_reset);
1055         }
1056
1057         /* Remove XGXS from reset state */
1058         val64 = 0;
1059         writeq(val64, &bar0->sw_reset);
1060         msleep(500);
1061         val64 = readq(&bar0->sw_reset);
1062
1063         /*  Enable Receiving broadcasts */
1064         add = &bar0->mac_cfg;
1065         val64 = readq(&bar0->mac_cfg);
1066         val64 |= MAC_RMAC_BCAST_ENABLE;
1067         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1068         writel((u32) val64, add);
1069         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1070         writel((u32) (val64 >> 32), (add + 4));
1071
1072         /* Read registers in all blocks */
1073         val64 = readq(&bar0->mac_int_mask);
1074         val64 = readq(&bar0->mc_int_mask);
1075         val64 = readq(&bar0->xgxs_int_mask);
1076
1077         /*  Set MTU */
1078         val64 = dev->mtu;
1079         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1080
1081         if (nic->device_type & XFRAME_II_DEVICE) {
1082                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1083                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1084                                           &bar0->dtx_control, UF);
1085                         if (dtx_cnt & 0x1)
1086                                 msleep(1); /* Necessary!! */
1087                         dtx_cnt++;
1088                 }
1089         } else {
1090                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1091                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1092                                           &bar0->dtx_control, UF);
1093                         val64 = readq(&bar0->dtx_control);
1094                         dtx_cnt++;
1095                 }
1096         }
1097
1098         /*  Tx DMA Initialization */
1099         val64 = 0;
1100         writeq(val64, &bar0->tx_fifo_partition_0);
1101         writeq(val64, &bar0->tx_fifo_partition_1);
1102         writeq(val64, &bar0->tx_fifo_partition_2);
1103         writeq(val64, &bar0->tx_fifo_partition_3);
1104
1105
1106         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1107                 val64 |=
1108                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1109                          13) | vBIT(config->tx_cfg[i].fifo_priority,
1110                                     ((i * 32) + 5), 3);
1111
1112                 if (i == (config->tx_fifo_num - 1)) {
1113                         if (i % 2 == 0)
1114                                 i++;
1115                 }
1116
1117                 switch (i) {
1118                 case 1:
1119                         writeq(val64, &bar0->tx_fifo_partition_0);
1120                         val64 = 0;
1121                         break;
1122                 case 3:
1123                         writeq(val64, &bar0->tx_fifo_partition_1);
1124                         val64 = 0;
1125                         break;
1126                 case 5:
1127                         writeq(val64, &bar0->tx_fifo_partition_2);
1128                         val64 = 0;
1129                         break;
1130                 case 7:
1131                         writeq(val64, &bar0->tx_fifo_partition_3);
1132                         break;
1133                 }
1134         }
1135
1136         /*
1137          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1138          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1139          */
1140         if ((nic->device_type == XFRAME_I_DEVICE) &&
1141                 (nic->pdev->revision < 4))
1142                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1143
1144         val64 = readq(&bar0->tx_fifo_partition_0);
1145         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1146                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1147
1148         /*
1149          * Initialization of Tx_PA_CONFIG register to ignore packet
1150          * integrity checking.
1151          */
1152         val64 = readq(&bar0->tx_pa_cfg);
1153         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1154             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1155         writeq(val64, &bar0->tx_pa_cfg);
1156
1157         /* Rx DMA intialization. */
1158         val64 = 0;
1159         for (i = 0; i < config->rx_ring_num; i++) {
1160                 val64 |=
1161                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1162                          3);
1163         }
1164         writeq(val64, &bar0->rx_queue_priority);
1165
1166         /*
1167          * Allocating equal share of memory to all the
1168          * configured Rings.
1169          */
1170         val64 = 0;
1171         if (nic->device_type & XFRAME_II_DEVICE)
1172                 mem_size = 32;
1173         else
1174                 mem_size = 64;
1175
1176         for (i = 0; i < config->rx_ring_num; i++) {
1177                 switch (i) {
1178                 case 0:
1179                         mem_share = (mem_size / config->rx_ring_num +
1180                                      mem_size % config->rx_ring_num);
1181                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1182                         continue;
1183                 case 1:
1184                         mem_share = (mem_size / config->rx_ring_num);
1185                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1186                         continue;
1187                 case 2:
1188                         mem_share = (mem_size / config->rx_ring_num);
1189                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1190                         continue;
1191                 case 3:
1192                         mem_share = (mem_size / config->rx_ring_num);
1193                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1194                         continue;
1195                 case 4:
1196                         mem_share = (mem_size / config->rx_ring_num);
1197                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1198                         continue;
1199                 case 5:
1200                         mem_share = (mem_size / config->rx_ring_num);
1201                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1202                         continue;
1203                 case 6:
1204                         mem_share = (mem_size / config->rx_ring_num);
1205                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1206                         continue;
1207                 case 7:
1208                         mem_share = (mem_size / config->rx_ring_num);
1209                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1210                         continue;
1211                 }
1212         }
1213         writeq(val64, &bar0->rx_queue_cfg);
1214
1215         /*
1216          * Filling Tx round robin registers
1217          * as per the number of FIFOs
1218          */
1219         switch (config->tx_fifo_num) {
1220         case 1:
1221                 val64 = 0x0000000000000000ULL;
1222                 writeq(val64, &bar0->tx_w_round_robin_0);
1223                 writeq(val64, &bar0->tx_w_round_robin_1);
1224                 writeq(val64, &bar0->tx_w_round_robin_2);
1225                 writeq(val64, &bar0->tx_w_round_robin_3);
1226                 writeq(val64, &bar0->tx_w_round_robin_4);
1227                 break;
1228         case 2:
1229                 val64 = 0x0000010000010000ULL;
1230                 writeq(val64, &bar0->tx_w_round_robin_0);
1231                 val64 = 0x0100000100000100ULL;
1232                 writeq(val64, &bar0->tx_w_round_robin_1);
1233                 val64 = 0x0001000001000001ULL;
1234                 writeq(val64, &bar0->tx_w_round_robin_2);
1235                 val64 = 0x0000010000010000ULL;
1236                 writeq(val64, &bar0->tx_w_round_robin_3);
1237                 val64 = 0x0100000000000000ULL;
1238                 writeq(val64, &bar0->tx_w_round_robin_4);
1239                 break;
1240         case 3:
1241                 val64 = 0x0001000102000001ULL;
1242                 writeq(val64, &bar0->tx_w_round_robin_0);
1243                 val64 = 0x0001020000010001ULL;
1244                 writeq(val64, &bar0->tx_w_round_robin_1);
1245                 val64 = 0x0200000100010200ULL;
1246                 writeq(val64, &bar0->tx_w_round_robin_2);
1247                 val64 = 0x0001000102000001ULL;
1248                 writeq(val64, &bar0->tx_w_round_robin_3);
1249                 val64 = 0x0001020000000000ULL;
1250                 writeq(val64, &bar0->tx_w_round_robin_4);
1251                 break;
1252         case 4:
1253                 val64 = 0x0001020300010200ULL;
1254                 writeq(val64, &bar0->tx_w_round_robin_0);
1255                 val64 = 0x0100000102030001ULL;
1256                 writeq(val64, &bar0->tx_w_round_robin_1);
1257                 val64 = 0x0200010000010203ULL;
1258                 writeq(val64, &bar0->tx_w_round_robin_2);
1259                 val64 = 0x0001020001000001ULL;
1260                 writeq(val64, &bar0->tx_w_round_robin_3);
1261                 val64 = 0x0203000100000000ULL;
1262                 writeq(val64, &bar0->tx_w_round_robin_4);
1263                 break;
1264         case 5:
1265                 val64 = 0x0001000203000102ULL;
1266                 writeq(val64, &bar0->tx_w_round_robin_0);
1267                 val64 = 0x0001020001030004ULL;
1268                 writeq(val64, &bar0->tx_w_round_robin_1);
1269                 val64 = 0x0001000203000102ULL;
1270                 writeq(val64, &bar0->tx_w_round_robin_2);
1271                 val64 = 0x0001020001030004ULL;
1272                 writeq(val64, &bar0->tx_w_round_robin_3);
1273                 val64 = 0x0001000000000000ULL;
1274                 writeq(val64, &bar0->tx_w_round_robin_4);
1275                 break;
1276         case 6:
1277                 val64 = 0x0001020304000102ULL;
1278                 writeq(val64, &bar0->tx_w_round_robin_0);
1279                 val64 = 0x0304050001020001ULL;
1280                 writeq(val64, &bar0->tx_w_round_robin_1);
1281                 val64 = 0x0203000100000102ULL;
1282                 writeq(val64, &bar0->tx_w_round_robin_2);
1283                 val64 = 0x0304000102030405ULL;
1284                 writeq(val64, &bar0->tx_w_round_robin_3);
1285                 val64 = 0x0001000200000000ULL;
1286                 writeq(val64, &bar0->tx_w_round_robin_4);
1287                 break;
1288         case 7:
1289                 val64 = 0x0001020001020300ULL;
1290                 writeq(val64, &bar0->tx_w_round_robin_0);
1291                 val64 = 0x0102030400010203ULL;
1292                 writeq(val64, &bar0->tx_w_round_robin_1);
1293                 val64 = 0x0405060001020001ULL;
1294                 writeq(val64, &bar0->tx_w_round_robin_2);
1295                 val64 = 0x0304050000010200ULL;
1296                 writeq(val64, &bar0->tx_w_round_robin_3);
1297                 val64 = 0x0102030000000000ULL;
1298                 writeq(val64, &bar0->tx_w_round_robin_4);
1299                 break;
1300         case 8:
1301                 val64 = 0x0001020300040105ULL;
1302                 writeq(val64, &bar0->tx_w_round_robin_0);
1303                 val64 = 0x0200030106000204ULL;
1304                 writeq(val64, &bar0->tx_w_round_robin_1);
1305                 val64 = 0x0103000502010007ULL;
1306                 writeq(val64, &bar0->tx_w_round_robin_2);
1307                 val64 = 0x0304010002060500ULL;
1308                 writeq(val64, &bar0->tx_w_round_robin_3);
1309                 val64 = 0x0103020400000000ULL;
1310                 writeq(val64, &bar0->tx_w_round_robin_4);
1311                 break;
1312         }
1313
1314         /* Enable all configured Tx FIFO partitions */
1315         val64 = readq(&bar0->tx_fifo_partition_0);
1316         val64 |= (TX_FIFO_PARTITION_EN);
1317         writeq(val64, &bar0->tx_fifo_partition_0);
1318
1319         /* Filling the Rx round robin registers as per the
1320          * number of Rings and steering based on QoS.
1321          */
1322         switch (config->rx_ring_num) {
1323         case 1:
1324                 val64 = 0x8080808080808080ULL;
1325                 writeq(val64, &bar0->rts_qos_steering);
1326                 break;
1327         case 2:
1328                 val64 = 0x0000010000010000ULL;
1329                 writeq(val64, &bar0->rx_w_round_robin_0);
1330                 val64 = 0x0100000100000100ULL;
1331                 writeq(val64, &bar0->rx_w_round_robin_1);
1332                 val64 = 0x0001000001000001ULL;
1333                 writeq(val64, &bar0->rx_w_round_robin_2);
1334                 val64 = 0x0000010000010000ULL;
1335                 writeq(val64, &bar0->rx_w_round_robin_3);
1336                 val64 = 0x0100000000000000ULL;
1337                 writeq(val64, &bar0->rx_w_round_robin_4);
1338
1339                 val64 = 0x8080808040404040ULL;
1340                 writeq(val64, &bar0->rts_qos_steering);
1341                 break;
1342         case 3:
1343                 val64 = 0x0001000102000001ULL;
1344                 writeq(val64, &bar0->rx_w_round_robin_0);
1345                 val64 = 0x0001020000010001ULL;
1346                 writeq(val64, &bar0->rx_w_round_robin_1);
1347                 val64 = 0x0200000100010200ULL;
1348                 writeq(val64, &bar0->rx_w_round_robin_2);
1349                 val64 = 0x0001000102000001ULL;
1350                 writeq(val64, &bar0->rx_w_round_robin_3);
1351                 val64 = 0x0001020000000000ULL;
1352                 writeq(val64, &bar0->rx_w_round_robin_4);
1353
1354                 val64 = 0x8080804040402020ULL;
1355                 writeq(val64, &bar0->rts_qos_steering);
1356                 break;
1357         case 4:
1358                 val64 = 0x0001020300010200ULL;
1359                 writeq(val64, &bar0->rx_w_round_robin_0);
1360                 val64 = 0x0100000102030001ULL;
1361                 writeq(val64, &bar0->rx_w_round_robin_1);
1362                 val64 = 0x0200010000010203ULL;
1363                 writeq(val64, &bar0->rx_w_round_robin_2);
1364                 val64 = 0x0001020001000001ULL;
1365                 writeq(val64, &bar0->rx_w_round_robin_3);
1366                 val64 = 0x0203000100000000ULL;
1367                 writeq(val64, &bar0->rx_w_round_robin_4);
1368
1369                 val64 = 0x8080404020201010ULL;
1370                 writeq(val64, &bar0->rts_qos_steering);
1371                 break;
1372         case 5:
1373                 val64 = 0x0001000203000102ULL;
1374                 writeq(val64, &bar0->rx_w_round_robin_0);
1375                 val64 = 0x0001020001030004ULL;
1376                 writeq(val64, &bar0->rx_w_round_robin_1);
1377                 val64 = 0x0001000203000102ULL;
1378                 writeq(val64, &bar0->rx_w_round_robin_2);
1379                 val64 = 0x0001020001030004ULL;
1380                 writeq(val64, &bar0->rx_w_round_robin_3);
1381                 val64 = 0x0001000000000000ULL;
1382                 writeq(val64, &bar0->rx_w_round_robin_4);
1383
1384                 val64 = 0x8080404020201008ULL;
1385                 writeq(val64, &bar0->rts_qos_steering);
1386                 break;
1387         case 6:
1388                 val64 = 0x0001020304000102ULL;
1389                 writeq(val64, &bar0->rx_w_round_robin_0);
1390                 val64 = 0x0304050001020001ULL;
1391                 writeq(val64, &bar0->rx_w_round_robin_1);
1392                 val64 = 0x0203000100000102ULL;
1393                 writeq(val64, &bar0->rx_w_round_robin_2);
1394                 val64 = 0x0304000102030405ULL;
1395                 writeq(val64, &bar0->rx_w_round_robin_3);
1396                 val64 = 0x0001000200000000ULL;
1397                 writeq(val64, &bar0->rx_w_round_robin_4);
1398
1399                 val64 = 0x8080404020100804ULL;
1400                 writeq(val64, &bar0->rts_qos_steering);
1401                 break;
1402         case 7:
1403                 val64 = 0x0001020001020300ULL;
1404                 writeq(val64, &bar0->rx_w_round_robin_0);
1405                 val64 = 0x0102030400010203ULL;
1406                 writeq(val64, &bar0->rx_w_round_robin_1);
1407                 val64 = 0x0405060001020001ULL;
1408                 writeq(val64, &bar0->rx_w_round_robin_2);
1409                 val64 = 0x0304050000010200ULL;
1410                 writeq(val64, &bar0->rx_w_round_robin_3);
1411                 val64 = 0x0102030000000000ULL;
1412                 writeq(val64, &bar0->rx_w_round_robin_4);
1413
1414                 val64 = 0x8080402010080402ULL;
1415                 writeq(val64, &bar0->rts_qos_steering);
1416                 break;
1417         case 8:
1418                 val64 = 0x0001020300040105ULL;
1419                 writeq(val64, &bar0->rx_w_round_robin_0);
1420                 val64 = 0x0200030106000204ULL;
1421                 writeq(val64, &bar0->rx_w_round_robin_1);
1422                 val64 = 0x0103000502010007ULL;
1423                 writeq(val64, &bar0->rx_w_round_robin_2);
1424                 val64 = 0x0304010002060500ULL;
1425                 writeq(val64, &bar0->rx_w_round_robin_3);
1426                 val64 = 0x0103020400000000ULL;
1427                 writeq(val64, &bar0->rx_w_round_robin_4);
1428
1429                 val64 = 0x8040201008040201ULL;
1430                 writeq(val64, &bar0->rts_qos_steering);
1431                 break;
1432         }
1433
1434         /* UDP Fix */
1435         val64 = 0;
1436         for (i = 0; i < 8; i++)
1437                 writeq(val64, &bar0->rts_frm_len_n[i]);
1438
1439         /* Set the default rts frame length for the rings configured */
1440         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1441         for (i = 0 ; i < config->rx_ring_num ; i++)
1442                 writeq(val64, &bar0->rts_frm_len_n[i]);
1443
1444         /* Set the frame length for the configured rings
1445          * desired by the user
1446          */
1447         for (i = 0; i < config->rx_ring_num; i++) {
1448                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1449                  * specified frame length steering.
1450                  * If the user provides the frame length then program
1451                  * the rts_frm_len register for those values or else
1452                  * leave it as it is.
1453                  */
1454                 if (rts_frm_len[i] != 0) {
1455                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1456                                 &bar0->rts_frm_len_n[i]);
1457                 }
1458         }
1459         
1460         /* Disable differentiated services steering logic */
1461         for (i = 0; i < 64; i++) {
1462                 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1463                         DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1464                                 dev->name);
1465                         DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1466                         return FAILURE;
1467                 }
1468         }
1469
1470         /* Program statistics memory */
1471         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1472
1473         if (nic->device_type == XFRAME_II_DEVICE) {
1474                 val64 = STAT_BC(0x320);
1475                 writeq(val64, &bar0->stat_byte_cnt);
1476         }
1477
1478         /*
1479          * Initializing the sampling rate for the device to calculate the
1480          * bandwidth utilization.
1481          */
1482         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1483             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1484         writeq(val64, &bar0->mac_link_util);
1485
1486
1487         /*
1488          * Initializing the Transmit and Receive Traffic Interrupt
1489          * Scheme.
1490          */
1491         /*
1492          * TTI Initialization. Default Tx timer gets us about
1493          * 250 interrupts per sec. Continuous interrupts are enabled
1494          * by default.
1495          */
1496         if (nic->device_type == XFRAME_II_DEVICE) {
1497                 int count = (nic->config.bus_speed * 125)/2;
1498                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1499         } else {
1500
1501                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1502         }
1503         val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1504             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1505             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1506                 if (use_continuous_tx_intrs)
1507                         val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1508         writeq(val64, &bar0->tti_data1_mem);
1509
1510         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1511             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1512             TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1513         writeq(val64, &bar0->tti_data2_mem);
1514
1515         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1516         writeq(val64, &bar0->tti_command_mem);
1517
1518         /*
1519          * Once the operation completes, the Strobe bit of the command
1520          * register will be reset. We poll for this particular condition
1521          * We wait for a maximum of 500ms for the operation to complete,
1522          * if it's not complete by then we return error.
1523          */
1524         time = 0;
1525         while (TRUE) {
1526                 val64 = readq(&bar0->tti_command_mem);
1527                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1528                         break;
1529                 }
1530                 if (time > 10) {
1531                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1532                                   dev->name);
1533                         return -1;
1534                 }
1535                 msleep(50);
1536                 time++;
1537         }
1538
1539         if (nic->config.bimodal) {
1540                 int k = 0;
1541                 for (k = 0; k < config->rx_ring_num; k++) {
1542                         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1543                         val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1544                         writeq(val64, &bar0->tti_command_mem);
1545
1546                 /*
1547                  * Once the operation completes, the Strobe bit of the command
1548                  * register will be reset. We poll for this particular condition
1549                  * We wait for a maximum of 500ms for the operation to complete,
1550                  * if it's not complete by then we return error.
1551                 */
1552                         time = 0;
1553                         while (TRUE) {
1554                                 val64 = readq(&bar0->tti_command_mem);
1555                                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1556                                         break;
1557                                 }
1558                                 if (time > 10) {
1559                                         DBG_PRINT(ERR_DBG,
1560                                                 "%s: TTI init Failed\n",
1561                                         dev->name);
1562                                         return -1;
1563                                 }
1564                                 time++;
1565                                 msleep(50);
1566                         }
1567                 }
1568         } else {
1569
1570                 /* RTI Initialization */
1571                 if (nic->device_type == XFRAME_II_DEVICE) {
1572                         /*
1573                          * Programmed to generate Apprx 500 Intrs per
1574                          * second
1575                          */
1576                         int count = (nic->config.bus_speed * 125)/4;
1577                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1578                 } else {
1579                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1580                 }
1581                 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1582                     RTI_DATA1_MEM_RX_URNG_B(0x10) |
1583                     RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1584
1585                 writeq(val64, &bar0->rti_data1_mem);
1586
1587                 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1588                     RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1589                 if (nic->intr_type == MSI_X)
1590                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1591                                 RTI_DATA2_MEM_RX_UFC_D(0x40));
1592                 else
1593                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1594                                 RTI_DATA2_MEM_RX_UFC_D(0x80));
1595                 writeq(val64, &bar0->rti_data2_mem);
1596
1597                 for (i = 0; i < config->rx_ring_num; i++) {
1598                         val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1599                                         | RTI_CMD_MEM_OFFSET(i);
1600                         writeq(val64, &bar0->rti_command_mem);
1601
1602                         /*
1603                          * Once the operation completes, the Strobe bit of the
1604                          * command register will be reset. We poll for this
1605                          * particular condition. We wait for a maximum of 500ms
1606                          * for the operation to complete, if it's not complete
1607                          * by then we return error.
1608                          */
1609                         time = 0;
1610                         while (TRUE) {
1611                                 val64 = readq(&bar0->rti_command_mem);
1612                                 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1613                                         break;
1614                                 }
1615                                 if (time > 10) {
1616                                         DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1617                                                   dev->name);
1618                                         return -1;
1619                                 }
1620                                 time++;
1621                                 msleep(50);
1622                         }
1623                 }
1624         }
1625
1626         /*
1627          * Initializing proper values as Pause threshold into all
1628          * the 8 Queues on Rx side.
1629          */
1630         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1631         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1632
1633         /* Disable RMAC PAD STRIPPING */
1634         add = &bar0->mac_cfg;
1635         val64 = readq(&bar0->mac_cfg);
1636         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1637         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1638         writel((u32) (val64), add);
1639         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1640         writel((u32) (val64 >> 32), (add + 4));
1641         val64 = readq(&bar0->mac_cfg);
1642
1643         /* Enable FCS stripping by adapter */
1644         add = &bar0->mac_cfg;
1645         val64 = readq(&bar0->mac_cfg);
1646         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1647         if (nic->device_type == XFRAME_II_DEVICE)
1648                 writeq(val64, &bar0->mac_cfg);
1649         else {
1650                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1651                 writel((u32) (val64), add);
1652                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1653                 writel((u32) (val64 >> 32), (add + 4));
1654         }
1655
1656         /*
1657          * Set the time value to be inserted in the pause frame
1658          * generated by xena.
1659          */
1660         val64 = readq(&bar0->rmac_pause_cfg);
1661         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1662         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1663         writeq(val64, &bar0->rmac_pause_cfg);
1664
1665         /*
1666          * Set the Threshold Limit for Generating the pause frame
1667          * If the amount of data in any Queue exceeds ratio of
1668          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1669          * pause frame is generated
1670          */
1671         val64 = 0;
1672         for (i = 0; i < 4; i++) {
1673                 val64 |=
1674                     (((u64) 0xFF00 | nic->mac_control.
1675                       mc_pause_threshold_q0q3)
1676                      << (i * 2 * 8));
1677         }
1678         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1679
1680         val64 = 0;
1681         for (i = 0; i < 4; i++) {
1682                 val64 |=
1683                     (((u64) 0xFF00 | nic->mac_control.
1684                       mc_pause_threshold_q4q7)
1685                      << (i * 2 * 8));
1686         }
1687         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1688
1689         /*
1690          * TxDMA will stop Read request if the number of read split has
1691          * exceeded the limit pointed by shared_splits
1692          */
1693         val64 = readq(&bar0->pic_control);
1694         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1695         writeq(val64, &bar0->pic_control);
1696
1697         if (nic->config.bus_speed == 266) {
1698                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1699                 writeq(0x0, &bar0->read_retry_delay);
1700                 writeq(0x0, &bar0->write_retry_delay);
1701         }
1702
1703         /*
1704          * Programming the Herc to split every write transaction
1705          * that does not start on an ADB to reduce disconnects.
1706          */
1707         if (nic->device_type == XFRAME_II_DEVICE) {
1708                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1709                         MISC_LINK_STABILITY_PRD(3);
1710                 writeq(val64, &bar0->misc_control);
1711                 val64 = readq(&bar0->pic_control2);
1712                 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1713                 writeq(val64, &bar0->pic_control2);
1714         }
1715         if (strstr(nic->product_name, "CX4")) {
1716                 val64 = TMAC_AVG_IPG(0x17);
1717                 writeq(val64, &bar0->tmac_avg_ipg);
1718         }
1719
1720         return SUCCESS;
1721 }
/* Link-fault reporting mechanisms returned by s2io_link_fault_indication() */
#define LINK_UP_DOWN_INTERRUPT          1
#define MAC_RMAC_ERR_TIMER              2
1724
1725 static int s2io_link_fault_indication(struct s2io_nic *nic)
1726 {
1727         if (nic->intr_type != INTA)
1728                 return MAC_RMAC_ERR_TIMER;
1729         if (nic->device_type == XFRAME_II_DEVICE)
1730                 return LINK_UP_DOWN_INTERRUPT;
1731         else
1732                 return MAC_RMAC_ERR_TIMER;
1733 }
1734
1735 /**
1736  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1737  *  @nic: device private variable,
1738  *  @mask: A mask indicating which Intr block must be modified and,
1739  *  @flag: A flag indicating whether to enable or disable the Intrs.
1740  *  Description: This function will either disable or enable the interrupts
1741  *  depending on the flag argument. The mask argument can be used to
1742  *  enable/disable any Intr block.
1743  *  Return Value: NONE.
1744  */
1745
static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, temp64 = 0;

	/*
	 * Each top-level interrupt class is gated by a bit in
	 * general_int_mask.  Enabling a class means clearing its bit there
	 * and then opening the per-block sub-masks; disabling does the
	 * reverse (close the sub-mask, then set the bit in
	 * general_int_mask).  Bits are active-low in the mask registers:
	 * a 0 bit enables the corresponding interrupt.
	 */

	/*  Top level interrupt classification */
	/*  PIC Interrupts */
	if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
		/*  Enable PIC Intrs in the general intr mask register */
		val64 = TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
					LINK_UP_DOWN_INTERRUPT ) {
				/* Hercules/INTA: unmask only the GPIO source
				 * within PIC, then the link-up bit within
				 * GPIO, so link changes raise an interrupt. */
				temp64 = readq(&bar0->pic_int_mask);
				temp64 &= ~((u64) PIC_INT_GPIO);
				writeq(temp64, &bar0->pic_int_mask);
				temp64 = readq(&bar0->gpio_int_mask);
				temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
				writeq(temp64, &bar0->gpio_int_mask);
			} else {
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			}
			/*
			 * No MSI Support is available presently, so TTI and
			 * RTI interrupts are also disabled.
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  MAC Interrupts */
	/*  Enabling/Disabling MAC interrupts */
	if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
		val64 = TXMAC_INT_M | RXMAC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All MAC block error interrupts are disabled for now
			 * TODO
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MAC Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
			writeq(DISABLE_ALL_INTRS,
			       &bar0->mac_rmac_err_mask);

			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		val64 = TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		val64 = RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
}
1864
1865 /**
1866  *  verify_pcc_quiescent- Checks for PCC quiescent state
1867  *  Return: 1 If PCC is quiescence
1868  *          0 If PCC is not quiescence
1869  */
1870 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1871 {
1872         int ret = 0, herc;
1873         struct XENA_dev_config __iomem *bar0 = sp->bar0;
1874         u64 val64 = readq(&bar0->adapter_status);
1875         
1876         herc = (sp->device_type == XFRAME_II_DEVICE);
1877
1878         if (flag == FALSE) {
1879                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
1880                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
1881                                 ret = 1;
1882                 } else {
1883                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1884                                 ret = 1;
1885                 }
1886         } else {
1887                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
1888                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1889                              ADAPTER_STATUS_RMAC_PCC_IDLE))
1890                                 ret = 1;
1891                 } else {
1892                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1893                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1894                                 ret = 1;
1895                 }
1896         }
1897
1898         return ret;
1899 }
1900 /**
1901  *  verify_xena_quiescence - Checks whether the H/W is ready
1902  *  Description: Returns whether the H/W is ready to go or not. Depending
1903  *  on whether adapter enable bit was written or not the comparison
1904  *  differs and the calling function passes the input argument flag to
1905  *  indicate this.
1906  *  Return: 1 If xena is quiescence
1907  *          0 If Xena is not quiescence
1908  */
1909
1910 static int verify_xena_quiescence(struct s2io_nic *sp)
1911 {
1912         int  mode;
1913         struct XENA_dev_config __iomem *bar0 = sp->bar0;
1914         u64 val64 = readq(&bar0->adapter_status);
1915         mode = s2io_verify_pci_mode(sp);
1916
1917         if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
1918                 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
1919                 return 0;
1920         }
1921         if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
1922         DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
1923                 return 0;
1924         }
1925         if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
1926                 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
1927                 return 0;
1928         }
1929         if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
1930                 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
1931                 return 0;
1932         }
1933         if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
1934                 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
1935                 return 0;
1936         }
1937         if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
1938                 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
1939                 return 0;
1940         }
1941         if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
1942                 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
1943                 return 0;
1944         }
1945         if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
1946                 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
1947                 return 0;
1948         }
1949
1950         /*
1951          * In PCI 33 mode, the P_PLL is not used, and therefore,
1952          * the the P_PLL_LOCK bit in the adapter_status register will
1953          * not be asserted.
1954          */
1955         if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
1956                 sp->device_type == XFRAME_II_DEVICE && mode !=
1957                 PCI_MODE_PCI_33) {
1958                 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
1959                 return 0;
1960         }
1961         if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1962                         ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1963                 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
1964                 return 0;
1965         }
1966         return 1;
1967 }
1968
1969 /**
1970  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
1971  * @sp: Pointer to device specifc structure
1972  * Description :
1973  * New procedure to clear mac address reading  problems on Alpha platforms
1974  *
1975  */
1976
1977 static void fix_mac_address(struct s2io_nic * sp)
1978 {
1979         struct XENA_dev_config __iomem *bar0 = sp->bar0;
1980         u64 val64;
1981         int i = 0;
1982
1983         while (fix_mac[i] != END_SIGN) {
1984                 writeq(fix_mac[i++], &bar0->gpio_control);
1985                 udelay(10);
1986                 val64 = readq(&bar0->gpio_control);
1987         }
1988 }
1989
1990 /**
1991  *  start_nic - Turns the device on
1992  *  @nic : device private variable.
1993  *  Description:
1994  *  This function actually turns the device on. Before this  function is
1995  *  called,all Registers are configured from their reset states
1996  *  and shared memory is allocated but the NIC is still quiescent. On
1997  *  calling this function, the device interrupts are cleared and the NIC is
1998  *  literally switched on by writing into the adapter control register.
1999  *  Return Value:
2000  *  SUCCESS on success and -1 on failure.
2001  */
2002
static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration */
	/* Point each receive ring's PRC at its first Rx block and set the
	 * ring mode, backoff interval and enable bits per configuration. */
	for (i = 0; i < config->rx_ring_num; i++) {
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->config.bimodal)
			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		/* Replace the backoff interval field with 0x1000. */
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Module parameter vlan_tag_strip==0 turns off hardware VLAN tag
	 * stripping and records the fact in vlan_strip_flag. */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	/* NOTE(review): this CLEARS ADAPTER_ECC_EN despite the "Enabling"
	 * wording above -- confirm the bit polarity against the register
	 * spec before relying on either reading. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Clearing any possible Link state change interrupts that
	 * could have popped up just before Enabling the card.
	 */
	val64 = readq(&bar0->mac_rmac_err_reg);
	if (val64)
		writeq(val64, &bar0->mac_rmac_err_reg);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initally on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	/* Magic GPIO/LED programming for Xframe-I boards with subsystem
	 * id low byte >= 0x07; 0x2700 is a raw BAR0 offset with no named
	 * register in this file. */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
2118 /**
2119  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 * @fifo_data: fifo whose descriptor list owns @txdlp.
 * @txdlp: first TxD of the descriptor list being torn down.
 * @get_off: offset of this list in the fifo (unused in this function).
 *
 * Unmaps every DMA buffer referenced by the TxD list, in order: the
 * optional UFO in-band descriptor, the skb's linear data, then one TxD
 * per page fragment. The whole list is then zeroed for reuse and the
 * attached skb is returned (NULL if no skb was attached).
2120  */
2121 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2122                                         TxD *txdlp, int get_off)
2123 {
2124         struct s2io_nic *nic = fifo_data->nic;
2125         struct sk_buff *skb;
2126         struct TxD *txds;
2127         u16 j, frg_cnt;
2128
2129         txds = txdlp;
2130         if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
                 /* First TxD carried the UFO in-band header; unmap it and
                  * step to the TxD that holds the skb. */
2131                 pci_unmap_single(nic->pdev, (dma_addr_t)
2132                         txds->Buffer_Pointer, sizeof(u64),
2133                         PCI_DMA_TODEVICE);
2134                 txds++;
2135         }
2136
2137         skb = (struct sk_buff *) ((unsigned long)
2138                         txds->Host_Control);
2139         if (!skb) {
                 /* Nothing attached: just clear the whole TxD list */
2140                 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2141                 return NULL;
2142         }
             /* Unmap the linear part of the skb (len minus paged data) */
2143         pci_unmap_single(nic->pdev, (dma_addr_t)
2144                          txds->Buffer_Pointer,
2145                          skb->len - skb->data_len,
2146                          PCI_DMA_TODEVICE);
2147         frg_cnt = skb_shinfo(skb)->nr_frags;
2148         if (frg_cnt) {
2149                 txds++;
                     /* One TxD per page fragment follows the linear TxD; a
                      * zero Buffer_Pointer marks the end of mapped frags. */
2150                 for (j = 0; j < frg_cnt; j++, txds++) {
2151                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2152                         if (!txds->Buffer_Pointer)
2153                                 break;
2154                         pci_unmap_page(nic->pdev, (dma_addr_t)
2155                                         txds->Buffer_Pointer,
2156                                        frag->size, PCI_DMA_TODEVICE);
2157                 }
2158         }
2159         memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2160         return(skb);
2161 }
2162
2163 /**
2164  *  free_tx_buffers - Free all queued Tx buffers
2165  *  @nic : device private variable.
2166  *  Description:
2167  *  Free all queued Tx buffers.
2168  *  Return Value: void
2169 */
2170
2171 static void free_tx_buffers(struct s2io_nic *nic)
2172 {
2173         struct net_device *dev = nic->dev;
2174         struct sk_buff *skb;
2175         struct TxD *txdp;
2176         int i, j;
2177         struct mac_info *mac_control;
2178         struct config_param *config;
2179         int cnt = 0;
2180
2181         mac_control = &nic->mac_control;
2182         config = &nic->config;
2183
2184         for (i = 0; i < config->tx_fifo_num; i++) {
2185                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2186                         txdp = (struct TxD *) \
2187                         mac_control->fifos[i].list_info[j].list_virt_addr;
2188                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2189                         if (skb) {
2190                                 nic->mac_control.stats_info->sw_stat.mem_freed 
2191                                         += skb->truesize;
2192                                 dev_kfree_skb(skb);
2193                                 cnt++;
2194                         }
2195                 }
2196                 DBG_PRINT(INTR_DBG,
2197                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2198                           dev->name, cnt, i);
2199                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2200                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2201         }
2202 }
2203
2204 /**
2205  *   stop_nic -  To stop the nic
2206  *   @nic ; device private variable.
2207  *   Description:
2208  *   This function does exactly the opposite of what the start_nic()
2209  *   function does. This function is called to stop the device.
2210  *   Return Value:
2211  *   void.
2212  */
2213
2214 static void stop_nic(struct s2io_nic *nic)
2215 {
2216         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2217         register u64 val64 = 0;
2218         u16 interruptible;
2219         struct mac_info *mac_control;
2220         struct config_param *config;
2221
2222         mac_control = &nic->mac_control;
2223         config = &nic->config;
2224
2225         /*  Disable all interrupts */
2226         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2227         interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2228         interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2229         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2230
2231         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2232         val64 = readq(&bar0->adapter_control);
2233         val64 &= ~(ADAPTER_CNTL_EN);
2234         writeq(val64, &bar0->adapter_control);
2235 }
2236
2237 /**
2238  *  fill_rx_buffers - Allocates the Rx side skbs
2239  *  @nic:  device private variable
2240  *  @ring_no: ring number
2241  *  Description:
2242  *  The function allocates Rx side skbs and puts the physical
2243  *  address of these buffers into the RxD buffer pointers, so that the NIC
2244  *  can DMA the received frame into these locations.
2245  *  The NIC supports 3 receive modes, viz
2246  *  1. single buffer,
2247  *  2. three buffer and
2248  *  3. Five buffer modes.
2249  *  Each mode defines how many fragments the received frame will be split
2250  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2251  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2252  *  is split into 3 fragments. As of now only single buffer mode is
2253  *  supported.
2254  *   Return Value:
2255  *  SUCCESS on success or an appropriate -ve value on failure.
2256  */
2257
2258 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2259 {
2260         struct net_device *dev = nic->dev;
2261         struct sk_buff *skb;
2262         struct RxD_t *rxdp;
2263         int off, off1, size, block_no, block_no1;
2264         u32 alloc_tab = 0;
2265         u32 alloc_cnt;
2266         struct mac_info *mac_control;
2267         struct config_param *config;
2268         u64 tmp;
2269         struct buffAdd *ba;
2270         unsigned long flags;
2271         struct RxD_t *first_rxdp = NULL;
2272         u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2273         struct RxD1 *rxdp1;
2274         struct RxD3 *rxdp3;
2275         struct swStat *stats = &nic->mac_control.stats_info->sw_stat;
2276
2277         mac_control = &nic->mac_control;
2278         config = &nic->config;
             /* Descriptors that still need a buffer = ring capacity minus
              * buffers currently outstanding on this ring. */
2279         alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2280             atomic_read(&nic->rx_bufs_left[ring_no]);
2281
2282         block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
2283         off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2284         while (alloc_tab < alloc_cnt) {
2285                 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2286                     block_index;
2287                 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2288
2289                 rxdp = mac_control->rings[ring_no].
2290                                 rx_blocks[block_no].rxds[off].virt_addr;
2291
                     /* Put index has caught up with the get index and the
                      * descriptor still holds an skb: ring is full, stop. */
2292                 if ((block_no == block_no1) && (off == off1) &&
2293                                         (rxdp->Host_Control)) {
2294                         DBG_PRINT(INTR_DBG, "%s: Get and Put",
2295                                   dev->name);
2296                         DBG_PRINT(INTR_DBG, " info equated\n");
2297                         goto end;
2298                 }
2299                 if (off && (off == rxd_count[nic->rxd_mode])) {
                         /* End of the current block reached: advance the put
                          * block index, wrapping at block_count. */
2300                         mac_control->rings[ring_no].rx_curr_put_info.
2301                             block_index++;
2302                         if (mac_control->rings[ring_no].rx_curr_put_info.
2303                             block_index == mac_control->rings[ring_no].
2304                                         block_count)
2305                                 mac_control->rings[ring_no].rx_curr_put_info.
2306                                         block_index = 0;
2307                         block_no = mac_control->rings[ring_no].
2308                                         rx_curr_put_info.block_index;
2309                         if (off == rxd_count[nic->rxd_mode])
2310                                 off = 0;
2311                         mac_control->rings[ring_no].rx_curr_put_info.
2312                                 offset = off;
2313                         rxdp = mac_control->rings[ring_no].
2314                                 rx_blocks[block_no].block_virt_addr;
2315                         DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2316                                   dev->name, rxdp);
2317                 }
                     /* put_pos is also read by rx_intr_handler(); the lock is
                      * only needed in the non-NAPI (interrupt) path. */
2318                 if(!napi) {
2319                         spin_lock_irqsave(&nic->put_lock, flags);
2320                         mac_control->rings[ring_no].put_pos =
2321                         (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2322                         spin_unlock_irqrestore(&nic->put_lock, flags);
2323                 } else {
2324                         mac_control->rings[ring_no].put_pos =
2325                         (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2326                 }
                     /* Descriptor still owned by the adapter (2-buffer mode
                      * marker bit set): nothing more to refill right now. */
2327                 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2328                         ((nic->rxd_mode == RXD_MODE_3B) &&
2329                                 (rxdp->Control_2 & BIT(0)))) {
2330                         mac_control->rings[ring_no].rx_curr_put_info.
2331                                         offset = off;
2332                         goto end;
2333                 }
2334                 /* calculate size of skb based on ring mode */
2335                 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2336                                 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2337                 if (nic->rxd_mode == RXD_MODE_1)
2338                         size += NET_IP_ALIGN;
2339                 else
2340                         size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2341
2342                 /* allocate skb */
2343                 skb = dev_alloc_skb(size);
2344                 if(!skb) {
2345                         DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
2346                         DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
                             /* Hand already-filled descriptors to the NIC
                              * before bailing out, so they are not lost. */
2347                         if (first_rxdp) {
2348                                 wmb();
2349                                 first_rxdp->Control_1 |= RXD_OWN_XENA;
2350                         }
2351                         nic->mac_control.stats_info->sw_stat. \
2352                                 mem_alloc_fail_cnt++;
2353                         return -ENOMEM ;
2354                 }
2355                 nic->mac_control.stats_info->sw_stat.mem_allocated 
2356                         += skb->truesize;
2357                 if (nic->rxd_mode == RXD_MODE_1) {
2358                         /* 1 buffer mode - normal operation mode */
2359                         rxdp1 = (struct RxD1*)rxdp;
2360                         memset(rxdp, 0, sizeof(struct RxD1));
2361                         skb_reserve(skb, NET_IP_ALIGN);
2362                         rxdp1->Buffer0_ptr = pci_map_single
2363                             (nic->pdev, skb->data, size - NET_IP_ALIGN,
2364                                 PCI_DMA_FROMDEVICE);
2365                         if( (rxdp1->Buffer0_ptr == 0) ||
2366                                 (rxdp1->Buffer0_ptr ==
2367                                 DMA_ERROR_CODE))
2368                                 goto pci_map_failed;
2369
2370                         rxdp->Control_2 = 
2371                                 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2372
2373                 } else if (nic->rxd_mode == RXD_MODE_3B) {
2374                         /*
2375                          * 2 buffer mode -
2376                          * 2 buffer mode provides 128
2377                          * byte aligned receive buffers.
2378                          */
2379
2380                         rxdp3 = (struct RxD3*)rxdp;
2381                         /* save buffer pointers to avoid frequent dma mapping */
2382                         Buffer0_ptr = rxdp3->Buffer0_ptr;
2383                         Buffer1_ptr = rxdp3->Buffer1_ptr;
2384                         memset(rxdp, 0, sizeof(struct RxD3));
2385                         /* restore the buffer pointers for dma sync*/
2386                         rxdp3->Buffer0_ptr = Buffer0_ptr;
2387                         rxdp3->Buffer1_ptr = Buffer1_ptr;
2388
2389                         ba = &mac_control->rings[ring_no].ba[block_no][off];
2390                         skb_reserve(skb, BUF0_LEN);
                             /* Align skb->data up to the next ALIGN_SIZE+1
                              * boundary as required by the 2-buffer mode. */
2391                         tmp = (u64)(unsigned long) skb->data;
2392                         tmp += ALIGN_SIZE;
2393                         tmp &= ~ALIGN_SIZE;
2394                         skb->data = (void *) (unsigned long)tmp;
2395                         skb_reset_tail_pointer(skb);
2396
                             /* Map Buffer0 only once; afterwards just sync
                              * the existing mapping back to the device. */
2397                         if (!(rxdp3->Buffer0_ptr))
2398                                 rxdp3->Buffer0_ptr =
2399                                    pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2400                                            PCI_DMA_FROMDEVICE);
2401                         else
2402                                 pci_dma_sync_single_for_device(nic->pdev,
2403                                 (dma_addr_t) rxdp3->Buffer0_ptr,
2404                                     BUF0_LEN, PCI_DMA_FROMDEVICE);
2405                         if( (rxdp3->Buffer0_ptr == 0) ||
2406                                 (rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
2407                                 goto pci_map_failed;
2408
2409                         rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2410                         if (nic->rxd_mode == RXD_MODE_3B) {
2411                                 /* Two buffer mode */
2412
2413                                 /*
2414                                  * Buffer2 will have L3/L4 header plus
2415                                  * L4 payload
2416                                  */
2417                                 rxdp3->Buffer2_ptr = pci_map_single
2418                                 (nic->pdev, skb->data, dev->mtu + 4,
2419                                                 PCI_DMA_FROMDEVICE);
2420
2421                                 if( (rxdp3->Buffer2_ptr == 0) ||
2422                                         (rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
2423                                         goto pci_map_failed;
2424
2425                                 rxdp3->Buffer1_ptr =
2426                                                 pci_map_single(nic->pdev,
2427                                                 ba->ba_1, BUF1_LEN,
2428                                                 PCI_DMA_FROMDEVICE);
2429                                 if( (rxdp3->Buffer1_ptr == 0) ||
2430                                         (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
                                             /* Undo the Buffer2 mapping done
                                              * just above before failing. */
2431                                         pci_unmap_single
2432                                                 (nic->pdev,
2433                                                 (dma_addr_t)rxdp3->Buffer2_ptr,
2434                                                 dev->mtu + 4,
2435                                                 PCI_DMA_FROMDEVICE);
2436                                         goto pci_map_failed;
2437                                 }
2438                                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2439                                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2440                                                                 (dev->mtu + 4);
2441                         }
2442                         rxdp->Control_2 |= BIT(0);
2443                 }
2444                 rxdp->Host_Control = (unsigned long) (skb);
2445                 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2446                         rxdp->Control_1 |= RXD_OWN_XENA;
2447                 off++;
2448                 if (off == (rxd_count[nic->rxd_mode] + 1))
2449                         off = 0;
2450                 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2451
2452                 rxdp->Control_2 |= SET_RXD_MARKER;
                     /* Every 2^rxsync_frequency descriptors: give the batch
                      * that started at first_rxdp to the NIC and start a
                      * new batch with this descriptor. */
2453                 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2454                         if (first_rxdp) {
2455                                 wmb();
2456                                 first_rxdp->Control_1 |= RXD_OWN_XENA;
2457                         }
2458                         first_rxdp = rxdp;
2459                 }
2460                 atomic_inc(&nic->rx_bufs_left[ring_no]);
2461                 alloc_tab++;
2462         }
2463
2464       end:
2465         /* Transfer ownership of first descriptor to adapter just before
2466          * exiting. Before that, use memory barrier so that ownership
2467          * and other fields are seen by adapter correctly.
2468          */
2469         if (first_rxdp) {
2470                 wmb();
2471                 first_rxdp->Control_1 |= RXD_OWN_XENA;
2472         }
2473
2474         return SUCCESS;
2475 pci_map_failed:
2476         stats->pci_map_fail_cnt++;
2477         stats->mem_freed += skb->truesize;
2478         dev_kfree_skb_irq(skb);
2479         return -ENOMEM;
2480 }
2481
/**
 * free_rxd_blk - Free all Rx skbs queued on one receive block
 * @sp: device private variable.
 * @ring_no: index of the ring the block belongs to.
 * @blk: index of the block within that ring.
 *
 * Walks every RxD in the block; for each descriptor that still has an
 * skb attached, unmaps its DMA buffer(s) according to the configured
 * buffer mode (1-buffer or 2-buffer), zeroes the descriptor, frees the
 * skb and decrements the ring's rx_bufs_left count.
 */
2482 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2483 {
2484         struct net_device *dev = sp->dev;
2485         int j;
2486         struct sk_buff *skb;
2487         struct RxD_t *rxdp;
2488         struct mac_info *mac_control;
2489         struct buffAdd *ba;
2490         struct RxD1 *rxdp1;
2491         struct RxD3 *rxdp3;
2492
2493         mac_control = &sp->mac_control;
2494         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2495                 rxdp = mac_control->rings[ring_no].
2496                                 rx_blocks[blk].rxds[j].virt_addr;
2497                 skb = (struct sk_buff *)
2498                         ((unsigned long) rxdp->Host_Control);
2499                 if (!skb) {
                         /* Descriptor was never filled (or already freed) */
2500                         continue;
2501                 }
2502                 if (sp->rxd_mode == RXD_MODE_1) {
2503                         rxdp1 = (struct RxD1*)rxdp;
                             /* Single mapping covering MTU + L2 headers,
                              * mirroring the size used in fill_rx_buffers() */
2504                         pci_unmap_single(sp->pdev, (dma_addr_t)
2505                                 rxdp1->Buffer0_ptr,
2506                                 dev->mtu +
2507                                 HEADER_ETHERNET_II_802_3_SIZE
2508                                 + HEADER_802_2_SIZE +
2509                                 HEADER_SNAP_SIZE,
2510                                 PCI_DMA_FROMDEVICE);
2511                         memset(rxdp, 0, sizeof(struct RxD1));
2512                 } else if(sp->rxd_mode == RXD_MODE_3B) {
2513                         rxdp3 = (struct RxD3*)rxdp;
2514                         ba = &mac_control->rings[ring_no].
2515                                 ba[blk][j];
                             /* 2-buffer mode: unmap all three buffers */
2516                         pci_unmap_single(sp->pdev, (dma_addr_t)
2517                                 rxdp3->Buffer0_ptr,
2518                                 BUF0_LEN,
2519                                 PCI_DMA_FROMDEVICE);
2520                         pci_unmap_single(sp->pdev, (dma_addr_t)
2521                                 rxdp3->Buffer1_ptr,
2522                                 BUF1_LEN,
2523                                 PCI_DMA_FROMDEVICE);
2524                         pci_unmap_single(sp->pdev, (dma_addr_t)
2525                                 rxdp3->Buffer2_ptr,
2526                                 dev->mtu + 4,
2527                                 PCI_DMA_FROMDEVICE);
2528                         memset(rxdp, 0, sizeof(struct RxD3));
2529                 }
2530                 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2531                 dev_kfree_skb(skb);
2532                 atomic_dec(&sp->rx_bufs_left[ring_no]);
2533         }
2534 }
2535
2536 /**
2537  *  free_rx_buffers - Frees all Rx buffers
2538  *  @sp: device private variable.
2539  *  Description:
2540  *  This function will free all Rx buffers allocated by host.
2541  *  Return Value:
2542  *  NONE.
2543  */
2544
2545 static void free_rx_buffers(struct s2io_nic *sp)
2546 {
2547         struct net_device *dev = sp->dev;
2548         int i, blk = 0, buf_cnt = 0;
2549         struct mac_info *mac_control;
2550         struct config_param *config;
2551
2552         mac_control = &sp->mac_control;
2553         config = &sp->config;
2554
2555         for (i = 0; i < config->rx_ring_num; i++) {
2556                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2557                         free_rxd_blk(sp,i,blk);
2558
2559                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2560                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2561                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2562                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2563                 atomic_set(&sp->rx_bufs_left[i], 0);
2564                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2565                           dev->name, buf_cnt, i);
2566         }
2567 }
2568
2569 /**
2570  * s2io_poll - Rx interrupt handler for NAPI support
2571  * @napi : pointer to the napi structure.
2572  * @budget : The number of packets that were budgeted to be processed
2573  * during  one pass through the 'Poll" function.
2574  * Description:
2575  * Comes into picture only if NAPI support has been incorporated. It does
2576  * the same thing that rx_intr_handler does, but not in a interrupt context
2577  * also It will process only a given number of packets.
2578  * Return value:
2579  * 0 on success and 1 if there are No Rx packets to be processed.
 * NOTE(review): the code actually returns the number of packets
 * processed (pkt_cnt), not 0/1 — the description above looks stale.
2580  */
2581
2582 static int s2io_poll(struct napi_struct *napi, int budget)
2583 {
2584         struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2585         struct net_device *dev = nic->dev;
2586         int pkt_cnt = 0, org_pkts_to_process;
2587         struct mac_info *mac_control;
2588         struct config_param *config;
2589         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2590         int i;
2591
2592         atomic_inc(&nic->isr_cnt);
2593         mac_control = &nic->mac_control;
2594         config = &nic->config;
2595
2596         nic->pkts_to_process = budget;
2597         org_pkts_to_process = nic->pkts_to_process;
2598
             /* Write all-ones to rx_traffic_int; the readl flushes the
              * posted PIO write before the rings are serviced. */
2599         writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2600         readl(&bar0->rx_traffic_int);
2601
2602         for (i = 0; i < config->rx_ring_num; i++) {
2603                 rx_intr_handler(&mac_control->rings[i]);
2604                 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2605                 if (!nic->pkts_to_process) {
2606                         /* Quota for the current iteration has been met */
2607                         goto no_rx;
2608                 }
2609         }
2610
             /* All rings drained within budget: leave polling mode */
2611         netif_rx_complete(dev, napi);
2612
             /* Replenish Rx buffers consumed by the rings just serviced */
2613         for (i = 0; i < config->rx_ring_num; i++) {
2614                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2615                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2616                         DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2617                         break;
2618                 }
2619         }
2620         /* Re enable the Rx interrupts. */
2621         writeq(0x0, &bar0->rx_traffic_mask);
2622         readl(&bar0->rx_traffic_mask);
2623         atomic_dec(&nic->isr_cnt);
2624         return pkt_cnt;
2625
2626 no_rx:
             /* Budget exhausted: refill but stay in polling mode (no
              * netif_rx_complete, Rx interrupts remain masked). */
2627         for (i = 0; i < config->rx_ring_num; i++) {
2628                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2629                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2630                         DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2631                         break;
2632                 }
2633         }
2634         atomic_dec(&nic->isr_cnt);
2635         return pkt_cnt;
2636 }
2637
2638 #ifdef CONFIG_NET_POLL_CONTROLLER
2639 /**
2640  * s2io_netpoll - netpoll event handler entry point
2641  * @dev : pointer to the device structure.
2642  * Description:
2643  *      This function will be called by upper layer to check for events on the
2644  * interface in situations where interrupts are disabled. It is used for
2645  * specific in-kernel networking tasks, such as remote consoles and kernel
2646  * debugging over the network (example netdump in RedHat).
 *
 * Runs with the device IRQ disabled; services all Tx fifos and Rx rings
 * synchronously, then refills Rx buffers before re-enabling the IRQ.
2647  */
2648 static void s2io_netpoll(struct net_device *dev)
2649 {
2650         struct s2io_nic *nic = dev->priv;
2651         struct mac_info *mac_control;
2652         struct config_param *config;
2653         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2654         u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2655         int i;
2656
             /* Device unreachable (e.g. PCI error recovery in progress) */
2657         if (pci_channel_offline(nic->pdev))
2658                 return;
2659
2660         disable_irq(dev->irq);
2661
2662         atomic_inc(&nic->isr_cnt);
2663         mac_control = &nic->mac_control;
2664         config = &nic->config;
2665
             /* Write all-ones to both traffic interrupt registers */
2666         writeq(val64, &bar0->rx_traffic_int);
2667         writeq(val64, &bar0->tx_traffic_int);
2668
2669         /* we need to free up the transmitted skbufs or else netpoll will
2670          * run out of skbs and will fail and eventually netpoll application such
2671          * as netdump will fail.
2672          */
2673         for (i = 0; i < config->tx_fifo_num; i++)
2674                 tx_intr_handler(&mac_control->fifos[i]);
2675
2676         /* check for received packet and indicate up to network */
2677         for (i = 0; i < config->rx_ring_num; i++)
2678                 rx_intr_handler(&mac_control->rings[i]);
2679
             /* Replenish the Rx buffers consumed above */
2680         for (i = 0; i < config->rx_ring_num; i++) {
2681                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2682                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2683                         DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2684                         break;
2685                 }
2686         }
2687         atomic_dec(&nic->isr_cnt);
2688         enable_irq(dev->irq);
2689         return;
2690 }
2691 #endif
2692
2693 /**
2694  *  rx_intr_handler - Rx interrupt handler
 *  @ring_data: per-ring private structure (struct ring_info) of the
 *              ring to be serviced.
2696  *  Description:
2697  *  If the interrupt is because of a received frame or if the
2698  *  receive ring contains fresh as yet un-processed frames,this function is
2699  *  called. It picks out the RxD at which place the last Rx processing had
2700  *  stopped and sends the skb to the OSM's Rx handler and then increments
2701  *  the offset.
2702  *  Return Value:
2703  *  NONE.
2704  */
2705 static void rx_intr_handler(struct ring_info *ring_data)
2706 {
2707         struct s2io_nic *nic = ring_data->nic;
2708         struct net_device *dev = (struct net_device *) nic->dev;
2709         int get_block, put_block, put_offset;
2710         struct rx_curr_get_info get_info, put_info;
2711         struct RxD_t *rxdp;
2712         struct sk_buff *skb;
2713         int pkt_cnt = 0;
2714         int i;
2715         struct RxD1* rxdp1;
2716         struct RxD3* rxdp3;
2717
2718         spin_lock(&nic->rx_lock);
             /* Card is being reset/brought down: nothing to service */
2719         if (atomic_read(&nic->card_state) == CARD_DOWN) {
2720                 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2721                           __FUNCTION__, dev->name);
2722                 spin_unlock(&nic->rx_lock);
2723                 return;
2724         }
2725
2726         get_info = ring_data->rx_curr_get_info;
2727         get_block = get_info.block_index;
2728         memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2729         put_block = put_info.block_index;
2730         rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
             /* put_pos is written by fill_rx_buffers(); take put_lock only
              * in the non-NAPI (interrupt) path, matching the writer. */
2731         if (!napi) {
2732                 spin_lock(&nic->put_lock);
2733                 put_offset = ring_data->put_pos;
2734                 spin_unlock(&nic->put_lock);
2735         } else
2736                 put_offset = ring_data->put_pos;
2737
             /* Walk descriptors until one is still owned by the NIC */
2738         while (RXD_IS_UP2DT(rxdp)) {
2739                 /*
2740                  * If your are next to put index then it's
2741                  * FIFO full condition
2742                  */
2743                 if ((get_block == put_block) &&
2744                     (get_info.offset + 1) == put_info.offset) {
2745                         DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2746                         break;
2747                 }
2748                 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2749                 if (skb == NULL) {
2750                         DBG_PRINT(ERR_DBG, "%s: The skb is ",
2751                                   dev->name);
2752                         DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2753                         spin_unlock(&nic->rx_lock);
2754                         return;
2755                 }
2756                 if (nic->rxd_mode == RXD_MODE_1) {
2757                         rxdp1 = (struct RxD1*)rxdp;
                             /* Unmap the single buffer (same size as mapped
                              * in fill_rx_buffers()) */
2758                         pci_unmap_single(nic->pdev, (dma_addr_t)
2759                                 rxdp1->Buffer0_ptr,
2760                                 dev->mtu +
2761                                 HEADER_ETHERNET_II_802_3_SIZE +
2762                                 HEADER_802_2_SIZE +
2763                                 HEADER_SNAP_SIZE,
2764                                 PCI_DMA_FROMDEVICE);
2765                 } else if (nic->rxd_mode == RXD_MODE_3B) {
2766                         rxdp3 = (struct RxD3*)rxdp;
                             /* Buffer0 stays mapped (reused); only sync it
                              * for CPU access. Buffer2 is unmapped. */
2767                         pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2768                                 rxdp3->Buffer0_ptr,
2769                                 BUF0_LEN, PCI_DMA_FROMDEVICE);
2770                         pci_unmap_single(nic->pdev, (dma_addr_t)
2771                                 rxdp3->Buffer2_ptr,
2772                                 dev->mtu + 4,
2773                                 PCI_DMA_FROMDEVICE);
2774                 }
2775                 prefetch(skb->data);
2776                 rx_osm_handler(ring_data, rxdp);
2777                 get_info.offset++;
2778                 ring_data->rx_curr_get_info.offset = get_info.offset;
2779                 rxdp = ring_data->rx_blocks[get_block].
2780                                 rxds[get_info.offset].virt_addr;
2781                 if (get_info.offset == rxd_count[nic->rxd_mode]) {
                         /* End of block: wrap to the next block's first RxD */
2782                         get_info.offset = 0;
2783                         ring_data->rx_curr_get_info.offset = get_info.offset;
2784                         get_block++;
2785                         if (get_block == ring_data->block_count)
2786                                 get_block = 0;
2787                         ring_data->rx_curr_get_info.block_index = get_block;
2788                         rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2789                 }
2790
2791                 nic->pkts_to_process -= 1;
                     /* NAPI budget exhausted */
2792                 if ((napi) && (!nic->pkts_to_process))
2793                         break;
2794                 pkt_cnt++;
2795                 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2796                         break;
2797         }
2798         if (nic->lro) {
2799                 /* Clear all LRO sessions before exiting */
2800                 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2801                         struct lro *lro = &nic->lro0_n[i];
2802                         if (lro->in_use) {
2803                                 update_L3L4_header(nic, lro);
2804                                 queue_rx_frame(lro->parent);
2805                                 clear_lro_session(lro);
2806                         }
2807                 }
2808         }
2809
2810         spin_unlock(&nic->rx_lock);
2811 }
2812
/**
 *  tx_intr_handler - Transmit interrupt handler
 *  @fifo_data : fifo whose completed Tx descriptors are to be reclaimed
 *  Description:
 *  If an interrupt was raised to indicate DMA complete of the
 *  Tx packet, this function is called. It walks the TxD ring from the
 *  driver's "get" index towards the hardware's "put" index, records any
 *  per-descriptor transfer-code errors, and frees all skbs whose data
 *  have already been DMA'ed into the NIC's internal memory. Finally it
 *  wakes the transmit queue if it had been stopped.
 *  Return Value:
 *  NONE
 */

static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb;
	struct TxD *txdlp;
	u8 err_mask;

	/* Snapshot both ring indices; descriptors in (get, put) are done. */
	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
	    list_virt_addr;
	/*
	 * Reclaim until we catch up with the put pointer, meet a descriptor
	 * still owned by the NIC, or hit one with no attached skb.
	 */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				nic->mac_control.stats_info->sw_stat.
						parity_err_cnt++;
			}

			/* update t_code statistics (t_code sits in bits 48+) */
			err_mask = err >> 48;
			switch(err_mask) {
				case 2:
					nic->mac_control.stats_info->sw_stat.
							tx_buf_abort_cnt++;
				break;

				case 3:
					nic->mac_control.stats_info->sw_stat.
							tx_desc_abort_cnt++;
				break;

				case 7:
					nic->mac_control.stats_info->sw_stat.
							tx_parity_err_cnt++;
				break;

				case 10:
					nic->mac_control.stats_info->sw_stat.
							tx_link_loss_cnt++;
				break;

				case 15:
					nic->mac_control.stats_info->sw_stat.
							tx_list_proc_err_cnt++;
				break;
			}
		}

		/* Unmap the DMA buffers and recover the skb for this TxD. */
		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: Null skb ",
			__FUNCTION__);
			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
			return;
		}

		/* Updating the statistics block */
		nic->stats.tx_bytes += skb->len;
		nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
		dev_kfree_skb_irq(skb);

		/* Advance the get index, wrapping at the end of the fifo. */
		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = (struct TxD *) fifo_data->list_info
		    [get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset =
		    get_info.offset;
	}

	/* Descriptors were freed, so the queue may be restarted. */
	spin_lock(&nic->tx_lock);
	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);
	spin_unlock(&nic->tx_lock);
}
2907
/**
 *  s2io_mdio_write - Function to write in to MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @value    : data value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to write values to the MDIO registers.
 *  Each MDIO operation is a two-step sequence: program the control
 *  register, then set the start-transaction bits and wait for the
 *  device to act. A read transaction is issued at the end (result
 *  discarded) after the write completes.
 *  NONE
 */
static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
{
	u64 val64 = 0x0;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Address transaction: latch the register address in the MMD. */
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
			| MDIO_MMD_DEV_ADDR(mmd_type)
			| MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction: write @value to the latched address. */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
			| MDIO_MMD_DEV_ADDR(mmd_type)
			| MDIO_MMS_PRT_ADDR(0x0)
			| MDIO_MDIO_DATA(value)
			| MDIO_OP(MDIO_OP_WRITE_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Read-back transaction; the value itself is not consumed here. */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
	| MDIO_MMD_DEV_ADDR(mmd_type)
	| MDIO_MMS_PRT_ADDR(0x0)
	| MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

}
2956
/**
 *  s2io_mdio_read - Function to read from MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to read values from the MDIO registers:
 *  first an address transaction latches @addr, then a read transaction
 *  is started and the 16-bit result is extracted from the control
 *  register.
 *  Return value: the 16-bit register value read.
 */
static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
{
	u64 val64 = 0x0;
	u64 rval64 = 0x0;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction */
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
			| MDIO_MMD_DEV_ADDR(mmd_type)
			| MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
			| MDIO_MMD_DEV_ADDR(mmd_type)
			| MDIO_MMS_PRT_ADDR(0x0)
			| MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Read the value from regs: data lives in bits 16..31 of the reg. */
	rval64 = readq(&bar0->mdio_control);
	rval64 = rval64 & 0xFFFF0000;
	rval64 = rval64 >> 16;
	return rval64;
}
2999 /**
3000  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3001  *  @counter      : couter value to be updated
3002  *  @flag         : flag to indicate the status
3003  *  @type         : counter type
3004  *  Description:
3005  *  This function is to check the status of the xpak counters value
3006  *  NONE
3007  */
3008
3009 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3010 {
3011         u64 mask = 0x3;
3012         u64 val64;
3013         int i;
3014         for(i = 0; i <index; i++)
3015                 mask = mask << 0x2;
3016
3017         if(flag > 0)
3018         {
3019                 *counter = *counter + 1;
3020                 val64 = *regs_stat & mask;
3021                 val64 = val64 >> (index * 0x2);
3022                 val64 = val64 + 1;
3023                 if(val64 == 3)
3024                 {
3025                         switch(type)
3026                         {
3027                         case 1:
3028                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3029                                           "service. Excessive temperatures may "
3030                                           "result in premature transceiver "
3031                                           "failure \n");
3032                         break;
3033                         case 2:
3034                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3035                                           "service Excessive bias currents may "
3036                                           "indicate imminent laser diode "
3037                                           "failure \n");
3038                         break;
3039                         case 3:
3040                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3041                                           "service Excessive laser output "
3042                                           "power may saturate far-end "
3043                                           "receiver\n");
3044                         break;
3045                         default:
3046                                 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3047                                           "type \n");
3048                         }
3049                         val64 = 0x0;
3050                 }
3051                 val64 = val64 << (index * 0x2);
3052                 *regs_stat = (*regs_stat & (~mask)) | (val64);
3053
3054         } else {
3055                 *regs_stat = *regs_stat & (~mask);
3056         }
3057 }
3058
/**
 *  s2io_updt_xpak_counter - Function to update the xpak counters
 *  @dev         : pointer to net_device struct
 *  Description:
 *  This function is to update the status of the xpak counters: it
 *  verifies MDIO access to the transceiver, loads the DOM page, then
 *  reads the alarm and warning flag registers and bumps the matching
 *  statistics counters (persistent alarms go through
 *  s2io_chk_xpak_counter so repeated hits trigger operator warnings).
 *  NONE
 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag  = 0x0;
	u16 type  = 0x0;
	u16 val16 = 0x0;	/* written (as zero) to the DOM load register */
	u64 val64 = 0x0;
	u64 addr  = 0x0;

	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Check the communication with the MDIO slave */
	addr = 0x0000;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
	if((val64 == 0xFFFF) || (val64 == 0x0000))
	{
		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
			  "Returned %llx\n", (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of 2040 at PMA address 0x0000 */
	if(val64 != 0x2040)
	{
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
		DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
			  (unsigned long long)val64);
		return;
	}

	/* Loading the DOM register to MDIO register */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Reading the Alarm flags (register 0xA070 per the vendor layout) */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Bit 7: transceiver temperature high — persistent alarm type 1. */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x0, flag, type);

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.alarm_transceiver_temp_low++;

	/* Bit 3: laser bias current high — persistent alarm type 2. */
	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x2, flag, type);

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.alarm_laser_bias_current_low++;

	/* Bit 1: laser output power high — persistent alarm type 3. */
	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x4, flag, type);

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.alarm_laser_output_power_low++;

	/* Reading the Warning flags; these only bump simple counters. */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	if(CHECKBIT(val64, 0x7))
		stat_info->xpak_stat.warn_transceiver_temp_high++;

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.warn_transceiver_temp_low++;

	if(CHECKBIT(val64, 0x3))
		stat_info->xpak_stat.warn_laser_bias_current_high++;

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.warn_laser_bias_current_low++;

	if(CHECKBIT(val64, 0x1))
		stat_info->xpak_stat.warn_laser_output_power_high++;

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.warn_laser_output_power_low++;
}
3157
/**
 *  alarm_intr_handler - Alarm Interrupt handler
 *  @nic: device private variable
 *  Description: If the interrupt was neither because of Rx packet or Tx
 *  complete, this function is called. If the interrupt was to indicate
 *  a loss of link, the OSM link status handler is invoked for any other
 *  alarm interrupt the block that raised the interrupt is displayed
 *  and a H/W reset is issued.
 *  Return Value:
 *  NONE
*/

static void alarm_intr_handler(struct s2io_nic *nic)
{
	struct net_device *dev = (struct net_device *) nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, err_reg = 0;
	u64 cnt;
	int i;
	/* Nothing to do if the card is down or the PCI channel is gone. */
	if (atomic_read(&nic->card_state) == CARD_DOWN)
		return;
	if (pci_channel_offline(nic->pdev))
		return;
	nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
	/* Handling the XPAK counters update */
	if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
		/* waiting for an hour (72000 invocations between updates) */
		nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
	} else {
		s2io_updt_xpak_counter(dev);
		/* reset the count to zero */
		nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
	}

	/* Handling link status change error Intr */
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/* Read then write-back to acknowledge the RMAC error bits. */
		err_reg = readq(&bar0->mac_rmac_err_reg);
		writeq(err_reg, &bar0->mac_rmac_err_reg);
		if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
			schedule_work(&nic->set_link_task);
		}
	}

	/* Handling Ecc errors */
	val64 = readq(&bar0->mc_err_reg);
	writeq(val64, &bar0->mc_err_reg);
	if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
		if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
			nic->mac_control.stats_info->sw_stat.
				double_ecc_errs++;
			DBG_PRINT(INIT_DBG, "%s: Device indicates ",
				  dev->name);
			DBG_PRINT(INIT_DBG, "double ECC error!!\n");
			if (nic->device_type != XFRAME_II_DEVICE) {
				/* Reset XframeI only if critical error */
				if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
					     MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
					netif_stop_queue(dev);
					schedule_work(&nic->rst_timer_task);
					nic->mac_control.stats_info->sw_stat.
							soft_reset_cnt++;
				}
			}
		} else {
			nic->mac_control.stats_info->sw_stat.
				single_ecc_errs++;
		}
	}

	/* In case of a serious error, the device will be Reset. */
	val64 = readq(&bar0->serr_source);
	if (val64 & SERR_SOURCE_ANY) {
		nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
		DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
		DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
			  (unsigned long long)val64);
		netif_stop_queue(dev);
		schedule_work(&nic->rst_timer_task);
		nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
	}

	/*
	 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
	 * Error occurs, the adapter will be recycled by disabling the
	 * adapter enable bit and enabling it again after the device
	 * becomes Quiescent.
	 */
	val64 = readq(&bar0->pcc_err_reg);
	writeq(val64, &bar0->pcc_err_reg);
	if (val64 & PCC_FB_ECC_DB_ERR) {
		u64 ac = readq(&bar0->adapter_control);
		ac &= ~(ADAPTER_CNTL_EN);
		writeq(ac, &bar0->adapter_control);
		/* Read back to flush the posted write before re-linking. */
		ac = readq(&bar0->adapter_control);
		schedule_work(&nic->set_link_task);
	}
	/* Check for data parity error */
	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if (val64 & GPIO_INT_REG_DP_ERR_INT) {
			nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
			schedule_work(&nic->rst_timer_task);
			nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
		}
	}

	/* Check for ring full counter: four 16-bit fields per register. */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = readq(&bar0->ring_bump_counter1);
		for (i=0; i<4; i++) {
			cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
			cnt >>= 64 - ((i+1)*16);
			nic->mac_control.stats_info->sw_stat.ring_full_cnt
				+= cnt;
		}

		val64 = readq(&bar0->ring_bump_counter2);
		for (i=0; i<4; i++) {
			cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
			cnt >>= 64 - ((i+1)*16);
			nic->mac_control.stats_info->sw_stat.ring_full_cnt
				+= cnt;
		}
	}

	/* Other type of interrupts are not being handled now,  TODO */
}
3286
3287 /**
3288  *  wait_for_cmd_complete - waits for a command to complete.
3289  *  @sp : private member of the device structure, which is a pointer to the
3290  *  s2io_nic structure.
3291  *  Description: Function that waits for a command to Write into RMAC
3292  *  ADDR DATA registers to be completed and returns either success or
3293  *  error depending on whether the command was complete or not.
3294  *  Return value:
3295  *   SUCCESS on success and FAILURE on failure.
3296  */
3297
3298 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3299                                 int bit_state)
3300 {
3301         int ret = FAILURE, cnt = 0, delay = 1;
3302         u64 val64;
3303
3304         if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3305                 return FAILURE;
3306
3307         do {
3308                 val64 = readq(addr);
3309                 if (bit_state == S2IO_BIT_RESET) {
3310                         if (!(val64 & busy_bit)) {
3311                                 ret = SUCCESS;
3312                                 break;
3313                         }
3314                 } else {
3315                         if (!(val64 & busy_bit)) {
3316                                 ret = SUCCESS;
3317                                 break;
3318                         }
3319                 }
3320
3321                 if(in_interrupt())
3322                         mdelay(delay);
3323                 else
3324                         msleep(delay);
3325
3326                 if (++cnt >= 10)
3327                         delay = 50;
3328         } while (cnt < 20);
3329         return ret;
3330 }
3331 /*
3332  * check_pci_device_id - Checks if the device id is supported
3333  * @id : device id
3334  * Description: Function to check if the pci device id is supported by driver.
3335  * Return value: Actual device id if supported else PCI_ANY_ID
3336  */
3337 static u16 check_pci_device_id(u16 id)
3338 {
3339         switch (id) {
3340         case PCI_DEVICE_ID_HERC_WIN:
3341         case PCI_DEVICE_ID_HERC_UNI:
3342                 return XFRAME_II_DEVICE;
3343         case PCI_DEVICE_ID_S2IO_UNI:
3344         case PCI_DEVICE_ID_S2IO_WIN:
3345                 return XFRAME_I_DEVICE;
3346         default:
3347                 return PCI_ANY_ID;
3348         }
3349 }
3350
/**
 *  s2io_reset - Resets the card.
 *  @sp : private member of the device structure.
 *  Description: Function to Reset the card. This function then also
 *  restores the previously saved PCI configuration space registers as
 *  the card reset also resets the configuration space. Statistics are
 *  cleared except for a handful of software counters (link times,
 *  reset/memory/watchdog counts) which must survive the reset.
 *  Return value:
 *  void.
 */

static void s2io_reset(struct s2io_nic * sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;

	DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
			__FUNCTION__, sp->dev->name);

	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	/* Issue the software reset; CX4 cards need extra settle time. */
	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	if (strstr(sp->product_name, "CX4")) {
		msleep(750);
	}
	msleep(250);
	/* Retry restoring config space until the device id reads back sane. */
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
		DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
	}

	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof (struct net_device_stats));

	up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
	down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
	up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
	down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
	reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
	mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
	mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
	watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
	sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
	sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
	sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
	sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
	sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
	sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
	sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occured on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	/* restore the previously assigned mac address */
	s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);

	sp->device_enabled_once = FALSE;
}
3467
/**
 *  s2io_set_swapper - to set the swapper control on the card
 *  @sp : private member of the device structure,
 *  pointer to the s2io_nic structure.
 *  Description: Function to set the swapper control on the card
 *  correctly depending on the 'endianness' of the system. It probes
 *  the read path via the PIF feed-back register and the write path via
 *  the xmsi_address register, trying candidate swapper settings until
 *  the known magic value reads/writes back correctly, then programs the
 *  final per-queue swap-enable bits.
 *  Return value:
 *  SUCCESS on success and FAILURE on failure.
 */

static int s2io_set_swapper(struct s2io_nic * sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Read path broken: try each candidate swapper setting. */
		int i = 0;
		u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
				0x8100008181000081ULL,  /* FE=1, SE=0 */
				0x4200004242000042ULL,  /* FE=0, SE=1 */
				0};                     /* FE=0, SE=0 */

		while(i<4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
				dev->name);
			DBG_PRINT(ERR_DBG, "feedback read %llx\n",
				(unsigned long long) val64);
			return FAILURE;
		}
		valr = value[i];
	} else {
		valr = readq(&bar0->swapper_ctrl);
	}

	/* Now verify the write path using the xmsi_address scratch reg. */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if(val64 != valt) {
		int i = 0;
		u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
				0x0081810000818100ULL,  /* FE=1, SE=0 */
				0x0042420000424200ULL,  /* FE=0, SE=1 */
				0};                     /* FE=0, SE=0 */

		while(i<4) {
			/* Keep the working read-path bits (valr) intact. */
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if(val64 == valt)
				break;
			i++;
		}
		if(i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
			DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Preserve the verified FE/SE bits, rebuild the rest below. */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef  __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_R_SE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXD_W_SE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_R_SE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXD_W_SE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "feedback read %llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	return SUCCESS;
}
3605
3606 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3607 {
3608         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3609         u64 val64;
3610         int ret = 0, cnt = 0;
3611
3612         do {
3613                 val64 = readq(&bar0->xmsi_access);
3614                 if (!(val64 & BIT(15)))
3615                         break;
3616                 mdelay(1);
3617                 cnt++;
3618         } while(cnt < 5);
3619         if (cnt == 5) {
3620                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3621                 ret = 1;
3622         }
3623
3624         return ret;
3625 }
3626
3627 static void restore_xmsi_data(struct s2io_nic *nic)
3628 {
3629         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3630         u64 val64;
3631         int i;
3632
3633         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3634                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3635                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3636                 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3637                 writeq(val64, &bar0->xmsi_access);
3638                 if (wait_for_msix_trans(nic, i)) {
3639                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3640                         continue;
3641                 }
3642         }
3643 }
3644
3645 static void store_xmsi_data(struct s2io_nic *nic)
3646 {
3647         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3648         u64 val64, addr, data;
3649         int i;
3650
3651         /* Store and display */
3652         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3653                 val64 = (BIT(15) | vBIT(i, 26, 6));
3654                 writeq(val64, &bar0->xmsi_access);
3655                 if (wait_for_msix_trans(nic, i)) {
3656                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3657                         continue;
3658                 }
3659                 addr = readq(&bar0->xmsi_address);
3660                 data = readq(&bar0->xmsi_data);
3661                 if (addr && data) {
3662                         nic->msix_info[i].addr = addr;
3663                         nic->msix_info[i].data = data;
3664                 }
3665         }
3666 }
3667
3668 static int s2io_enable_msi_x(struct s2io_nic *nic)
3669 {
3670         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3671         u64 tx_mat, rx_mat;
3672         u16 msi_control; /* Temp variable */
3673         int ret, i, j, msix_indx = 1;
3674
3675         nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3676                                GFP_KERNEL);
3677         if (nic->entries == NULL) {
3678                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3679                         __FUNCTION__);
3680                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3681                 return -ENOMEM;
3682         }
3683         nic->mac_control.stats_info->sw_stat.mem_allocated 
3684                 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3685         memset(nic->entries, 0,MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3686
3687         nic->s2io_entries =
3688                 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3689                                    GFP_KERNEL);
3690         if (nic->s2io_entries == NULL) {
3691                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", 
3692                         __FUNCTION__);
3693                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3694                 kfree(nic->entries);
3695                 nic->mac_control.stats_info->sw_stat.mem_freed 
3696                         += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3697                 return -ENOMEM;
3698         }
3699          nic->mac_control.stats_info->sw_stat.mem_allocated 
3700                 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3701         memset(nic->s2io_entries, 0,
3702                MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3703
3704         for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3705                 nic->entries[i].entry = i;
3706                 nic->s2io_entries[i].entry = i;
3707                 nic->s2io_entries[i].arg = NULL;
3708                 nic->s2io_entries[i].in_use = 0;
3709         }
3710
3711         tx_mat = readq(&bar0->tx_mat0_n[0]);
3712         for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3713                 tx_mat |= TX_MAT_SET(i, msix_indx);
3714                 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3715                 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3716                 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3717         }
3718         writeq(tx_mat, &bar0->tx_mat0_n[0]);
3719
3720         if (!nic->config.bimodal) {
3721                 rx_mat = readq(&bar0->rx_mat);
3722                 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3723                         rx_mat |= RX_MAT_SET(j, msix_indx);
3724                         nic->s2io_entries[msix_indx].arg 
3725                                 = &nic->mac_control.rings[j];
3726                         nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3727                         nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3728                 }
3729                 writeq(rx_mat, &bar0->rx_mat);
3730         } else {
3731                 tx_mat = readq(&bar0->tx_mat0_n[7]);
3732                 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3733                         tx_mat |= TX_MAT_SET(i, msix_indx);
3734                         nic->s2io_entries[msix_indx].arg 
3735                                 = &nic->mac_control.rings[j];
3736                         nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3737                         nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3738                 }
3739                 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3740         }
3741
3742         nic->avail_msix_vectors = 0;
3743         ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3744         /* We fail init if error or we get less vectors than min required */
3745         if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3746                 nic->avail_msix_vectors = ret;
3747                 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3748         }
3749         if (ret) {
3750                 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3751                 kfree(nic->entries);
3752                 nic->mac_control.stats_info->sw_stat.mem_freed 
3753                         += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3754                 kfree(nic->s2io_entries);
3755                 nic->mac_control.stats_info->sw_stat.mem_freed 
3756                 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3757                 nic->entries = NULL;
3758                 nic->s2io_entries = NULL;
3759                 nic->avail_msix_vectors = 0;
3760                 return -ENOMEM;
3761         }
3762         if (!nic->avail_msix_vectors)
3763                 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3764
3765         /*
3766          * To enable MSI-X, MSI also needs to be enabled, due to a bug
3767          * in the herc NIC. (Temp change, needs to be removed later)
3768          */
3769         pci_read_config_word(nic->pdev, 0x42, &msi_control);
3770         msi_control |= 0x1; /* Enable MSI */
3771         pci_write_config_word(nic->pdev, 0x42, msi_control);
3772
3773         return 0;
3774 }
3775
3776 /* ********************************************************* *
3777  * Functions defined below concern the OS part of the driver *
3778  * ********************************************************* */
3779
3780 /**
3781  *  s2io_open - open entry point of the driver
3782  *  @dev : pointer to the device structure.
3783  *  Description:
3784  *  This function is the open entry point of the driver. It mainly calls a
3785  *  function to allocate Rx buffers and inserts them into the buffer
3786  *  descriptors and then enables the Rx part of the NIC.
3787  *  Return value:
3788  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3789  *   file on failure.
3790  */
3791
3792 static int s2io_open(struct net_device *dev)
3793 {
3794         struct s2io_nic *sp = dev->priv;
3795         int err = 0;
3796
3797         /*
3798          * Make sure you have link off by default every time
3799          * Nic is initialized
3800          */
3801         netif_carrier_off(dev);
3802         sp->last_link_state = 0;
3803
3804         napi_enable(&sp->napi);
3805
3806         /* Initialize H/W and enable interrupts */
3807         err = s2io_card_up(sp);
3808         if (err) {
3809                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3810                           dev->name);
3811                 goto hw_init_failed;
3812         }
3813
3814         if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3815                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3816                 s2io_card_down(sp);
3817                 err = -ENODEV;
3818                 goto hw_init_failed;
3819         }
3820
3821         netif_start_queue(dev);
3822         return 0;
3823
3824 hw_init_failed:
3825         napi_disable(&sp->napi);
3826         if (sp->intr_type == MSI_X) {
3827                 if (sp->entries) {
3828                         kfree(sp->entries);
3829                         sp->mac_control.stats_info->sw_stat.mem_freed 
3830                         += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3831                 }
3832                 if (sp->s2io_entries) {
3833                         kfree(sp->s2io_entries);
3834                         sp->mac_control.stats_info->sw_stat.mem_freed 
3835                         += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3836                 }
3837         }
3838         return err;
3839 }
3840
3841 /**
3842  *  s2io_close -close entry point of the driver
3843  *  @dev : device pointer.
3844  *  Description:
3845  *  This is the stop entry point of the driver. It needs to undo exactly
 *  whatever was done by the open entry point, thus it's usually referred to
 *  as the close function. Among other things this function mainly stops the
3848  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3849  *  Return value:
3850  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3851  *  file on failure.
3852  */
3853
3854 static int s2io_close(struct net_device *dev)
3855 {
3856         struct s2io_nic *sp = dev->priv;
3857
3858         netif_stop_queue(dev);
3859         napi_disable(&sp->napi);
3860         /* Reset card, kill tasklet and free Tx and Rx buffers. */
3861         s2io_card_down(sp);
3862
3863         return 0;
3864 }
3865
3866 /**
 *  s2io_xmit - Tx entry point of the driver
3868  *  @skb : the socket buffer containing the Tx data.
3869  *  @dev : device pointer.
3870  *  Description :
3871  *  This function is the Tx entry point of the driver. S2IO NIC supports
3872  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
 *  NOTE: when the device cannot queue the pkt, just the trans_start variable
 *  will not be updated.
3875  *  Return value:
3876  *  0 on success & 1 on failure.
3877  */
3878
static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags;
	u16 vlan_tag = 0;
	int vlan_priority = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	int offload_type;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	mac_control = &sp->mac_control;
	config = &sp->config;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

	/* Zero-length skbs carry no payload; drop them silently. */
	if (unlikely(skb->len <= 0)) {
		DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
		dev_kfree_skb_any(skb);
		return 0;
}

	spin_lock_irqsave(&sp->tx_lock, flags);
	/* Drop the packet if the card is being brought down for reset. */
	if (atomic_read(&sp->card_state) == CARD_DOWN) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		dev_kfree_skb(skb);
		return 0;
	}

	queue = 0;
	/* Get Fifo number to Transmit based on vlan priority */
	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_priority = vlan_tag >> 13;	/* top 3 bits: 802.1p prio */
		queue = config->fifo_mapping[vlan_priority];
	}

	/* The FIFO is a circular list of TxDs; put/get offsets track the
	 * producer (driver) and consumer (completion) positions. */
	put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
	get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
	txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
		list_virt_addr;

	queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control ||
		   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		netif_stop_queue(dev);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		return 0;
	}

	offload_type = s2io_offload_type(skb);
	/* Enable TCP large-send offload when the stack requested GSO. */
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	/* Hardware checksum offload for IPv4/TCP/UDP. */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |=
		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
		     TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;	/* hand descriptor to NIC */
	txdp->Control_2 |= config->tx_intr_type;

	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	/* Length of the linear (non-paged) part of the skb. */
	frg_len = skb->len - skb->data_len;
	if (offload_type == SKB_GSO_UDP) {
		int ufo_size;

		/* UDP fragmentation offload: the first TxD carries an 8-byte
		 * in-band header holding the IPv6 fragment id (shifted per
		 * endianness so the device sees a consistent layout). */
		ufo_size = s2io_udp_mss(skb);
		ufo_size &= ~7;
		txdp->Control_1 |= TXD_UFO_EN;
		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
		sp->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id;
#else
		sp->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
		txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
					sp->ufo_in_band_v,
					sizeof(u64), PCI_DMA_TODEVICE);
		if((txdp->Buffer_Pointer == 0) ||
			(txdp->Buffer_Pointer == DMA_ERROR_CODE))
			goto pci_map_failed;
		txdp++;
	}

	/* DMA-map the linear part of the skb into the (next) TxD. */
	txdp->Buffer_Pointer = pci_map_single
	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
	if((txdp->Buffer_Pointer == 0) ||
		(txdp->Buffer_Pointer == DMA_ERROR_CODE))
		goto pci_map_failed;

	/* Stash the skb so the completion path can free it. */
	txdp->Host_Control = (unsigned long) skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
	if (offload_type == SKB_GSO_UDP)
		txdp->Control_1 |= TXD_UFO_EN;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!frag->size)
			continue;
		txdp++;
		/* NOTE(review): unlike the pci_map_single() calls above,
		 * pci_map_page() failures are not checked here — confirm. */
		txdp->Buffer_Pointer = (u64) pci_map_page
		    (sp->pdev, frag->page, frag->page_offset,
		     frag->size, PCI_DMA_TODEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
		if (offload_type == SKB_GSO_UDP)
			txdp->Control_1 |= TXD_UFO_EN;
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	if (offload_type == SKB_GSO_UDP)
		frg_cnt++; /* as Txd0 was used for inband header */

	/* Ring the doorbell: write the descriptor list's physical address
	 * and the control word into the FIFO registers. */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	/* Order the MMIO doorbell writes before the lock is released. */
	mmiowb();

	/* Advance the producer offset, wrapping at the end of the list. */
	put_off++;
	if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	mac_control->fifos[queue].tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		netif_stop_queue(dev);
	}
	mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return 0;
pci_map_failed:
	/* DMA mapping failed: account it, stop the queue and drop the skb. */
	stats->pci_map_fail_cnt++;
	netif_stop_queue(dev);
	stats->mem_freed += skb->truesize;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&sp->tx_lock, flags);
	return 0;
}
4053
4054 static void
4055 s2io_alarm_handle(unsigned long data)
4056 {
4057         struct s2io_nic *sp = (struct s2io_nic *)data;
4058
4059         alarm_intr_handler(sp);
4060         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4061 }
4062
/*
 * Replenish the Rx buffers of ring @rng_n after interrupt processing.
 * In non-LRO mode the refill happens inline only when the free-buffer
 * count has dropped to PANIC level; at LOW level the work is deferred
 * to the tasklet.  In LRO mode the ring is always refilled inline.
 * Returns 0, or -1 if an inline refill ran out of memory.
 */
static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
{
	int rxb_size, level;

	if (!sp->lro) {
		rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
		level = rx_buffer_level(sp, rxb_size, rng_n);

		/* TASKLET_IN_USE presumably test-and-sets bit 0 of
		 * sp->tasklet_status (hence the clear_bit calls below)
		 * — confirm against the macro's definition. */
		if ((level == PANIC) && (!TASKLET_IN_USE)) {
			int ret;
			DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
			DBG_PRINT(INTR_DBG, "PANIC levels\n");
			if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
				DBG_PRINT(INFO_DBG, "Out of memory in %s",
					  __FUNCTION__);
				clear_bit(0, (&sp->tasklet_status));
				return -1;
			}
			clear_bit(0, (&sp->tasklet_status));
		} else if (level == LOW)
			tasklet_schedule(&sp->task);

	} else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
			/* LRO mode: best-effort refill; just log on OOM. */
			DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
	}
	return 0;
}
4091
4092 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4093 {
4094         struct ring_info *ring = (struct ring_info *)dev_id;
4095         struct s2io_nic *sp = ring->nic;
4096
4097         atomic_inc(&sp->isr_cnt);
4098
4099         rx_intr_handler(ring);
4100         s2io_chk_rx_buffers(sp, ring->ring_no);
4101
4102         atomic_dec(&sp->isr_cnt);
4103         return IRQ_HANDLED;
4104 }
4105
4106 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4107 {
4108         struct fifo_info *fifo = (struct fifo_info *)dev_id;
4109         struct s2io_nic *sp = fifo->nic;
4110
4111         atomic_inc(&sp->isr_cnt);
4112         tx_intr_handler(fifo);
4113         atomic_dec(&sp->isr_cnt);
4114         return IRQ_HANDLED;
4115 }
/*
 * Handle TXPIC interrupts: decode the GPIO link up/down events, update
 * the driver's link state via s2io_link(), drive the adapter enable/LED
 * bits, and re-program the GPIO interrupt mask so only the opposite
 * link transition is unmasked next.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |=  GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		}
		else if (val64 & GPIO_INT_REG_LINK_UP) {
			/* NOTE(review): adapter_status is read but the value
			 * is discarded — presumably a read-to-clear/flush;
			 * confirm against the register spec. */
			val64 = readq(&bar0->adapter_status);
				/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		}else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 &(~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* NOTE(review): final read's value is unused — looks like a posted-
	 * write flush of the mask update above; confirm. */
	val64 = readq(&bar0->gpio_int_mask);
}
4176
4177 /**
4178  *  s2io_isr - ISR handler of the device .
4179  *  @irq: the irq of the device.
4180  *  @dev_id: a void pointer to the dev structure of the NIC.
4181  *  Description:  This function is the ISR handler of the device. It
4182  *  identifies the reason for the interrupt and calls the relevant
 *  service routines. As a contingency measure, this ISR allocates the
4184  *  recv buffers, if their numbers are below the panic value which is
4185  *  presently set to 25% of the original number of rcv buffers allocated.
4186  *  Return value:
4187  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4188  *   IRQ_NONE: will be returned if interrupt is not from our device
4189  */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	/* Pretend we handled any irq's from a disconnected card */
	if (pci_channel_offline(sp->pdev))
		return IRQ_NONE;

	atomic_inc(&sp->isr_cnt);
	mac_control = &sp->mac_control;
	config = &sp->config;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 * 4. Error in any functional blocks of the NIC.
	 */
	reason = readq(&bar0->general_int_status);

	if (!reason) {
		/* The interrupt was not raised by us. */
		atomic_dec(&sp->isr_cnt);
		return IRQ_NONE;
	}
	else if (unlikely(reason == S2IO_MINUS_ONE) ) {
		/* Disable device and get out */
		/* All-ones from a register read usually means the device is
		 * no longer responding (e.g. surprise removal) — presumably;
		 * treat the interrupt as not ours. */
		atomic_dec(&sp->isr_cnt);
		return IRQ_NONE;
	}

	/* 'napi' is the file-scope module parameter selecting NAPI mode. */
	if (napi) {
		/* Schedule the poll routine and mask further Rx interrupts
		 * until it re-enables them. */
		if (reason & GEN_INTR_RXTRAFFIC) {
			if (likely (netif_rx_schedule_prep(dev, &sp->napi))) {
				__netif_rx_schedule(dev, &sp->napi);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
			}
			else
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
		}
	} else {
		/*
		 * Rx handler is called by default, without checking for the
		 * cause of interrupt.
		 * rx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit get's
		 * cleared and hence a read can be avoided.
		 */
		if (reason & GEN_INTR_RXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

		for (i = 0; i < config->rx_ring_num; i++) {
			rx_intr_handler(&mac_control->rings[i]);
		}
	}

	/*
	 * tx_traffic_int reg is an R1 register, writing all 1's
	 * will ensure that the actual interrupt causing bit get's
	 * cleared and hence a read can be avoided.
	 */
	if (reason & GEN_INTR_TXTRAFFIC)
		writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	if (reason & GEN_INTR_TXPIC)
		s2io_txpic_intr_handle(sp);
	/*
	 * If the Rx buffer count is below the panic threshold then
	 * reallocate the buffers from the interrupt handler itself,
	 * else schedule a tasklet to reallocate the buffers.
	 */
	if (!napi) {
		for (i = 0; i < config->rx_ring_num; i++)
			s2io_chk_rx_buffers(sp, i);
	}

	/* Writing 0 to the mask presumably re-enables all interrupt
	 * sources; the readl flushes the posted write — confirm. */
	writeq(0, &bar0->general_int_mask);
	readl(&bar0->general_int_status);

	atomic_dec(&sp->isr_cnt);
	return IRQ_HANDLED;
}
4283
4284 /**
4285  * s2io_updt_stats -
4286  */
4287 static void s2io_updt_stats(struct s2io_nic *sp)
4288 {
4289         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4290         u64 val64;
4291         int cnt = 0;
4292
4293         if (atomic_read(&sp->card_state) == CARD_UP) {
4294                 /* Apprx 30us on a 133 MHz bus */
4295                 val64 = SET_UPDT_CLICKS(10) |
4296                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4297                 writeq(val64, &bar0->stat_cfg);
4298                 do {
4299                         udelay(100);
4300                         val64 = readq(&bar0->stat_cfg);
4301                         if (!(val64 & BIT(0)))
4302                                 break;
4303                         cnt++;
4304                         if (cnt == 5)
4305                                 break; /* Updt failed */
4306                 } while(1);
4307         } 
4308 }
4309
4310 /**
4311  *  s2io_get_stats - Updates the device statistics structure.
4312  *  @dev : pointer to the device structure.
4313  *  Description:
4314  *  This function updates the device statistics structure in the s2io_nic
4315  *  structure and returns a pointer to the same.
4316  *  Return value:
4317  *  pointer to the updated net_device_stats structure.
4318  */
4319
static struct net_device_stats *s2io_get_stats(struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	struct mac_info *mac_control;
	struct config_param *config;


	mac_control = &sp->mac_control;
	config = &sp->config;

	/* Configure Stats for immediate updt */
	s2io_updt_stats(sp);

	/* Copy the hardware-maintained MAC counters (stored little-endian
	 * in the stats block) into the generic net_device_stats fields. */
	sp->stats.tx_packets =
		le32_to_cpu(mac_control->stats_info->tmac_frms);
	sp->stats.tx_errors =
		le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
	sp->stats.rx_errors =
		le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
	sp->stats.multicast =
		le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
	/* NOTE(review): only over-length frames feed rx_length_errors;
	 * undersized frames are not folded in — confirm intended. */
	sp->stats.rx_length_errors =
		le64_to_cpu(mac_control->stats_info->rmac_long_frms);

	return (&sp->stats);
}
4346
4347 /**
4348  *  s2io_set_multicast - entry point for multicast address enable/disable.
4349  *  @dev : pointer to the device structure
4350  *  Description:
4351  *  This function is a driver entry point which gets called by the kernel
4352  *  whenever multicast addresses must be enabled/disabled. This also gets
4353  *  called to set/reset promiscuous mode. Depending on the deivce flag, we
4354  *  determine, if multicast address must be enabled or if promiscuous mode
4355  *  is to be disabled etc.
4356  *  Return value:
4357  *  void.
4358  */
4359
4360 static void s2io_set_multicast(struct net_device *dev)
4361 {
4362         int i, j, prev_cnt;
4363         struct dev_mc_list *mclist;
4364         struct s2io_nic *sp = dev->priv;
4365         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4366         u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4367             0xfeffffffffffULL;
4368         u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
4369         void __iomem *add;
4370
4371         if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4372                 /*  Enable all Multicast addresses */
4373                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4374                        &bar0->rmac_addr_data0_mem);
4375                 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4376                        &bar0->rmac_addr_data1_mem);
4377                 val64 = RMAC_ADDR_CMD_MEM_WE |
4378                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4379                     RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4380                 writeq(val64, &bar0->rmac_addr_cmd_mem);
4381                 /* Wait till command completes */
4382                 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4383                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4384                                         S2IO_BIT_RESET);
4385
4386                 sp->m_cast_flg = 1;
4387                 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4388         } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4389                 /*  Disable all Multicast addresses */
4390                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4391                        &bar0->rmac_addr_data0_mem);
4392                 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4393                        &bar0->rmac_addr_data1_mem);
4394                 val64 = RMAC_ADDR_CMD_MEM_WE |
4395                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4396                     RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4397                 writeq(val64, &bar0->rmac_addr_cmd_mem);
4398                 /* Wait till command completes */
4399                 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4400                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4401                                         S2IO_BIT_RESET);
4402
4403                 sp->m_cast_flg = 0;
4404                 sp->all_multi_pos = 0;
4405         }
4406
4407         if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4408                 /*  Put the NIC into promiscuous mode */
4409                 add = &bar0->mac_cfg;
4410                 val64 = readq(&bar0->mac_cfg);
4411                 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4412
4413                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4414                 writel((u32) val64, add);
4415                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4416                 writel((u32) (val64 >> 32), (add + 4));
4417
4418                 if (vlan_tag_strip != 1) {
4419                         val64 = readq(&bar0->rx_pa_cfg);
4420                         val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4421                         writeq(val64, &bar0->rx_pa_cfg);
4422                         vlan_strip_flag = 0;
4423                 }
4424
4425                 val64 = readq(&bar0->mac_cfg);
4426                 sp->promisc_flg = 1;
4427                 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4428                           dev->name);
4429         } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4430                 /*  Remove the NIC from promiscuous mode */
4431                 add = &bar0->mac_cfg;
4432                 val64 = readq(&bar0->mac_cfg);
4433                 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4434
4435                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4436                 writel((u32) val64, add);
4437                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4438                 writel((u32) (val64 >> 32), (add + 4));
4439
4440                 if (vlan_tag_strip != 0) {
4441                         val64 = readq(&bar0->rx_pa_cfg);
4442                         val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4443                         writeq(val64, &bar0->rx_pa_cfg);
4444                         vlan_strip_flag = 1;
4445                 }
4446
4447                 val64 = readq(&bar0->mac_cfg);
4448                 sp->promisc_flg = 0;
4449                 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4450                           dev->name);
4451         }
4452
4453         /*  Update individual M_CAST address list */
4454         if ((!sp->m_cast_flg) && dev->mc_count) {
4455                 if (dev->mc_count >
4456                     (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4457                         DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4458                                   dev->name);
4459                         DBG_PRINT(ERR_DBG, "can be added, please enable ");
4460                         DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4461                         return;
4462                 }
4463
4464                 prev_cnt = sp->mc_addr_count;
4465                 sp->mc_addr_count = dev->mc_count;
4466
4467                 /* Clear out the previous list of Mc in the H/W. */
4468                 for (i = 0; i < prev_cnt; i++) {
4469                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4470                                &bar0->rmac_addr_data0_mem);
4471                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4472                                 &bar0->rmac_addr_data1_mem);
4473                         val64 = RMAC_ADDR_CMD_MEM_WE |
4474                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4475                             RMAC_ADDR_CMD_MEM_OFFSET
4476                             (MAC_MC_ADDR_START_OFFSET + i);
4477                         writeq(val64, &bar0->rmac_addr_cmd_mem);
4478
4479                         /* Wait for command completes */
4480                         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4481                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4482                                         S2IO_BIT_RESET)) {
4483                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
4484                                           dev->name);
4485                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4486                                 return;
4487                         }
4488                 }
4489
4490                 /* Create the new Rx filter list and update the same in H/W. */
4491                 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4492                      i++, mclist = mclist->next) {
4493                         memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4494                                ETH_ALEN);
4495                         mac_addr = 0;
4496                         for (j = 0; j < ETH_ALEN; j++) {
4497                                 mac_addr |= mclist->dmi_addr[j];
4498                                 mac_addr <<= 8;
4499                         }
4500                         mac_addr >>= 8;
4501                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4502                                &bar0->rmac_addr_data0_mem);
4503                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4504                                 &bar0->rmac_addr_data1_mem);
4505                         val64 = RMAC_ADDR_CMD_MEM_WE |
4506                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4507                             RMAC_ADDR_CMD_MEM_OFFSET
4508                             (i + MAC_MC_ADDR_START_OFFSET);
4509                         writeq(val64, &bar0->rmac_addr_cmd_mem);
4510
4511                         /* Wait for command completes */
4512                         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4513                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4514                                         S2IO_BIT_RESET)) {
4515                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
4516                                           dev->name);
4517                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4518                                 return;
4519                         }
4520                 }
4521         }
4522 }
4523
4524 /**
4525  *  s2io_set_mac_addr - Programs the Xframe mac address
4526  *  @dev : pointer to the device structure.
4527  *  @addr: a uchar pointer to the new mac address which is to be set.
4528  *  Description : This procedure will program the Xframe to receive
4529  *  frames with new Mac Address
4530  *  Return value: SUCCESS on success and an appropriate (-)ve integer
4531  *  as defined in errno.h file on failure.
4532  */
4533
4534 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4535 {
4536         struct s2io_nic *sp = dev->priv;
4537         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4538         register u64 val64, mac_addr = 0;
4539         int i;
4540         u64 old_mac_addr = 0;
4541
4542         /*
4543          * Set the new MAC address as the new unicast filter and reflect this
4544          * change on the device address registered with the OS. It will be
4545          * at offset 0.
4546          */
4547         for (i = 0; i < ETH_ALEN; i++) {
4548                 mac_addr <<= 8;
4549                 mac_addr |= addr[i];
4550                 old_mac_addr <<= 8;
4551                 old_mac_addr |= sp->def_mac_addr[0].mac_addr[i];
4552         }
4553
4554         if(0 == mac_addr)
4555                 return SUCCESS;
4556
4557         /* Update the internal structure with this new mac address */
4558         if(mac_addr != old_mac_addr) {
4559                 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
4560                 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr);
4561                 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8);
4562                 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16);
4563                 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24);
4564                 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32);
4565                 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40);
4566         }
4567
4568         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4569                &bar0->rmac_addr_data0_mem);
4570
4571         val64 =
4572             RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4573             RMAC_ADDR_CMD_MEM_OFFSET(0);
4574         writeq(val64, &bar0->rmac_addr_cmd_mem);
4575         /* Wait till command completes */
4576         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4577                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
4578                 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4579                 return FAILURE;
4580         }
4581
4582         return SUCCESS;
4583 }
4584
4585 /**
4586  * s2io_ethtool_sset - Sets different link parameters.
4587  * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
4588  * @info: pointer to the structure with parameters given by ethtool to set
4589  * link information.
4590  * Description:
4591  * The function sets different link parameters provided by the user onto
4592  * the NIC.
4593  * Return value:
4594  * 0 on success.
4595 */
4596
4597 static int s2io_ethtool_sset(struct net_device *dev,
4598                              struct ethtool_cmd *info)
4599 {
4600         struct s2io_nic *sp = dev->priv;
4601         if ((info->autoneg == AUTONEG_ENABLE) ||
4602             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4603                 return -EINVAL;
4604         else {
4605                 s2io_close(sp->dev);
4606                 s2io_open(sp->dev);
4607         }
4608
4609         return 0;
4610 }
4611
4612 /**
4613  * s2io_ethtol_gset - Return link specific information.
4614  * @sp : private member of the device structure, pointer to the
4615  *      s2io_nic structure.
4616  * @info : pointer to the structure with parameters given by ethtool
4617  * to return link information.
4618  * Description:
4619  * Returns link specific information like speed, duplex etc.. to ethtool.
4620  * Return value :
4621  * return 0 on success.
4622  */
4623
4624 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4625 {
4626         struct s2io_nic *sp = dev->priv;
4627         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4628         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4629         info->port = PORT_FIBRE;
4630         /* info->transceiver?? TODO */
4631
4632         if (netif_carrier_ok(sp->dev)) {
4633                 info->speed = 10000;
4634                 info->duplex = DUPLEX_FULL;
4635         } else {
4636                 info->speed = -1;
4637                 info->duplex = -1;
4638         }
4639
4640         info->autoneg = AUTONEG_DISABLE;
4641         return 0;
4642 }
4643
4644 /**
4645  * s2io_ethtool_gdrvinfo - Returns driver specific information.
4646  * @sp : private member of the device structure, which is a pointer to the
4647  * s2io_nic structure.
4648  * @info : pointer to the structure with parameters given by ethtool to
4649  * return driver information.
4650  * Description:
4651  * Returns driver specefic information like name, version etc.. to ethtool.
4652  * Return value:
4653  *  void
4654  */
4655
4656 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4657                                   struct ethtool_drvinfo *info)
4658 {
4659         struct s2io_nic *sp = dev->priv;
4660
4661         strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4662         strncpy(info->version, s2io_driver_version, sizeof(info->version));
4663         strncpy(info->fw_version, "", sizeof(info->fw_version));
4664         strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
4665         info->regdump_len = XENA_REG_SPACE;
4666         info->eedump_len = XENA_EEPROM_SPACE;
4667         info->testinfo_len = S2IO_TEST_LEN;
4668
4669         if (sp->device_type == XFRAME_I_DEVICE)
4670                 info->n_stats = XFRAME_I_STAT_LEN;
4671         else
4672                 info->n_stats = XFRAME_II_STAT_LEN;
4673 }
4674
4675 /**
4676  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
4677  *  @sp: private member of the device structure, which is a pointer to the
4678  *  s2io_nic structure.
4679  *  @regs : pointer to the structure with parameters given by ethtool for
4680  *  dumping the registers.
4681  *  @reg_space: The input argumnet into which all the registers are dumped.
4682  *  Description:
4683  *  Dumps the entire register space of xFrame NIC into the user given
4684  *  buffer area.
4685  * Return value :
4686  * void .
4687 */
4688
4689 static void s2io_ethtool_gregs(struct net_device *dev,
4690                                struct ethtool_regs *regs, void *space)
4691 {
4692         int i;
4693         u64 reg;
4694         u8 *reg_space = (u8 *) space;
4695         struct s2io_nic *sp = dev->priv;
4696
4697         regs->len = XENA_REG_SPACE;
4698         regs->version = sp->pdev->subsystem_device;
4699
4700         for (i = 0; i < regs->len; i += 8) {
4701                 reg = readq(sp->bar0 + i);
4702                 memcpy((reg_space + i), &reg, 8);
4703         }
4704 }
4705
4706 /**
4707  *  s2io_phy_id  - timer function that alternates adapter LED.
4708  *  @data : address of the private member of the device structure, which
4709  *  is a pointer to the s2io_nic structure, provided as an u32.
4710  * Description: This is actually the timer function that alternates the
4711  * adapter LED bit of the adapter control bit to set/reset every time on
4712  * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
4713  *  once every second.
4714 */
4715 static void s2io_phy_id(unsigned long data)
4716 {
4717         struct s2io_nic *sp = (struct s2io_nic *) data;
4718         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4719         u64 val64 = 0;
4720         u16 subid;
4721
4722         subid = sp->pdev->subsystem_device;
4723         if ((sp->device_type == XFRAME_II_DEVICE) ||
4724                    ((subid & 0xFF) >= 0x07)) {
4725                 val64 = readq(&bar0->gpio_control);
4726                 val64 ^= GPIO_CTRL_GPIO_0;
4727                 writeq(val64, &bar0->gpio_control);
4728         } else {
4729                 val64 = readq(&bar0->adapter_control);
4730                 val64 ^= ADAPTER_LED_ON;
4731                 writeq(val64, &bar0->adapter_control);
4732         }
4733
4734         mod_timer(&sp->id_timer, jiffies + HZ / 2);
4735 }
4736
4737 /**
4738  * s2io_ethtool_idnic - To physically identify the nic on the system.
4739  * @sp : private member of the device structure, which is a pointer to the
4740  * s2io_nic structure.
4741  * @id : pointer to the structure with identification parameters given by
4742  * ethtool.
4743  * Description: Used to physically identify the NIC on the system.
4744  * The Link LED will blink for a time specified by the user for
4745  * identification.
4746  * NOTE: The Link has to be Up to be able to blink the LED. Hence
4747  * identification is possible only if it's link is up.
4748  * Return value:
4749  * int , returns 0 on success
4750  */
4751
4752 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4753 {
4754         u64 val64 = 0, last_gpio_ctrl_val;
4755         struct s2io_nic *sp = dev->priv;
4756         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4757         u16 subid;
4758
4759         subid = sp->pdev->subsystem_device;
4760         last_gpio_ctrl_val = readq(&bar0->gpio_control);
4761         if ((sp->device_type == XFRAME_I_DEVICE) &&
4762                 ((subid & 0xFF) < 0x07)) {
4763                 val64 = readq(&bar0->adapter_control);
4764                 if (!(val64 & ADAPTER_CNTL_EN)) {
4765                         printk(KERN_ERR
4766                                "Adapter Link down, cannot blink LED\n");
4767                         return -EFAULT;
4768                 }
4769         }
4770         if (sp->id_timer.function == NULL) {
4771                 init_timer(&sp->id_timer);
4772                 sp->id_timer.function = s2io_phy_id;
4773                 sp->id_timer.data = (unsigned long) sp;
4774         }
4775         mod_timer(&sp->id_timer, jiffies);
4776         if (data)
4777                 msleep_interruptible(data * HZ);
4778         else
4779                 msleep_interruptible(MAX_FLICKER_TIME);
4780         del_timer_sync(&sp->id_timer);
4781
4782         if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
4783                 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
4784                 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4785         }
4786
4787         return 0;
4788 }
4789
4790 static void s2io_ethtool_gringparam(struct net_device *dev,
4791                                     struct ethtool_ringparam *ering)
4792 {
4793         struct s2io_nic *sp = dev->priv;
4794         int i,tx_desc_count=0,rx_desc_count=0;
4795
4796         if (sp->rxd_mode == RXD_MODE_1)
4797                 ering->rx_max_pending = MAX_RX_DESC_1;
4798         else if (sp->rxd_mode == RXD_MODE_3B)
4799                 ering->rx_max_pending = MAX_RX_DESC_2;
4800
4801         ering->tx_max_pending = MAX_TX_DESC;
4802         for (i = 0 ; i < sp->config.tx_fifo_num ; i++) 
4803                 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
4804         
4805         DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
4806         ering->tx_pending = tx_desc_count;
4807         rx_desc_count = 0;
4808         for (i = 0 ; i < sp->config.rx_ring_num ; i++) 
4809                 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
4810
4811         ering->rx_pending = rx_desc_count;
4812
4813         ering->rx_mini_max_pending = 0;
4814         ering->rx_mini_pending = 0;
4815         if(sp->rxd_mode == RXD_MODE_1)
4816                 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
4817         else if (sp->rxd_mode == RXD_MODE_3B)
4818                 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
4819         ering->rx_jumbo_pending = rx_desc_count;
4820 }
4821
4822 /**
4823  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
4824  * @sp : private member of the device structure, which is a pointer to the
4825  *      s2io_nic structure.
4826  * @ep : pointer to the structure with pause parameters given by ethtool.
4827  * Description:
4828  * Returns the Pause frame generation and reception capability of the NIC.
4829  * Return value:
4830  *  void
4831  */
4832 static void s2io_ethtool_getpause_data(struct net_device *dev,
4833                                        struct ethtool_pauseparam *ep)
4834 {
4835         u64 val64;
4836         struct s2io_nic *sp = dev->priv;
4837         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4838
4839         val64 = readq(&bar0->rmac_pause_cfg);
4840         if (val64 & RMAC_PAUSE_GEN_ENABLE)
4841                 ep->tx_pause = TRUE;
4842         if (val64 & RMAC_PAUSE_RX_ENABLE)
4843                 ep->rx_pause = TRUE;
4844         ep->autoneg = FALSE;
4845 }
4846
4847 /**
4848  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
4849  * @sp : private member of the device structure, which is a pointer to the
4850  *      s2io_nic structure.
4851  * @ep : pointer to the structure with pause parameters given by ethtool.
4852  * Description:
4853  * It can be used to set or reset Pause frame generation or reception
4854  * support of the NIC.
4855  * Return value:
4856  * int, returns 0 on Success
4857  */
4858
4859 static int s2io_ethtool_setpause_data(struct net_device *dev,
4860                                struct ethtool_pauseparam *ep)
4861 {
4862         u64 val64;
4863         struct s2io_nic *sp = dev->priv;
4864         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4865
4866         val64 = readq(&bar0->rmac_pause_cfg);
4867         if (ep->tx_pause)
4868                 val64 |= RMAC_PAUSE_GEN_ENABLE;
4869         else
4870                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4871         if (ep->rx_pause)
4872                 val64 |= RMAC_PAUSE_RX_ENABLE;
4873         else
4874                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
4875         writeq(val64, &bar0->rmac_pause_cfg);
4876         return 0;
4877 }
4878
4879 /**
4880  * read_eeprom - reads 4 bytes of data from user given offset.
4881  * @sp : private member of the device structure, which is a pointer to the
4882  *      s2io_nic structure.
4883  * @off : offset at which the data must be written
4884  * @data : Its an output parameter where the data read at the given
4885  *      offset is stored.
4886  * Description:
4887  * Will read 4 bytes of data from the user given offset and return the
4888  * read data.
4889  * NOTE: Will allow to read only part of the EEPROM visible through the
4890  *   I2C bus.
4891  * Return value:
4892  *  -1 on failure and 0 on success.
4893  */
4894
4895 #define S2IO_DEV_ID             5
4896 static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
4897 {
4898         int ret = -1;
4899         u32 exit_cnt = 0;
4900         u64 val64;
4901         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4902
4903         if (sp->device_type == XFRAME_I_DEVICE) {
4904                 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4905                     I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
4906                     I2C_CONTROL_CNTL_START;
4907                 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4908
4909                 while (exit_cnt < 5) {
4910                         val64 = readq(&bar0->i2c_control);
4911                         if (I2C_CONTROL_CNTL_END(val64)) {
4912                                 *data = I2C_CONTROL_GET_DATA(val64);
4913                                 ret = 0;
4914                                 break;
4915                         }
4916                         msleep(50);
4917                         exit_cnt++;
4918                 }
4919         }
4920
4921         if (sp->device_type == XFRAME_II_DEVICE) {
4922                 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
4923                         SPI_CONTROL_BYTECNT(0x3) |
4924                         SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
4925                 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4926                 val64 |= SPI_CONTROL_REQ;
4927                 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4928                 while (exit_cnt < 5) {
4929                         val64 = readq(&bar0->spi_control);
4930                         if (val64 & SPI_CONTROL_NACK) {
4931                                 ret = 1;
4932                                 break;
4933                         } else if (val64 & SPI_CONTROL_DONE) {
4934                                 *data = readq(&bar0->spi_data);
4935                                 *data &= 0xffffff;
4936                                 ret = 0;
4937                                 break;
4938                         }
4939                         msleep(50);
4940                         exit_cnt++;
4941                 }
4942         }
4943         return ret;
4944 }
4945
4946 /**
4947  *  write_eeprom - actually writes the relevant part of the data value.
4948  *  @sp : private member of the device structure, which is a pointer to the
4949  *       s2io_nic structure.
4950  *  @off : offset at which the data must be written
4951  *  @data : The data that is to be written
4952  *  @cnt : Number of bytes of the data that are actually to be written into
4953  *  the Eeprom. (max of 3)
4954  * Description:
4955  *  Actually writes the relevant part of the data value into the Eeprom
4956  *  through the I2C bus.
4957  * Return value:
4958  *  0 on success, -1 on failure.
4959  */
4960
4961 static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
4962 {
4963         int exit_cnt = 0, ret = -1;
4964         u64 val64;
4965         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4966
4967         if (sp->device_type == XFRAME_I_DEVICE) {
4968                 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4969                     I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
4970                     I2C_CONTROL_CNTL_START;
4971                 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4972
4973                 while (exit_cnt < 5) {
4974                         val64 = readq(&bar0->i2c_control);
4975                         if (I2C_CONTROL_CNTL_END(val64)) {
4976                                 if (!(val64 & I2C_CONTROL_NACK))
4977                                         ret = 0;
4978                                 break;
4979                         }
4980                         msleep(50);
4981                         exit_cnt++;
4982                 }
4983         }
4984
4985         if (sp->device_type == XFRAME_II_DEVICE) {
4986                 int write_cnt = (cnt == 8) ? 0 : cnt;
4987                 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
4988
4989                 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
4990                         SPI_CONTROL_BYTECNT(write_cnt) |
4991                         SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
4992                 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4993                 val64 |= SPI_CONTROL_REQ;
4994                 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4995                 while (exit_cnt < 5) {
4996                         val64 = readq(&bar0->spi_control);
4997                         if (val64 & SPI_CONTROL_NACK) {
4998                                 ret = 1;
4999                                 break;
5000                         } else if (val64 & SPI_CONTROL_DONE) {
5001                                 ret = 0;
5002                                 break;
5003                         }
5004                         msleep(50);
5005                         exit_cnt++;
5006                 }
5007         }
5008         return ret;
5009 }
5010 static void s2io_vpd_read(struct s2io_nic *nic)
5011 {
5012         u8 *vpd_data;
5013         u8 data;
5014         int i=0, cnt, fail = 0;
5015         int vpd_addr = 0x80;
5016
5017         if (nic->device_type == XFRAME_II_DEVICE) {
5018                 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5019                 vpd_addr = 0x80;
5020         }
5021         else {
5022                 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5023                 vpd_addr = 0x50;
5024         }
5025         strcpy(nic->serial_num, "NOT AVAILABLE");
5026
5027         vpd_data = kmalloc(256, GFP_KERNEL);
5028         if (!vpd_data) {
5029                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5030                 return;
5031         }
5032         nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5033
5034         for (i = 0; i < 256; i +=4 ) {
5035                 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5036                 pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5037                 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5038                 for (cnt = 0; cnt <5; cnt++) {
5039                         msleep(2);
5040                         pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5041                         if (data == 0x80)
5042                                 break;
5043                 }
5044                 if (cnt >= 5) {
5045                         DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5046                         fail = 1;
5047                         break;
5048                 }
5049                 pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5050                                       (u32 *)&vpd_data[i]);
5051         }
5052
5053         if(!fail) {
5054                 /* read serial number of adapter */
5055                 for (cnt = 0; cnt < 256; cnt++) {
5056                 if ((vpd_data[cnt] == 'S') &&
5057                         (vpd_data[cnt+1] == 'N') &&
5058                         (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5059                                 memset(nic->serial_num, 0, VPD_STRING_LEN);
5060                                 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5061                                         vpd_data[cnt+2]);
5062                                 break;
5063                         }
5064                 }
5065         }
5066
5067         if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5068                 memset(nic->product_name, 0, vpd_data[1]);
5069                 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5070         }
5071         kfree(vpd_data);
5072         nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5073 }
5074
5075 /**
5076  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5077  *  @sp : private member of the device structure, which is a pointer to the *       s2io_nic structure.
5078  *  @eeprom : pointer to the user level structure provided by ethtool,
5079  *  containing all relevant information.
5080  *  @data_buf : user defined value to be written into Eeprom.
5081  *  Description: Reads the values stored in the Eeprom at given offset
5082  *  for a given length. Stores these values int the input argument data
5083  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5084  *  Return value:
5085  *  int  0 on success
5086  */
5087
5088 static int s2io_ethtool_geeprom(struct net_device *dev,
5089                          struct ethtool_eeprom *eeprom, u8 * data_buf)
5090 {
5091         u32 i, valid;
5092         u64 data;
5093         struct s2io_nic *sp = dev->priv;
5094
5095         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5096
5097         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5098                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5099
5100         for (i = 0; i < eeprom->len; i += 4) {
5101                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5102                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5103                         return -EFAULT;
5104                 }
5105                 valid = INV(data);
5106                 memcpy((data_buf + i), &valid, 4);
5107         }
5108         return 0;
5109 }
5110
5111 /**
5112  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5113  *  @sp : private member of the device structure, which is a pointer to the
5114  *  s2io_nic structure.
5115  *  @eeprom : pointer to the user level structure provided by ethtool,
5116  *  containing all relevant information.
5117  *  @data_buf ; user defined value to be written into Eeprom.
5118  *  Description:
5119  *  Tries to write the user provided value in the Eeprom, at the offset
5120  *  given by the user.
5121  *  Return value:
5122  *  0 on success, -EFAULT on failure.
5123  */
5124
5125 static int s2io_ethtool_seeprom(struct net_device *dev,
5126                                 struct ethtool_eeprom *eeprom,
5127                                 u8 * data_buf)
5128 {
5129         int len = eeprom->len, cnt = 0;
5130         u64 valid = 0, data;
5131         struct s2io_nic *sp = dev->priv;
5132
5133         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5134                 DBG_PRINT(ERR_DBG,
5135                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5136                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5137                           eeprom->magic);
5138                 return -EFAULT;
5139         }
5140
5141         while (len) {
5142                 data = (u32) data_buf[cnt] & 0x000000FF;
5143                 if (data) {
5144                         valid = (u32) (data << 24);
5145                 } else
5146                         valid = data;
5147
5148                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5149                         DBG_PRINT(ERR_DBG,
5150                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5151                         DBG_PRINT(ERR_DBG,
5152                                   "write into the specified offset\n");
5153                         return -EFAULT;
5154                 }
5155                 cnt++;
5156                 len--;
5157         }
5158
5159         return 0;
5160 }
5161
5162 /**
5163  * s2io_register_test - reads and writes into all clock domains.
5164  * @sp : private member of the device structure, which is a pointer to the
5165  * s2io_nic structure.
5166  * @data : variable that returns the result of each of the test conducted b
5167  * by the driver.
5168  * Description:
5169  * Read and write into all clock domains. The NIC has 3 clock domains,
5170  * see that registers in all the three regions are accessible.
5171  * Return value:
5172  * 0 on success.
5173  */
5174
5175 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5176 {
5177         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5178         u64 val64 = 0, exp_val;
5179         int fail = 0;
5180
5181         val64 = readq(&bar0->pif_rd_swapper_fb);
5182         if (val64 != 0x123456789abcdefULL) {
5183                 fail = 1;
5184                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5185         }
5186
5187         val64 = readq(&bar0->rmac_pause_cfg);
5188         if (val64 != 0xc000ffff00000000ULL) {
5189                 fail = 1;
5190                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5191         }
5192
5193         val64 = readq(&bar0->rx_queue_cfg);
5194         if (sp->device_type == XFRAME_II_DEVICE)
5195                 exp_val = 0x0404040404040404ULL;
5196         else
5197                 exp_val = 0x0808080808080808ULL;
5198         if (val64 != exp_val) {
5199                 fail = 1;
5200                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5201         }
5202
5203         val64 = readq(&bar0->xgxs_efifo_cfg);
5204         if (val64 != 0x000000001923141EULL) {
5205                 fail = 1;
5206                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5207         }
5208
5209         val64 = 0x5A5A5A5A5A5A5A5AULL;
5210         writeq(val64, &bar0->xmsi_data);
5211         val64 = readq(&bar0->xmsi_data);
5212         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5213                 fail = 1;
5214                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5215         }
5216
5217         val64 = 0xA5A5A5A5A5A5A5A5ULL;
5218         writeq(val64, &bar0->xmsi_data);
5219         val64 = readq(&bar0->xmsi_data);
5220         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5221                 fail = 1;
5222                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5223         }
5224
5225         *data = fail;
5226         return fail;
5227 }
5228
5229 /**
5230  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5231  * @sp : private member of the device structure, which is a pointer to the
5232  * s2io_nic structure.
5233  * @data:variable that returns the result of each of the test conducted by
5234  * the driver.
5235  * Description:
5236  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5237  * register.
5238  * Return value:
5239  * 0 on success.
5240  */
5241
5242 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5243 {
5244         int fail = 0;
5245         u64 ret_data, org_4F0, org_7F0;
5246         u8 saved_4F0 = 0, saved_7F0 = 0;
5247         struct net_device *dev = sp->dev;
5248
5249         /* Test Write Error at offset 0 */
5250         /* Note that SPI interface allows write access to all areas
5251          * of EEPROM. Hence doing all negative testing only for Xframe I.
5252          */
5253         if (sp->device_type == XFRAME_I_DEVICE)
5254                 if (!write_eeprom(sp, 0, 0, 3))
5255                         fail = 1;
5256
5257         /* Save current values at offsets 0x4F0 and 0x7F0 */
5258         if (!read_eeprom(sp, 0x4F0, &org_4F0))
5259                 saved_4F0 = 1;
5260         if (!read_eeprom(sp, 0x7F0, &org_7F0))
5261                 saved_7F0 = 1;
5262
5263         /* Test Write at offset 4f0 */
5264         if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5265                 fail = 1;
5266         if (read_eeprom(sp, 0x4F0, &ret_data))
5267                 fail = 1;
5268
5269         if (ret_data != 0x012345) {
5270                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5271                         "Data written %llx Data read %llx\n",
5272                         dev->name, (unsigned long long)0x12345,
5273                         (unsigned long long)ret_data);
5274                 fail = 1;
5275         }
5276
5277         /* Reset the EEPROM data go FFFF */
5278         write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5279
5280         /* Test Write Request Error at offset 0x7c */
5281         if (sp->device_type == XFRAME_I_DEVICE)
5282                 if (!write_eeprom(sp, 0x07C, 0, 3))
5283                         fail = 1;
5284
5285         /* Test Write Request at offset 0x7f0 */
5286         if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5287                 fail = 1;
5288         if (read_eeprom(sp, 0x7F0, &ret_data))
5289                 fail = 1;
5290
5291         if (ret_data != 0x012345) {
5292                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5293                         "Data written %llx Data read %llx\n",
5294                         dev->name, (unsigned long long)0x12345,
5295                         (unsigned long long)ret_data);
5296                 fail = 1;
5297         }
5298
5299         /* Reset the EEPROM data go FFFF */
5300         write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5301
5302         if (sp->device_type == XFRAME_I_DEVICE) {
5303                 /* Test Write Error at offset 0x80 */
5304                 if (!write_eeprom(sp, 0x080, 0, 3))
5305                         fail = 1;
5306
5307                 /* Test Write Error at offset 0xfc */
5308                 if (!write_eeprom(sp, 0x0FC, 0, 3))
5309                         fail = 1;
5310
5311                 /* Test Write Error at offset 0x100 */
5312                 if (!write_eeprom(sp, 0x100, 0, 3))
5313                         fail = 1;
5314
5315                 /* Test Write Error at offset 4ec */
5316                 if (!write_eeprom(sp, 0x4EC, 0, 3))
5317                         fail = 1;
5318         }
5319
5320         /* Restore values at offsets 0x4F0 and 0x7F0 */
5321         if (saved_4F0)
5322                 write_eeprom(sp, 0x4F0, org_4F0, 3);
5323         if (saved_7F0)
5324                 write_eeprom(sp, 0x7F0, org_7F0, 3);
5325
5326         *data = fail;
5327         return fail;
5328 }
5329
5330 /**
5331  * s2io_bist_test - invokes the MemBist test of the card .
5332  * @sp : private member of the device structure, which is a pointer to the
5333  * s2io_nic structure.
5334  * @data:variable that returns the result of each of the test conducted by
5335  * the driver.
5336  * Description:
5337  * This invokes the MemBist test of the card. We give around
5338  * 2 secs time for the Test to complete. If it's still not complete
5339  * within this peiod, we consider that the test failed.
5340  * Return value:
5341  * 0 on success and -1 on failure.
5342  */
5343
5344 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5345 {
5346         u8 bist = 0;
5347         int cnt = 0, ret = -1;
5348
5349         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5350         bist |= PCI_BIST_START;
5351         pci_write_config_word(sp->pdev, PCI_BIST, bist);
5352
5353         while (cnt < 20) {
5354                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5355                 if (!(bist & PCI_BIST_START)) {
5356                         *data = (bist & PCI_BIST_CODE_MASK);
5357                         ret = 0;
5358                         break;
5359                 }
5360                 msleep(100);
5361                 cnt++;
5362         }
5363
5364         return ret;
5365 }
5366
5367 /**
5368  * s2io-link_test - verifies the link state of the nic
5369  * @sp ; private member of the device structure, which is a pointer to the
5370  * s2io_nic structure.
5371  * @data: variable that returns the result of each of the test conducted by
5372  * the driver.
5373  * Description:
5374  * The function verifies the link state of the NIC and updates the input
5375  * argument 'data' appropriately.
5376  * Return value:
5377  * 0 on success.
5378  */
5379
5380 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5381 {
5382         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5383         u64 val64;
5384
5385         val64 = readq(&bar0->adapter_status);
5386         if(!(LINK_IS_UP(val64)))
5387                 *data = 1;
5388         else
5389                 *data = 0;
5390
5391         return *data;
5392 }
5393
5394 /**
5395  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5396  * @sp - private member of the device structure, which is a pointer to the
5397  * s2io_nic structure.
5398  * @data - variable that returns the result of each of the test
5399  * conducted by the driver.
5400  * Description:
5401  *  This is one of the offline test that tests the read and write
5402  *  access to the RldRam chip on the NIC.
5403  * Return value:
5404  *  0 on success.
5405  */
5406
5407 static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
5408 {
5409         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5410         u64 val64;
5411         int cnt, iteration = 0, test_fail = 0;
5412
5413         val64 = readq(&bar0->adapter_control);
5414         val64 &= ~ADAPTER_ECC_EN;
5415         writeq(val64, &bar0->adapter_control);
5416
5417         val64 = readq(&bar0->mc_rldram_test_ctrl);
5418         val64 |= MC_RLDRAM_TEST_MODE;
5419         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5420
5421         val64 = readq(&bar0->mc_rldram_mrs);
5422         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5423         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5424
5425         val64 |= MC_RLDRAM_MRS_ENABLE;
5426         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5427
5428         while (iteration < 2) {
5429                 val64 = 0x55555555aaaa0000ULL;
5430                 if (iteration == 1) {
5431                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
5432                 }
5433                 writeq(val64, &bar0->mc_rldram_test_d0);
5434
5435                 val64 = 0xaaaa5a5555550000ULL;
5436                 if (iteration == 1) {
5437                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
5438                 }
5439                 writeq(val64, &bar0->mc_rldram_test_d1);
5440
5441                 val64 = 0x55aaaaaaaa5a0000ULL;
5442                 if (iteration == 1) {
5443                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
5444                 }
5445                 writeq(val64, &bar0->mc_rldram_test_d2);
5446
5447                 val64 = (u64) (0x0000003ffffe0100ULL);
5448                 writeq(val64, &bar0->mc_rldram_test_add);
5449
5450                 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5451                         MC_RLDRAM_TEST_GO;
5452                 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5453
5454                 for (cnt = 0; cnt < 5; cnt++) {
5455                         val64 = readq(&bar0->mc_rldram_test_ctrl);
5456                         if (val64 & MC_RLDRAM_TEST_DONE)
5457                                 break;
5458                         msleep(200);
5459                 }
5460
5461                 if (cnt == 5)
5462                         break;
5463
5464                 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5465                 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5466
5467                 for (cnt = 0; cnt < 5; cnt++) {
5468                         val64 = readq(&bar0->mc_rldram_test_ctrl);
5469                         if (val64 & MC_RLDRAM_TEST_DONE)
5470                                 break;
5471                         msleep(500);
5472                 }
5473
5474                 if (cnt == 5)
5475                         break;
5476
5477                 val64 = readq(&bar0->mc_rldram_test_ctrl);
5478                 if (!(val64 & MC_RLDRAM_TEST_PASS))
5479                         test_fail = 1;
5480
5481                 iteration++;
5482         }
5483
5484         *data = test_fail;
5485
5486         /* Bring the adapter out of test mode */
5487         SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5488
5489         return test_fail;
5490 }
5491
5492 /**
5493  *  s2io_ethtool_test - conducts 6 tsets to determine the health of card.
5494  *  @sp : private member of the device structure, which is a pointer to the
5495  *  s2io_nic structure.
5496  *  @ethtest : pointer to a ethtool command specific structure that will be
5497  *  returned to the user.
5498  *  @data : variable that returns the result of each of the test
5499  * conducted by the driver.
5500  * Description:
5501  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
5502  *  the health of the card.
5503  * Return value:
5504  *  void
5505  */
5506
5507 static void s2io_ethtool_test(struct net_device *dev,
5508                               struct ethtool_test *ethtest,
5509                               uint64_t * data)
5510 {
5511         struct s2io_nic *sp = dev->priv;
5512         int orig_state = netif_running(sp->dev);
5513
5514         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5515                 /* Offline Tests. */
5516                 if (orig_state)
5517                         s2io_close(sp->dev);
5518
5519                 if (s2io_register_test(sp, &data[0]))
5520                         ethtest->flags |= ETH_TEST_FL_FAILED;
5521
5522                 s2io_reset(sp);
5523
5524                 if (s2io_rldram_test(sp, &data[3]))
5525                         ethtest->flags |= ETH_TEST_FL_FAILED;
5526
5527                 s2io_reset(sp);
5528
5529                 if (s2io_eeprom_test(sp, &data[1]))
5530                         ethtest->flags |= ETH_TEST_FL_FAILED;
5531
5532                 if (s2io_bist_test(sp, &data[4]))
5533                         ethtest->flags |= ETH_TEST_FL_FAILED;
5534
5535                 if (orig_state)
5536                         s2io_open(sp->dev);
5537
5538                 data[2] = 0;
5539         } else {
5540                 /* Online Tests. */
5541                 if (!orig_state) {
5542                         DBG_PRINT(ERR_DBG,
5543                                   "%s: is not up, cannot run test\n",
5544                                   dev->name);
5545                         data[0] = -1;
5546                         data[1] = -1;
5547                         data[2] = -1;
5548                         data[3] = -1;
5549                         data[4] = -1;
5550                 }
5551
5552                 if (s2io_link_test(sp, &data[2]))
5553                         ethtest->flags |= ETH_TEST_FL_FAILED;
5554
5555                 data[0] = 0;
5556                 data[1] = 0;
5557                 data[3] = 0;
5558                 data[4] = 0;
5559         }
5560 }
5561
5562 static void s2io_get_ethtool_stats(struct net_device *dev,
5563                                    struct ethtool_stats *estats,
5564                                    u64 * tmp_stats)
5565 {
5566         int i = 0;
5567         struct s2io_nic *sp = dev->priv;
5568         struct stat_block *stat_info = sp->mac_control.stats_info;
5569
5570         s2io_updt_stats(sp);
5571         tmp_stats[i++] =
5572                 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
5573                 le32_to_cpu(stat_info->tmac_frms);
5574         tmp_stats[i++] =
5575                 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5576                 le32_to_cpu(stat_info->tmac_data_octets);
5577         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
5578         tmp_stats[i++] =
5579                 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5580                 le32_to_cpu(stat_info->tmac_mcst_frms);
5581         tmp_stats[i++] =
5582                 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5583                 le32_to_cpu(stat_info->tmac_bcst_frms);
5584         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
5585         tmp_stats[i++] =
5586                 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5587                 le32_to_cpu(stat_info->tmac_ttl_octets);
5588         tmp_stats[i++] =
5589                 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5590                 le32_to_cpu(stat_info->tmac_ucst_frms);
5591         tmp_stats[i++] =
5592                 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5593                 le32_to_cpu(stat_info->tmac_nucst_frms);
5594         tmp_stats[i++] =
5595                 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5596                 le32_to_cpu(stat_info->tmac_any_err_frms);
5597         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
5598         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5599         tmp_stats[i++] =
5600                 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5601                 le32_to_cpu(stat_info->tmac_vld_ip);
5602         tmp_stats[i++] =
5603                 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5604                 le32_to_cpu(stat_info->tmac_drop_ip);
5605         tmp_stats[i++] =
5606                 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5607                 le32_to_cpu(stat_info->tmac_icmp);
5608         tmp_stats[i++] =
5609                 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5610                 le32_to_cpu(stat_info->tmac_rst_tcp);
5611         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5612         tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5613                 le32_to_cpu(stat_info->tmac_udp);
5614         tmp_stats[i++] =
5615                 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5616                 le32_to_cpu(stat_info->rmac_vld_frms);
5617         tmp_stats[i++] =
5618                 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5619                 le32_to_cpu(stat_info->rmac_data_octets);
5620         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5621         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5622         tmp_stats[i++] =
5623                 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5624                 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5625         tmp_stats[i++] =
5626                 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5627                 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
5628         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
5629         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
5630         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5631         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
5632         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
5633         tmp_stats[i++] =
5634                 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
5635                 le32_to_cpu(stat_info->rmac_ttl_octets);
5636         tmp_stats[i++] =
5637                 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
5638                 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
5639         tmp_stats[i++] =
5640                 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
5641                  << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
5642         tmp_stats[i++] =
5643                 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5644                 le32_to_cpu(stat_info->rmac_discarded_frms);
5645         tmp_stats[i++] =
5646                 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
5647                  << 32 | le32_to_cpu(stat_info->rmac_drop_events);
5648         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
5649         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
5650         tmp_stats[i++] =
5651                 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5652                 le32_to_cpu(stat_info->rmac_usized_frms);
5653         tmp_stats[i++] =
5654                 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
5655                 le32_to_cpu(stat_info->rmac_osized_frms);
5656         tmp_stats[i++] =
5657                 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
5658                 le32_to_cpu(stat_info->rmac_frag_frms);
5659         tmp_stats[i++] =
5660                 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
5661                 le32_to_cpu(stat_info->rmac_jabber_frms);
5662         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
5663         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
5664         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
5665         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
5666         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
5667         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
5668         tmp_stats[i++] =
5669                 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
5670                 le32_to_cpu(stat_info->rmac_ip);
5671         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
5672         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
5673         tmp_stats[i++] =
5674                 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
5675                 le32_to_cpu(stat_info->rmac_drop_ip);
5676         tmp_stats[i++] =
5677                 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
5678                 le32_to_cpu(stat_info->rmac_icmp);
5679         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
5680         tmp_stats[i++] =
5681                 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
5682                 le32_to_cpu(stat_info->rmac_udp);
5683         tmp_stats[i++] =
5684                 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
5685                 le32_to_cpu(stat_info->rmac_err_drp_udp);
5686         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
5687         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
5688         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
5689         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
5690         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
5691         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
5692         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
5693         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
5694         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
5695         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
5696         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
5697         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
5698         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
5699         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
5700         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
5701         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
5702         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
5703         tmp_stats[i++] =
5704                 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
5705                 le32_to_cpu(stat_info->rmac_pause_cnt);
5706         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
5707         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
5708         tmp_stats[i++] =
5709                 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
5710                 le32_to_cpu(stat_info->rmac_accepted_ip);
5711         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
5712         tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
5713         tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
5714         tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
5715         tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
5716         tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
5717         tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
5718         tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
5719         tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
5720         tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
5721         tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
5722         tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
5723         tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
5724         tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
5725         tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
5726         tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
5727         tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
5728         tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
5729         tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
5730
5731         /* Enhanced statistics exist only for Hercules */
5732         if(sp->device_type == XFRAME_II_DEVICE) {
5733                 tmp_stats[i++] =
5734                                 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
5735                 tmp_stats[i++] =
5736                                 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
5737                 tmp_stats[i++] =
5738                                 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
5739                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
5740                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
5741                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
5742                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
5743                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
5744                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
5745                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
5746                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
5747                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
5748                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
5749                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
5750                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
5751                 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
5752         }
5753
5754         tmp_stats[i++] = 0;
5755         tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5756         tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
5757         tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
5758         tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
5759         tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
5760         tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
5761         tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
5762         tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
5763         tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
5764         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
5765         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
5766         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
5767         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
5768         tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
5769         tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
5770         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
5771         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
5772         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
5773         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
5774         tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
5775         tmp_stats[i++] = stat_info->sw_stat.sending_both;
5776         tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
5777         tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
5778         if (stat_info->sw_stat.num_aggregations) {
5779                 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
5780                 int count = 0;
5781                 /*
5782                  * Since 64-bit divide does not work on all platforms,
5783                  * do repeated subtraction.
5784                  */
5785                 while (tmp >= stat_info->sw_stat.num_aggregations) {
5786                         tmp -= stat_info->sw_stat.num_aggregations;
5787                         count++;
5788                 }
5789                 tmp_stats[i++] = count;
5790         }
5791         else
5792                 tmp_stats[i++] = 0;
5793         tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
5794         tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
5795         tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
5796         tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
5797         tmp_stats[i++] = stat_info->sw_stat.mem_freed;
5798         tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
5799         tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
5800         tmp_stats[i++] = stat_info->sw_stat.link_up_time;
5801         tmp_stats[i++] = stat_info->sw_stat.link_down_time;
5802
5803         tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
5804         tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
5805         tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
5806         tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
5807         tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
5808
5809         tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
5810         tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
5811         tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
5812         tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
5813         tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
5814         tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
5815         tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
5816         tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
5817         tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
5818 }
5819
5820 static int s2io_ethtool_get_regs_len(struct net_device *dev)
5821 {
5822         return (XENA_REG_SPACE);
5823 }
5824
5825
5826 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5827 {
5828         struct s2io_nic *sp = dev->priv;
5829
5830         return (sp->rx_csum);
5831 }
5832
5833 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5834 {
5835         struct s2io_nic *sp = dev->priv;
5836
5837         if (data)
5838                 sp->rx_csum = 1;
5839         else
5840                 sp->rx_csum = 0;
5841
5842         return 0;
5843 }
5844
5845 static int s2io_get_eeprom_len(struct net_device *dev)
5846 {
5847         return (XENA_EEPROM_SPACE);
5848 }
5849
5850 static int s2io_ethtool_self_test_count(struct net_device *dev)
5851 {
5852         return (S2IO_TEST_LEN);
5853 }
5854
5855 static void s2io_ethtool_get_strings(struct net_device *dev,
5856                                      u32 stringset, u8 * data)
5857 {
5858         int stat_size = 0;
5859         struct s2io_nic *sp = dev->priv;
5860
5861         switch (stringset) {
5862         case ETH_SS_TEST:
5863                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5864                 break;
5865         case ETH_SS_STATS:
5866                 stat_size = sizeof(ethtool_xena_stats_keys);
5867                 memcpy(data, &ethtool_xena_stats_keys,stat_size);
5868                 if(sp->device_type == XFRAME_II_DEVICE) {
5869                         memcpy(data + stat_size,
5870                                 &ethtool_enhanced_stats_keys,
5871                                 sizeof(ethtool_enhanced_stats_keys));
5872                         stat_size += sizeof(ethtool_enhanced_stats_keys);
5873                 }
5874
5875                 memcpy(data + stat_size, &ethtool_driver_stats_keys,
5876                         sizeof(ethtool_driver_stats_keys));
5877         }
5878 }
5879 static int s2io_ethtool_get_stats_count(struct net_device *dev)
5880 {
5881         struct s2io_nic *sp = dev->priv;
5882         int stat_count = 0;
5883         switch(sp->device_type) {
5884         case XFRAME_I_DEVICE:
5885                 stat_count = XFRAME_I_STAT_LEN;
5886         break;
5887
5888         case XFRAME_II_DEVICE:
5889                 stat_count = XFRAME_II_STAT_LEN;
5890         break;
5891         }
5892
5893         return stat_count;
5894 }
5895
5896 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5897 {
5898         if (data)
5899                 dev->features |= NETIF_F_IP_CSUM;
5900         else
5901                 dev->features &= ~NETIF_F_IP_CSUM;
5902
5903         return 0;
5904 }
5905
5906 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
5907 {
5908         return (dev->features & NETIF_F_TSO) != 0;
5909 }
5910 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
5911 {
5912         if (data)
5913                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
5914         else
5915                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
5916
5917         return 0;
5918 }
5919
/* ethtool operations supported by this driver; mixes driver-specific
 * handlers with generic ethtool_op_* helpers for the simple flags. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = s2io_ethtool_op_get_tso,
	.set_tso = s2io_ethtool_op_set_tso,
	.get_ufo = ethtool_op_get_ufo,
	.set_ufo = ethtool_op_set_ufo,
	.self_test_count = s2io_ethtool_self_test_count,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.phys_id = s2io_ethtool_idnic,
	.get_stats_count = s2io_ethtool_get_stats_count,
	.get_ethtool_stats = s2io_get_ethtool_stats
};
5950
/**
 *  s2io_ioctl - Entry point for the Ioctl
 *  @dev :  Device pointer.
 *  @rq :   An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd :  This is used to distinguish between the different commands that
 *  can be passed to the IOCTL functions.
 *  Description:
 *  Currently no special functionality is supported in IOCTL, hence
 *  this function always returns -EOPNOTSUPP.
 */

static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	return -EOPNOTSUPP;
}
5967
/**
 *  s2io_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: A driver entry point to change MTU size for the device.
 *  If the interface is running, the card is brought down and back up so
 *  the Rx buffers are re-sized; otherwise only the hardware maximum
 *  payload register is reprogrammed.
 *  Return value:
 *   0 on success and an appropriate (-)ve integer as defined in errno.h
 *   file on failure.
 */

static int s2io_change_mtu(struct net_device *dev, int new_mtu)
{
	struct s2io_nic *sp = dev->priv;

	/* Reject sizes outside [MIN_MTU, S2IO_JUMBO_SIZE].
	 * NOTE(review): -EPERM is unusual for a bad MTU; -EINVAL is the
	 * conventional errno — confirm before changing, callers may test it. */
	if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
		DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
			  dev->name);
		return -EPERM;
	}

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		/* Full restart so rings are rebuilt for the new MTU.
		 * A failed bring-up is only logged; 0 is still returned. */
		s2io_card_down(sp);
		netif_stop_queue(dev);
		if (s2io_card_up(sp)) {
			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
				  __FUNCTION__);
		}
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} else { /* Device is down */
		struct XENA_dev_config __iomem *bar0 = sp->bar0;
		u64 val64 = new_mtu;

		/* Only reprogram the RMAC maximum payload length; the
		 * rest takes effect at the next card-up. */
		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
	}

	return 0;
}
6008
/**
 *  s2io_tasklet - Bottom half of the ISR.
 *  @dev_addr : address of the net_device structure cast to unsigned long.
 *  Description:
 *  This is the tasklet or the bottom half of the ISR. This is
 *  an extension of the ISR which is scheduled by the scheduler to be run
 *  when the load on the CPU is low. All low priority tasks of the ISR can
 *  be pushed into the tasklet. For now the tasklet is used only to
 *  replenish the Rx buffers in the Rx buffer descriptors.
 *  Return value:
 *  void.
 */

static void s2io_tasklet(unsigned long dev_addr)
{
	struct net_device *dev = (struct net_device *) dev_addr;
	struct s2io_nic *sp = dev->priv;
	int i, ret;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	/* TASKLET_IN_USE presumably test-and-sets bit 0 of
	 * sp->tasklet_status (the bit cleared below), so only one
	 * instance replenishes at a time — confirm macro definition. */
	if (!TASKLET_IN_USE) {
		for (i = 0; i < config->rx_ring_num; i++) {
			/* Top up ring i; stop at the first ring that is
			 * out of memory or already full. */
			ret = fill_rx_buffers(sp, i);
			if (ret == -ENOMEM) {
				DBG_PRINT(INFO_DBG, "%s: Out of ",
					  dev->name);
				DBG_PRINT(INFO_DBG, "memory in tasklet\n");
				break;
			} else if (ret == -EFILL) {
				DBG_PRINT(INFO_DBG,
					  "%s: Rx Ring %d is full\n",
					  dev->name, i);
				break;
			}
		}
		clear_bit(0, (&sp->tasklet_status));
	}
}
6051
/**
 * s2io_set_link - Set the Link status
 * @work: work_struct embedded in the adapter's private structure
 * Description: Sets the link status for the adapter, enabling the
 * adapter and its link LED when the link comes up.
 */

static void s2io_set_link(struct work_struct *work)
{
	struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
	struct net_device *dev = nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64;
	u16 subid;

	rtnl_lock();

	/* Interface was closed before this deferred work ran. */
	if (!netif_running(dev))
		goto out_unlock;

	if (test_and_set_bit(0, &(nic->link_state))) {
		/* The card is being reset, no point doing anything */
		goto out_unlock;
	}

	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NICs self initiated
		 * cleanup to complete.
		 */
		msleep(100);
	}

	val64 = readq(&bar0->adapter_status);
	if (LINK_IS_UP(val64)) {
		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
			if (verify_xena_quiescence(nic)) {
				/* Enable the adapter; drive the link LED
				 * through GPIO 0 on cards whose LED logic
				 * is wired that way, else via the LED bit
				 * in adapter_control. */
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
					nic->device_type, subid)) {
					val64 = readq(&bar0->gpio_control);
					val64 |= GPIO_CTRL_GPIO_0;
					writeq(val64, &bar0->gpio_control);
					/* read back — presumably to flush
					 * the posted write; confirm */
					val64 = readq(&bar0->gpio_control);
				} else {
					val64 |= ADAPTER_LED_ON;
					writeq(val64, &bar0->adapter_control);
				}
				nic->device_enabled_once = TRUE;
			} else {
				DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
				DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
				netif_stop_queue(dev);
			}
		}
		/* Re-check: the link may have dropped while enabling. */
		val64 = readq(&bar0->adapter_status);
		if (!LINK_IS_UP(val64)) {
			DBG_PRINT(ERR_DBG, "%s:", dev->name);
			DBG_PRINT(ERR_DBG, " Link down after enabling ");
			DBG_PRINT(ERR_DBG, "device \n");
		} else
			s2io_link(nic, LINK_UP);
	} else {
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
						      subid)) {
			/* Turn the GPIO-driven link LED off. */
			val64 = readq(&bar0->gpio_control);
			val64 &= ~GPIO_CTRL_GPIO_0;
			writeq(val64, &bar0->gpio_control);
			val64 = readq(&bar0->gpio_control);
		}
		s2io_link(nic, LINK_DOWN);
	}
	clear_bit(0, &(nic->link_state));

out_unlock:
	rtnl_unlock();
}
6131
6132 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6133                                 struct buffAdd *ba,
6134                                 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6135                                 u64 *temp2, int size)
6136 {
6137         struct net_device *dev = sp->dev;
6138         struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6139
6140         if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6141                 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6142                 /* allocate skb */
6143                 if (*skb) {
6144                         DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6145                         /*
6146                          * As Rx frame are not going to be processed,
6147                          * using same mapped address for the Rxd
6148                          * buffer pointer
6149                          */
6150                         rxdp1->Buffer0_ptr = *temp0;
6151                 } else {
6152                         *skb = dev_alloc_skb(size);
6153                         if (!(*skb)) {
6154                                 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6155                                 DBG_PRINT(INFO_DBG, "memory to allocate ");
6156                                 DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
6157                                 sp->mac_control.stats_info->sw_stat. \
6158                                         mem_alloc_fail_cnt++;
6159                                 return -ENOMEM ;
6160                         }
6161                         sp->mac_control.stats_info->sw_stat.mem_allocated 
6162                                 += (*skb)->truesize;
6163                         /* storing the mapped addr in a temp variable
6164                          * such it will be used for next rxd whose
6165                          * Host Control is NULL
6166                          */
6167                         rxdp1->Buffer0_ptr = *temp0 =
6168                                 pci_map_single( sp->pdev, (*skb)->data,
6169                                         size - NET_IP_ALIGN,
6170                                         PCI_DMA_FROMDEVICE);
6171                         if( (rxdp1->Buffer0_ptr == 0) ||
6172                                 (rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
6173                                 goto memalloc_failed;
6174                         }
6175                         rxdp->Host_Control = (unsigned long) (*skb);
6176                 }
6177         } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6178                 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6179                 /* Two buffer Mode */
6180                 if (*skb) {
6181                         rxdp3->Buffer2_ptr = *temp2;
6182                         rxdp3->Buffer0_ptr = *temp0;
6183                         rxdp3->Buffer1_ptr = *temp1;
6184                 } else {
6185                         *skb = dev_alloc_skb(size);
6186                         if (!(*skb)) {
6187                                 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6188                                 DBG_PRINT(INFO_DBG, "memory to allocate ");
6189                                 DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
6190                                 sp->mac_control.stats_info->sw_stat. \
6191                                         mem_alloc_fail_cnt++;
6192                                 return -ENOMEM;
6193                         }
6194                         sp->mac_control.stats_info->sw_stat.mem_allocated 
6195                                 += (*skb)->truesize;
6196                         rxdp3->Buffer2_ptr = *temp2 =
6197                                 pci_map_single(sp->pdev, (*skb)->data,
6198                                                dev->mtu + 4,
6199                                                PCI_DMA_FROMDEVICE);
6200                         if( (rxdp3->Buffer2_ptr == 0) ||
6201                                 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
6202                                 goto memalloc_failed;
6203                         }
6204                         rxdp3->Buffer0_ptr = *temp0 =
6205                                 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6206                                                 PCI_DMA_FROMDEVICE);
6207                         if( (rxdp3->Buffer0_ptr == 0) ||
6208                                 (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
6209                                 pci_unmap_single (sp->pdev,
6210                                         (dma_addr_t)rxdp3->Buffer2_ptr,
6211                                         dev->mtu + 4, PCI_DMA_FROMDEVICE);
6212                                 goto memalloc_failed;
6213                         }
6214                         rxdp->Host_Control = (unsigned long) (*skb);
6215
6216                         /* Buffer-1 will be dummy buffer not used */
6217                         rxdp3->Buffer1_ptr = *temp1 =
6218                                 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6219                                                 PCI_DMA_FROMDEVICE);
6220                         if( (rxdp3->Buffer1_ptr == 0) ||
6221                                 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
6222                                 pci_unmap_single (sp->pdev,
6223                                         (dma_addr_t)rxdp3->Buffer0_ptr,
6224                                         BUF0_LEN, PCI_DMA_FROMDEVICE);
6225                                 pci_unmap_single (sp->pdev,
6226                                         (dma_addr_t)rxdp3->Buffer2_ptr,
6227                                         dev->mtu + 4, PCI_DMA_FROMDEVICE);
6228                                 goto memalloc_failed;
6229                         }
6230                 }
6231         }
6232         return 0;
6233         memalloc_failed:
6234                 stats->pci_map_fail_cnt++;
6235                 stats->mem_freed += (*skb)->truesize;
6236                 dev_kfree_skb(*skb);
6237                 return -ENOMEM;
6238 }
6239
6240 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6241                                 int size)
6242 {
6243         struct net_device *dev = sp->dev;
6244         if (sp->rxd_mode == RXD_MODE_1) {
6245                 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6246         } else if (sp->rxd_mode == RXD_MODE_3B) {
6247                 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6248                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6249                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6250         }
6251 }
6252
6253 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6254 {
6255         int i, j, k, blk_cnt = 0, size;
6256         struct mac_info * mac_control = &sp->mac_control;
6257         struct config_param *config = &sp->config;
6258         struct net_device *dev = sp->dev;
6259         struct RxD_t *rxdp = NULL;
6260         struct sk_buff *skb = NULL;
6261         struct buffAdd *ba = NULL;
6262         u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6263
6264         /* Calculate the size based on ring mode */
6265         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6266                 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6267         if (sp->rxd_mode == RXD_MODE_1)
6268                 size += NET_IP_ALIGN;
6269         else if (sp->rxd_mode == RXD_MODE_3B)
6270                 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6271
6272         for (i = 0; i < config->rx_ring_num; i++) {
6273                 blk_cnt = config->rx_cfg[i].num_rxd /
6274                         (rxd_count[sp->rxd_mode] +1);
6275
6276                 for (j = 0; j < blk_cnt; j++) {
6277                         for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6278                                 rxdp = mac_control->rings[i].
6279                                         rx_blocks[j].rxds[k].virt_addr;
6280                                 if(sp->rxd_mode == RXD_MODE_3B)
6281                                         ba = &mac_control->rings[i].ba[j][k];
6282                                 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6283                                                        &skb,(u64 *)&temp0_64,
6284                                                        (u64 *)&temp1_64,
6285                                                        (u64 *)&temp2_64,
6286                                                         size) == ENOMEM) {
6287                                         return 0;
6288                                 }
6289
6290                                 set_rxd_buffer_size(sp, rxdp, size);
6291                                 wmb();
6292                                 /* flip the Ownership bit to Hardware */
6293                                 rxdp->Control_1 |= RXD_OWN_XENA;
6294                         }
6295                 }
6296         }
6297         return 0;
6298
6299 }
6300
/**
 * s2io_add_isr - enable and register the adapter's interrupt handler(s).
 * @sp: adapter private structure.
 *
 * Enables MSI-X if configured (falling back to INTA on failure), then
 * registers one IRQ per MSI-X vector (Tx fifo or Rx ring), or a single
 * shared INTA handler.
 * Return: 0 on success, -1 if any request_irq() fails.
 *
 * NOTE(review): when a later MSI-X request_irq() fails, the vectors
 * already registered in this loop are not freed before returning -1 —
 * confirm whether the caller cleans up via s2io_rem_isr().
 */
static int s2io_add_isr(struct s2io_nic * sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		/* MSI-X enable failed: fall back to legacy interrupts. */
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->intr_type = INTA;
	}

	/* Store the values of the MSIX table in the struct s2io_nic structure */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->intr_type == MSI_X) {
		int i, msix_tx_cnt=0,msix_rx_cnt=0;

		/* Entries flagged MSIX_FLG were populated by
		 * s2io_enable_msi_x(); register each vector. */
		for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
			if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
				sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_fifo_handle, 0, sp->desc[i],
						  sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_tx_cnt++;
				}
			} else {
				sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_ring_handle, 0, sp->desc[i],
						  sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_rx_cnt++;
				}
			}
			if (err) {
				DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
					  "failed\n", dev->name, i);
				DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
				return -1;
			}
			sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
		}
		printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
		printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
	}
	if (sp->intr_type == INTA) {
		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
				sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
/**
 * s2io_rem_isr - unregister the adapter's interrupt handler(s).
 * @sp: adapter private structure.
 *
 * Frees every successfully-registered MSI-X vector and disables MSI-X
 * (or frees the single INTA line), then waits briefly for in-flight
 * interrupt handlers to finish.
 */
static void s2io_rem_isr(struct s2io_nic * sp)
{
	int cnt = 0;
	struct net_device *dev = sp->dev;

	if (sp->intr_type == MSI_X) {
		int i;
		u16 msi_control;

		/* Free only the vectors s2io_add_isr() marked as
		 * successfully registered. */
		for (i=1; (sp->s2io_entries[i].in_use ==
			MSIX_REGISTERED_SUCCESS); i++) {
			int vector = sp->entries[i].vector;
			void *arg = sp->s2io_entries[i].arg;

			free_irq(vector, arg);
		}
		/* 0x42 is presumably the MSI-X control register offset in
		 * this device's PCI config space — confirm against the
		 * adapter's capability layout. */
		pci_read_config_word(sp->pdev, 0x42, &msi_control);
		msi_control &= 0xFFFE; /* Disable MSI */
		pci_write_config_word(sp->pdev, 0x42, msi_control);

		pci_disable_msix(sp->pdev);
	} else {
		free_irq(sp->pdev->irq, dev);
	}
	/* Waiting till all Interrupt handlers are complete
	 * (polls isr_cnt, giving up after ~50 ms). */
	cnt = 0;
	do {
		msleep(10);
		if (!atomic_read(&sp->isr_cnt))
			break;
		cnt++;
	} while(cnt < 5);
}
6414
/**
 * do_s2io_card_down - tear down the running adapter.
 * @sp: adapter private structure.
 * @do_io: non-zero to also touch the hardware (stop traffic, replenish
 *         rings, reset); zero skips all register I/O.
 *
 * Stops the alarm timer, waits out any concurrent link task, removes
 * interrupt handlers, kills the tasklet, waits for quiescence, resets
 * the NIC, and frees all Tx/Rx buffers.
 */
static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	unsigned long flags;
	register u64 val64 = 0;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(0, &(sp->link_state))) {
		msleep(50);
	}
	atomic_set(&sp->card_state, CARD_DOWN);

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* Kill tasklet. */
	tasklet_kill(&sp->task);

	/* Check if the device is Quiescent and then Reset the NIC */
	while(do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this point we are
		 * just setting the ownership bit of rxd in Each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if(verify_pcc_quiescent(sp, sp->device_enabled_once))
			break;
		}

		/* Not quiescent yet: retry up to 10 times at 50 ms. */
		msleep(50);
		cnt++;
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG,
				  "s2io_close:Device not Quiescent ");
			DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
				  (unsigned long long) val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	spin_lock_irqsave(&sp->tx_lock, flags);
	/* Free all Tx buffers */
	free_tx_buffers(sp);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	/* Free all Rx buffers */
	spin_lock_irqsave(&sp->rx_lock, flags);
	free_rx_buffers(sp);
	spin_unlock_irqrestore(&sp->rx_lock, flags);

	/* Allow the link task to run again. */
	clear_bit(0, &(sp->link_state));
}
6480
/* Bring the card down, including the hardware I/O phase (stop traffic,
 * wait for quiescence, reset); see do_s2io_card_down() for details. */
static void s2io_card_down(struct s2io_nic * sp)
{
	do_s2io_card_down(sp, 1);
}
6485
/*
 * s2io_card_up - bring the adapter up and make it ready for traffic.
 * @sp: device private structure.
 *
 * Initializes the H/W registers, replenishes all Rx rings, restores the
 * receive mode, starts the NIC, registers the ISR, arms the alarm timer
 * and tasklet, and finally enables interrupts.  On any failure the NIC
 * is reset and resources acquired so far are released.
 *
 * Returns 0 on success, -ENODEV on H/W failures, -ENOMEM if the Rx
 * rings could not be populated.
 */
static int s2io_card_up(struct s2io_nic * sp)
{
	int i, ret = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	if (init_nic(sp) != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		s2io_reset(sp);
		return -ENODEV;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		if ((ret = fill_rx_buffers(sp, i))) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			/* Frees buffers of ALL rings, including those
			 * filled on earlier loop iterations */
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  atomic_read(&sp->rx_bufs_left[i]));
	}
	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos= 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (sp->lro) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use(if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		/* NOTE(review): only the MSI-X path tears down the ISR
		 * here — presumably s2io_add_isr cleans up after itself
		 * for INTA; confirm against s2io_add_isr */
		if (sp->intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Arm the alarm timer to fire in half a second */
	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	/* Enable tasklet for the device */
	tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);

	/*  Enable select interrupts */
	if (sp->intr_type != INTA)
		en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
	else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR | RX_PIC_INTR;
		interruptible |= TX_MAC_INTR | RX_MAC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}


	atomic_set(&sp->card_state, CARD_UP);
	return 0;
}
6575
6576 /**
6577  * s2io_restart_nic - Resets the NIC.
6578  * @data : long pointer to the device private structure
6579  * Description:
6580  * This function is scheduled to be run by the s2io_tx_watchdog
6581  * function after 0.5 secs to reset the NIC. The idea is to reduce
6582  * the run time of the watch dog routine which is run holding a
6583  * spin lock.
6584  */
6585
6586 static void s2io_restart_nic(struct work_struct *work)
6587 {
6588         struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6589         struct net_device *dev = sp->dev;
6590
6591         rtnl_lock();
6592
6593         if (!netif_running(dev))
6594                 goto out_unlock;
6595
6596         s2io_card_down(sp);
6597         if (s2io_card_up(sp)) {
6598                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6599                           dev->name);
6600         }
6601         netif_wake_queue(dev);
6602         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6603                   dev->name);
6604 out_unlock:
6605         rtnl_unlock();
6606 }
6607
6608 /**
6609  *  s2io_tx_watchdog - Watchdog for transmit side.
6610  *  @dev : Pointer to net device structure
6611  *  Description:
6612  *  This function is triggered if the Tx Queue is stopped
6613  *  for a pre-defined amount of time when the Interface is still up.
6614  *  If the Interface is jammed in such a situation, the hardware is
6615  *  reset (by s2io_close) and restarted again (by s2io_open) to
6616  *  overcome any problem that might have been caused in the hardware.
6617  *  Return value:
6618  *  void
6619  */
6620
6621 static void s2io_tx_watchdog(struct net_device *dev)
6622 {
6623         struct s2io_nic *sp = dev->priv;
6624
6625         if (netif_carrier_ok(dev)) {
6626                 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
6627                 schedule_work(&sp->rst_timer_task);
6628                 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
6629         }
6630 }
6631
6632 /**
6633  *   rx_osm_handler - To perform some OS related operations on SKB.
6634  *   @sp: private member of the device structure,pointer to s2io_nic structure.
6635  *   @skb : the socket buffer pointer.
6636  *   @len : length of the packet
6637  *   @cksum : FCS checksum of the frame.
6638  *   @ring_no : the ring from which this RxD was extracted.
6639  *   Description:
6640  *   This function is called by the Rx interrupt serivce routine to perform
6641  *   some OS related operations on the SKB before passing it to the upper
6642  *   layers. It mainly checks if the checksum is OK, if so adds it to the
6643  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
6644  *   to the upper layer. If the checksum is wrong, it increments the Rx
6645  *   packet error count, frees the SKB and returns error.
6646  *   Return value:
6647  *   SUCCESS on success and -1 on failure.
6648  */
6649 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
6650 {
6651         struct s2io_nic *sp = ring_data->nic;
6652         struct net_device *dev = (struct net_device *) sp->dev;
6653         struct sk_buff *skb = (struct sk_buff *)
6654                 ((unsigned long) rxdp->Host_Control);
6655         int ring_no = ring_data->ring_no;
6656         u16 l3_csum, l4_csum;
6657         unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
6658         struct lro *lro;
6659         u8 err_mask;
6660
6661         skb->dev = dev;
6662
6663         if (err) {
6664                 /* Check for parity error */
6665                 if (err & 0x1) {
6666                         sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
6667                 }
6668                 err_mask = err >> 48;
6669                 switch(err_mask) {
6670                         case 1:
6671                                 sp->mac_control.stats_info->sw_stat.
6672                                 rx_parity_err_cnt++;
6673                         break;
6674
6675                         case 2:
6676                                 sp->mac_control.stats_info->sw_stat.
6677                                 rx_abort_cnt++;
6678                         break;
6679
6680                         case 3:
6681                                 sp->mac_control.stats_info->sw_stat.
6682                                 rx_parity_abort_cnt++;
6683                         break;
6684
6685                         case 4:
6686                                 sp->mac_control.stats_info->sw_stat.
6687                                 rx_rda_fail_cnt++;
6688                         break;
6689
6690                         case 5:
6691                                 sp->mac_control.stats_info->sw_stat.
6692                                 rx_unkn_prot_cnt++;
6693                         break;
6694
6695                         case 6:
6696                                 sp->mac_control.stats_info->sw_stat.
6697                                 rx_fcs_err_cnt++;
6698                         break;
6699
6700                         case 7:
6701                                 sp->mac_control.stats_info->sw_stat.
6702                                 rx_buf_size_err_cnt++;
6703                         break;
6704
6705                         case 8:
6706                                 sp->mac_control.stats_info->sw_stat.
6707                                 rx_rxd_corrupt_cnt++;
6708                         break;
6709
6710                         case 15:
6711                                 sp->mac_control.stats_info->sw_stat.
6712                                 rx_unkn_err_cnt++;
6713                         break;
6714                 }
6715                 /*
6716                 * Drop the packet if bad transfer code. Exception being
6717                 * 0x5, which could be due to unsupported IPv6 extension header.
6718                 * In this case, we let stack handle the packet.
6719                 * Note that in this case, since checksum will be incorrect,
6720                 * stack will validate the same.
6721                 */
6722                 if (err_mask != 0x5) {
6723                         DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
6724                                 dev->name, err_mask);
6725                         sp->stats.rx_crc_errors++;
6726                         sp->mac_control.stats_info->sw_stat.mem_freed 
6727                                 += skb->truesize;
6728                         dev_kfree_skb(skb);
6729                         atomic_dec(&sp->rx_bufs_left[ring_no]);
6730                         rxdp->Host_Control = 0;
6731                         return 0;
6732                 }
6733         }
6734
6735         /* Updating statistics */
6736         sp->stats.rx_packets++;
6737         rxdp->Host_Control = 0;
6738         if (sp->rxd_mode == RXD_MODE_1) {
6739                 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
6740
6741                 sp->stats.rx_bytes += len;
6742                 skb_put(skb, len);
6743
6744         } else if (sp->rxd_mode == RXD_MODE_3B) {
6745                 int get_block = ring_data->rx_curr_get_info.block_index;
6746                 int get_off = ring_data->rx_curr_get_info.offset;
6747                 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
6748                 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
6749                 unsigned char *buff = skb_push(skb, buf0_len);
6750
6751                 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
6752                 sp->stats.rx_bytes += buf0_len + buf2_len;
6753                 memcpy(buff, ba->ba_0, buf0_len);
6754                 skb_put(skb, buf2_len);
6755         }
6756
6757         if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
6758             (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
6759             (sp->rx_csum)) {
6760                 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
6761                 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
6762                 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
6763                         /*
6764                          * NIC verifies if the Checksum of the received
6765                          * frame is Ok or not and accordingly returns
6766                          * a flag in the RxD.
6767                          */
6768                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6769                         if (sp->lro) {
6770                                 u32 tcp_len;
6771                                 u8 *tcp;
6772                                 int ret = 0;
6773
6774                                 ret = s2io_club_tcp_session(skb->data, &tcp,
6775                                                 &tcp_len, &lro, rxdp, sp);
6776                                 switch (ret) {
6777                                         case 3: /* Begin anew */
6778                                                 lro->parent = skb;
6779                                                 goto aggregate;
6780                                         case 1: /* Aggregate */
6781                                         {
6782                                                 lro_append_pkt(sp, lro,
6783                                                         skb, tcp_len);
6784                                                 goto aggregate;
6785                                         }
6786                                         case 4: /* Flush session */
6787                                         {
6788                                                 lro_append_pkt(sp, lro,
6789                                                         skb, tcp_len);
6790                                                 queue_rx_frame(lro->parent);
6791                                                 clear_lro_session(lro);
6792                                                 sp->mac_control.stats_info->
6793                                                     sw_stat.flush_max_pkts++;
6794                                                 goto aggregate;
6795                                         }
6796                                         case 2: /* Flush both */
6797                                                 lro->parent->data_len =
6798                                                         lro->frags_len;
6799                                                 sp->mac_control.stats_info->
6800                                                      sw_stat.sending_both++;
6801                                                 queue_rx_frame(lro->parent);
6802                                                 clear_lro_session(lro);
6803                                                 goto send_up;
6804                                         case 0: /* sessions exceeded */
6805                                         case -1: /* non-TCP or not
6806                                                   * L2 aggregatable
6807                                                   */
6808                                         case 5: /*
6809                                                  * First pkt in session not
6810                                                  * L3/L4 aggregatable
6811                                                  */
6812                                                 break;
6813                                         default:
6814                                                 DBG_PRINT(ERR_DBG,
6815                                                         "%s: Samadhana!!\n",
6816                                                          __FUNCTION__);
6817                                                 BUG();
6818                                 }
6819                         }
6820                 } else {
6821                         /*
6822                          * Packet with erroneous checksum, let the
6823                          * upper layers deal with it.
6824                          */
6825                         skb->ip_summed = CHECKSUM_NONE;
6826                 }
6827         } else {
6828                 skb->ip_summed = CHECKSUM_NONE;
6829         }
6830         sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
6831         if (!sp->lro) {
6832                 skb->protocol = eth_type_trans(skb, dev);
6833                 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
6834                         vlan_strip_flag)) {
6835                         /* Queueing the vlan frame to the upper layer */
6836                         if (napi)
6837                                 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
6838                                         RXD_GET_VLAN_TAG(rxdp->Control_2));
6839                         else
6840                                 vlan_hwaccel_rx(skb, sp->vlgrp,
6841                                         RXD_GET_VLAN_TAG(rxdp->Control_2));
6842                 } else {
6843                         if (napi)
6844                                 netif_receive_skb(skb);
6845                         else
6846                                 netif_rx(skb);
6847                 }
6848         } else {
6849 send_up:
6850                 queue_rx_frame(skb);
6851         }
6852         dev->last_rx = jiffies;
6853 aggregate:
6854         atomic_dec(&sp->rx_bufs_left[ring_no]);
6855         return SUCCESS;
6856 }
6857
6858 /**
6859  *  s2io_link - stops/starts the Tx queue.
6860  *  @sp : private member of the device structure, which is a pointer to the
6861  *  s2io_nic structure.
6862  *  @link : inidicates whether link is UP/DOWN.
6863  *  Description:
6864  *  This function stops/starts the Tx queue depending on whether the link
6865  *  status of the NIC is is down or up. This is called by the Alarm
6866  *  interrupt handler whenever a link change interrupt comes up.
6867  *  Return value:
6868  *  void.
6869  */
6870
6871 static void s2io_link(struct s2io_nic * sp, int link)
6872 {
6873         struct net_device *dev = (struct net_device *) sp->dev;
6874
6875         if (link != sp->last_link_state) {
6876                 if (link == LINK_DOWN) {
6877                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
6878                         netif_carrier_off(dev);
6879                         if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
6880                         sp->mac_control.stats_info->sw_stat.link_up_time = 
6881                                 jiffies - sp->start_time;
6882                         sp->mac_control.stats_info->sw_stat.link_down_cnt++;
6883                 } else {
6884                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
6885                         if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
6886                         sp->mac_control.stats_info->sw_stat.link_down_time = 
6887                                 jiffies - sp->start_time;
6888                         sp->mac_control.stats_info->sw_stat.link_up_cnt++;
6889                         netif_carrier_on(dev);
6890                 }
6891         }
6892         sp->last_link_state = link;
6893         sp->start_time = jiffies;
6894 }
6895
6896 /**
6897  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
6898  *  @sp : private member of the device structure, which is a pointer to the
6899  *  s2io_nic structure.
6900  *  Description:
6901  *  This function initializes a few of the PCI and PCI-X configuration registers
6902  *  with recommended values.
6903  *  Return value:
6904  *  void
6905  */
6906
6907 static void s2io_init_pci(struct s2io_nic * sp)
6908 {
6909         u16 pci_cmd = 0, pcix_cmd = 0;
6910
6911         /* Enable Data Parity Error Recovery in PCI-X command register. */
6912         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6913                              &(pcix_cmd));
6914         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6915                               (pcix_cmd | 1));
6916         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6917                              &(pcix_cmd));
6918
6919         /* Set the PErr Response bit in PCI command register. */
6920         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6921         pci_write_config_word(sp->pdev, PCI_COMMAND,
6922                               (pci_cmd | PCI_COMMAND_PARITY));
6923         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6924 }
6925
6926 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6927 {
6928         if ( tx_fifo_num > 8) {
6929                 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
6930                          "supported\n");
6931                 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
6932                 tx_fifo_num = 8;
6933         }
6934         if ( rx_ring_num > 8) {
6935                 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
6936                          "supported\n");
6937                 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
6938                 rx_ring_num = 8;
6939         }
6940         if (*dev_intr_type != INTA)
6941                 napi = 0;
6942
6943 #ifndef CONFIG_PCI_MSI
6944         if (*dev_intr_type != INTA) {
6945                 DBG_PRINT(ERR_DBG, "s2io: This kernel does not support"
6946                           "MSI/MSI-X. Defaulting to INTA\n");
6947                 *dev_intr_type = INTA;
6948         }
6949 #else
6950         if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
6951                 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
6952                           "Defaulting to INTA\n");
6953                 *dev_intr_type = INTA;
6954         }
6955 #endif
6956         if ((*dev_intr_type == MSI_X) &&
6957                         ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
6958                         (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
6959                 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
6960                                         "Defaulting to INTA\n");
6961                 *dev_intr_type = INTA;
6962         }
6963
6964         if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
6965                 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
6966                 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
6967                 rx_ring_mode = 1;
6968         }
6969         return SUCCESS;
6970 }
6971
6972 /**
6973  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
6974  * or Traffic class respectively.
6975  * @nic: device peivate variable
6976  * Description: The function configures the receive steering to
6977  * desired receive ring.
6978  * Return Value:  SUCCESS on success and
6979  * '-1' on failure (endian settings incorrect).
6980  */
6981 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
6982 {
6983         struct XENA_dev_config __iomem *bar0 = nic->bar0;
6984         register u64 val64 = 0;
6985
6986         if (ds_codepoint > 63)
6987                 return FAILURE;
6988
6989         val64 = RTS_DS_MEM_DATA(ring);
6990         writeq(val64, &bar0->rts_ds_mem_data);
6991
6992         val64 = RTS_DS_MEM_CTRL_WE |
6993                 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
6994                 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
6995
6996         writeq(val64, &bar0->rts_ds_mem_ctrl);
6997
6998         return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
6999                                 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7000                                 S2IO_BIT_RESET);
7001 }
7002
7003 /**
7004  *  s2io_init_nic - Initialization of the adapter .
7005  *  @pdev : structure containing the PCI related information of the device.
7006  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7007  *  Description:
7008  *  The function initializes an adapter identified by the pci_dec structure.
7009  *  All OS related initialization including memory and device structure and
7010  *  initlaization of the device private variable is done. Also the swapper
7011  *  control register is initialized to enable read and write into the I/O
7012  *  registers of the device.
7013  *  Return value:
7014  *  returns 0 on success and negative on failure.
7015  */
7016
7017 static int __devinit
7018 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7019 {
7020         struct s2io_nic *sp;
7021         struct net_device *dev;
7022         int i, j, ret;
7023         int dma_flag = FALSE;
7024         u32 mac_up, mac_down;
7025         u64 val64 = 0, tmp64 = 0;
7026         struct XENA_dev_config __iomem *bar0 = NULL;
7027         u16 subid;
7028         struct mac_info *mac_control;
7029         struct config_param *config;
7030         int mode;
7031         u8 dev_intr_type = intr_type;
7032
7033         if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
7034                 return ret;
7035
7036         if ((ret = pci_enable_device(pdev))) {
7037                 DBG_PRINT(ERR_DBG,
7038                           "s2io_init_nic: pci_enable_device failed\n");
7039                 return ret;
7040         }
7041
7042         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7043                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7044                 dma_flag = TRUE;
7045                 if (pci_set_consistent_dma_mask
7046                     (pdev, DMA_64BIT_MASK)) {
7047                         DBG_PRINT(ERR_DBG,
7048                                   "Unable to obtain 64bit DMA for \
7049                                         consistent allocations\n");
7050                         pci_disable_device(pdev);
7051                         return -ENOMEM;
7052                 }
7053         } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7054                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7055         } else {
7056                 pci_disable_device(pdev);
7057                 return -ENOMEM;
7058         }
7059         if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7060                 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
7061                 pci_disable_device(pdev);
7062                 return -ENODEV;
7063         }
7064
7065         dev = alloc_etherdev(sizeof(struct s2io_nic));
7066         if (dev == NULL) {
7067                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7068                 pci_disable_device(pdev);
7069                 pci_release_regions(pdev);
7070                 return -ENODEV;
7071         }
7072
7073         pci_set_master(pdev);
7074         pci_set_drvdata(pdev, dev);
7075         SET_MODULE_OWNER(dev);
7076         SET_NETDEV_DEV(dev, &pdev->dev);
7077
7078         /*  Private member variable initialized to s2io NIC structure */
7079         sp = dev->priv;
7080         memset(sp, 0, sizeof(struct s2io_nic));
7081         sp->dev = dev;
7082         sp->pdev = pdev;
7083         sp->high_dma_flag = dma_flag;
7084         sp->device_enabled_once = FALSE;
7085         if (rx_ring_mode == 1)
7086                 sp->rxd_mode = RXD_MODE_1;
7087         if (rx_ring_mode == 2)
7088                 sp->rxd_mode = RXD_MODE_3B;
7089
7090         sp->intr_type = dev_intr_type;
7091
7092         if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7093                 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7094                 sp->device_type = XFRAME_II_DEVICE;
7095         else
7096                 sp->device_type = XFRAME_I_DEVICE;
7097
7098         sp->lro = lro;
7099
7100         /* Initialize some PCI/PCI-X fields of the NIC. */
7101         s2io_init_pci(sp);
7102
7103         /*
7104          * Setting the device configuration parameters.
7105          * Most of these parameters can be specified by the user during
7106          * module insertion as they are module loadable parameters. If
7107          * these parameters are not not specified during load time, they
7108          * are initialized with default values.
7109          */
7110         mac_control = &sp->mac_control;
7111         config = &sp->config;
7112
7113         /* Tx side parameters. */
7114         config->tx_fifo_num = tx_fifo_num;
7115         for (i = 0; i < MAX_TX_FIFOS; i++) {
7116                 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7117                 config->tx_cfg[i].fifo_priority = i;
7118         }
7119
7120         /* mapping the QoS priority to the configured fifos */
7121         for (i = 0; i < MAX_TX_FIFOS; i++)
7122                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
7123
7124         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7125         for (i = 0; i < config->tx_fifo_num; i++) {
7126                 config->tx_cfg[i].f_no_snoop =
7127                     (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7128                 if (config->tx_cfg[i].fifo_len < 65) {
7129                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7130                         break;
7131                 }
7132         }
7133         /* + 2 because one Txd for skb->data and one Txd for UFO */
7134         config->max_txds = MAX_SKB_FRAGS + 2;
7135
7136         /* Rx side parameters. */
7137         config->rx_ring_num = rx_ring_num;
7138         for (i = 0; i < MAX_RX_RINGS; i++) {
7139                 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7140                     (rxd_count[sp->rxd_mode] + 1);
7141                 config->rx_cfg[i].ring_priority = i;
7142         }
7143
7144         for (i = 0; i < rx_ring_num; i++) {
7145                 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7146                 config->rx_cfg[i].f_no_snoop =
7147                     (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7148         }
7149
7150         /*  Setting Mac Control parameters */
7151         mac_control->rmac_pause_time = rmac_pause_time;
7152         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7153         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7154
7155
7156         /* Initialize Ring buffer parameters. */
7157         for (i = 0; i < config->rx_ring_num; i++)
7158                 atomic_set(&sp->rx_bufs_left[i], 0);
7159
7160         /* Initialize the number of ISRs currently running */
7161         atomic_set(&sp->isr_cnt, 0);
7162
7163         /*  initialize the shared memory used by the NIC and the host */
7164         if (init_shared_mem(sp)) {
7165                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7166                           dev->name);
7167                 ret = -ENOMEM;
7168                 goto mem_alloc_failed;
7169         }
7170
7171         sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7172                                      pci_resource_len(pdev, 0));
7173         if (!sp->bar0) {
7174                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7175                           dev->name);
7176                 ret = -ENOMEM;
7177                 goto bar0_remap_failed;
7178         }
7179
7180         sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7181                                      pci_resource_len(pdev, 2));
7182         if (!sp->bar1) {
7183                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7184                           dev->name);
7185                 ret = -ENOMEM;
7186                 goto bar1_remap_failed;
7187         }
7188
7189         dev->irq = pdev->irq;
7190         dev->base_addr = (unsigned long) sp->bar0;
7191
7192         /* Initializing the BAR1 address as the start of the FIFO pointer. */
7193         for (j = 0; j < MAX_TX_FIFOS; j++) {
7194                 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7195                     (sp->bar1 + (j * 0x00020000));
7196         }
7197
7198         /*  Driver entry points */
7199         dev->open = &s2io_open;
7200         dev->stop = &s2io_close;
7201         dev->hard_start_xmit = &s2io_xmit;
7202         dev->get_stats = &s2io_get_stats;
7203         dev->set_multicast_list = &s2io_set_multicast;
7204         dev->do_ioctl = &s2io_ioctl;
7205         dev->change_mtu = &s2io_change_mtu;
7206         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7207         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7208         dev->vlan_rx_register = s2io_vlan_rx_register;
7209
7210         /*
7211          * will use eth_mac_addr() for  dev->set_mac_address
7212          * mac address will be set every time dev->open() is called
7213          */
7214         netif_napi_add(dev, &sp->napi, s2io_poll, 32);
7215
7216 #ifdef CONFIG_NET_POLL_CONTROLLER
7217         dev->poll_controller = s2io_netpoll;
7218 #endif
7219
7220         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7221         if (sp->high_dma_flag == TRUE)
7222                 dev->features |= NETIF_F_HIGHDMA;
7223         dev->features |= NETIF_F_TSO;
7224         dev->features |= NETIF_F_TSO6;
7225         if ((sp->device_type & XFRAME_II_DEVICE) && (ufo))  {
7226                 dev->features |= NETIF_F_UFO;
7227                 dev->features |= NETIF_F_HW_CSUM;
7228         }
7229
7230         dev->tx_timeout = &s2io_tx_watchdog;
7231         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7232         INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7233         INIT_WORK(&sp->set_link_task, s2io_set_link);
7234
7235         pci_save_state(sp->pdev);
7236
7237         /* Setting swapper control on the NIC, for proper reset operation */
7238         if (s2io_set_swapper(sp)) {
7239                 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7240                           dev->name);
7241                 ret = -EAGAIN;
7242                 goto set_swap_failed;
7243         }
7244
7245         /* Verify if the Herc works on the slot its placed into */
7246         if (sp->device_type & XFRAME_II_DEVICE) {
7247                 mode = s2io_verify_pci_mode(sp);
7248                 if (mode < 0) {
7249                         DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7250                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7251                         ret = -EBADSLT;
7252                         goto set_swap_failed;
7253                 }
7254         }
7255
7256         /* Not needed for Herc */
7257         if (sp->device_type & XFRAME_I_DEVICE) {
7258                 /*
7259                  * Fix for all "FFs" MAC address problems observed on
7260                  * Alpha platforms
7261                  */
7262                 fix_mac_address(sp);
7263                 s2io_reset(sp);
7264         }
7265
7266         /*
7267          * MAC address initialization.
7268          * For now only one mac address will be read and used.
7269          */
7270         bar0 = sp->bar0;
7271         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7272             RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7273         writeq(val64, &bar0->rmac_addr_cmd_mem);
7274         wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7275                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
7276         tmp64 = readq(&bar0->rmac_addr_data0_mem);
7277         mac_down = (u32) tmp64;
7278         mac_up = (u32) (tmp64 >> 32);
7279
7280         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7281         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7282         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7283         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7284         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7285         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7286
7287         /*  Set the factory defined MAC address initially   */
7288         dev->addr_len = ETH_ALEN;
7289         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7290
7291         /* reset Nic and bring it to known state */
7292         s2io_reset(sp);
7293
7294         /*
7295          * Initialize the tasklet status and link state flags
7296          * and the card state parameter
7297          */
7298         atomic_set(&(sp->card_state), 0);
7299         sp->tasklet_status = 0;
7300         sp->link_state = 0;
7301
7302         /* Initialize spinlocks */
7303         spin_lock_init(&sp->tx_lock);
7304
7305         if (!napi)
7306                 spin_lock_init(&sp->put_lock);
7307         spin_lock_init(&sp->rx_lock);
7308
7309         /*
7310          * SXE-002: Configure link and activity LED to init state
7311          * on driver load.
7312          */
7313         subid = sp->pdev->subsystem_device;
7314         if ((subid & 0xFF) >= 0x07) {
7315                 val64 = readq(&bar0->gpio_control);
7316                 val64 |= 0x0000800000000000ULL;
7317                 writeq(val64, &bar0->gpio_control);
7318                 val64 = 0x0411040400000000ULL;
7319                 writeq(val64, (void __iomem *) bar0 + 0x2700);
7320                 val64 = readq(&bar0->gpio_control);
7321         }
7322
7323         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
7324
7325         if (register_netdev(dev)) {
7326                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7327                 ret = -ENODEV;
7328                 goto register_failed;
7329         }
7330         s2io_vpd_read(sp);
7331         DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
7332         DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7333                   sp->product_name, pdev->revision);
7334         DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7335                   s2io_driver_version);
7336         DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
7337                           "%02x:%02x:%02x:%02x:%02x:%02x", dev->name,
7338                           sp->def_mac_addr[0].mac_addr[0],
7339                           sp->def_mac_addr[0].mac_addr[1],
7340                           sp->def_mac_addr[0].mac_addr[2],
7341                           sp->def_mac_addr[0].mac_addr[3],
7342                           sp->def_mac_addr[0].mac_addr[4],
7343                           sp->def_mac_addr[0].mac_addr[5]);
7344         DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
7345         if (sp->device_type & XFRAME_II_DEVICE) {
7346                 mode = s2io_print_pci_mode(sp);
7347                 if (mode < 0) {
7348                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7349                         ret = -EBADSLT;
7350                         unregister_netdev(dev);
7351                         goto set_swap_failed;
7352                 }
7353         }
7354         switch(sp->rxd_mode) {
7355                 case RXD_MODE_1:
7356                     DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7357                                                 dev->name);
7358                     break;
7359                 case RXD_MODE_3B:
7360                     DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7361                                                 dev->name);
7362                     break;
7363         }
7364
7365         if (napi)
7366                 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7367         switch(sp->intr_type) {
7368                 case INTA:
7369                     DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7370                     break;
7371                 case MSI_X:
7372                     DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7373                     break;
7374         }
7375         if (sp->lro)
7376                 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7377                           dev->name);
7378         if (ufo)
7379                 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7380                                         " enabled\n", dev->name);
7381         /* Initialize device name */
7382         sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7383
7384         /* Initialize bimodal Interrupts */
7385         sp->config.bimodal = bimodal;
7386         if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
7387                 sp->config.bimodal = 0;
7388                 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
7389                         dev->name);
7390         }
7391
7392         /*
7393          * Make Link state as off at this point, when the Link change
7394          * interrupt comes the state will be automatically changed to
7395          * the right state.
7396          */
7397         netif_carrier_off(dev);
7398
7399         return 0;
7400
7401       register_failed:
7402       set_swap_failed:
7403         iounmap(sp->bar1);
7404       bar1_remap_failed:
7405         iounmap(sp->bar0);
7406       bar0_remap_failed:
7407       mem_alloc_failed:
7408         free_shared_mem(sp);
7409         pci_disable_device(pdev);
7410         pci_release_regions(pdev);
7411         pci_set_drvdata(pdev, NULL);
7412         free_netdev(dev);
7413
7414         return ret;
7415 }
7416
7417 /**
7418  * s2io_rem_nic - Free the PCI device
7419  * @pdev: structure containing the PCI related information of the device.
7420  * Description: This function is called by the Pci subsystem to release a
7421  * PCI device and free up all resource held up by the device. This could
7422  * be in response to a Hot plug event or when the driver is to be removed
7423  * from memory.
7424  */
7425
7426 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7427 {
7428         struct net_device *dev =
7429             (struct net_device *) pci_get_drvdata(pdev);
7430         struct s2io_nic *sp;
7431
7432         if (dev == NULL) {
7433                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7434                 return;
7435         }
7436
7437         flush_scheduled_work();
7438
7439         sp = dev->priv;
7440         unregister_netdev(dev);
7441
7442         free_shared_mem(sp);
7443         iounmap(sp->bar0);
7444         iounmap(sp->bar1);
7445         pci_release_regions(pdev);
7446         pci_set_drvdata(pdev, NULL);
7447         free_netdev(dev);
7448         pci_disable_device(pdev);
7449 }
7450
7451 /**
7452  * s2io_starter - Entry point for the driver
7453  * Description: This function is the entry point for the driver. It verifies
7454  * the module loadable parameters and initializes PCI configuration space.
7455  */
7456
int __init s2io_starter(void)
{
	/* Register with the PCI core; probe runs per matching device. */
	return pci_register_driver(&s2io_driver);
}
7461
7462 /**
7463  * s2io_closer - Cleanup routine for the driver
7464  * Description: This function is the cleanup routine for the driver. It unregist * ers the driver.
7465  */
7466
static __exit void s2io_closer(void)
{
	/* Unregistering triggers s2io_rem_nic() for every bound device. */
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}
7472
/* Hook the driver's load/unload entry points into the module loader. */
module_init(s2io_starter);
module_exit(s2io_closer);
7475
/*
 * check_L2_lro_capable - decide whether a received frame's L2 framing
 * allows LRO, and locate its IP and TCP headers.
 * @buffer: start of the received frame data.
 * @ip: out parameter, set to the frame's IP header on success.
 * @tcp: out parameter, set to the frame's TCP header on success.
 * @rxdp: receive descriptor for the frame; Control_1 carries the
 *        hardware-parsed frame type.
 * Returns 0 when the frame is a TCP frame with DIX (Ethernet II)
 * framing, -1 otherwise.
 */
static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
		struct tcphdr **tcp, struct RxD_t *rxdp)
{
	int ip_off;
	/* Bits 37..39 of Control_1 encode the L2 frame type as
	 * classified by the NIC.
	 */
	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;

	/* Only TCP frames (as flagged by the hardware) are merged. */
	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
		DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
			  __FUNCTION__);
		return -1;
	}

	/* TODO:
	 * By default the VLAN field in the MAC is stripped by the card, if this
	 * feature is turned off in rx_pa_cfg register, then the ip_off field
	 * has to be shifted by a further 2 bytes
	 */
	switch (l2_type) {
		case 0: /* DIX type */
		case 4: /* DIX type with VLAN */
			ip_off = HEADER_ETHERNET_II_802_3_SIZE;
			break;
		/* LLC, SNAP etc are considered non-mergeable */
		default:
			return -1;
	}

	/* IP header follows the Ethernet header; TCP header follows the
	 * IP header at ihl * 4 bytes (ihl is in 32-bit words).
	 */
	*ip = (struct iphdr *)((u8 *)buffer + ip_off);
	ip_len = (u8)((*ip)->ihl);
	ip_len <<= 2;
	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);

	return 0;
}
7510
7511 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7512                                   struct tcphdr *tcp)
7513 {
7514         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7515         if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7516            (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7517                 return -1;
7518         return 0;
7519 }
7520
7521 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7522 {
7523         return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
7524 }
7525
/*
 * initiate_new_session - start a new LRO session from the first packet
 * of a flow.
 * @lro: free session object to initialize.
 * @l2h: pointer to the packet's L2 header.
 * @ip, @tcp: the packet's IP/TCP headers (already validated).
 * @tcp_pyld_len: TCP payload length of this packet.
 */
static void initiate_new_session(struct lro *lro, u8 *l2h,
		     struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
{
	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
	lro->l2h = l2h;
	lro->iph = ip;
	lro->tcph = tcp;
	/* Next expected sequence number for in-order aggregation. */
	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
	lro->tcp_ack = ntohl(tcp->ack_seq);
	lro->sg_num = 1;
	lro->total_len = ntohs(ip->tot_len);
	lro->frags_len = 0;
	/*
	 * check if we saw TCP timestamp. Other consistency checks have
	 * already been done.
	 */
	if (tcp->doff == 8) {
		/* doff == 8 means a 32-byte TCP header; the verify step
		 * only admits this when the option area is exactly the
		 * timestamp option, so words 1 and 2 after the header
		 * are TSval and TSecr.
		 */
		u32 *ptr;
		ptr = (u32 *)(tcp+1);
		lro->saw_ts = 1;
		lro->cur_tsval = *(ptr+1);
		lro->cur_tsecr = *(ptr+2);
	}
	lro->in_use = 1;
}
7551
/*
 * update_L3L4_header - rewrite the first packet's IP/TCP headers so
 * they describe the whole aggregated super-packet, then update the
 * aggregation statistics. Called just before an LRO session is flushed
 * up the stack.
 */
static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
{
	struct iphdr *ip = lro->iph;
	struct tcphdr *tcp = lro->tcph;
	__sum16 nchk;
	struct stat_block *statinfo = sp->mac_control.stats_info;
	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);

	/* Update L3 header */
	/* New total length, then recompute the IP checksum (the check
	 * field must be zero while summing).
	 */
	ip->tot_len = htons(lro->total_len);
	ip->check = 0;
	nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
	ip->check = nchk;

	/* Update L4 header */
	/* Propagate the most recent ack and window from the last
	 * aggregated segment.
	 */
	tcp->ack_seq = lro->tcp_ack;
	tcp->window = lro->window;

	/* Update tsecr field if this session has timestamps enabled */
	if (lro->saw_ts) {
		u32 *ptr = (u32 *)(tcp + 1);
		*(ptr+2) = lro->cur_tsecr;
	}

	/* Update counters required for calculation of
	 * average no. of packets aggregated.
	 */
	statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
	statinfo->sw_stat.num_aggregations++;
}
7582
/*
 * aggregate_new_rx - fold one more in-order segment into an existing
 * LRO session: extend the lengths, advance the expected sequence
 * number, and capture the newest ack/window/timestamp values.
 * @l4_pyld: TCP payload length of the new segment.
 */
static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
		struct tcphdr *tcp, u32 l4_pyld)
{
	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
	lro->total_len += l4_pyld;
	lro->frags_len += l4_pyld;
	lro->tcp_next_seq += l4_pyld;
	lro->sg_num++;

	/* Update ack seq no. and window ad(from this pkt) in LRO object */
	lro->tcp_ack = tcp->ack_seq;
	lro->window = tcp->window;

	if (lro->saw_ts) {
		u32 *ptr;
		/* Update tsecr and tsval from this packet */
		ptr = (u32 *) (tcp + 1);
		lro->cur_tsval = *(ptr + 1);
		lro->cur_tsecr = *(ptr + 2);
	}
}
7604
/*
 * verify_l3_l4_lro_capable - L3/L4 checks that a packet must pass to
 * be aggregated.
 * @l_lro: existing session to check timestamp monotonicity against,
 *         or NULL when considering the first packet of a new session.
 * @ip, @tcp: the packet's headers.
 * @tcp_pyld_len: TCP payload length.
 * Returns 0 when the packet is mergeable, -1 when it must be sent up
 * (and any matching session flushed) instead.
 */
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
				    struct tcphdr *tcp, u32 tcp_pyld_len)
{
	u8 *ptr;

	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);

	if (!tcp_pyld_len) {
		/* Runt frame or a pure ack */
		return -1;
	}

	if (ip->ihl != 5) /* IP has options */
		return -1;

	/* If we see CE codepoint in IP header, packet is not mergeable */
	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
		return -1;

	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
	if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
				    tcp->ece || tcp->cwr || !tcp->ack) {
		/*
		 * Currently recognize only the ack control word and
		 * any other control field being set would result in
		 * flushing the LRO session
		 */
		return -1;
	}

	/*
	 * Allow only one TCP timestamp option. Don't aggregate if
	 * any other options are detected.
	 */
	if (tcp->doff != 5 && tcp->doff != 8)
		return -1;

	if (tcp->doff == 8) {
		/* Skip leading NOP padding, then require exactly the
		 * timestamp option (kind + length check).
		 */
		ptr = (u8 *)(tcp + 1);
		while (*ptr == TCPOPT_NOP)
			ptr++;
		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
			return -1;

		/* Ensure timestamp value increases monotonically */
		if (l_lro)
			if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
				return -1;

		/* timestamp echo reply should be non-zero */
		if (*((u32 *)(ptr+6)) == 0)
			return -1;
	}

	return 0;
}
7661
/*
 * s2io_club_tcp_session - main LRO dispatch for a received frame.
 * @buffer: start of the frame data.
 * @tcp: out parameter, set to the frame's TCP header.
 * @tcp_len: out parameter, set to the frame's TCP payload length.
 * @lro: out parameter, set to the LRO session involved (NULL when all
 *       sessions are in use).
 * @rxdp: the frame's receive descriptor.
 * @sp: per-adapter private data.
 *
 * Return codes directing the caller:
 *   -1 - frame not LRO-capable at L2, send up as-is
 *    0 - all sessions in use, send frame up as-is
 *    1 - frame was aggregated into *lro
 *    2 - out-of-order or unmergeable frame: flush *lro and the frame
 *    3 - a new session was started in *lro
 *    4 - aggregated, and the session hit its max size: flush *lro
 *    5 - frame fails L3/L4 checks, send up without creating a session
 */
static int
s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
		      struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;

	/* Locate the IP/TCP headers; bail out for non-TCP/non-DIX frames. */
	if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
					 rxdp))) {
		DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
			  ip->saddr, ip->daddr);
	} else {
		return ret;
	}

	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* First pass: look for an active session with a matching 4-tuple. */
	for (i=0; i<MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &sp->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			/* Out-of-order segment: flush the session. */
			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
					  "0x%x, actual 0x%x\n", __FUNCTION__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				sp->mac_control.stats_info->
				   sw_stat.outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
			return 5;
		}

		/* Second pass: claim a free session slot. */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &sp->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
			  __FUNCTION__);
		*lro = NULL;
		return ret;
	}

	/* Perform the action decided above. */
	switch (ret) {
		case 3:
			initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
			break;
		case 2:
			/* Session is about to be flushed: finalize its
			 * headers before it is handed up the stack.
			 */
			update_L3L4_header(sp, *lro);
			break;
		case 1:
			aggregate_new_rx(*lro, ip, tcph, *tcp_len);
			if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
				update_L3L4_header(sp, *lro);
				ret = 4; /* Flush the LRO */
			}
			break;
		default:
			DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
				__FUNCTION__);
			break;
	}

	return ret;
}
7757
7758 static void clear_lro_session(struct lro *lro)
7759 {
7760         static u16 lro_struct_size = sizeof(struct lro);
7761
7762         memset(lro, 0, lro_struct_size);
7763 }
7764
7765 static void queue_rx_frame(struct sk_buff *skb)
7766 {
7767         struct net_device *dev = skb->dev;
7768
7769         skb->protocol = eth_type_trans(skb, dev);
7770         if (napi)
7771                 netif_receive_skb(skb);
7772         else
7773                 netif_rx(skb);
7774 }
7775
/*
 * lro_append_pkt - chain an aggregated segment's payload onto the
 * session's parent skb via its frag_list.
 * @sp: per-adapter private data (for the clubbed-frames counter).
 * @lro: session whose parent skb accumulates the payload.
 * @skb: the new segment; its headers are stripped here.
 * @tcp_len: TCP payload length of the new segment.
 */
static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
			   struct sk_buff *skb,
			   u32 tcp_len)
{
	struct sk_buff *first = lro->parent;

	first->len += tcp_len;
	first->data_len = lro->frags_len;
	/* Drop everything before the TCP payload. */
	skb_pull(skb, (skb->len - tcp_len));
	/* Append to the end of the frag_list, starting it if empty. */
	if (skb_shinfo(first)->frag_list)
		lro->last_frag->next = skb;
	else
		skb_shinfo(first)->frag_list = skb;
	first->truesize += skb->truesize;
	lro->last_frag = skb;
	sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
	return;
}
7794
7795 /**
7796  * s2io_io_error_detected - called when PCI error is detected
7797  * @pdev: Pointer to PCI device
7798  * @state: The current pci connection state
7799  *
7800  * This function is called after a PCI bus error affecting
7801  * this device has been detected.
7802  */
static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev->priv;

	/* Stop the stack from using the device before touching it. */
	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		/* Bring down the card, while avoiding PCI I/O */
		do_s2io_card_down(sp, 0);
	}
	pci_disable_device(pdev);

	/* Ask the PCI core for a slot reset; recovery continues in
	 * s2io_io_slot_reset() and s2io_io_resume().
	 */
	return PCI_ERS_RESULT_NEED_RESET;
}
7819
7820 /**
7821  * s2io_io_slot_reset - called after the pci bus has been reset.
7822  * @pdev: Pointer to PCI device
7823  *
7824  * Restart the card from scratch, as if from a cold-boot.
7825  * At this point, the card has exprienced a hard reset,
7826  * followed by fixups by BIOS, and has its config space
7827  * set up identically to what it was at cold boot.
7828  */
static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev->priv;

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "s2io: "
		       "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Re-enable bus mastering and put the NIC back into a known
	 * state; full bring-up happens later in s2io_io_resume().
	 */
	pci_set_master(pdev);
	s2io_reset(sp);

	return PCI_ERS_RESULT_RECOVERED;
}
7845
7846 /**
7847  * s2io_io_resume - called when traffic can start flowing again.
7848  * @pdev: Pointer to PCI device
7849  *
7850  * This callback is called when the error recovery driver tells
7851  * us that its OK to resume normal operation.
7852  */
7853 static void s2io_io_resume(struct pci_dev *pdev)
7854 {
7855         struct net_device *netdev = pci_get_drvdata(pdev);
7856         struct s2io_nic *sp = netdev->priv;
7857
7858         if (netif_running(netdev)) {
7859                 if (s2io_card_up(sp)) {
7860                         printk(KERN_ERR "s2io: "
7861                                "Can't bring device back up after reset.\n");
7862                         return;
7863                 }
7864
7865                 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
7866                         s2io_card_down(sp);
7867                         printk(KERN_ERR "s2io: "
7868                                "Can't resetore mac addr after reset.\n");
7869                         return;
7870                 }
7871         }
7872
7873         netif_device_attach(netdev);
7874         netif_wake_queue(netdev);
7875 }