S2IO: Fixes in MSIX related code.
[pandora-kernel.git] / drivers / net / s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2007 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2.
36  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
41  * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  * napi: This parameter used to enable/disable NAPI (polling Rx)
46  *     Possible values '1' for enable and '0' for disable. Default is '1'
47  * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48  *      Possible values '1' for enable and '0' for disable. Default is '0'
49  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50  *                 Possible values '1' for enable , '0' for disable.
51  *                 Default is '2' - which means disable in promisc mode
52  *                 and enable in non-promiscuous mode.
53  ************************************************************************/
54
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
73 #include <linux/ip.h>
74 #include <linux/tcp.h>
75 #include <net/tcp.h>
76
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
79 #include <asm/io.h>
80 #include <asm/div64.h>
81 #include <asm/irq.h>
82
83 /* local include */
84 #include "s2io.h"
85 #include "s2io-regs.h"
86
87 #define DRV_VERSION "2.0.25.1"
88
89 /* S2io Driver name & version. */
90 static char s2io_driver_name[] = "Neterion";
91 static char s2io_driver_version[] = DRV_VERSION;
92
93 static int rxd_size[2] = {32,48};
94 static int rxd_count[2] = {127,85};
95
96 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
97 {
98         int ret;
99
100         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
102
103         return ret;
104 }
105
106 /*
107  * Cards with following subsystem_id have a link state indication
108  * problem, 600B, 600C, 600D, 640B, 640C and 640D.
109  * macro below identifies these cards given the subsystem_id.
110  */
/*
 * Evaluates to 1 for the faulty-link-indicator cards listed above, else 0.
 * The whole expansion is wrapped in parentheses and each argument is
 * parenthesized: previously "MACRO(d, s) && x" would bind "&& x" into the
 * ternary's else branch, silently yielding the wrong result.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
        (((dev_type) == XFRAME_I_DEVICE) ?                      \
                (((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
                 (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)
115
/*
 * True when neither the remote nor the local RMAC fault bit is set in the
 * adapter-status value.  The argument is parenthesized so the macro is
 * safe to invoke with a compound expression (e.g. "a | b").
 */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
                                        ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically mark the tasklet busy; non-zero means it already was.
 * NOTE(review): expands using 'sp' from the caller's scope. */
118 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/* Rx-buffer urgency levels returned by rx_buffer_level() below. */
119 #define PANIC   1
120 #define LOW     2
121 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
122 {
123         struct mac_info *mac_control;
124
125         mac_control = &sp->mac_control;
126         if (rxb_size <= rxd_count[sp->rxd_mode])
127                 return PANIC;
128         else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
129                 return  LOW;
130         return 0;
131 }
132
133 /* Ethtool related variables and Macros. */
/* Names of the adapter self-tests reported via ethtool; the
 * (offline)/(online) tag records each test's mode. */
134 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
135         "Register test\t(offline)",
136         "Eeprom test\t(offline)",
137         "Link test\t(online)",
138         "RLDRAM test\t(offline)",
139         "BIST Test\t(offline)"
140 };
141
/* Hardware statistics names exported through 'ethtool -S'.
 * NOTE(review): ordering appears tied to how the driver fills in the
 * corresponding values (code not visible in this chunk) — confirm before
 * reordering or inserting entries. */
142 static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
143         {"tmac_frms"},
144         {"tmac_data_octets"},
145         {"tmac_drop_frms"},
146         {"tmac_mcst_frms"},
147         {"tmac_bcst_frms"},
148         {"tmac_pause_ctrl_frms"},
149         {"tmac_ttl_octets"},
150         {"tmac_ucst_frms"},
151         {"tmac_nucst_frms"},
152         {"tmac_any_err_frms"},
153         {"tmac_ttl_less_fb_octets"},
154         {"tmac_vld_ip_octets"},
155         {"tmac_vld_ip"},
156         {"tmac_drop_ip"},
157         {"tmac_icmp"},
158         {"tmac_rst_tcp"},
159         {"tmac_tcp"},
160         {"tmac_udp"},
161         {"rmac_vld_frms"},
162         {"rmac_data_octets"},
163         {"rmac_fcs_err_frms"},
164         {"rmac_drop_frms"},
165         {"rmac_vld_mcst_frms"},
166         {"rmac_vld_bcst_frms"},
167         {"rmac_in_rng_len_err_frms"},
168         {"rmac_out_rng_len_err_frms"},
169         {"rmac_long_frms"},
170         {"rmac_pause_ctrl_frms"},
171         {"rmac_unsup_ctrl_frms"},
172         {"rmac_ttl_octets"},
173         {"rmac_accepted_ucst_frms"},
174         {"rmac_accepted_nucst_frms"},
175         {"rmac_discarded_frms"},
176         {"rmac_drop_events"},
177         {"rmac_ttl_less_fb_octets"},
178         {"rmac_ttl_frms"},
179         {"rmac_usized_frms"},
180         {"rmac_osized_frms"},
181         {"rmac_frag_frms"},
182         {"rmac_jabber_frms"},
183         {"rmac_ttl_64_frms"},
184         {"rmac_ttl_65_127_frms"},
185         {"rmac_ttl_128_255_frms"},
186         {"rmac_ttl_256_511_frms"},
187         {"rmac_ttl_512_1023_frms"},
188         {"rmac_ttl_1024_1518_frms"},
189         {"rmac_ip"},
190         {"rmac_ip_octets"},
191         {"rmac_hdr_err_ip"},
192         {"rmac_drop_ip"},
193         {"rmac_icmp"},
194         {"rmac_tcp"},
195         {"rmac_udp"},
196         {"rmac_err_drp_udp"},
197         {"rmac_xgmii_err_sym"},
198         {"rmac_frms_q0"},
199         {"rmac_frms_q1"},
200         {"rmac_frms_q2"},
201         {"rmac_frms_q3"},
202         {"rmac_frms_q4"},
203         {"rmac_frms_q5"},
204         {"rmac_frms_q6"},
205         {"rmac_frms_q7"},
206         {"rmac_full_q0"},
207         {"rmac_full_q1"},
208         {"rmac_full_q2"},
209         {"rmac_full_q3"},
210         {"rmac_full_q4"},
211         {"rmac_full_q5"},
212         {"rmac_full_q6"},
213         {"rmac_full_q7"},
214         {"rmac_pause_cnt"},
215         {"rmac_xgmii_data_err_cnt"},
216         {"rmac_xgmii_ctrl_err_cnt"},
217         {"rmac_accepted_ip"},
218         {"rmac_err_tcp"},
219         {"rd_req_cnt"},
220         {"new_rd_req_cnt"},
221         {"new_rd_req_rtry_cnt"},
222         {"rd_rtry_cnt"},
223         {"wr_rtry_rd_ack_cnt"},
224         {"wr_req_cnt"},
225         {"new_wr_req_cnt"},
226         {"new_wr_req_rtry_cnt"},
227         {"wr_rtry_cnt"},
228         {"wr_disc_cnt"},
229         {"rd_rtry_wr_ack_cnt"},
230         {"txp_wr_cnt"},
231         {"txd_rd_cnt"},
232         {"txd_wr_cnt"},
233         {"rxd_rd_cnt"},
234         {"rxd_wr_cnt"},
235         {"txf_rd_cnt"},
236         {"rxf_wr_cnt"}
237 };
238
/* Additional hardware statistic names; counted only into
 * XFRAME_II_STAT_LEN below, i.e. reported for Xframe II adapters. */
239 static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
240         {"rmac_ttl_1519_4095_frms"},
241         {"rmac_ttl_4096_8191_frms"},
242         {"rmac_ttl_8192_max_frms"},
243         {"rmac_ttl_gt_max_frms"},
244         {"rmac_osized_alt_frms"},
245         {"rmac_jabber_alt_frms"},
246         {"rmac_gt_max_alt_frms"},
247         {"rmac_vlan_frms"},
248         {"rmac_len_discard"},
249         {"rmac_fcs_discard"},
250         {"rmac_pf_discard"},
251         {"rmac_da_discard"},
252         {"rmac_red_discard"},
253         {"rmac_rts_discard"},
254         {"rmac_ingm_full_discard"},
255         {"link_fault_cnt"}
256 };
257
258 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
259         {"\n DRIVER STATISTICS"},
260         {"single_bit_ecc_errs"},
261         {"double_bit_ecc_errs"},
262         {"parity_err_cnt"},
263         {"serious_err_cnt"},
264         {"soft_reset_cnt"},
265         {"fifo_full_cnt"},
266         {"ring_full_cnt"},
267         ("alarm_transceiver_temp_high"),
268         ("alarm_transceiver_temp_low"),
269         ("alarm_laser_bias_current_high"),
270         ("alarm_laser_bias_current_low"),
271         ("alarm_laser_output_power_high"),
272         ("alarm_laser_output_power_low"),
273         ("warn_transceiver_temp_high"),
274         ("warn_transceiver_temp_low"),
275         ("warn_laser_bias_current_high"),
276         ("warn_laser_bias_current_low"),
277         ("warn_laser_output_power_high"),
278         ("warn_laser_output_power_low"),
279         ("lro_aggregated_pkts"),
280         ("lro_flush_both_count"),
281         ("lro_out_of_sequence_pkts"),
282         ("lro_flush_due_to_max_pkts"),
283         ("lro_avg_aggr_pkts"),
284         ("mem_alloc_fail_cnt"),
285         ("pci_map_fail_cnt"),
286         ("watchdog_timer_cnt"),
287         ("mem_allocated"),
288         ("mem_freed"),
289         ("link_up_cnt"),
290         ("link_down_cnt"),
291         ("link_up_time"),
292         ("link_down_time"),
293         ("tx_tcode_buf_abort_cnt"),
294         ("tx_tcode_desc_abort_cnt"),
295         ("tx_tcode_parity_err_cnt"),
296         ("tx_tcode_link_loss_cnt"),
297         ("tx_tcode_list_proc_err_cnt"),
298         ("rx_tcode_parity_err_cnt"),
299         ("rx_tcode_abort_cnt"),
300         ("rx_tcode_parity_abort_cnt"),
301         ("rx_tcode_rda_fail_cnt"),
302         ("rx_tcode_unkn_prot_cnt"),
303         ("rx_tcode_fcs_err_cnt"),
304         ("rx_tcode_buf_size_err_cnt"),
305         ("rx_tcode_rxd_corrupt_cnt"),
306         ("rx_tcode_unkn_err_cnt")
307 };
308
/* Entry counts for the ethtool key tables above. */
309 #define S2IO_XENA_STAT_LEN sizeof(ethtool_xena_stats_keys)/ ETH_GSTRING_LEN
310 #define S2IO_ENHANCED_STAT_LEN sizeof(ethtool_enhanced_stats_keys)/ \
311                                         ETH_GSTRING_LEN
312 #define S2IO_DRIVER_STAT_LEN sizeof(ethtool_driver_stats_keys)/ ETH_GSTRING_LEN
313
/* Xframe I reports hw + driver stats; Xframe II adds the enhanced set. */
314 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
315 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
316
/* Same totals expressed in bytes of string data. */
317 #define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
318 #define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
319
/* Self-test table size in entries and in bytes. */
320 #define S2IO_TEST_LEN   sizeof(s2io_gstrings) / ETH_GSTRING_LEN
321 #define S2IO_STRINGS_LEN        S2IO_TEST_LEN * ETH_GSTRING_LEN
322
/*
 * Initialize 'timer' with handler 'handle' and argument 'arg', then arm it
 * to expire 'exp' jiffies from now.  Wrapped in do { } while (0) so the
 * multi-statement body expands safely inside an unbraced if/else; call
 * sites supply the trailing semicolon.  This also removes the previous
 * dangling line-continuation onto a blank line.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)                \
        do {                                                    \
                init_timer(&timer);                             \
                timer.function = handle;                        \
                timer.data = (unsigned long) arg;               \
                mod_timer(&timer, (jiffies + exp));             \
        } while (0)
329 /* Add the vlan */
330 static void s2io_vlan_rx_register(struct net_device *dev,
331                                         struct vlan_group *grp)
332 {
333         struct s2io_nic *nic = dev->priv;
334         unsigned long flags;
335
336         spin_lock_irqsave(&nic->tx_lock, flags);
337         nic->vlgrp = grp;
338         spin_unlock_irqrestore(&nic->tx_lock, flags);
339 }
340
341 /* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
/* NOTE(review): module-wide, not per-device — verify this is intended
 * when more than one adapter is present. */
342 static int vlan_strip_flag;
343
344 /*
345  * Constants to be programmed into the Xena's registers, to configure
346  * the XAUI.
347  */
348
/* Terminator value for the register-write sequences below. */
349 #define END_SIGN        0x0
/* XAUI DTX configuration sequence — presumably for Hercules (Xframe II)
 * hardware, per the name; alternating set-address / write-data pairs,
 * END_SIGN terminated. */
350 static const u64 herc_act_dtx_cfg[] = {
351         /* Set address */
352         0x8000051536750000ULL, 0x80000515367500E0ULL,
353         /* Write data */
354         0x8000051536750004ULL, 0x80000515367500E4ULL,
355         /* Set address */
356         0x80010515003F0000ULL, 0x80010515003F00E0ULL,
357         /* Write data */
358         0x80010515003F0004ULL, 0x80010515003F00E4ULL,
359         /* Set address */
360         0x801205150D440000ULL, 0x801205150D4400E0ULL,
361         /* Write data */
362         0x801205150D440004ULL, 0x801205150D4400E4ULL,
363         /* Set address */
364         0x80020515F2100000ULL, 0x80020515F21000E0ULL,
365         /* Write data */
366         0x80020515F2100004ULL, 0x80020515F21000E4ULL,
367         /* Done */
368         END_SIGN
369 };
370
/* XAUI DTX configuration sequence for Xena (Xframe I) hardware:
 * alternating set-address / write-data pairs, END_SIGN terminated. */
371 static const u64 xena_dtx_cfg[] = {
372         /* Set address */
373         0x8000051500000000ULL, 0x80000515000000E0ULL,
374         /* Write data */
375         0x80000515D9350004ULL, 0x80000515D93500E4ULL,
376         /* Set address */
377         0x8001051500000000ULL, 0x80010515000000E0ULL,
378         /* Write data */
379         0x80010515001E0004ULL, 0x80010515001E00E4ULL,
380         /* Set address */
381         0x8002051500000000ULL, 0x80020515000000E0ULL,
382         /* Write data */
383         0x80020515F2100004ULL, 0x80020515F21000E4ULL,
384         END_SIGN
385 };
386
387 /*
388  * Constants for Fixing the MacAddress problem seen mostly on
389  * Alpha machines.
390  */
/* Register-write sequence (END_SIGN terminated) applied as the MAC-address
 * workaround described in the comment above. */
391 static const u64 fix_mac[] = {
392         0x0060000000000000ULL, 0x0060600000000000ULL,
393         0x0040600000000000ULL, 0x0000600000000000ULL,
394         0x0020600000000000ULL, 0x0060600000000000ULL,
395         0x0020600000000000ULL, 0x0060600000000000ULL,
396         0x0020600000000000ULL, 0x0060600000000000ULL,
397         0x0020600000000000ULL, 0x0060600000000000ULL,
398         0x0020600000000000ULL, 0x0060600000000000ULL,
399         0x0020600000000000ULL, 0x0060600000000000ULL,
400         0x0020600000000000ULL, 0x0060600000000000ULL,
401         0x0020600000000000ULL, 0x0060600000000000ULL,
402         0x0020600000000000ULL, 0x0060600000000000ULL,
403         0x0020600000000000ULL, 0x0060600000000000ULL,
404         0x0020600000000000ULL, 0x0000600000000000ULL,
405         0x0040600000000000ULL, 0x0060600000000000ULL,
406         END_SIGN
407 };
408
409 MODULE_LICENSE("GPL");
410 MODULE_VERSION(DRV_VERSION);
411
412
413 /* Module Loadable parameters. */
414 S2IO_PARM_INT(tx_fifo_num, 1);
415 S2IO_PARM_INT(rx_ring_num, 1);
416
417
418 S2IO_PARM_INT(rx_ring_mode, 1);
419 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
420 S2IO_PARM_INT(rmac_pause_time, 0x100);
421 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
422 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
423 S2IO_PARM_INT(shared_splits, 0);
424 S2IO_PARM_INT(tmac_util_period, 5);
425 S2IO_PARM_INT(rmac_util_period, 5);
426 S2IO_PARM_INT(bimodal, 0);
427 S2IO_PARM_INT(l3l4hdr_size, 128);
428 /* Frequency of Rx desc syncs expressed as power of 2 */
429 S2IO_PARM_INT(rxsync_frequency, 3);
430 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
431 S2IO_PARM_INT(intr_type, 2);
432 /* Large receive offload feature */
433 S2IO_PARM_INT(lro, 0);
434 /* Max pkts to be aggregated by LRO at one time. If not specified,
435  * aggregation happens until we hit max IP pkt size(64K)
436  */
437 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
438 S2IO_PARM_INT(indicate_max_pkts, 0);
439
/* NAPI polling, UDP fragmentation offload, and vlan stripping policy
 * (see the parameter descriptions in the file header). */
440 S2IO_PARM_INT(napi, 1);
441 S2IO_PARM_INT(ufo, 0);
442 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
443
/* Per-FIFO / per-ring array parameters, exposed below via
 * module_param_array(). */
444 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
445     {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
446 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
447     {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
448 static unsigned int rts_frm_len[MAX_RX_RINGS] =
449     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
450
451 module_param_array(tx_fifo_len, uint, NULL, 0);
452 module_param_array(rx_ring_sz, uint, NULL, 0);
453 module_param_array(rts_frm_len, uint, NULL, 0);
454
455 /*
456  * S2IO device table.
457  * This table lists all the devices that this driver supports.
458  */
/* NOTE(review): a const PCI id table is conventionally placed in
 * __devinitconst rather than __devinitdata — confirm against the kernel
 * version this tree targets. */
459 static struct pci_device_id s2io_tbl[] __devinitdata = {
460         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
461          PCI_ANY_ID, PCI_ANY_ID},
462         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
463          PCI_ANY_ID, PCI_ANY_ID},
464         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
465          PCI_ANY_ID, PCI_ANY_ID},
466         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
467          PCI_ANY_ID, PCI_ANY_ID},
468         {0,}    /* terminating entry */
469 };
470
471 MODULE_DEVICE_TABLE(pci, s2io_tbl);
472
/* PCI error-recovery callbacks; the handler functions are defined
 * elsewhere in this file (not visible in this chunk). */
473 static struct pci_error_handlers s2io_err_handler = {
474         .error_detected = s2io_io_error_detected,
475         .slot_reset = s2io_io_slot_reset,
476         .resume = s2io_io_resume,
477 };
478
/* PCI driver registration record tying the id table, probe/remove entry
 * points and the error handlers together. */
479 static struct pci_driver s2io_driver = {
480       .name = "S2IO",
481       .id_table = s2io_tbl,
482       .probe = s2io_init_nic,
483       .remove = __devexit_p(s2io_rem_nic),
484       .err_handler = &s2io_err_handler,
485 };
486
/*
 * A simplifier macro used both by init and free shared_mem Fns():
 * number of pages needed to hold 'len' entries at 'per_each' entries per
 * page (ceiling division).  Both arguments are parenthesized so compound
 * expressions (e.g. "1 << 3") expand correctly.
 */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
489
490 /**
491  * init_shared_mem - Allocation and Initialization of Memory
492  * @nic: Device private variable.
493  * Description: The function allocates all the memory areas shared
494  * between the NIC and the driver. This includes Tx descriptors,
495  * Rx descriptors and the statistics block.
496  */
497
498 static int init_shared_mem(struct s2io_nic *nic)
499 {
500         u32 size;
501         void *tmp_v_addr, *tmp_v_addr_next;
502         dma_addr_t tmp_p_addr, tmp_p_addr_next;
503         struct RxD_block *pre_rxd_blk = NULL;
504         int i, j, blk_cnt;
505         int lst_size, lst_per_page;
506         struct net_device *dev = nic->dev;
507         unsigned long tmp;
508         struct buffAdd *ba;
509
510         struct mac_info *mac_control;
511         struct config_param *config;
512         unsigned long long mem_allocated = 0;
513
514         mac_control = &nic->mac_control;
515         config = &nic->config;
516
517
518         /* Allocation and initialization of TXDLs in FIOFs */
519         size = 0;
520         for (i = 0; i < config->tx_fifo_num; i++) {
521                 size += config->tx_cfg[i].fifo_len;
522         }
523         if (size > MAX_AVAILABLE_TXDS) {
524                 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
525                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
526                 return -EINVAL;
527         }
528
529         lst_size = (sizeof(struct TxD) * config->max_txds);
530         lst_per_page = PAGE_SIZE / lst_size;
531
532         for (i = 0; i < config->tx_fifo_num; i++) {
533                 int fifo_len = config->tx_cfg[i].fifo_len;
534                 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
535                 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
536                                                           GFP_KERNEL);
537                 if (!mac_control->fifos[i].list_info) {
538                         DBG_PRINT(INFO_DBG,
539                                   "Malloc failed for list_info\n");
540                         return -ENOMEM;
541                 }
542                 mem_allocated += list_holder_size;
543                 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
544         }
545         for (i = 0; i < config->tx_fifo_num; i++) {
546                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
547                                                 lst_per_page);
548                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
549                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
550                     config->tx_cfg[i].fifo_len - 1;
551                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
552                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
553                     config->tx_cfg[i].fifo_len - 1;
554                 mac_control->fifos[i].fifo_no = i;
555                 mac_control->fifos[i].nic = nic;
556                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
557
558                 for (j = 0; j < page_num; j++) {
559                         int k = 0;
560                         dma_addr_t tmp_p;
561                         void *tmp_v;
562                         tmp_v = pci_alloc_consistent(nic->pdev,
563                                                      PAGE_SIZE, &tmp_p);
564                         if (!tmp_v) {
565                                 DBG_PRINT(INFO_DBG,
566                                           "pci_alloc_consistent ");
567                                 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
568                                 return -ENOMEM;
569                         }
570                         /* If we got a zero DMA address(can happen on
571                          * certain platforms like PPC), reallocate.
572                          * Store virtual address of page we don't want,
573                          * to be freed later.
574                          */
575                         if (!tmp_p) {
576                                 mac_control->zerodma_virt_addr = tmp_v;
577                                 DBG_PRINT(INIT_DBG,
578                                 "%s: Zero DMA address for TxDL. ", dev->name);
579                                 DBG_PRINT(INIT_DBG,
580                                 "Virtual address %p\n", tmp_v);
581                                 tmp_v = pci_alloc_consistent(nic->pdev,
582                                                      PAGE_SIZE, &tmp_p);
583                                 if (!tmp_v) {
584                                         DBG_PRINT(INFO_DBG,
585                                           "pci_alloc_consistent ");
586                                         DBG_PRINT(INFO_DBG, "failed for TxDL\n");
587                                         return -ENOMEM;
588                                 }
589                                 mem_allocated += PAGE_SIZE;
590                         }
591                         while (k < lst_per_page) {
592                                 int l = (j * lst_per_page) + k;
593                                 if (l == config->tx_cfg[i].fifo_len)
594                                         break;
595                                 mac_control->fifos[i].list_info[l].list_virt_addr =
596                                     tmp_v + (k * lst_size);
597                                 mac_control->fifos[i].list_info[l].list_phy_addr =
598                                     tmp_p + (k * lst_size);
599                                 k++;
600                         }
601                 }
602         }
603
604         nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
605         if (!nic->ufo_in_band_v)
606                 return -ENOMEM;
607          mem_allocated += (size * sizeof(u64));
608
609         /* Allocation and initialization of RXDs in Rings */
610         size = 0;
611         for (i = 0; i < config->rx_ring_num; i++) {
612                 if (config->rx_cfg[i].num_rxd %
613                     (rxd_count[nic->rxd_mode] + 1)) {
614                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
615                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
616                                   i);
617                         DBG_PRINT(ERR_DBG, "RxDs per Block");
618                         return FAILURE;
619                 }
620                 size += config->rx_cfg[i].num_rxd;
621                 mac_control->rings[i].block_count =
622                         config->rx_cfg[i].num_rxd /
623                         (rxd_count[nic->rxd_mode] + 1 );
624                 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
625                         mac_control->rings[i].block_count;
626         }
627         if (nic->rxd_mode == RXD_MODE_1)
628                 size = (size * (sizeof(struct RxD1)));
629         else
630                 size = (size * (sizeof(struct RxD3)));
631
632         for (i = 0; i < config->rx_ring_num; i++) {
633                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
634                 mac_control->rings[i].rx_curr_get_info.offset = 0;
635                 mac_control->rings[i].rx_curr_get_info.ring_len =
636                     config->rx_cfg[i].num_rxd - 1;
637                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
638                 mac_control->rings[i].rx_curr_put_info.offset = 0;
639                 mac_control->rings[i].rx_curr_put_info.ring_len =
640                     config->rx_cfg[i].num_rxd - 1;
641                 mac_control->rings[i].nic = nic;
642                 mac_control->rings[i].ring_no = i;
643
644                 blk_cnt = config->rx_cfg[i].num_rxd /
645                                 (rxd_count[nic->rxd_mode] + 1);
646                 /*  Allocating all the Rx blocks */
647                 for (j = 0; j < blk_cnt; j++) {
648                         struct rx_block_info *rx_blocks;
649                         int l;
650
651                         rx_blocks = &mac_control->rings[i].rx_blocks[j];
652                         size = SIZE_OF_BLOCK; //size is always page size
653                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
654                                                           &tmp_p_addr);
655                         if (tmp_v_addr == NULL) {
656                                 /*
657                                  * In case of failure, free_shared_mem()
658                                  * is called, which should free any
659                                  * memory that was alloced till the
660                                  * failure happened.
661                                  */
662                                 rx_blocks->block_virt_addr = tmp_v_addr;
663                                 return -ENOMEM;
664                         }
665                         mem_allocated += size;
666                         memset(tmp_v_addr, 0, size);
667                         rx_blocks->block_virt_addr = tmp_v_addr;
668                         rx_blocks->block_dma_addr = tmp_p_addr;
669                         rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
670                                                   rxd_count[nic->rxd_mode],
671                                                   GFP_KERNEL);
672                         if (!rx_blocks->rxds)
673                                 return -ENOMEM;
674                         mem_allocated += 
675                         (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
676                         for (l=0; l<rxd_count[nic->rxd_mode];l++) {
677                                 rx_blocks->rxds[l].virt_addr =
678                                         rx_blocks->block_virt_addr +
679                                         (rxd_size[nic->rxd_mode] * l);
680                                 rx_blocks->rxds[l].dma_addr =
681                                         rx_blocks->block_dma_addr +
682                                         (rxd_size[nic->rxd_mode] * l);
683                         }
684                 }
685                 /* Interlinking all Rx Blocks */
686                 for (j = 0; j < blk_cnt; j++) {
687                         tmp_v_addr =
688                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
689                         tmp_v_addr_next =
690                                 mac_control->rings[i].rx_blocks[(j + 1) %
691                                               blk_cnt].block_virt_addr;
692                         tmp_p_addr =
693                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
694                         tmp_p_addr_next =
695                                 mac_control->rings[i].rx_blocks[(j + 1) %
696                                               blk_cnt].block_dma_addr;
697
698                         pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
699                         pre_rxd_blk->reserved_2_pNext_RxD_block =
700                             (unsigned long) tmp_v_addr_next;
701                         pre_rxd_blk->pNext_RxD_Blk_physical =
702                             (u64) tmp_p_addr_next;
703                 }
704         }
705         if (nic->rxd_mode == RXD_MODE_3B) {
706                 /*
707                  * Allocation of Storages for buffer addresses in 2BUFF mode
708                  * and the buffers as well.
709                  */
710                 for (i = 0; i < config->rx_ring_num; i++) {
711                         blk_cnt = config->rx_cfg[i].num_rxd /
712                            (rxd_count[nic->rxd_mode]+ 1);
713                         mac_control->rings[i].ba =
714                                 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
715                                      GFP_KERNEL);
716                         if (!mac_control->rings[i].ba)
717                                 return -ENOMEM;
718                         mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
719                         for (j = 0; j < blk_cnt; j++) {
720                                 int k = 0;
721                                 mac_control->rings[i].ba[j] =
722                                         kmalloc((sizeof(struct buffAdd) *
723                                                 (rxd_count[nic->rxd_mode] + 1)),
724                                                 GFP_KERNEL);
725                                 if (!mac_control->rings[i].ba[j])
726                                         return -ENOMEM;
727                                 mem_allocated += (sizeof(struct buffAdd) *  \
728                                         (rxd_count[nic->rxd_mode] + 1));
729                                 while (k != rxd_count[nic->rxd_mode]) {
730                                         ba = &mac_control->rings[i].ba[j][k];
731
732                                         ba->ba_0_org = (void *) kmalloc
733                                             (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
734                                         if (!ba->ba_0_org)
735                                                 return -ENOMEM;
736                                         mem_allocated += 
737                                                 (BUF0_LEN + ALIGN_SIZE);
738                                         tmp = (unsigned long)ba->ba_0_org;
739                                         tmp += ALIGN_SIZE;
740                                         tmp &= ~((unsigned long) ALIGN_SIZE);
741                                         ba->ba_0 = (void *) tmp;
742
743                                         ba->ba_1_org = (void *) kmalloc
744                                             (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
745                                         if (!ba->ba_1_org)
746                                                 return -ENOMEM;
747                                         mem_allocated 
748                                                 += (BUF1_LEN + ALIGN_SIZE);
749                                         tmp = (unsigned long) ba->ba_1_org;
750                                         tmp += ALIGN_SIZE;
751                                         tmp &= ~((unsigned long) ALIGN_SIZE);
752                                         ba->ba_1 = (void *) tmp;
753                                         k++;
754                                 }
755                         }
756                 }
757         }
758
759         /* Allocation and initialization of Statistics block */
760         size = sizeof(struct stat_block);
761         mac_control->stats_mem = pci_alloc_consistent
762             (nic->pdev, size, &mac_control->stats_mem_phy);
763
764         if (!mac_control->stats_mem) {
765                 /*
766                  * In case of failure, free_shared_mem() is called, which
767                  * should free any memory that was alloced till the
768                  * failure happened.
769                  */
770                 return -ENOMEM;
771         }
772         mem_allocated += size;
773         mac_control->stats_mem_sz = size;
774
775         tmp_v_addr = mac_control->stats_mem;
776         mac_control->stats_info = (struct stat_block *) tmp_v_addr;
777         memset(tmp_v_addr, 0, size);
778         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
779                   (unsigned long long) tmp_p_addr);
780         mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
781         return SUCCESS;
782 }
783
784 /**
785  * free_shared_mem - Free the allocated Memory
786  * @nic:  Device private variable.
787  * Description: This function is to free all memory locations allocated by
788  * the init_shared_mem() function and return it to the kernel.
789  */
790
791 static void free_shared_mem(struct s2io_nic *nic)
792 {
793         int i, j, blk_cnt, size;
794         u32 ufo_size = 0;
795         void *tmp_v_addr;
796         dma_addr_t tmp_p_addr;
797         struct mac_info *mac_control;
798         struct config_param *config;
799         int lst_size, lst_per_page;
800         struct net_device *dev;
801         int page_num = 0;
802
803         if (!nic)
804                 return;
805
806         dev = nic->dev;
807
808         mac_control = &nic->mac_control;
809         config = &nic->config;
810
811         lst_size = (sizeof(struct TxD) * config->max_txds);
812         lst_per_page = PAGE_SIZE / lst_size;
813
814         for (i = 0; i < config->tx_fifo_num; i++) {
815                 ufo_size += config->tx_cfg[i].fifo_len;
816                 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
817                                                         lst_per_page);
818                 for (j = 0; j < page_num; j++) {
819                         int mem_blks = (j * lst_per_page);
820                         if (!mac_control->fifos[i].list_info)
821                                 return;
822                         if (!mac_control->fifos[i].list_info[mem_blks].
823                                  list_virt_addr)
824                                 break;
825                         pci_free_consistent(nic->pdev, PAGE_SIZE,
826                                             mac_control->fifos[i].
827                                             list_info[mem_blks].
828                                             list_virt_addr,
829                                             mac_control->fifos[i].
830                                             list_info[mem_blks].
831                                             list_phy_addr);
832                         nic->mac_control.stats_info->sw_stat.mem_freed 
833                                                 += PAGE_SIZE;
834                 }
835                 /* If we got a zero DMA address during allocation,
836                  * free the page now
837                  */
838                 if (mac_control->zerodma_virt_addr) {
839                         pci_free_consistent(nic->pdev, PAGE_SIZE,
840                                             mac_control->zerodma_virt_addr,
841                                             (dma_addr_t)0);
842                         DBG_PRINT(INIT_DBG,
843                                 "%s: Freeing TxDL with zero DMA addr. ",
844                                 dev->name);
845                         DBG_PRINT(INIT_DBG, "Virtual address %p\n",
846                                 mac_control->zerodma_virt_addr);
847                         nic->mac_control.stats_info->sw_stat.mem_freed 
848                                                 += PAGE_SIZE;
849                 }
850                 kfree(mac_control->fifos[i].list_info);
851                 nic->mac_control.stats_info->sw_stat.mem_freed += 
852                 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
853         }
854
855         size = SIZE_OF_BLOCK;
856         for (i = 0; i < config->rx_ring_num; i++) {
857                 blk_cnt = mac_control->rings[i].block_count;
858                 for (j = 0; j < blk_cnt; j++) {
859                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
860                                 block_virt_addr;
861                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
862                                 block_dma_addr;
863                         if (tmp_v_addr == NULL)
864                                 break;
865                         pci_free_consistent(nic->pdev, size,
866                                             tmp_v_addr, tmp_p_addr);
867                         nic->mac_control.stats_info->sw_stat.mem_freed += size;
868                         kfree(mac_control->rings[i].rx_blocks[j].rxds);
869                         nic->mac_control.stats_info->sw_stat.mem_freed += 
870                         ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
871                 }
872         }
873
874         if (nic->rxd_mode == RXD_MODE_3B) {
875                 /* Freeing buffer storage addresses in 2BUFF mode. */
876                 for (i = 0; i < config->rx_ring_num; i++) {
877                         blk_cnt = config->rx_cfg[i].num_rxd /
878                             (rxd_count[nic->rxd_mode] + 1);
879                         for (j = 0; j < blk_cnt; j++) {
880                                 int k = 0;
881                                 if (!mac_control->rings[i].ba[j])
882                                         continue;
883                                 while (k != rxd_count[nic->rxd_mode]) {
884                                         struct buffAdd *ba =
885                                                 &mac_control->rings[i].ba[j][k];
886                                         kfree(ba->ba_0_org);
887                                         nic->mac_control.stats_info->sw_stat.\
888                                         mem_freed += (BUF0_LEN + ALIGN_SIZE);
889                                         kfree(ba->ba_1_org);
890                                         nic->mac_control.stats_info->sw_stat.\
891                                         mem_freed += (BUF1_LEN + ALIGN_SIZE);
892                                         k++;
893                                 }
894                                 kfree(mac_control->rings[i].ba[j]);
895                                 nic->mac_control.stats_info->sw_stat.mem_freed                          += (sizeof(struct buffAdd) * 
896                                 (rxd_count[nic->rxd_mode] + 1));
897                         }
898                         kfree(mac_control->rings[i].ba);
899                         nic->mac_control.stats_info->sw_stat.mem_freed += 
900                         (sizeof(struct buffAdd *) * blk_cnt);
901                 }
902         }
903
904         if (mac_control->stats_mem) {
905                 pci_free_consistent(nic->pdev,
906                                     mac_control->stats_mem_sz,
907                                     mac_control->stats_mem,
908                                     mac_control->stats_mem_phy);
909                 nic->mac_control.stats_info->sw_stat.mem_freed += 
910                         mac_control->stats_mem_sz;
911         }
912         if (nic->ufo_in_band_v) {
913                 kfree(nic->ufo_in_band_v);
914                 nic->mac_control.stats_info->sw_stat.mem_freed 
915                         += (ufo_size * sizeof(u64));
916         }
917 }
918
919 /**
920  * s2io_verify_pci_mode -
921  */
922
923 static int s2io_verify_pci_mode(struct s2io_nic *nic)
924 {
925         struct XENA_dev_config __iomem *bar0 = nic->bar0;
926         register u64 val64 = 0;
927         int     mode;
928
929         val64 = readq(&bar0->pci_mode);
930         mode = (u8)GET_PCI_MODE(val64);
931
932         if ( val64 & PCI_MODE_UNKNOWN_MODE)
933                 return -1;      /* Unknown PCI mode */
934         return mode;
935 }
936
937 #define NEC_VENID   0x1033
938 #define NEC_DEVID   0x0125
939 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
940 {
941         struct pci_dev *tdev = NULL;
942         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
943                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
944                         if (tdev->bus == s2io_pdev->bus->parent)
945                                 pci_dev_put(tdev);
946                                 return 1;
947                 }
948         }
949         return 0;
950 }
951
952 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
953 /**
954  * s2io_print_pci_mode -
955  */
956 static int s2io_print_pci_mode(struct s2io_nic *nic)
957 {
958         struct XENA_dev_config __iomem *bar0 = nic->bar0;
959         register u64 val64 = 0;
960         int     mode;
961         struct config_param *config = &nic->config;
962
963         val64 = readq(&bar0->pci_mode);
964         mode = (u8)GET_PCI_MODE(val64);
965
966         if ( val64 & PCI_MODE_UNKNOWN_MODE)
967                 return -1;      /* Unknown PCI mode */
968
969         config->bus_speed = bus_speed[mode];
970
971         if (s2io_on_nec_bridge(nic->pdev)) {
972                 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
973                                                         nic->dev->name);
974                 return mode;
975         }
976
977         if (val64 & PCI_MODE_32_BITS) {
978                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
979         } else {
980                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
981         }
982
983         switch(mode) {
984                 case PCI_MODE_PCI_33:
985                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
986                         break;
987                 case PCI_MODE_PCI_66:
988                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
989                         break;
990                 case PCI_MODE_PCIX_M1_66:
991                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
992                         break;
993                 case PCI_MODE_PCIX_M1_100:
994                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
995                         break;
996                 case PCI_MODE_PCIX_M1_133:
997                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
998                         break;
999                 case PCI_MODE_PCIX_M2_66:
1000                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1001                         break;
1002                 case PCI_MODE_PCIX_M2_100:
1003                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1004                         break;
1005                 case PCI_MODE_PCIX_M2_133:
1006                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1007                         break;
1008                 default:
1009                         return -1;      /* Unsupported bus speed */
1010         }
1011
1012         return mode;
1013 }
1014
1015 /**
1016  *  init_nic - Initialization of hardware
1017  *  @nic: device peivate variable
1018  *  Description: The function sequentially configures every block
1019  *  of the H/W from their reset values.
1020  *  Return Value:  SUCCESS on success and
1021  *  '-1' on failure (endian settings incorrect).
1022  */
1023
1024 static int init_nic(struct s2io_nic *nic)
1025 {
1026         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1027         struct net_device *dev = nic->dev;
1028         register u64 val64 = 0;
1029         void __iomem *add;
1030         u32 time;
1031         int i, j;
1032         struct mac_info *mac_control;
1033         struct config_param *config;
1034         int dtx_cnt = 0;
1035         unsigned long long mem_share;
1036         int mem_size;
1037
1038         mac_control = &nic->mac_control;
1039         config = &nic->config;
1040
1041         /* to set the swapper controle on the card */
1042         if(s2io_set_swapper(nic)) {
1043                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1044                 return -1;
1045         }
1046
1047         /*
1048          * Herc requires EOI to be removed from reset before XGXS, so..
1049          */
1050         if (nic->device_type & XFRAME_II_DEVICE) {
1051                 val64 = 0xA500000000ULL;
1052                 writeq(val64, &bar0->sw_reset);
1053                 msleep(500);
1054                 val64 = readq(&bar0->sw_reset);
1055         }
1056
1057         /* Remove XGXS from reset state */
1058         val64 = 0;
1059         writeq(val64, &bar0->sw_reset);
1060         msleep(500);
1061         val64 = readq(&bar0->sw_reset);
1062
1063         /*  Enable Receiving broadcasts */
1064         add = &bar0->mac_cfg;
1065         val64 = readq(&bar0->mac_cfg);
1066         val64 |= MAC_RMAC_BCAST_ENABLE;
1067         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1068         writel((u32) val64, add);
1069         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1070         writel((u32) (val64 >> 32), (add + 4));
1071
1072         /* Read registers in all blocks */
1073         val64 = readq(&bar0->mac_int_mask);
1074         val64 = readq(&bar0->mc_int_mask);
1075         val64 = readq(&bar0->xgxs_int_mask);
1076
1077         /*  Set MTU */
1078         val64 = dev->mtu;
1079         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1080
1081         if (nic->device_type & XFRAME_II_DEVICE) {
1082                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1083                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1084                                           &bar0->dtx_control, UF);
1085                         if (dtx_cnt & 0x1)
1086                                 msleep(1); /* Necessary!! */
1087                         dtx_cnt++;
1088                 }
1089         } else {
1090                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1091                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1092                                           &bar0->dtx_control, UF);
1093                         val64 = readq(&bar0->dtx_control);
1094                         dtx_cnt++;
1095                 }
1096         }
1097
1098         /*  Tx DMA Initialization */
1099         val64 = 0;
1100         writeq(val64, &bar0->tx_fifo_partition_0);
1101         writeq(val64, &bar0->tx_fifo_partition_1);
1102         writeq(val64, &bar0->tx_fifo_partition_2);
1103         writeq(val64, &bar0->tx_fifo_partition_3);
1104
1105
1106         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1107                 val64 |=
1108                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1109                          13) | vBIT(config->tx_cfg[i].fifo_priority,
1110                                     ((i * 32) + 5), 3);
1111
1112                 if (i == (config->tx_fifo_num - 1)) {
1113                         if (i % 2 == 0)
1114                                 i++;
1115                 }
1116
1117                 switch (i) {
1118                 case 1:
1119                         writeq(val64, &bar0->tx_fifo_partition_0);
1120                         val64 = 0;
1121                         break;
1122                 case 3:
1123                         writeq(val64, &bar0->tx_fifo_partition_1);
1124                         val64 = 0;
1125                         break;
1126                 case 5:
1127                         writeq(val64, &bar0->tx_fifo_partition_2);
1128                         val64 = 0;
1129                         break;
1130                 case 7:
1131                         writeq(val64, &bar0->tx_fifo_partition_3);
1132                         break;
1133                 }
1134         }
1135
1136         /*
1137          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1138          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1139          */
1140         if ((nic->device_type == XFRAME_I_DEVICE) &&
1141                 (nic->pdev->revision < 4))
1142                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1143
1144         val64 = readq(&bar0->tx_fifo_partition_0);
1145         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1146                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1147
1148         /*
1149          * Initialization of Tx_PA_CONFIG register to ignore packet
1150          * integrity checking.
1151          */
1152         val64 = readq(&bar0->tx_pa_cfg);
1153         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1154             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1155         writeq(val64, &bar0->tx_pa_cfg);
1156
1157         /* Rx DMA intialization. */
1158         val64 = 0;
1159         for (i = 0; i < config->rx_ring_num; i++) {
1160                 val64 |=
1161                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1162                          3);
1163         }
1164         writeq(val64, &bar0->rx_queue_priority);
1165
1166         /*
1167          * Allocating equal share of memory to all the
1168          * configured Rings.
1169          */
1170         val64 = 0;
1171         if (nic->device_type & XFRAME_II_DEVICE)
1172                 mem_size = 32;
1173         else
1174                 mem_size = 64;
1175
1176         for (i = 0; i < config->rx_ring_num; i++) {
1177                 switch (i) {
1178                 case 0:
1179                         mem_share = (mem_size / config->rx_ring_num +
1180                                      mem_size % config->rx_ring_num);
1181                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1182                         continue;
1183                 case 1:
1184                         mem_share = (mem_size / config->rx_ring_num);
1185                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1186                         continue;
1187                 case 2:
1188                         mem_share = (mem_size / config->rx_ring_num);
1189                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1190                         continue;
1191                 case 3:
1192                         mem_share = (mem_size / config->rx_ring_num);
1193                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1194                         continue;
1195                 case 4:
1196                         mem_share = (mem_size / config->rx_ring_num);
1197                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1198                         continue;
1199                 case 5:
1200                         mem_share = (mem_size / config->rx_ring_num);
1201                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1202                         continue;
1203                 case 6:
1204                         mem_share = (mem_size / config->rx_ring_num);
1205                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1206                         continue;
1207                 case 7:
1208                         mem_share = (mem_size / config->rx_ring_num);
1209                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1210                         continue;
1211                 }
1212         }
1213         writeq(val64, &bar0->rx_queue_cfg);
1214
1215         /*
1216          * Filling Tx round robin registers
1217          * as per the number of FIFOs
1218          */
1219         switch (config->tx_fifo_num) {
1220         case 1:
1221                 val64 = 0x0000000000000000ULL;
1222                 writeq(val64, &bar0->tx_w_round_robin_0);
1223                 writeq(val64, &bar0->tx_w_round_robin_1);
1224                 writeq(val64, &bar0->tx_w_round_robin_2);
1225                 writeq(val64, &bar0->tx_w_round_robin_3);
1226                 writeq(val64, &bar0->tx_w_round_robin_4);
1227                 break;
1228         case 2:
1229                 val64 = 0x0000010000010000ULL;
1230                 writeq(val64, &bar0->tx_w_round_robin_0);
1231                 val64 = 0x0100000100000100ULL;
1232                 writeq(val64, &bar0->tx_w_round_robin_1);
1233                 val64 = 0x0001000001000001ULL;
1234                 writeq(val64, &bar0->tx_w_round_robin_2);
1235                 val64 = 0x0000010000010000ULL;
1236                 writeq(val64, &bar0->tx_w_round_robin_3);
1237                 val64 = 0x0100000000000000ULL;
1238                 writeq(val64, &bar0->tx_w_round_robin_4);
1239                 break;
1240         case 3:
1241                 val64 = 0x0001000102000001ULL;
1242                 writeq(val64, &bar0->tx_w_round_robin_0);
1243                 val64 = 0x0001020000010001ULL;
1244                 writeq(val64, &bar0->tx_w_round_robin_1);
1245                 val64 = 0x0200000100010200ULL;
1246                 writeq(val64, &bar0->tx_w_round_robin_2);
1247                 val64 = 0x0001000102000001ULL;
1248                 writeq(val64, &bar0->tx_w_round_robin_3);
1249                 val64 = 0x0001020000000000ULL;
1250                 writeq(val64, &bar0->tx_w_round_robin_4);
1251                 break;
1252         case 4:
1253                 val64 = 0x0001020300010200ULL;
1254                 writeq(val64, &bar0->tx_w_round_robin_0);
1255                 val64 = 0x0100000102030001ULL;
1256                 writeq(val64, &bar0->tx_w_round_robin_1);
1257                 val64 = 0x0200010000010203ULL;
1258                 writeq(val64, &bar0->tx_w_round_robin_2);
1259                 val64 = 0x0001020001000001ULL;
1260                 writeq(val64, &bar0->tx_w_round_robin_3);
1261                 val64 = 0x0203000100000000ULL;
1262                 writeq(val64, &bar0->tx_w_round_robin_4);
1263                 break;
1264         case 5:
1265                 val64 = 0x0001000203000102ULL;
1266                 writeq(val64, &bar0->tx_w_round_robin_0);
1267                 val64 = 0x0001020001030004ULL;
1268                 writeq(val64, &bar0->tx_w_round_robin_1);
1269                 val64 = 0x0001000203000102ULL;
1270                 writeq(val64, &bar0->tx_w_round_robin_2);
1271                 val64 = 0x0001020001030004ULL;
1272                 writeq(val64, &bar0->tx_w_round_robin_3);
1273                 val64 = 0x0001000000000000ULL;
1274                 writeq(val64, &bar0->tx_w_round_robin_4);
1275                 break;
1276         case 6:
1277                 val64 = 0x0001020304000102ULL;
1278                 writeq(val64, &bar0->tx_w_round_robin_0);
1279                 val64 = 0x0304050001020001ULL;
1280                 writeq(val64, &bar0->tx_w_round_robin_1);
1281                 val64 = 0x0203000100000102ULL;
1282                 writeq(val64, &bar0->tx_w_round_robin_2);
1283                 val64 = 0x0304000102030405ULL;
1284                 writeq(val64, &bar0->tx_w_round_robin_3);
1285                 val64 = 0x0001000200000000ULL;
1286                 writeq(val64, &bar0->tx_w_round_robin_4);
1287                 break;
1288         case 7:
1289                 val64 = 0x0001020001020300ULL;
1290                 writeq(val64, &bar0->tx_w_round_robin_0);
1291                 val64 = 0x0102030400010203ULL;
1292                 writeq(val64, &bar0->tx_w_round_robin_1);
1293                 val64 = 0x0405060001020001ULL;
1294                 writeq(val64, &bar0->tx_w_round_robin_2);
1295                 val64 = 0x0304050000010200ULL;
1296                 writeq(val64, &bar0->tx_w_round_robin_3);
1297                 val64 = 0x0102030000000000ULL;
1298                 writeq(val64, &bar0->tx_w_round_robin_4);
1299                 break;
1300         case 8:
1301                 val64 = 0x0001020300040105ULL;
1302                 writeq(val64, &bar0->tx_w_round_robin_0);
1303                 val64 = 0x0200030106000204ULL;
1304                 writeq(val64, &bar0->tx_w_round_robin_1);
1305                 val64 = 0x0103000502010007ULL;
1306                 writeq(val64, &bar0->tx_w_round_robin_2);
1307                 val64 = 0x0304010002060500ULL;
1308                 writeq(val64, &bar0->tx_w_round_robin_3);
1309                 val64 = 0x0103020400000000ULL;
1310                 writeq(val64, &bar0->tx_w_round_robin_4);
1311                 break;
1312         }
1313
1314         /* Enable all configured Tx FIFO partitions */
1315         val64 = readq(&bar0->tx_fifo_partition_0);
1316         val64 |= (TX_FIFO_PARTITION_EN);
1317         writeq(val64, &bar0->tx_fifo_partition_0);
1318
1319         /* Filling the Rx round robin registers as per the
1320          * number of Rings and steering based on QoS.
1321          */
1322         switch (config->rx_ring_num) {
1323         case 1:
1324                 val64 = 0x8080808080808080ULL;
1325                 writeq(val64, &bar0->rts_qos_steering);
1326                 break;
1327         case 2:
1328                 val64 = 0x0000010000010000ULL;
1329                 writeq(val64, &bar0->rx_w_round_robin_0);
1330                 val64 = 0x0100000100000100ULL;
1331                 writeq(val64, &bar0->rx_w_round_robin_1);
1332                 val64 = 0x0001000001000001ULL;
1333                 writeq(val64, &bar0->rx_w_round_robin_2);
1334                 val64 = 0x0000010000010000ULL;
1335                 writeq(val64, &bar0->rx_w_round_robin_3);
1336                 val64 = 0x0100000000000000ULL;
1337                 writeq(val64, &bar0->rx_w_round_robin_4);
1338
1339                 val64 = 0x8080808040404040ULL;
1340                 writeq(val64, &bar0->rts_qos_steering);
1341                 break;
1342         case 3:
1343                 val64 = 0x0001000102000001ULL;
1344                 writeq(val64, &bar0->rx_w_round_robin_0);
1345                 val64 = 0x0001020000010001ULL;
1346                 writeq(val64, &bar0->rx_w_round_robin_1);
1347                 val64 = 0x0200000100010200ULL;
1348                 writeq(val64, &bar0->rx_w_round_robin_2);
1349                 val64 = 0x0001000102000001ULL;
1350                 writeq(val64, &bar0->rx_w_round_robin_3);
1351                 val64 = 0x0001020000000000ULL;
1352                 writeq(val64, &bar0->rx_w_round_robin_4);
1353
1354                 val64 = 0x8080804040402020ULL;
1355                 writeq(val64, &bar0->rts_qos_steering);
1356                 break;
1357         case 4:
1358                 val64 = 0x0001020300010200ULL;
1359                 writeq(val64, &bar0->rx_w_round_robin_0);
1360                 val64 = 0x0100000102030001ULL;
1361                 writeq(val64, &bar0->rx_w_round_robin_1);
1362                 val64 = 0x0200010000010203ULL;
1363                 writeq(val64, &bar0->rx_w_round_robin_2);
1364                 val64 = 0x0001020001000001ULL;
1365                 writeq(val64, &bar0->rx_w_round_robin_3);
1366                 val64 = 0x0203000100000000ULL;
1367                 writeq(val64, &bar0->rx_w_round_robin_4);
1368
1369                 val64 = 0x8080404020201010ULL;
1370                 writeq(val64, &bar0->rts_qos_steering);
1371                 break;
1372         case 5:
1373                 val64 = 0x0001000203000102ULL;
1374                 writeq(val64, &bar0->rx_w_round_robin_0);
1375                 val64 = 0x0001020001030004ULL;
1376                 writeq(val64, &bar0->rx_w_round_robin_1);
1377                 val64 = 0x0001000203000102ULL;
1378                 writeq(val64, &bar0->rx_w_round_robin_2);
1379                 val64 = 0x0001020001030004ULL;
1380                 writeq(val64, &bar0->rx_w_round_robin_3);
1381                 val64 = 0x0001000000000000ULL;
1382                 writeq(val64, &bar0->rx_w_round_robin_4);
1383
1384                 val64 = 0x8080404020201008ULL;
1385                 writeq(val64, &bar0->rts_qos_steering);
1386                 break;
1387         case 6:
1388                 val64 = 0x0001020304000102ULL;
1389                 writeq(val64, &bar0->rx_w_round_robin_0);
1390                 val64 = 0x0304050001020001ULL;
1391                 writeq(val64, &bar0->rx_w_round_robin_1);
1392                 val64 = 0x0203000100000102ULL;
1393                 writeq(val64, &bar0->rx_w_round_robin_2);
1394                 val64 = 0x0304000102030405ULL;
1395                 writeq(val64, &bar0->rx_w_round_robin_3);
1396                 val64 = 0x0001000200000000ULL;
1397                 writeq(val64, &bar0->rx_w_round_robin_4);
1398
1399                 val64 = 0x8080404020100804ULL;
1400                 writeq(val64, &bar0->rts_qos_steering);
1401                 break;
1402         case 7:
1403                 val64 = 0x0001020001020300ULL;
1404                 writeq(val64, &bar0->rx_w_round_robin_0);
1405                 val64 = 0x0102030400010203ULL;
1406                 writeq(val64, &bar0->rx_w_round_robin_1);
1407                 val64 = 0x0405060001020001ULL;
1408                 writeq(val64, &bar0->rx_w_round_robin_2);
1409                 val64 = 0x0304050000010200ULL;
1410                 writeq(val64, &bar0->rx_w_round_robin_3);
1411                 val64 = 0x0102030000000000ULL;
1412                 writeq(val64, &bar0->rx_w_round_robin_4);
1413
1414                 val64 = 0x8080402010080402ULL;
1415                 writeq(val64, &bar0->rts_qos_steering);
1416                 break;
1417         case 8:
1418                 val64 = 0x0001020300040105ULL;
1419                 writeq(val64, &bar0->rx_w_round_robin_0);
1420                 val64 = 0x0200030106000204ULL;
1421                 writeq(val64, &bar0->rx_w_round_robin_1);
1422                 val64 = 0x0103000502010007ULL;
1423                 writeq(val64, &bar0->rx_w_round_robin_2);
1424                 val64 = 0x0304010002060500ULL;
1425                 writeq(val64, &bar0->rx_w_round_robin_3);
1426                 val64 = 0x0103020400000000ULL;
1427                 writeq(val64, &bar0->rx_w_round_robin_4);
1428
1429                 val64 = 0x8040201008040201ULL;
1430                 writeq(val64, &bar0->rts_qos_steering);
1431                 break;
1432         }
1433
1434         /* UDP Fix */
1435         val64 = 0;
1436         for (i = 0; i < 8; i++)
1437                 writeq(val64, &bar0->rts_frm_len_n[i]);
1438
1439         /* Set the default rts frame length for the rings configured */
1440         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1441         for (i = 0 ; i < config->rx_ring_num ; i++)
1442                 writeq(val64, &bar0->rts_frm_len_n[i]);
1443
1444         /* Set the frame length for the configured rings
1445          * desired by the user
1446          */
1447         for (i = 0; i < config->rx_ring_num; i++) {
1448                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1449                  * specified frame length steering.
1450                  * If the user provides the frame length then program
1451                  * the rts_frm_len register for those values or else
1452                  * leave it as it is.
1453                  */
1454                 if (rts_frm_len[i] != 0) {
1455                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1456                                 &bar0->rts_frm_len_n[i]);
1457                 }
1458         }
1459         
1460         /* Disable differentiated services steering logic */
1461         for (i = 0; i < 64; i++) {
1462                 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1463                         DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1464                                 dev->name);
1465                         DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1466                         return FAILURE;
1467                 }
1468         }
1469
1470         /* Program statistics memory */
1471         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1472
1473         if (nic->device_type == XFRAME_II_DEVICE) {
1474                 val64 = STAT_BC(0x320);
1475                 writeq(val64, &bar0->stat_byte_cnt);
1476         }
1477
1478         /*
1479          * Initializing the sampling rate for the device to calculate the
1480          * bandwidth utilization.
1481          */
1482         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1483             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1484         writeq(val64, &bar0->mac_link_util);
1485
1486
1487         /*
1488          * Initializing the Transmit and Receive Traffic Interrupt
1489          * Scheme.
1490          */
1491         /*
1492          * TTI Initialization. Default Tx timer gets us about
1493          * 250 interrupts per sec. Continuous interrupts are enabled
1494          * by default.
1495          */
1496         if (nic->device_type == XFRAME_II_DEVICE) {
1497                 int count = (nic->config.bus_speed * 125)/2;
1498                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1499         } else {
1500
1501                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1502         }
1503         val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1504             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1505             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1506                 if (use_continuous_tx_intrs)
1507                         val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1508         writeq(val64, &bar0->tti_data1_mem);
1509
1510         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1511             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1512             TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1513         writeq(val64, &bar0->tti_data2_mem);
1514
1515         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1516         writeq(val64, &bar0->tti_command_mem);
1517
1518         /*
1519          * Once the operation completes, the Strobe bit of the command
1520          * register will be reset. We poll for this particular condition
1521          * We wait for a maximum of 500ms for the operation to complete,
1522          * if it's not complete by then we return error.
1523          */
1524         time = 0;
1525         while (TRUE) {
1526                 val64 = readq(&bar0->tti_command_mem);
1527                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1528                         break;
1529                 }
1530                 if (time > 10) {
1531                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1532                                   dev->name);
1533                         return -1;
1534                 }
1535                 msleep(50);
1536                 time++;
1537         }
1538
1539         if (nic->config.bimodal) {
1540                 int k = 0;
1541                 for (k = 0; k < config->rx_ring_num; k++) {
1542                         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1543                         val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1544                         writeq(val64, &bar0->tti_command_mem);
1545
1546                 /*
1547                  * Once the operation completes, the Strobe bit of the command
1548                  * register will be reset. We poll for this particular condition
1549                  * We wait for a maximum of 500ms for the operation to complete,
1550                  * if it's not complete by then we return error.
1551                 */
1552                         time = 0;
1553                         while (TRUE) {
1554                                 val64 = readq(&bar0->tti_command_mem);
1555                                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1556                                         break;
1557                                 }
1558                                 if (time > 10) {
1559                                         DBG_PRINT(ERR_DBG,
1560                                                 "%s: TTI init Failed\n",
1561                                         dev->name);
1562                                         return -1;
1563                                 }
1564                                 time++;
1565                                 msleep(50);
1566                         }
1567                 }
1568         } else {
1569
1570                 /* RTI Initialization */
1571                 if (nic->device_type == XFRAME_II_DEVICE) {
1572                         /*
1573                          * Programmed to generate Apprx 500 Intrs per
1574                          * second
1575                          */
1576                         int count = (nic->config.bus_speed * 125)/4;
1577                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1578                 } else {
1579                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1580                 }
1581                 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1582                     RTI_DATA1_MEM_RX_URNG_B(0x10) |
1583                     RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1584
1585                 writeq(val64, &bar0->rti_data1_mem);
1586
1587                 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1588                     RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1589                 if (nic->intr_type == MSI_X)
1590                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1591                                 RTI_DATA2_MEM_RX_UFC_D(0x40));
1592                 else
1593                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1594                                 RTI_DATA2_MEM_RX_UFC_D(0x80));
1595                 writeq(val64, &bar0->rti_data2_mem);
1596
1597                 for (i = 0; i < config->rx_ring_num; i++) {
1598                         val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1599                                         | RTI_CMD_MEM_OFFSET(i);
1600                         writeq(val64, &bar0->rti_command_mem);
1601
1602                         /*
1603                          * Once the operation completes, the Strobe bit of the
1604                          * command register will be reset. We poll for this
1605                          * particular condition. We wait for a maximum of 500ms
1606                          * for the operation to complete, if it's not complete
1607                          * by then we return error.
1608                          */
1609                         time = 0;
1610                         while (TRUE) {
1611                                 val64 = readq(&bar0->rti_command_mem);
1612                                 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1613                                         break;
1614                                 }
1615                                 if (time > 10) {
1616                                         DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1617                                                   dev->name);
1618                                         return -1;
1619                                 }
1620                                 time++;
1621                                 msleep(50);
1622                         }
1623                 }
1624         }
1625
1626         /*
1627          * Initializing proper values as Pause threshold into all
1628          * the 8 Queues on Rx side.
1629          */
1630         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1631         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1632
1633         /* Disable RMAC PAD STRIPPING */
1634         add = &bar0->mac_cfg;
1635         val64 = readq(&bar0->mac_cfg);
1636         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1637         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1638         writel((u32) (val64), add);
1639         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1640         writel((u32) (val64 >> 32), (add + 4));
1641         val64 = readq(&bar0->mac_cfg);
1642
1643         /* Enable FCS stripping by adapter */
1644         add = &bar0->mac_cfg;
1645         val64 = readq(&bar0->mac_cfg);
1646         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1647         if (nic->device_type == XFRAME_II_DEVICE)
1648                 writeq(val64, &bar0->mac_cfg);
1649         else {
1650                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1651                 writel((u32) (val64), add);
1652                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1653                 writel((u32) (val64 >> 32), (add + 4));
1654         }
1655
1656         /*
1657          * Set the time value to be inserted in the pause frame
1658          * generated by xena.
1659          */
1660         val64 = readq(&bar0->rmac_pause_cfg);
1661         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1662         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1663         writeq(val64, &bar0->rmac_pause_cfg);
1664
1665         /*
1666          * Set the Threshold Limit for Generating the pause frame
1667          * If the amount of data in any Queue exceeds ratio of
1668          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1669          * pause frame is generated
1670          */
1671         val64 = 0;
1672         for (i = 0; i < 4; i++) {
1673                 val64 |=
1674                     (((u64) 0xFF00 | nic->mac_control.
1675                       mc_pause_threshold_q0q3)
1676                      << (i * 2 * 8));
1677         }
1678         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1679
1680         val64 = 0;
1681         for (i = 0; i < 4; i++) {
1682                 val64 |=
1683                     (((u64) 0xFF00 | nic->mac_control.
1684                       mc_pause_threshold_q4q7)
1685                      << (i * 2 * 8));
1686         }
1687         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1688
1689         /*
1690          * TxDMA will stop Read request if the number of read split has
1691          * exceeded the limit pointed by shared_splits
1692          */
1693         val64 = readq(&bar0->pic_control);
1694         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1695         writeq(val64, &bar0->pic_control);
1696
1697         if (nic->config.bus_speed == 266) {
1698                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1699                 writeq(0x0, &bar0->read_retry_delay);
1700                 writeq(0x0, &bar0->write_retry_delay);
1701         }
1702
1703         /*
1704          * Programming the Herc to split every write transaction
1705          * that does not start on an ADB to reduce disconnects.
1706          */
1707         if (nic->device_type == XFRAME_II_DEVICE) {
1708                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1709                         MISC_LINK_STABILITY_PRD(3);
1710                 writeq(val64, &bar0->misc_control);
1711                 val64 = readq(&bar0->pic_control2);
1712                 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1713                 writeq(val64, &bar0->pic_control2);
1714         }
1715         if (strstr(nic->product_name, "CX4")) {
1716                 val64 = TMAC_AVG_IPG(0x17);
1717                 writeq(val64, &bar0->tmac_avg_ipg);
1718         }
1719
1720         return SUCCESS;
1721 }
1722 #define LINK_UP_DOWN_INTERRUPT          1
1723 #define MAC_RMAC_ERR_TIMER              2
1724
1725 static int s2io_link_fault_indication(struct s2io_nic *nic)
1726 {
1727         if (nic->intr_type != INTA)
1728                 return MAC_RMAC_ERR_TIMER;
1729         if (nic->device_type == XFRAME_II_DEVICE)
1730                 return LINK_UP_DOWN_INTERRUPT;
1731         else
1732                 return MAC_RMAC_ERR_TIMER;
1733 }
1734
1735 /**
1736  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1737  *  @nic: device private variable,
1738  *  @mask: A mask indicating which Intr block must be modified and,
1739  *  @flag: A flag indicating whether to enable or disable the Intrs.
1740  *  Description: This function will either disable or enable the interrupts
1741  *  depending on the flag argument. The mask argument can be used to
1742  *  enable/disable any Intr block.
1743  *  Return Value: NONE.
1744  */
1745
static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, temp64 = 0;

	/*  Top level interrupt classification */
	/*  PIC Interrupts */
	if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
		/*  Enable PIC Intrs in the general intr mask register */
		/* In general_int_mask a CLEARED bit means the source is
		 * enabled; a set bit masks it off.
		 * NOTE(review): only TXPIC_INT_M is manipulated here even
		 * when RX_PIC_INTR is requested -- confirm this is intended.
		 */
		val64 = TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
					LINK_UP_DOWN_INTERRUPT ) {
				/* Unmask only the GPIO block and, within it,
				 * only the link-up source.
				 */
				temp64 = readq(&bar0->pic_int_mask);
				temp64 &= ~((u64) PIC_INT_GPIO);
				writeq(temp64, &bar0->pic_int_mask);
				temp64 = readq(&bar0->gpio_int_mask);
				temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
				writeq(temp64, &bar0->gpio_int_mask);
			} else {
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			}
			/*
			 * No MSI Support is available presently, so TTI and
			 * RTI interrupts are also disabled.
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
		/* Any other flag value is silently ignored. */
	}

	/*  MAC Interrupts */
	/*  Enabling/Disabling MAC interrupts */
	if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
		val64 = TXMAC_INT_M | RXMAC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All MAC block error interrupts are disabled for now
			 * TODO
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MAC Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
			writeq(DISABLE_ALL_INTRS,
			       &bar0->mac_rmac_err_mask);

			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		val64 = TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		val64 = RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
}
1864
1865 /**
1866  *  verify_pcc_quiescent- Checks for PCC quiescent state
1867  *  Return: 1 If PCC is quiescence
1868  *          0 If PCC is not quiescence
1869  */
1870 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1871 {
1872         int ret = 0, herc;
1873         struct XENA_dev_config __iomem *bar0 = sp->bar0;
1874         u64 val64 = readq(&bar0->adapter_status);
1875         
1876         herc = (sp->device_type == XFRAME_II_DEVICE);
1877
1878         if (flag == FALSE) {
1879                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
1880                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
1881                                 ret = 1;
1882                 } else {
1883                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1884                                 ret = 1;
1885                 }
1886         } else {
1887                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
1888                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1889                              ADAPTER_STATUS_RMAC_PCC_IDLE))
1890                                 ret = 1;
1891                 } else {
1892                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1893                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1894                                 ret = 1;
1895                 }
1896         }
1897
1898         return ret;
1899 }
/**
 *  verify_xena_quiescence - Checks whether the H/W is ready
 *  Description: Returns whether the H/W is ready to go or not. Depending
 *  on whether adapter enable bit was written or not the comparison
 *  differs and the calling function passes the input argument flag to
 *  indicate this.
 *  Return: 1 If xena is quiescence
 *          0 If Xena is not quiescence
 */

static int verify_xena_quiescence(struct s2io_nic *sp)
{
	int  mode;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = readq(&bar0->adapter_status);
	mode = s2io_verify_pci_mode(sp);

	/* Each check below tests one readiness bit of adapter_status and
	 * bails out with a diagnostic as soon as a bit is not asserted.
	 */
	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
	DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
		DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
		DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
		return 0;
	}

	/*
	 * In PCI 33 mode, the P_PLL is not used, and therefore,
	 * the P_PLL_LOCK bit in the adapter_status register will
	 * not be asserted.
	 */
	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
		sp->device_type == XFRAME_II_DEVICE && mode !=
		PCI_MODE_PCI_33) {
		DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
		return 0;
	}
	/* Multi-bit field: every RC_PRC bit must be quiescent, hence the
	 * masked equality compare rather than a simple non-zero test.
	 */
	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
			ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
		return 0;
	}
	return 1;
}
1968
1969 /**
1970  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
1971  * @sp: Pointer to device specifc structure
1972  * Description :
1973  * New procedure to clear mac address reading  problems on Alpha platforms
1974  *
1975  */
1976
1977 static void fix_mac_address(struct s2io_nic * sp)
1978 {
1979         struct XENA_dev_config __iomem *bar0 = sp->bar0;
1980         u64 val64;
1981         int i = 0;
1982
1983         while (fix_mac[i] != END_SIGN) {
1984                 writeq(fix_mac[i++], &bar0->gpio_control);
1985                 udelay(10);
1986                 val64 = readq(&bar0->gpio_control);
1987         }
1988 }
1989
/**
 *  start_nic - Turns the device on
 *  @nic : device private variable.
 *  Description:
 *  This function actually turns the device on. Before this  function is
 *  called,all Registers are configured from their reset states
 *  and shared memory is allocated but the NIC is still quiescent. On
 *  calling this function, the device interrupts are cleared and the NIC is
 *  literally switched on by writing into the adapter control register.
 *  Return Value:
 *  SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Point each ring's PRC at its first Rx block's DMA address */
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->config.bimodal)
			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	if (vlan_tag_strip == 0) {
		/* Module parameter asked to keep VLAN tags on Rx frames */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection.
	 * NOTE(review): the code CLEARS ADAPTER_ECC_EN here, which reads
	 * like disabling ECC -- confirm the bit polarity against the
	 * adapter_control register spec.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Clearing any possible Link state change interrupts that
	 * could have popped up just before Enabling the card.
	 */
	val64 = readq(&bar0->mac_rmac_err_reg);
	if (val64)
		writeq(val64, &bar0->mac_rmac_err_reg);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initally on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		/* Raw register at offset 0x2700 programs the LED behaviour */
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 * @fifo_data: fifo the descriptor list belongs to (gives nic and max_txds)
 * @txdlp: first TxD of the descriptor list to tear down
 * @get_off: offset of the list in the fifo (unused here)
 * Returns the skb that was queued on this TxD list with all of its DMA
 * mappings released, or NULL if no skb was attached; in both cases the
 * descriptors are zeroed for reuse.
 */
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
					TxD *txdlp, int get_off)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct sk_buff *skb;
	struct TxD *txds;
	u16 j, frg_cnt;

	txds = txdlp;
	/* If the first TxD carries the UFO in-band marker, unmap that
	 * u64 buffer and step to the descriptor holding the skb.
	 */
	if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
		pci_unmap_single(nic->pdev, (dma_addr_t)
			txds->Buffer_Pointer, sizeof(u64),
			PCI_DMA_TODEVICE);
		txds++;
	}

	/* Host_Control stashes the skb pointer set at transmit time */
	skb = (struct sk_buff *) ((unsigned long)
			txds->Host_Control);
	if (!skb) {
		/* Nothing queued: just clear the whole descriptor list */
		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
		return NULL;
	}
	/* Unmap the linear part of the skb ... */
	pci_unmap_single(nic->pdev, (dma_addr_t)
			 txds->Buffer_Pointer,
			 skb->len - skb->data_len,
			 PCI_DMA_TODEVICE);
	/* ... then one descriptor per page fragment */
	frg_cnt = skb_shinfo(skb)->nr_frags;
	if (frg_cnt) {
		txds++;
		for (j = 0; j < frg_cnt; j++, txds++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			if (!txds->Buffer_Pointer)
				break;
			pci_unmap_page(nic->pdev, (dma_addr_t)
					txds->Buffer_Pointer,
				       frag->size, PCI_DMA_TODEVICE);
		}
	}
	/* Clear the descriptors so the slot can be reused */
	memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
	return(skb);
}
2162
2163 /**
2164  *  free_tx_buffers - Free all queued Tx buffers
2165  *  @nic : device private variable.
2166  *  Description:
2167  *  Free all queued Tx buffers.
2168  *  Return Value: void
2169 */
2170
2171 static void free_tx_buffers(struct s2io_nic *nic)
2172 {
2173         struct net_device *dev = nic->dev;
2174         struct sk_buff *skb;
2175         struct TxD *txdp;
2176         int i, j;
2177         struct mac_info *mac_control;
2178         struct config_param *config;
2179         int cnt = 0;
2180
2181         mac_control = &nic->mac_control;
2182         config = &nic->config;
2183
2184         for (i = 0; i < config->tx_fifo_num; i++) {
2185                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2186                         txdp = (struct TxD *) \
2187                         mac_control->fifos[i].list_info[j].list_virt_addr;
2188                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2189                         if (skb) {
2190                                 nic->mac_control.stats_info->sw_stat.mem_freed 
2191                                         += skb->truesize;
2192                                 dev_kfree_skb(skb);
2193                                 cnt++;
2194                         }
2195                 }
2196                 DBG_PRINT(INTR_DBG,
2197                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2198                           dev->name, cnt, i);
2199                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2200                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2201         }
2202 }
2203
2204 /**
2205  *   stop_nic -  To stop the nic
 *   @nic : device private variable.
2207  *   Description:
2208  *   This function does exactly the opposite of what the start_nic()
2209  *   function does. This function is called to stop the device.
2210  *   Return Value:
2211  *   void.
2212  */
2213
2214 static void stop_nic(struct s2io_nic *nic)
2215 {
2216         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2217         register u64 val64 = 0;
2218         u16 interruptible;
2219         struct mac_info *mac_control;
2220         struct config_param *config;
2221
2222         mac_control = &nic->mac_control;
2223         config = &nic->config;
2224
2225         /*  Disable all interrupts */
2226         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2227         interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2228         interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2229         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2230
2231         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2232         val64 = readq(&bar0->adapter_control);
2233         val64 &= ~(ADAPTER_CNTL_EN);
2234         writeq(val64, &bar0->adapter_control);
2235 }
2236
2237 /**
2238  *  fill_rx_buffers - Allocates the Rx side skbs
2239  *  @nic:  device private variable
2240  *  @ring_no: ring number
2241  *  Description:
2242  *  The function allocates Rx side skbs and puts the physical
2243  *  address of these buffers into the RxD buffer pointers, so that the NIC
2244  *  can DMA the received frame into these locations.
2245  *  The NIC supports 3 receive modes, viz
2246  *  1. single buffer,
2247  *  2. three buffer and
2248  *  3. Five buffer modes.
2249  *  Each mode defines how many fragments the received frame will be split
2250  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2251  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 *  is split into 3 fragments. This function currently programs either
 *  single buffer mode (RXD_MODE_1) or two buffer mode (RXD_MODE_3B).
2254  *   Return Value:
2255  *  SUCCESS on success or an appropriate -ve value on failure.
2256  */
2257
static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
        struct net_device *dev = nic->dev;
        struct sk_buff *skb;
        struct RxD_t *rxdp;
        int off, off1, size, block_no, block_no1;
        u32 alloc_tab = 0;
        u32 alloc_cnt;
        struct mac_info *mac_control;
        struct config_param *config;
        u64 tmp;
        struct buffAdd *ba;
        unsigned long flags;
        struct RxD_t *first_rxdp = NULL;
        u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
        struct RxD1 *rxdp1;
        struct RxD3 *rxdp3;
        struct swStat *stats = &nic->mac_control.stats_info->sw_stat;

        mac_control = &nic->mac_control;
        config = &nic->config;
        /* Number of RxDs we still need to fill to top up the ring. */
        alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
            atomic_read(&nic->rx_bufs_left[ring_no]);

        block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
        off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
        while (alloc_tab < alloc_cnt) {
                block_no = mac_control->rings[ring_no].rx_curr_put_info.
                    block_index;
                off = mac_control->rings[ring_no].rx_curr_put_info.offset;

                rxdp = mac_control->rings[ring_no].
                                rx_blocks[block_no].rxds[off].virt_addr;

                /* Put caught up with get on a still-occupied RxD: ring is
                 * effectively full, stop filling.
                 */
                if ((block_no == block_no1) && (off == off1) &&
                                        (rxdp->Host_Control)) {
                        DBG_PRINT(INTR_DBG, "%s: Get and Put",
                                  dev->name);
                        DBG_PRINT(INTR_DBG, " info equated\n");
                        goto end;
                }
                /* End of this rx block reached: wrap to the next block
                 * (and back to block 0 at the end of the ring).
                 */
                if (off && (off == rxd_count[nic->rxd_mode])) {
                        mac_control->rings[ring_no].rx_curr_put_info.
                            block_index++;
                        if (mac_control->rings[ring_no].rx_curr_put_info.
                            block_index == mac_control->rings[ring_no].
                                        block_count)
                                mac_control->rings[ring_no].rx_curr_put_info.
                                        block_index = 0;
                        block_no = mac_control->rings[ring_no].
                                        rx_curr_put_info.block_index;
                        if (off == rxd_count[nic->rxd_mode])
                                off = 0;
                        mac_control->rings[ring_no].rx_curr_put_info.
                                offset = off;
                        rxdp = mac_control->rings[ring_no].
                                rx_blocks[block_no].block_virt_addr;
                        DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
                                  dev->name, rxdp);
                }
                /* Publish the absolute put position; outside NAPI this
                 * races with the ISR path, hence the put_lock.
                 */
                if(!napi) {
                        spin_lock_irqsave(&nic->put_lock, flags);
                        mac_control->rings[ring_no].put_pos =
                        (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
                        spin_unlock_irqrestore(&nic->put_lock, flags);
                } else {
                        mac_control->rings[ring_no].put_pos =
                        (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
                }
                /* RxD still owned by the adapter (3B mode marks filled
                 * descriptors with BIT(0) in Control_2): nothing to do.
                 */
                if ((rxdp->Control_1 & RXD_OWN_XENA) &&
                        ((nic->rxd_mode == RXD_MODE_3B) &&
                                (rxdp->Control_2 & BIT(0)))) {
                        mac_control->rings[ring_no].rx_curr_put_info.
                                        offset = off;
                        goto end;
                }
                /* calculate size of skb based on ring mode */
                size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
                                HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
                if (nic->rxd_mode == RXD_MODE_1)
                        size += NET_IP_ALIGN;
                else
                        size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;

                /* allocate skb */
                skb = dev_alloc_skb(size);
                if(!skb) {
                        DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
                        DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
                        /* Hand the filled descriptors so far to the NIC
                         * before bailing out (see comment at "end:").
                         */
                        if (first_rxdp) {
                                wmb();
                                first_rxdp->Control_1 |= RXD_OWN_XENA;
                        }
                        nic->mac_control.stats_info->sw_stat. \
                                mem_alloc_fail_cnt++;
                        return -ENOMEM ;
                }
                nic->mac_control.stats_info->sw_stat.mem_allocated 
                        += skb->truesize;
                if (nic->rxd_mode == RXD_MODE_1) {
                        /* 1 buffer mode - normal operation mode */
                        rxdp1 = (struct RxD1*)rxdp;
                        memset(rxdp, 0, sizeof(struct RxD1));
                        skb_reserve(skb, NET_IP_ALIGN);
                        rxdp1->Buffer0_ptr = pci_map_single
                            (nic->pdev, skb->data, size - NET_IP_ALIGN,
                                PCI_DMA_FROMDEVICE);
                        if( (rxdp1->Buffer0_ptr == 0) ||
                                (rxdp1->Buffer0_ptr ==
                                DMA_ERROR_CODE))
                                goto pci_map_failed;

                        rxdp->Control_2 = 
                                SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);

                } else if (nic->rxd_mode == RXD_MODE_3B) {
                        /*
                         * 2 buffer mode -
                         * 2 buffer mode provides 128
                         * byte aligned receive buffers.
                         */

                        rxdp3 = (struct RxD3*)rxdp;
                        /* save buffer pointers to avoid frequent dma mapping */
                        Buffer0_ptr = rxdp3->Buffer0_ptr;
                        Buffer1_ptr = rxdp3->Buffer1_ptr;
                        memset(rxdp, 0, sizeof(struct RxD3));
                        /* restore the buffer pointers for dma sync*/
                        rxdp3->Buffer0_ptr = Buffer0_ptr;
                        rxdp3->Buffer1_ptr = Buffer1_ptr;

                        ba = &mac_control->rings[ring_no].ba[block_no][off];
                        skb_reserve(skb, BUF0_LEN);
                        /* Round skb->data up to the next ALIGN_SIZE+1
                         * boundary so the payload buffer is aligned.
                         */
                        tmp = (u64)(unsigned long) skb->data;
                        tmp += ALIGN_SIZE;
                        tmp &= ~ALIGN_SIZE;
                        skb->data = (void *) (unsigned long)tmp;
                        skb_reset_tail_pointer(skb);

                        /* Buffer0 (header area) is mapped once and then
                         * only re-synced on subsequent refills.
                         */
                        if (!(rxdp3->Buffer0_ptr))
                                rxdp3->Buffer0_ptr =
                                   pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
                                           PCI_DMA_FROMDEVICE);
                        else
                                pci_dma_sync_single_for_device(nic->pdev,
                                (dma_addr_t) rxdp3->Buffer0_ptr,
                                    BUF0_LEN, PCI_DMA_FROMDEVICE);
                        if( (rxdp3->Buffer0_ptr == 0) ||
                                (rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
                                goto pci_map_failed;

                        rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
                        if (nic->rxd_mode == RXD_MODE_3B) {
                                /* Two buffer mode */

                                /*
                                 * Buffer2 will have L3/L4 header plus
                                 * L4 payload
                                 */
                                rxdp3->Buffer2_ptr = pci_map_single
                                (nic->pdev, skb->data, dev->mtu + 4,
                                                PCI_DMA_FROMDEVICE);

                                if( (rxdp3->Buffer2_ptr == 0) ||
                                        (rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
                                        goto pci_map_failed;

                                rxdp3->Buffer1_ptr =
                                                pci_map_single(nic->pdev,
                                                ba->ba_1, BUF1_LEN,
                                                PCI_DMA_FROMDEVICE);
                                /* On Buffer1 mapping failure, undo the
                                 * Buffer2 mapping before bailing out.
                                 */
                                if( (rxdp3->Buffer1_ptr == 0) ||
                                        (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
                                        pci_unmap_single
                                                (nic->pdev,
                                                (dma_addr_t)rxdp3->Buffer2_ptr,
                                                dev->mtu + 4,
                                                PCI_DMA_FROMDEVICE);
                                        goto pci_map_failed;
                                }
                                rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
                                rxdp->Control_2 |= SET_BUFFER2_SIZE_3
                                                                (dev->mtu + 4);
                        }
                        rxdp->Control_2 |= BIT(0);
                }
                rxdp->Host_Control = (unsigned long) (skb);
                /* Hand off ownership immediately, except every
                 * (1 << rxsync_frequency)-th descriptor, which is batched
                 * and released via first_rxdp after a write barrier.
                 */
                if (alloc_tab & ((1 << rxsync_frequency) - 1))
                        rxdp->Control_1 |= RXD_OWN_XENA;
                off++;
                if (off == (rxd_count[nic->rxd_mode] + 1))
                        off = 0;
                mac_control->rings[ring_no].rx_curr_put_info.offset = off;

                rxdp->Control_2 |= SET_RXD_MARKER;
                if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
                        if (first_rxdp) {
                                wmb();
                                first_rxdp->Control_1 |= RXD_OWN_XENA;
                        }
                        first_rxdp = rxdp;
                }
                atomic_inc(&nic->rx_bufs_left[ring_no]);
                alloc_tab++;
        }

      end:
        /* Transfer ownership of first descriptor to adapter just before
         * exiting. Before that, use memory barrier so that ownership
         * and other fields are seen by adapter correctly.
         */
        if (first_rxdp) {
                wmb();
                first_rxdp->Control_1 |= RXD_OWN_XENA;
        }

        return SUCCESS;
pci_map_failed:
        stats->pci_map_fail_cnt++;
        stats->mem_freed += skb->truesize;
        dev_kfree_skb_irq(skb);
        return -ENOMEM;
}
2481
2482 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2483 {
2484         struct net_device *dev = sp->dev;
2485         int j;
2486         struct sk_buff *skb;
2487         struct RxD_t *rxdp;
2488         struct mac_info *mac_control;
2489         struct buffAdd *ba;
2490         struct RxD1 *rxdp1;
2491         struct RxD3 *rxdp3;
2492
2493         mac_control = &sp->mac_control;
2494         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2495                 rxdp = mac_control->rings[ring_no].
2496                                 rx_blocks[blk].rxds[j].virt_addr;
2497                 skb = (struct sk_buff *)
2498                         ((unsigned long) rxdp->Host_Control);
2499                 if (!skb) {
2500                         continue;
2501                 }
2502                 if (sp->rxd_mode == RXD_MODE_1) {
2503                         rxdp1 = (struct RxD1*)rxdp;
2504                         pci_unmap_single(sp->pdev, (dma_addr_t)
2505                                 rxdp1->Buffer0_ptr,
2506                                 dev->mtu +
2507                                 HEADER_ETHERNET_II_802_3_SIZE
2508                                 + HEADER_802_2_SIZE +
2509                                 HEADER_SNAP_SIZE,
2510                                 PCI_DMA_FROMDEVICE);
2511                         memset(rxdp, 0, sizeof(struct RxD1));
2512                 } else if(sp->rxd_mode == RXD_MODE_3B) {
2513                         rxdp3 = (struct RxD3*)rxdp;
2514                         ba = &mac_control->rings[ring_no].
2515                                 ba[blk][j];
2516                         pci_unmap_single(sp->pdev, (dma_addr_t)
2517                                 rxdp3->Buffer0_ptr,
2518                                 BUF0_LEN,
2519                                 PCI_DMA_FROMDEVICE);
2520                         pci_unmap_single(sp->pdev, (dma_addr_t)
2521                                 rxdp3->Buffer1_ptr,
2522                                 BUF1_LEN,
2523                                 PCI_DMA_FROMDEVICE);
2524                         pci_unmap_single(sp->pdev, (dma_addr_t)
2525                                 rxdp3->Buffer2_ptr,
2526                                 dev->mtu + 4,
2527                                 PCI_DMA_FROMDEVICE);
2528                         memset(rxdp, 0, sizeof(struct RxD3));
2529                 }
2530                 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2531                 dev_kfree_skb(skb);
2532                 atomic_dec(&sp->rx_bufs_left[ring_no]);
2533         }
2534 }
2535
2536 /**
2537  *  free_rx_buffers - Frees all Rx buffers
2538  *  @sp: device private variable.
2539  *  Description:
2540  *  This function will free all Rx buffers allocated by host.
2541  *  Return Value:
2542  *  NONE.
2543  */
2544
2545 static void free_rx_buffers(struct s2io_nic *sp)
2546 {
2547         struct net_device *dev = sp->dev;
2548         int i, blk = 0, buf_cnt = 0;
2549         struct mac_info *mac_control;
2550         struct config_param *config;
2551
2552         mac_control = &sp->mac_control;
2553         config = &sp->config;
2554
2555         for (i = 0; i < config->rx_ring_num; i++) {
2556                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2557                         free_rxd_blk(sp,i,blk);
2558
2559                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2560                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2561                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2562                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2563                 atomic_set(&sp->rx_bufs_left[i], 0);
2564                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2565                           dev->name, buf_cnt, i);
2566         }
2567 }
2568
2569 /**
2570  * s2io_poll - Rx interrupt handler for NAPI support
2571  * @napi : pointer to the napi structure.
2572  * @budget : The number of packets that were budgeted to be processed
2573  * during  one pass through the 'Poll" function.
2574  * Description:
2575  * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in an interrupt
 * context; it will also process only a given number of packets.
2578  * Return value:
2579  * 0 on success and 1 if there are No Rx packets to be processed.
2580  */
2581
static int s2io_poll(struct napi_struct *napi, int budget)
{
        struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
        struct net_device *dev = nic->dev;
        int pkt_cnt = 0, org_pkts_to_process;
        struct mac_info *mac_control;
        struct config_param *config;
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        int i;

        atomic_inc(&nic->isr_cnt);
        mac_control = &nic->mac_control;
        config = &nic->config;

        /* rx_intr_handler() decrements pkts_to_process as it consumes
         * packets; the difference from budget gives the count processed.
         */
        nic->pkts_to_process = budget;
        org_pkts_to_process = nic->pkts_to_process;

        /* Ack pending Rx traffic interrupts; the readl flushes the write. */
        writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
        readl(&bar0->rx_traffic_int);

        for (i = 0; i < config->rx_ring_num; i++) {
                rx_intr_handler(&mac_control->rings[i]);
                pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
                if (!nic->pkts_to_process) {
                        /* Quota for the current iteration has been met */
                        goto no_rx;
                }
        }

        /* Budget not exhausted: leave polling mode... */
        netif_rx_complete(dev, napi);

        /* ...refill the rings... */
        for (i = 0; i < config->rx_ring_num; i++) {
                if (fill_rx_buffers(nic, i) == -ENOMEM) {
                        DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
                        DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
                        break;
                }
        }
        /* Re enable the Rx interrupts. */
        writeq(0x0, &bar0->rx_traffic_mask);
        readl(&bar0->rx_traffic_mask);
        atomic_dec(&nic->isr_cnt);
        return pkt_cnt;

no_rx:
        /* Quota exhausted: refill but stay in polling mode (interrupts
         * remain masked so NAPI will call us again).
         */
        for (i = 0; i < config->rx_ring_num; i++) {
                if (fill_rx_buffers(nic, i) == -ENOMEM) {
                        DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
                        DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
                        break;
                }
        }
        atomic_dec(&nic->isr_cnt);
        return pkt_cnt;
}
2637
2638 #ifdef CONFIG_NET_POLL_CONTROLLER
2639 /**
2640  * s2io_netpoll - netpoll event handler entry point
2641  * @dev : pointer to the device structure.
2642  * Description:
2643  *      This function will be called by upper layer to check for events on the
2644  * interface in situations where interrupts are disabled. It is used for
2645  * specific in-kernel networking tasks, such as remote consoles and kernel
2646  * debugging over the network (example netdump in RedHat).
2647  */
static void s2io_netpoll(struct net_device *dev)
{
        struct s2io_nic *nic = dev->priv;
        struct mac_info *mac_control;
        struct config_param *config;
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
        int i;

        /* Device unreachable (e.g. after a PCI error) - nothing to do. */
        if (pci_channel_offline(nic->pdev))
                return;

        /* Keep the real ISR out while we poll the hardware directly. */
        disable_irq(dev->irq);

        atomic_inc(&nic->isr_cnt);
        mac_control = &nic->mac_control;
        config = &nic->config;

        /* Ack all pending Rx and Tx traffic interrupts. */
        writeq(val64, &bar0->rx_traffic_int);
        writeq(val64, &bar0->tx_traffic_int);

        /* we need to free up the transmitted skbufs or else netpoll will
         * run out of skbs and will fail and eventually netpoll application such
         * as netdump will fail.
         */
        for (i = 0; i < config->tx_fifo_num; i++)
                tx_intr_handler(&mac_control->fifos[i]);

        /* check for received packet and indicate up to network */
        for (i = 0; i < config->rx_ring_num; i++)
                rx_intr_handler(&mac_control->rings[i]);

        /* Replenish the rx rings with fresh buffers. */
        for (i = 0; i < config->rx_ring_num; i++) {
                if (fill_rx_buffers(nic, i) == -ENOMEM) {
                        DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
                        DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
                        break;
                }
        }
        atomic_dec(&nic->isr_cnt);
        enable_irq(dev->irq);
        return;
}
2691 #endif
2692
2693 /**
2694  *  rx_intr_handler - Rx interrupt handler
2695  *  @nic: device private variable.
2696  *  Description:
2697  *  If the interrupt is because of a received frame or if the
2698  *  receive ring contains fresh as yet un-processed frames,this function is
2699  *  called. It picks out the RxD at which place the last Rx processing had
2700  *  stopped and sends the skb to the OSM's Rx handler and then increments
2701  *  the offset.
2702  *  Return Value:
2703  *  NONE.
2704  */
static void rx_intr_handler(struct ring_info *ring_data)
{
        struct s2io_nic *nic = ring_data->nic;
        struct net_device *dev = (struct net_device *) nic->dev;
        int get_block, put_block, put_offset;
        struct rx_curr_get_info get_info, put_info;
        struct RxD_t *rxdp;
        struct sk_buff *skb;
        int pkt_cnt = 0;
        int i;
        struct RxD1* rxdp1;
        struct RxD3* rxdp3;

        spin_lock(&nic->rx_lock);
        /* Bail out if the card is being reset/taken down. */
        if (atomic_read(&nic->card_state) == CARD_DOWN) {
                DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
                          __FUNCTION__, dev->name);
                spin_unlock(&nic->rx_lock);
                return;
        }

        /* Snapshot the get/put positions before walking the ring. */
        get_info = ring_data->rx_curr_get_info;
        get_block = get_info.block_index;
        memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
        put_block = put_info.block_index;
        rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
        /* Outside NAPI, put_pos is updated concurrently by the refill
         * path, so read it under put_lock.
         */
        if (!napi) {
                spin_lock(&nic->put_lock);
                put_offset = ring_data->put_pos;
                spin_unlock(&nic->put_lock);
        } else
                put_offset = ring_data->put_pos;

        /* Process descriptors the NIC has handed back to the host. */
        while (RXD_IS_UP2DT(rxdp)) {
                /*
                 * If your are next to put index then it's
                 * FIFO full condition
                 */
                if ((get_block == put_block) &&
                    (get_info.offset + 1) == put_info.offset) {
                        DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
                        break;
                }
                skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
                if (skb == NULL) {
                        /* Should never happen on a ready descriptor. */
                        DBG_PRINT(ERR_DBG, "%s: The skb is ",
                                  dev->name);
                        DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
                        spin_unlock(&nic->rx_lock);
                        return;
                }
                /* Unmap (or sync) the DMA buffers per RxD mode before the
                 * CPU touches the packet data.
                 */
                if (nic->rxd_mode == RXD_MODE_1) {
                        rxdp1 = (struct RxD1*)rxdp;
                        pci_unmap_single(nic->pdev, (dma_addr_t)
                                rxdp1->Buffer0_ptr,
                                dev->mtu +
                                HEADER_ETHERNET_II_802_3_SIZE +
                                HEADER_802_2_SIZE +
                                HEADER_SNAP_SIZE,
                                PCI_DMA_FROMDEVICE);
                } else if (nic->rxd_mode == RXD_MODE_3B) {
                        rxdp3 = (struct RxD3*)rxdp;
                        pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
                                rxdp3->Buffer0_ptr,
                                BUF0_LEN, PCI_DMA_FROMDEVICE);
                        pci_unmap_single(nic->pdev, (dma_addr_t)
                                rxdp3->Buffer2_ptr,
                                dev->mtu + 4,
                                PCI_DMA_FROMDEVICE);
                }
                prefetch(skb->data);
                /* Hand the frame to the OS-specific receive path. */
                rx_osm_handler(ring_data, rxdp);
                get_info.offset++;
                ring_data->rx_curr_get_info.offset = get_info.offset;
                rxdp = ring_data->rx_blocks[get_block].
                                rxds[get_info.offset].virt_addr;
                /* Wrap to the next block (and around the ring) when the
                 * end of the current block is reached.
                 */
                if (get_info.offset == rxd_count[nic->rxd_mode]) {
                        get_info.offset = 0;
                        ring_data->rx_curr_get_info.offset = get_info.offset;
                        get_block++;
                        if (get_block == ring_data->block_count)
                                get_block = 0;
                        ring_data->rx_curr_get_info.block_index = get_block;
                        rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
                }

                nic->pkts_to_process -= 1;
                /* Under NAPI, stop once the poll quota is consumed. */
                if ((napi) && (!nic->pkts_to_process))
                        break;
                pkt_cnt++;
                if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
                        break;
        }
        if (nic->lro) {
                /* Clear all LRO sessions before exiting */
                for (i=0; i<MAX_LRO_SESSIONS; i++) {
                        struct lro *lro = &nic->lro0_n[i];
                        if (lro->in_use) {
                                update_L3L4_header(nic, lro);
                                queue_rx_frame(lro->parent);
                                clear_lro_session(lro);
                        }
                }
        }

        spin_unlock(&nic->rx_lock);
}
2812
2813 /**
2814  *  tx_intr_handler - Transmit interrupt handler
2815  *  @nic : device private variable
2816  *  Description:
2817  *  If an interrupt was raised to indicate DMA complete of the
2818  *  Tx packet, this function is called. It identifies the last TxD
2819  *  whose buffer was freed and frees all skbs whose data have already
2820  *  DMA'ed into the NICs internal memory.
2821  *  Return Value:
2822  *  NONE
2823  */
2824
static void tx_intr_handler(struct fifo_info *fifo_data)
{
        struct s2io_nic *nic = fifo_data->nic;
        struct net_device *dev = (struct net_device *) nic->dev;
        struct tx_curr_get_info get_info, put_info;
        struct sk_buff *skb;
        struct TxD *txdlp;
        u8 err_mask;

        /* Snapshot the consumer (get) and producer (put) positions */
        get_info = fifo_data->tx_curr_get_info;
        memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
        txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
            list_virt_addr;
        /*
         * Reap completed descriptors: stop when we hit a TxD still owned
         * by the NIC, catch up with the producer, or find an empty slot.
         */
        while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
               (get_info.offset != put_info.offset) &&
               (txdlp->Host_Control)) {
                /* Check for TxD errors */
                if (txdlp->Control_1 & TXD_T_CODE) {
                        unsigned long long err;
                        err = txdlp->Control_1 & TXD_T_CODE;
                        if (err & 0x1) {
                                nic->mac_control.stats_info->sw_stat.
                                                parity_err_cnt++;
                        }

                        /* update t_code statistics */
                        err_mask = err >> 48;
                        switch(err_mask) {
                                case 2:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_buf_abort_cnt++;
                                break;

                                case 3:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_desc_abort_cnt++;
                                break;

                                case 7:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_parity_err_cnt++;
                                break;

                                case 10:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_link_loss_cnt++;
                                break;

                                case 15:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_list_proc_err_cnt++;
                                break;
                        }
                }

                /* Unmap the DMA buffers and recover the skb for this TxD */
                skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
                if (skb == NULL) {
                        /* Should not happen; abandon the reap entirely */
                        DBG_PRINT(ERR_DBG, "%s: Null skb ",
                        __FUNCTION__);
                        DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
                        return;
                }

                /* Updating the statistics block */
                nic->stats.tx_bytes += skb->len;
                nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
                dev_kfree_skb_irq(skb);

                /* Advance the get offset, wrapping at end of the list */
                get_info.offset++;
                if (get_info.offset == get_info.fifo_len + 1)
                        get_info.offset = 0;
                txdlp = (struct TxD *) fifo_data->list_info
                    [get_info.offset].list_virt_addr;
                fifo_data->tx_curr_get_info.offset =
                    get_info.offset;
        }

        /* Descriptors were freed; the stopped queue may be restarted */
        spin_lock(&nic->tx_lock);
        if (netif_queue_stopped(dev))
                netif_wake_queue(dev);
        spin_unlock(&nic->tx_lock);
}
2907
2908 /**
2909  *  s2io_mdio_write - Function to write in to MDIO registers
2910  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2911  *  @addr     : address value
2912  *  @value    : data value
2913  *  @dev      : pointer to net_device structure
2914  *  Description:
2915  *  This function is used to write values to the MDIO registers
2916  *  NONE
2917  */
static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
{
        u64 val64 = 0x0;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;

        /* Address transaction: latch the target MMD register address */
        val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
                        | MDIO_MMD_DEV_ADDR(mmd_type)
                        | MDIO_MMS_PRT_ADDR(0x0);
        writeq(val64, &bar0->mdio_control);
        /* Kick off the transaction, then give it time to complete */
        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
        writeq(val64, &bar0->mdio_control);
        udelay(100);

        /* Data transaction: write 'value' to the latched address */
        val64 = 0x0;
        val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
                        | MDIO_MMD_DEV_ADDR(mmd_type)
                        | MDIO_MMS_PRT_ADDR(0x0)
                        | MDIO_MDIO_DATA(value)
                        | MDIO_OP(MDIO_OP_WRITE_TRANS);
        writeq(val64, &bar0->mdio_control);
        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
        writeq(val64, &bar0->mdio_control);
        udelay(100);

        /*
         * Trailing read transaction on the same register; the result is
         * discarded.  Presumably a read-back to flush/confirm the write -
         * TODO confirm against the Xframe MDIO programming documentation.
         */
        val64 = 0x0;
        val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
        | MDIO_MMD_DEV_ADDR(mmd_type)
        | MDIO_MMS_PRT_ADDR(0x0)
        | MDIO_OP(MDIO_OP_READ_TRANS);
        writeq(val64, &bar0->mdio_control);
        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
        writeq(val64, &bar0->mdio_control);
        udelay(100);

}
2956
/**
 *  s2io_mdio_read - Function to read from MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to read values from the MDIO registers
 *  Return value:
 *  The 16-bit value read from the register, in the low bits of a u64
 */
static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
{
        u64 val64 = 0x0;
        u64 rval64 = 0x0;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;

        /* Address transaction: latch the target MMD register address */
        val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
                        | MDIO_MMD_DEV_ADDR(mmd_type)
                        | MDIO_MMS_PRT_ADDR(0x0);
        writeq(val64, &bar0->mdio_control);
        /* Kick off the transaction, then give it time to complete */
        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
        writeq(val64, &bar0->mdio_control);
        udelay(100);

        /* Data transaction: issue the read of the latched address */
        val64 = 0x0;
        val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
                        | MDIO_MMD_DEV_ADDR(mmd_type)
                        | MDIO_MMS_PRT_ADDR(0x0)
                        | MDIO_OP(MDIO_OP_READ_TRANS);
        writeq(val64, &bar0->mdio_control);
        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
        writeq(val64, &bar0->mdio_control);
        udelay(100);

        /* Read the value from regs */
        rval64 = readq(&bar0->mdio_control);
        /* Data lives in bits 16..31 of mdio_control; mask and shift down */
        rval64 = rval64 & 0xFFFF0000;
        rval64 = rval64 >> 16;
        return rval64;
}
/**
 *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
 *  @counter      : counter value to be updated
 *  @flag         : flag to indicate the status
 *  @type         : counter type
 *  Description:
 *  This function is to check the status of the xpak counters value
 *  NONE
 */
3008
3009 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3010 {
3011         u64 mask = 0x3;
3012         u64 val64;
3013         int i;
3014         for(i = 0; i <index; i++)
3015                 mask = mask << 0x2;
3016
3017         if(flag > 0)
3018         {
3019                 *counter = *counter + 1;
3020                 val64 = *regs_stat & mask;
3021                 val64 = val64 >> (index * 0x2);
3022                 val64 = val64 + 1;
3023                 if(val64 == 3)
3024                 {
3025                         switch(type)
3026                         {
3027                         case 1:
3028                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3029                                           "service. Excessive temperatures may "
3030                                           "result in premature transceiver "
3031                                           "failure \n");
3032                         break;
3033                         case 2:
3034                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3035                                           "service Excessive bias currents may "
3036                                           "indicate imminent laser diode "
3037                                           "failure \n");
3038                         break;
3039                         case 3:
3040                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3041                                           "service Excessive laser output "
3042                                           "power may saturate far-end "
3043                                           "receiver\n");
3044                         break;
3045                         default:
3046                                 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3047                                           "type \n");
3048                         }
3049                         val64 = 0x0;
3050                 }
3051                 val64 = val64 << (index * 0x2);
3052                 *regs_stat = (*regs_stat & (~mask)) | (val64);
3053
3054         } else {
3055                 *regs_stat = *regs_stat & (~mask);
3056         }
3057 }
3058
/**
 *  s2io_updt_xpak_counter - Function to update the xpak counters
 *  @dev         : pointer to net_device struct
 *  Description:
 *  This function is to update the status of the xpak counters value
 *  NONE
 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
        u16 flag  = 0x0;
        u16 type  = 0x0;
        u16 val16 = 0x0;
        u64 val64 = 0x0;
        u64 addr  = 0x0;

        struct s2io_nic *sp = dev->priv;
        struct stat_block *stat_info = sp->mac_control.stats_info;

        /* Check the communication with the MDIO slave */
        addr = 0x0000;
        val64 = 0x0;
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
        /* All-ones / all-zeroes reads indicate the slave is unreachable */
        if((val64 == 0xFFFF) || (val64 == 0x0000))
        {
                DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
                          "Returned %llx\n", (unsigned long long)val64);
                return;
        }

        /* Check for the expected value of 2040 at PMA address 0x0000 */
        if(val64 != 0x2040)
        {
                DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
                DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
                          (unsigned long long)val64);
                return;
        }

        /* Loading the DOM register to MDIO register */
        addr = 0xA100;
        s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

        /* Reading the Alarm flags */
        addr = 0xA070;
        val64 = 0x0;
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

        /* Bit 7: transceiver temperature high - 3-strike alarm counter */
        flag = CHECKBIT(val64, 0x7);
        type = 1;
        s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
                                &stat_info->xpak_stat.xpak_regs_stat,
                                0x0, flag, type);

        if(CHECKBIT(val64, 0x6))
                stat_info->xpak_stat.alarm_transceiver_temp_low++;

        /* Bit 3: laser bias current high - 3-strike alarm counter */
        flag = CHECKBIT(val64, 0x3);
        type = 2;
        s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
                                &stat_info->xpak_stat.xpak_regs_stat,
                                0x2, flag, type);

        if(CHECKBIT(val64, 0x2))
                stat_info->xpak_stat.alarm_laser_bias_current_low++;

        /* Bit 1: laser output power high - 3-strike alarm counter */
        flag = CHECKBIT(val64, 0x1);
        type = 3;
        s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
                                &stat_info->xpak_stat.xpak_regs_stat,
                                0x4, flag, type);

        if(CHECKBIT(val64, 0x0))
                stat_info->xpak_stat.alarm_laser_output_power_low++;

        /* Reading the Warning flags */
        addr = 0xA074;
        val64 = 0x0;
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

        /* Warnings are simple counters, no 3-strike state machine */
        if(CHECKBIT(val64, 0x7))
                stat_info->xpak_stat.warn_transceiver_temp_high++;

        if(CHECKBIT(val64, 0x6))
                stat_info->xpak_stat.warn_transceiver_temp_low++;

        if(CHECKBIT(val64, 0x3))
                stat_info->xpak_stat.warn_laser_bias_current_high++;

        if(CHECKBIT(val64, 0x2))
                stat_info->xpak_stat.warn_laser_bias_current_low++;

        if(CHECKBIT(val64, 0x1))
                stat_info->xpak_stat.warn_laser_output_power_high++;

        if(CHECKBIT(val64, 0x0))
                stat_info->xpak_stat.warn_laser_output_power_low++;
}
3157
/**
 *  alarm_intr_handler - Alarm Interrupt handler
 *  @nic: device private variable
 *  Description: If the interrupt was neither because of Rx packet or Tx
 *  complete, this function is called. If the interrupt was to indicate
 *  a loss of link, the OSM link status handler is invoked for any other
 *  alarm interrupt the block that raised the interrupt is displayed
 *  and a H/W reset is issued.
 *  Return Value:
 *  NONE
*/
3169
static void alarm_intr_handler(struct s2io_nic *nic)
{
        struct net_device *dev = (struct net_device *) nic->dev;
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        register u64 val64 = 0, err_reg = 0;
        u64 cnt;
        int i;
        /* Nothing to do if the card is down or the PCI channel is dead */
        if (atomic_read(&nic->card_state) == CARD_DOWN)
                return;
        if (pci_channel_offline(nic->pdev))
                return;
        nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
        /* Handling the XPAK counters update */
        if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
                /* waiting for an hour */
                nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
        } else {
                s2io_updt_xpak_counter(dev);
                /* reset the count to zero */
                nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
        }

        /* Handling link status change error Intr */
        if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
                /* Write-back acknowledges (clears) the error bits */
                err_reg = readq(&bar0->mac_rmac_err_reg);
                writeq(err_reg, &bar0->mac_rmac_err_reg);
                if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
                        schedule_work(&nic->set_link_task);
                }
        }

        /* Handling Ecc errors */
        val64 = readq(&bar0->mc_err_reg);
        writeq(val64, &bar0->mc_err_reg);
        if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
                if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
                        nic->mac_control.stats_info->sw_stat.
                                double_ecc_errs++;
                        DBG_PRINT(INIT_DBG, "%s: Device indicates ",
                                  dev->name);
                        DBG_PRINT(INIT_DBG, "double ECC error!!\n");
                        if (nic->device_type != XFRAME_II_DEVICE) {
                                /* Reset XframeI only if critical error */
                                if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
                                             MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
                                        netif_stop_queue(dev);
                                        schedule_work(&nic->rst_timer_task);
                                        nic->mac_control.stats_info->sw_stat.
                                                        soft_reset_cnt++;
                                }
                        }
                } else {
                        nic->mac_control.stats_info->sw_stat.
                                single_ecc_errs++;
                }
        }

        /* In case of a serious error, the device will be Reset. */
        val64 = readq(&bar0->serr_source);
        if (val64 & SERR_SOURCE_ANY) {
                nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
                DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
                DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
                          (unsigned long long)val64);
                netif_stop_queue(dev);
                schedule_work(&nic->rst_timer_task);
                nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
        }

        /*
         * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
         * Error occurs, the adapter will be recycled by disabling the
         * adapter enable bit and enabling it again after the device
         * becomes Quiescent.
         */
        val64 = readq(&bar0->pcc_err_reg);
        writeq(val64, &bar0->pcc_err_reg);
        if (val64 & PCC_FB_ECC_DB_ERR) {
                u64 ac = readq(&bar0->adapter_control);
                ac &= ~(ADAPTER_CNTL_EN);
                writeq(ac, &bar0->adapter_control);
                /* read back to flush the disable before re-enabling */
                ac = readq(&bar0->adapter_control);
                schedule_work(&nic->set_link_task);
        }
        /* Check for data parity error */
        val64 = readq(&bar0->pic_int_status);
        if (val64 & PIC_INT_GPIO) {
                val64 = readq(&bar0->gpio_int_reg);
                if (val64 & GPIO_INT_REG_DP_ERR_INT) {
                        nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
                        schedule_work(&nic->rst_timer_task);
                        nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
                }
        }

        /* Check for ring full counter */
        if (nic->device_type & XFRAME_II_DEVICE) {
                /* Each bump counter register packs four 16-bit counts */
                val64 = readq(&bar0->ring_bump_counter1);
                for (i=0; i<4; i++) {
                        cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
                        cnt >>= 64 - ((i+1)*16);
                        nic->mac_control.stats_info->sw_stat.ring_full_cnt
                                += cnt;
                }

                val64 = readq(&bar0->ring_bump_counter2);
                for (i=0; i<4; i++) {
                        cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
                        cnt >>= 64 - ((i+1)*16);
                        nic->mac_control.stats_info->sw_stat.ring_full_cnt
                                += cnt;
                }
        }

        /* Other type of interrupts are not being handled now,  TODO */
}
3286
3287 /**
3288  *  wait_for_cmd_complete - waits for a command to complete.
3289  *  @sp : private member of the device structure, which is a pointer to the
3290  *  s2io_nic structure.
3291  *  Description: Function that waits for a command to Write into RMAC
3292  *  ADDR DATA registers to be completed and returns either success or
3293  *  error depending on whether the command was complete or not.
3294  *  Return value:
3295  *   SUCCESS on success and FAILURE on failure.
3296  */
3297
3298 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3299                                 int bit_state)
3300 {
3301         int ret = FAILURE, cnt = 0, delay = 1;
3302         u64 val64;
3303
3304         if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3305                 return FAILURE;
3306
3307         do {
3308                 val64 = readq(addr);
3309                 if (bit_state == S2IO_BIT_RESET) {
3310                         if (!(val64 & busy_bit)) {
3311                                 ret = SUCCESS;
3312                                 break;
3313                         }
3314                 } else {
3315                         if (!(val64 & busy_bit)) {
3316                                 ret = SUCCESS;
3317                                 break;
3318                         }
3319                 }
3320
3321                 if(in_interrupt())
3322                         mdelay(delay);
3323                 else
3324                         msleep(delay);
3325
3326                 if (++cnt >= 10)
3327                         delay = 50;
3328         } while (cnt < 20);
3329         return ret;
3330 }
3331 /*
3332  * check_pci_device_id - Checks if the device id is supported
3333  * @id : device id
3334  * Description: Function to check if the pci device id is supported by driver.
3335  * Return value: Actual device id if supported else PCI_ANY_ID
3336  */
3337 static u16 check_pci_device_id(u16 id)
3338 {
3339         switch (id) {
3340         case PCI_DEVICE_ID_HERC_WIN:
3341         case PCI_DEVICE_ID_HERC_UNI:
3342                 return XFRAME_II_DEVICE;
3343         case PCI_DEVICE_ID_S2IO_UNI:
3344         case PCI_DEVICE_ID_S2IO_WIN:
3345                 return XFRAME_I_DEVICE;
3346         default:
3347                 return PCI_ANY_ID;
3348         }
3349 }
3350
3351 /**
3352  *  s2io_reset - Resets the card.
3353  *  @sp : private member of the device structure.
3354  *  Description: Function to Reset the card. This function then also
3355  *  restores the previously saved PCI configuration space registers as
3356  *  the card reset also resets the configuration space.
3357  *  Return value:
3358  *  void.
3359  */
3360
static void s2io_reset(struct s2io_nic * sp)
{
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64;
        u16 subid, pci_cmd;
        int i;
        u16 val16;
        unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
        unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;

        DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
                        __FUNCTION__, sp->dev->name);

        /* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
        pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

        /* Issue the global software reset */
        val64 = SW_RESET_ALL;
        writeq(val64, &bar0->sw_reset);
        /* CX4 cards need extra settle time after reset */
        if (strstr(sp->product_name, "CX4")) {
                msleep(750);
        }
        msleep(250);
        /* Poll until config space reads back a recognized device id */
        for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

                /* Restore the PCI state saved during initialization. */
                pci_restore_state(sp->pdev);
                pci_read_config_word(sp->pdev, 0x2, &val16);
                if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
                        break;
                msleep(200);
        }

        if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
                DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
        }

        /* Re-apply the PCI-X command register saved above */
        pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

        s2io_init_pci(sp);

        /* Set swapper to enable I/O register access */
        s2io_set_swapper(sp);

        /* Restore the MSIX table entries from local variables */
        restore_xmsi_data(sp);

        /* Clear certain PCI/PCI-X fields after reset */
        if (sp->device_type == XFRAME_II_DEVICE) {
                /* Clear "detected parity error" bit */
                pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

                /* Clearing PCIX Ecc status register */
                pci_write_config_dword(sp->pdev, 0x68, 0x7C);

                /* Clearing PCI_STATUS error reflected here */
                writeq(BIT(62), &bar0->txpic_int_reg);
        }

        /* Reset device statistics maintained by OS */
        memset(&sp->stats, 0, sizeof (struct net_device_stats));

        /* save link up/down time/cnt, reset/memory/watchdog cnt */
        up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
        down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
        up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
        down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
        reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
        mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
        mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
        watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
        /* wipe the whole stats block, then put the saved values back */
        memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
        /* restore link up/down time/cnt, reset/memory/watchdog cnt */
        sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
        sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
        sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
        sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
        sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
        sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
        sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
        sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;

        /* SXE-002: Configure link and activity LED to turn it off */
        subid = sp->pdev->subsystem_device;
        if (((subid & 0xFF) >= 0x07) &&
            (sp->device_type == XFRAME_I_DEVICE)) {
                val64 = readq(&bar0->gpio_control);
                val64 |= 0x0000800000000000ULL;
                writeq(val64, &bar0->gpio_control);
                val64 = 0x0411040400000000ULL;
                writeq(val64, (void __iomem *)bar0 + 0x2700);
        }

        /*
         * Clear spurious ECC interrupts that would have occurred on
         * XFRAME II cards after reset.
         */
        if (sp->device_type == XFRAME_II_DEVICE) {
                val64 = readq(&bar0->pcc_err_reg);
                writeq(val64, &bar0->pcc_err_reg);
        }

        /* restore the previously assigned mac address */
        s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);

        sp->device_enabled_once = FALSE;
}
3467
/**
 *  s2io_set_swapper - to set the swapper control on the card
 *  @sp : private member of the device structure,
 *  pointer to the s2io_nic structure.
 *  Description: Function to set the swapper control on the card
 *  correctly depending on the 'endianness' of the system.
 *  Return value:
 *  SUCCESS on success and FAILURE on failure.
 */
3477
static int s2io_set_swapper(struct s2io_nic * sp)
{
        struct net_device *dev = sp->dev;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64, valt, valr;

        /*
         * Set proper endian settings and verify the same by reading
         * the PIF Feed-back register.
         */

        val64 = readq(&bar0->pif_rd_swapper_fb);
        if (val64 != 0x0123456789ABCDEFULL) {
                /* Probe the four read-path swapper settings until the
                 * feedback register reads back the known pattern. */
                int i = 0;
                u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
                                0x8100008181000081ULL,  /* FE=1, SE=0 */
                                0x4200004242000042ULL,  /* FE=0, SE=1 */
                                0};                     /* FE=0, SE=0 */

                while(i<4) {
                        writeq(value[i], &bar0->swapper_ctrl);
                        val64 = readq(&bar0->pif_rd_swapper_fb);
                        if (val64 == 0x0123456789ABCDEFULL)
                                break;
                        i++;
                }
                if (i == 4) {
                        DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
                                dev->name);
                        DBG_PRINT(ERR_DBG, "feedback read %llx\n",
                                (unsigned long long) val64);
                        return FAILURE;
                }
                valr = value[i];
        } else {
                valr = readq(&bar0->swapper_ctrl);
        }

        /* Verify the write path via the xmsi_address scratch register */
        valt = 0x0123456789ABCDEFULL;
        writeq(valt, &bar0->xmsi_address);
        val64 = readq(&bar0->xmsi_address);

        if(val64 != valt) {
                /* Probe the four write-path settings (combined with the
                 * working read setting valr) until write+read round-trips. */
                int i = 0;
                u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
                                0x0081810000818100ULL,  /* FE=1, SE=0 */
                                0x0042420000424200ULL,  /* FE=0, SE=1 */
                                0};                     /* FE=0, SE=0 */

                while(i<4) {
                        writeq((value[i] | valr), &bar0->swapper_ctrl);
                        writeq(valt, &bar0->xmsi_address);
                        val64 = readq(&bar0->xmsi_address);
                        if(val64 == valt)
                                break;
                        i++;
                }
                if(i == 4) {
                        unsigned long long x = val64;
                        DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
                        DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
                        return FAILURE;
                }
        }
        /* Keep only the validated FE/SE bits, then OR in the per-path
         * enables appropriate for this host's endianness. */
        val64 = readq(&bar0->swapper_ctrl);
        val64 &= 0xFFFF000000000000ULL;

#ifdef  __BIG_ENDIAN
        /*
         * The device by default set to a big endian format, so a
         * big endian driver need not set anything.
         */
        val64 |= (SWAPPER_CTRL_TXP_FE |
                 SWAPPER_CTRL_TXP_SE |
                 SWAPPER_CTRL_TXD_R_FE |
                 SWAPPER_CTRL_TXD_W_FE |
                 SWAPPER_CTRL_TXF_R_FE |
                 SWAPPER_CTRL_RXD_R_FE |
                 SWAPPER_CTRL_RXD_W_FE |
                 SWAPPER_CTRL_RXF_W_FE |
                 SWAPPER_CTRL_XMSI_FE |
                 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
        if (sp->intr_type == INTA)
                val64 |= SWAPPER_CTRL_XMSI_SE;
        writeq(val64, &bar0->swapper_ctrl);
#else
        /*
         * Initially we enable all bits to make it accessible by the
         * driver, then we selectively enable only those bits that
         * we want to set.
         */
        val64 |= (SWAPPER_CTRL_TXP_FE |
                 SWAPPER_CTRL_TXP_SE |
                 SWAPPER_CTRL_TXD_R_FE |
                 SWAPPER_CTRL_TXD_R_SE |
                 SWAPPER_CTRL_TXD_W_FE |
                 SWAPPER_CTRL_TXD_W_SE |
                 SWAPPER_CTRL_TXF_R_FE |
                 SWAPPER_CTRL_RXD_R_FE |
                 SWAPPER_CTRL_RXD_R_SE |
                 SWAPPER_CTRL_RXD_W_FE |
                 SWAPPER_CTRL_RXD_W_SE |
                 SWAPPER_CTRL_RXF_W_FE |
                 SWAPPER_CTRL_XMSI_FE |
                 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
        if (sp->intr_type == INTA)
                val64 |= SWAPPER_CTRL_XMSI_SE;
        writeq(val64, &bar0->swapper_ctrl);
#endif
        val64 = readq(&bar0->swapper_ctrl);

        /*
         * Verifying if endian settings are accurate by reading a
         * feedback register.
         */
        val64 = readq(&bar0->pif_rd_swapper_fb);
        if (val64 != 0x0123456789ABCDEFULL) {
                /* Endian settings are incorrect, calls for another dekko. */
                DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
                          dev->name);
                DBG_PRINT(ERR_DBG, "feedback read %llx\n",
                          (unsigned long long) val64);
                return FAILURE;
        }

        return SUCCESS;
}
3605
3606 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3607 {
3608         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3609         u64 val64;
3610         int ret = 0, cnt = 0;
3611
3612         do {
3613                 val64 = readq(&bar0->xmsi_access);
3614                 if (!(val64 & BIT(15)))
3615                         break;
3616                 mdelay(1);
3617                 cnt++;
3618         } while(cnt < 5);
3619         if (cnt == 5) {
3620                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3621                 ret = 1;
3622         }
3623
3624         return ret;
3625 }
3626
3627 static void restore_xmsi_data(struct s2io_nic *nic)
3628 {
3629         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3630         u64 val64;
3631         int i;
3632
3633         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3634                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3635                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3636                 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3637                 writeq(val64, &bar0->xmsi_access);
3638                 if (wait_for_msix_trans(nic, i)) {
3639                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3640                         continue;
3641                 }
3642         }
3643 }
3644
3645 static void store_xmsi_data(struct s2io_nic *nic)
3646 {
3647         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3648         u64 val64, addr, data;
3649         int i;
3650
3651         /* Store and display */
3652         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3653                 val64 = (BIT(15) | vBIT(i, 26, 6));
3654                 writeq(val64, &bar0->xmsi_access);
3655                 if (wait_for_msix_trans(nic, i)) {
3656                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3657                         continue;
3658                 }
3659                 addr = readq(&bar0->xmsi_address);
3660                 data = readq(&bar0->xmsi_data);
3661                 if (addr && data) {
3662                         nic->msix_info[i].addr = addr;
3663                         nic->msix_info[i].data = data;
3664                 }
3665         }
3666 }
3667
3668 static int s2io_enable_msi_x(struct s2io_nic *nic)
3669 {
3670         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3671         u64 tx_mat, rx_mat;
3672         u16 msi_control; /* Temp variable */
3673         int ret, i, j, msix_indx = 1;
3674
3675         nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3676                                GFP_KERNEL);
3677         if (nic->entries == NULL) {
3678                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3679                         __FUNCTION__);
3680                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3681                 return -ENOMEM;
3682         }
3683         nic->mac_control.stats_info->sw_stat.mem_allocated 
3684                 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3685         memset(nic->entries, 0,MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3686
3687         nic->s2io_entries =
3688                 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3689                                    GFP_KERNEL);
3690         if (nic->s2io_entries == NULL) {
3691                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", 
3692                         __FUNCTION__);
3693                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3694                 kfree(nic->entries);
3695                 nic->mac_control.stats_info->sw_stat.mem_freed 
3696                         += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3697                 return -ENOMEM;
3698         }
3699          nic->mac_control.stats_info->sw_stat.mem_allocated 
3700                 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3701         memset(nic->s2io_entries, 0,
3702                MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3703
3704         for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3705                 nic->entries[i].entry = i;
3706                 nic->s2io_entries[i].entry = i;
3707                 nic->s2io_entries[i].arg = NULL;
3708                 nic->s2io_entries[i].in_use = 0;
3709         }
3710
3711         tx_mat = readq(&bar0->tx_mat0_n[0]);
3712         for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3713                 tx_mat |= TX_MAT_SET(i, msix_indx);
3714                 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3715                 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3716                 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3717         }
3718         writeq(tx_mat, &bar0->tx_mat0_n[0]);
3719
3720         if (!nic->config.bimodal) {
3721                 rx_mat = readq(&bar0->rx_mat);
3722                 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3723                         rx_mat |= RX_MAT_SET(j, msix_indx);
3724                         nic->s2io_entries[msix_indx].arg 
3725                                 = &nic->mac_control.rings[j];
3726                         nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3727                         nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3728                 }
3729                 writeq(rx_mat, &bar0->rx_mat);
3730         } else {
3731                 tx_mat = readq(&bar0->tx_mat0_n[7]);
3732                 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3733                         tx_mat |= TX_MAT_SET(i, msix_indx);
3734                         nic->s2io_entries[msix_indx].arg 
3735                                 = &nic->mac_control.rings[j];
3736                         nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3737                         nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3738                 }
3739                 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3740         }
3741
3742         nic->avail_msix_vectors = 0;
3743         ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3744         /* We fail init if error or we get less vectors than min required */
3745         if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3746                 nic->avail_msix_vectors = ret;
3747                 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3748         }
3749         if (ret) {
3750                 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3751                 kfree(nic->entries);
3752                 nic->mac_control.stats_info->sw_stat.mem_freed 
3753                         += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3754                 kfree(nic->s2io_entries);
3755                 nic->mac_control.stats_info->sw_stat.mem_freed 
3756                 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3757                 nic->entries = NULL;
3758                 nic->s2io_entries = NULL;
3759                 nic->avail_msix_vectors = 0;
3760                 return -ENOMEM;
3761         }
3762         if (!nic->avail_msix_vectors)
3763                 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3764
3765         /*
3766          * To enable MSI-X, MSI also needs to be enabled, due to a bug
3767          * in the herc NIC. (Temp change, needs to be removed later)
3768          */
3769         pci_read_config_word(nic->pdev, 0x42, &msi_control);
3770         msi_control |= 0x1; /* Enable MSI */
3771         pci_write_config_word(nic->pdev, 0x42, msi_control);
3772
3773         return 0;
3774 }
3775
3776 /* Handle software interrupt used during MSI(X) test */
3777 static irqreturn_t __devinit s2io_test_intr(int irq, void *dev_id)
3778 {
3779         struct s2io_nic *sp = dev_id;
3780
3781         sp->msi_detected = 1;
3782         wake_up(&sp->msi_wait);
3783
3784         return IRQ_HANDLED;
3785 }
3786
/* Test interrupt path by forcing a software IRQ */
3788 static int __devinit s2io_test_msi(struct s2io_nic *sp)
3789 {
3790         struct pci_dev *pdev = sp->pdev;
3791         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3792         int err;
3793         u64 val64, saved64;
3794
3795         err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3796                         sp->name, sp);
3797         if (err) {
3798                 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3799                        sp->dev->name, pci_name(pdev), pdev->irq);
3800                 return err;
3801         }
3802
3803         init_waitqueue_head (&sp->msi_wait);
3804         sp->msi_detected = 0;
3805
3806         saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3807         val64 |= SCHED_INT_CTRL_ONE_SHOT;
3808         val64 |= SCHED_INT_CTRL_TIMER_EN;
3809         val64 |= SCHED_INT_CTRL_INT2MSI(1);
3810         writeq(val64, &bar0->scheduled_int_ctrl);
3811
3812         wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3813
3814         if (!sp->msi_detected) {
3815                 /* MSI(X) test failed, go back to INTx mode */
3816                 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated"
3817                         "using MSI(X) during test\n", sp->dev->name,
3818                         pci_name(pdev));
3819
3820                 err = -EOPNOTSUPP;
3821         }
3822
3823         free_irq(sp->entries[1].vector, sp);
3824
3825         writeq(saved64, &bar0->scheduled_int_ctrl);
3826
3827         return err;
3828 }
3829 /* ********************************************************* *
3830  * Functions defined below concern the OS part of the driver *
3831  * ********************************************************* */
3832
3833 /**
3834  *  s2io_open - open entry point of the driver
3835  *  @dev : pointer to the device structure.
3836  *  Description:
3837  *  This function is the open entry point of the driver. It mainly calls a
3838  *  function to allocate Rx buffers and inserts them into the buffer
3839  *  descriptors and then enables the Rx part of the NIC.
3840  *  Return value:
3841  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3842  *   file on failure.
3843  */
3844
static int s2io_open(struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	int err = 0;

	/*
	 * Make sure you have link off by default every time
	 * Nic is initialized
	 */
	netif_carrier_off(dev);
	sp->last_link_state = 0;

	napi_enable(&sp->napi);

	/* Probe for working MSI-X before bringing the card up; on any
	 * failure we fall back to legacy INTA below. */
	if (sp->intr_type == MSI_X) {
		int ret = s2io_enable_msi_x(sp);

		if (!ret) {
			u16 msi_control;

			/* Fire a software interrupt to prove the MSI-X
			 * path actually delivers interrupts. */
			ret = s2io_test_msi(sp);

			/* rollback MSI-X, will re-enable during add_isr() */
			kfree(sp->entries);
			sp->mac_control.stats_info->sw_stat.mem_freed +=
				(MAX_REQUESTED_MSI_X *
				sizeof(struct msix_entry));
			kfree(sp->s2io_entries);
			sp->mac_control.stats_info->sw_stat.mem_freed +=
				(MAX_REQUESTED_MSI_X *
				sizeof(struct s2io_msix_entry));
			sp->entries = NULL;
			sp->s2io_entries = NULL;

			/* Undo the MSI-enable bit set by s2io_enable_msi_x()
			 * (herc NIC workaround at config offset 0x42). */
			pci_read_config_word(sp->pdev, 0x42, &msi_control);
			msi_control &= 0xFFFE; /* Disable MSI */
			pci_write_config_word(sp->pdev, 0x42, msi_control);

			pci_disable_msix(sp->pdev);

		}
		if (ret) {

			DBG_PRINT(ERR_DBG,
			  "%s: MSI-X requested but failed to enable\n",
			  dev->name);
			sp->intr_type = INTA;
		}
	}

	/* NAPI doesn't work well with MSI(X) */
	 if (sp->intr_type != INTA) {
		if(sp->config.napi)
			sp->config.napi = 0;
	}

	/* Initialize H/W and enable interrupts */
	err = s2io_card_up(sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		goto hw_init_failed;
	}

	if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
		s2io_card_down(sp);
		err = -ENODEV;
		goto hw_init_failed;
	}

	netif_start_queue(dev);
	return 0;

hw_init_failed:
	/* Undo napi_enable() and free any MSI-X bookkeeping on failure */
	napi_disable(&sp->napi);
	if (sp->intr_type == MSI_X) {
		if (sp->entries) {
			kfree(sp->entries);
			sp->mac_control.stats_info->sw_stat.mem_freed 
			+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
		}
		if (sp->s2io_entries) {
			kfree(sp->s2io_entries);
			sp->mac_control.stats_info->sw_stat.mem_freed 
			+= (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
		}
	}
	return err;
}
3935
3936 /**
3937  *  s2io_close -close entry point of the driver
3938  *  @dev : device pointer.
3939  *  Description:
3940  *  This is the stop entry point of the driver. It needs to undo exactly
3941  *  whatever was done by the open entry point,thus it's usually referred to
3942  *  as the close function.Among other things this function mainly stops the
3943  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3944  *  Return value:
3945  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3946  *  file on failure.
3947  */
3948
3949 static int s2io_close(struct net_device *dev)
3950 {
3951         struct s2io_nic *sp = dev->priv;
3952
3953         netif_stop_queue(dev);
3954         napi_disable(&sp->napi);
3955         /* Reset card, kill tasklet and free Tx and Rx buffers. */
3956         s2io_card_down(sp);
3957
3958         return 0;
3959 }
3960
3961 /**
 *  s2io_xmit - Tx entry point of the driver
 *  @skb : the socket buffer containing the Tx data.
 *  @dev : device pointer.
 *  Description :
 *  This function is the Tx entry point of the driver. S2IO NIC supports
 *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
 *  NOTE: when the device cannot queue the pkt, just the trans_start variable
 *  will not be updated.
3970  *  Return value:
3971  *  0 on success & 1 on failure.
3972  */
3973
static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags;
	u16 vlan_tag = 0;
	int vlan_priority = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	int offload_type;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	mac_control = &sp->mac_control;
	config = &sp->config;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

	/* Zero-length skb: nothing to DMA, free it and claim success */
	if (unlikely(skb->len <= 0)) {
		DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
		dev_kfree_skb_any(skb);
		return 0;
}

	spin_lock_irqsave(&sp->tx_lock, flags);
	/* Card is going through a reset: silently drop the packet */
	if (atomic_read(&sp->card_state) == CARD_DOWN) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		dev_kfree_skb(skb);
		return 0;
	}

	queue = 0;
	/* Get Fifo number to Transmit based on vlan priority */
	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_priority = vlan_tag >> 13;
		queue = config->fifo_mapping[vlan_priority];
	}

	/* Locate the next free TxD in the chosen FIFO's circular list */
	put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
	get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
	txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
		list_virt_addr;

	queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control ||
		   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		netif_stop_queue(dev);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		return 0;
	}

	/* Program TCP LSO if the stack requested segmentation offload */
	offload_type = s2io_offload_type(skb);
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	/* Hardware checksum offload for IPv4/TCP/UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |=
		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
		     TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= config->tx_intr_type;

	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	frg_len = skb->len - skb->data_len;
	if (offload_type == SKB_GSO_UDP) {
		int ufo_size;

		/* UFO: TxD0 carries an 8-byte in-band header holding the
		 * IPv6 fragment id; the real payload starts at the next TxD. */
		ufo_size = s2io_udp_mss(skb);
		ufo_size &= ~7;
		txdp->Control_1 |= TXD_UFO_EN;
		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
		sp->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id;
#else
		sp->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
		txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
					sp->ufo_in_band_v,
					sizeof(u64), PCI_DMA_TODEVICE);
		if((txdp->Buffer_Pointer == 0) ||
			(txdp->Buffer_Pointer == DMA_ERROR_CODE))
			goto pci_map_failed;
		txdp++;
	}

	/* Map the linear (non-paged) part of the skb */
	txdp->Buffer_Pointer = pci_map_single
	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
	if((txdp->Buffer_Pointer == 0) ||
		(txdp->Buffer_Pointer == DMA_ERROR_CODE))
		goto pci_map_failed;

	txdp->Host_Control = (unsigned long) skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
	if (offload_type == SKB_GSO_UDP)
		txdp->Control_1 |= TXD_UFO_EN;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!frag->size)
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64) pci_map_page
		    (sp->pdev, frag->page, frag->page_offset,
		     frag->size, PCI_DMA_TODEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
		if (offload_type == SKB_GSO_UDP)
			txdp->Control_1 |= TXD_UFO_EN;
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	if (offload_type == SKB_GSO_UDP)
		frg_cnt++; /* as Txd0 was used for inband header */

	/* Ring the doorbell: hand the descriptor list to the hardware */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	mmiowb();

	/* Advance the put pointer, wrapping at the end of the FIFO */
	put_off++;
	if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	mac_control->fifos[queue].tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		netif_stop_queue(dev);
	}
	mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return 0;
pci_map_failed:
	/* DMA mapping failed: count it, stop the queue and drop the skb */
	stats->pci_map_fail_cnt++;
	netif_stop_queue(dev);
	stats->mem_freed += skb->truesize;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&sp->tx_lock, flags);
	return 0;
}
4148
4149 static void
4150 s2io_alarm_handle(unsigned long data)
4151 {
4152         struct s2io_nic *sp = (struct s2io_nic *)data;
4153
4154         alarm_intr_handler(sp);
4155         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4156 }
4157
4158 static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
4159 {
4160         int rxb_size, level;
4161
4162         if (!sp->lro) {
4163                 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4164                 level = rx_buffer_level(sp, rxb_size, rng_n);
4165
4166                 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4167                         int ret;
4168                         DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4169                         DBG_PRINT(INTR_DBG, "PANIC levels\n");
4170                         if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4171                                 DBG_PRINT(INFO_DBG, "Out of memory in %s",
4172                                           __FUNCTION__);
4173                                 clear_bit(0, (&sp->tasklet_status));
4174                                 return -1;
4175                         }
4176                         clear_bit(0, (&sp->tasklet_status));
4177                 } else if (level == LOW)
4178                         tasklet_schedule(&sp->task);
4179
4180         } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4181                         DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
4182                         DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
4183         }
4184         return 0;
4185 }
4186
4187 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4188 {
4189         struct ring_info *ring = (struct ring_info *)dev_id;
4190         struct s2io_nic *sp = ring->nic;
4191
4192         atomic_inc(&sp->isr_cnt);
4193
4194         rx_intr_handler(ring);
4195         s2io_chk_rx_buffers(sp, ring->ring_no);
4196
4197         atomic_dec(&sp->isr_cnt);
4198         return IRQ_HANDLED;
4199 }
4200
4201 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4202 {
4203         struct fifo_info *fifo = (struct fifo_info *)dev_id;
4204         struct s2io_nic *sp = fifo->nic;
4205
4206         atomic_inc(&sp->isr_cnt);
4207         tx_intr_handler(fifo);
4208         atomic_dec(&sp->isr_cnt);
4209         return IRQ_HANDLED;
4210 }
/*
 * Handle TXPIC interrupts: decode GPIO link up/down events, drive the
 * adapter enable/LED bits accordingly and re-program the GPIO interrupt
 * mask so only the opposite link transition is reported next.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |=  GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			/* Unmask both transitions so the next event is seen */
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		}
		else if (val64 & GPIO_INT_REG_LINK_UP) {
			val64 = readq(&bar0->adapter_status);
				/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		}else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 &(~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* NOTE(review): trailing readback of gpio_int_mask looks like a
	 * PCI write flush; the value is intentionally discarded — confirm. */
	val64 = readq(&bar0->gpio_int_mask);
}
4271
4272 /**
4273  *  s2io_isr - ISR handler of the device .
4274  *  @irq: the irq of the device.
4275  *  @dev_id: a void pointer to the dev structure of the NIC.
4276  *  Description:  This function is the ISR handler of the device. It
4277  *  identifies the reason for the interrupt and calls the relevant
4278  *  service routines. As a contongency measure, this ISR allocates the
4279  *  recv buffers, if their numbers are below the panic value which is
4280  *  presently set to 25% of the original number of rcv buffers allocated.
4281  *  Return value:
4282  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4283  *   IRQ_NONE: will be returned if interrupt is not from our device
4284  */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	/* Pretend we handled any irq's from a disconnected card */
	if (pci_channel_offline(sp->pdev))
		return IRQ_NONE;

	/* Mark that we are inside the handler for the rest of the driver */
	atomic_inc(&sp->isr_cnt);
	mac_control = &sp->mac_control;
	config = &sp->config;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 * 4. Error in any functional blocks of the NIC.
	 */
	reason = readq(&bar0->general_int_status);

	if (!reason) {
		/* The interrupt was not raised by us. */
		atomic_dec(&sp->isr_cnt);
		return IRQ_NONE;
	}
	/*
	 * All-ones presumably means the adapter is no longer responding
	 * (e.g. surprise removal) — TODO confirm; bail out without
	 * touching any more registers.
	 */
	else if (unlikely(reason == S2IO_MINUS_ONE) ) {
		/* Disable device and get out */
		atomic_dec(&sp->isr_cnt);
		return IRQ_NONE;
	}

	if (napi) {
		/* NAPI mode: mask further Rx interrupts and let the poll
		 * routine drain the rings. */
		if (reason & GEN_INTR_RXTRAFFIC) {
			if (likely (netif_rx_schedule_prep(dev, &sp->napi))) {
				__netif_rx_schedule(dev, &sp->napi);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
			}
			else
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
		}
	} else {
		/*
		 * Rx handler is called by default, without checking for the
		 * cause of interrupt.
		 * rx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit get's
		 * cleared and hence a read can be avoided.
		 */
		if (reason & GEN_INTR_RXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

		for (i = 0; i < config->rx_ring_num; i++) {
			rx_intr_handler(&mac_control->rings[i]);
		}
	}

	/*
	 * tx_traffic_int reg is an R1 register, writing all 1's
	 * will ensure that the actual interrupt causing bit get's
	 * cleared and hence a read can be avoided.
	 */
	if (reason & GEN_INTR_TXTRAFFIC)
		writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

	/* Tx completion is always processed, regardless of NAPI mode */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	if (reason & GEN_INTR_TXPIC)
		s2io_txpic_intr_handle(sp);
	/*
	 * If the Rx buffer count is below the panic threshold then
	 * reallocate the buffers from the interrupt handler itself,
	 * else schedule a tasklet to reallocate the buffers.
	 */
	if (!napi) {
		for (i = 0; i < config->rx_ring_num; i++)
			s2io_chk_rx_buffers(sp, i);
	}

	/* Clear the interrupt mask (re-enabling interrupts); the readl
	 * presumably flushes the posted PCI write — TODO confirm */
	writeq(0, &bar0->general_int_mask);
	readl(&bar0->general_int_status);

	atomic_dec(&sp->isr_cnt);
	return IRQ_HANDLED;
}
4378
4379 /**
4380  * s2io_updt_stats -
4381  */
4382 static void s2io_updt_stats(struct s2io_nic *sp)
4383 {
4384         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4385         u64 val64;
4386         int cnt = 0;
4387
4388         if (atomic_read(&sp->card_state) == CARD_UP) {
4389                 /* Apprx 30us on a 133 MHz bus */
4390                 val64 = SET_UPDT_CLICKS(10) |
4391                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4392                 writeq(val64, &bar0->stat_cfg);
4393                 do {
4394                         udelay(100);
4395                         val64 = readq(&bar0->stat_cfg);
4396                         if (!(val64 & BIT(0)))
4397                                 break;
4398                         cnt++;
4399                         if (cnt == 5)
4400                                 break; /* Updt failed */
4401                 } while(1);
4402         } 
4403 }
4404
4405 /**
4406  *  s2io_get_stats - Updates the device statistics structure.
4407  *  @dev : pointer to the device structure.
4408  *  Description:
4409  *  This function updates the device statistics structure in the s2io_nic
4410  *  structure and returns a pointer to the same.
4411  *  Return value:
4412  *  pointer to the updated net_device_stats structure.
4413  */
4414
4415 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4416 {
4417         struct s2io_nic *sp = dev->priv;
4418         struct mac_info *mac_control;
4419         struct config_param *config;
4420
4421
4422         mac_control = &sp->mac_control;
4423         config = &sp->config;
4424
4425         /* Configure Stats for immediate updt */
4426         s2io_updt_stats(sp);
4427
4428         sp->stats.tx_packets =
4429                 le32_to_cpu(mac_control->stats_info->tmac_frms);
4430         sp->stats.tx_errors =
4431                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4432         sp->stats.rx_errors =
4433                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4434         sp->stats.multicast =
4435                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4436         sp->stats.rx_length_errors =
4437                 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4438
4439         return (&sp->stats);
4440 }
4441
4442 /**
4443  *  s2io_set_multicast - entry point for multicast address enable/disable.
4444  *  @dev : pointer to the device structure
4445  *  Description:
4446  *  This function is a driver entry point which gets called by the kernel
4447  *  whenever multicast addresses must be enabled/disabled. This also gets
4448  *  called to set/reset promiscuous mode. Depending on the deivce flag, we
4449  *  determine, if multicast address must be enabled or if promiscuous mode
4450  *  is to be disabled etc.
4451  *  Return value:
4452  *  void.
4453  */
4454
static void s2io_set_multicast(struct net_device *dev)
{
	int i, j, prev_cnt;
	struct dev_mc_list *mclist;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
	    0xfeffffffffffULL;
	u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
	void __iomem *add;

	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
		/*  Enable all Multicast addresses */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
		    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		    RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET);

		sp->m_cast_flg = 1;
		sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
		/*  Disable all Multicast addresses */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
		    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		    RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET);

		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
		/*  Put the NIC into promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 |= MAC_CFG_RMAC_PROM_ENABLE;

		/* mac_cfg is written 32 bits at a time; each half must be
		 * preceded by a write of the unlock key to rmac_cfg_key */
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		if (vlan_tag_strip != 1) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			vlan_strip_flag = 0;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 1;
		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
			  dev->name);
	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
		/*  Remove the NIC from promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

		/* Same keyed 32-bit half-write sequence as above */
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		if (vlan_tag_strip != 0) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			vlan_strip_flag = 1;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 0;
		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
			  dev->name);
	}

	/*  Update individual M_CAST address list */
	if ((!sp->m_cast_flg) && dev->mc_count) {
		if (dev->mc_count >
		    (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
			DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "can be added, please enable ");
			DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
			return;
		}

		prev_cnt = sp->mc_addr_count;
		sp->mc_addr_count = dev->mc_count;

		/* Clear out the previous list of Mc in the H/W. */
		for (i = 0; i < prev_cnt; i++) {
			/* Writing the all-ones "disable" pattern removes the
			 * filter entry at this offset */
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
				&bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			    RMAC_ADDR_CMD_MEM_OFFSET
			    (MAC_MC_ADDR_START_OFFSET + i);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG, "%s: Adding ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
				return;
			}
		}

		/* Create the new Rx filter list and update the same in H/W. */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
		     i++, mclist = mclist->next) {
			memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
			       ETH_ALEN);
			/* Pack the 6 address bytes into a 48-bit big-endian
			 * integer (shift-then-correct pattern) */
			mac_addr = 0;
			for (j = 0; j < ETH_ALEN; j++) {
				mac_addr |= mclist->dmi_addr[j];
				mac_addr <<= 8;
			}
			mac_addr >>= 8;
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
				&bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			    RMAC_ADDR_CMD_MEM_OFFSET
			    (i + MAC_MC_ADDR_START_OFFSET);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG, "%s: Adding ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
				return;
			}
		}
	}
}
4618
4619 /**
4620  *  s2io_set_mac_addr - Programs the Xframe mac address
4621  *  @dev : pointer to the device structure.
4622  *  @addr: a uchar pointer to the new mac address which is to be set.
4623  *  Description : This procedure will program the Xframe to receive
4624  *  frames with new Mac Address
4625  *  Return value: SUCCESS on success and an appropriate (-)ve integer
4626  *  as defined in errno.h file on failure.
4627  */
4628
4629 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4630 {
4631         struct s2io_nic *sp = dev->priv;
4632         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4633         register u64 val64, mac_addr = 0;
4634         int i;
4635         u64 old_mac_addr = 0;
4636
4637         /*
4638          * Set the new MAC address as the new unicast filter and reflect this
4639          * change on the device address registered with the OS. It will be
4640          * at offset 0.
4641          */
4642         for (i = 0; i < ETH_ALEN; i++) {
4643                 mac_addr <<= 8;
4644                 mac_addr |= addr[i];
4645                 old_mac_addr <<= 8;
4646                 old_mac_addr |= sp->def_mac_addr[0].mac_addr[i];
4647         }
4648
4649         if(0 == mac_addr)
4650                 return SUCCESS;
4651
4652         /* Update the internal structure with this new mac address */
4653         if(mac_addr != old_mac_addr) {
4654                 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
4655                 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr);
4656                 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8);
4657                 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16);
4658                 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24);
4659                 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32);
4660                 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40);
4661         }
4662
4663         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4664                &bar0->rmac_addr_data0_mem);
4665
4666         val64 =
4667             RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4668             RMAC_ADDR_CMD_MEM_OFFSET(0);
4669         writeq(val64, &bar0->rmac_addr_cmd_mem);
4670         /* Wait till command completes */
4671         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4672                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
4673                 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4674                 return FAILURE;
4675         }
4676
4677         return SUCCESS;
4678 }
4679
4680 /**
4681  * s2io_ethtool_sset - Sets different link parameters.
4682  * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
4683  * @info: pointer to the structure with parameters given by ethtool to set
4684  * link information.
4685  * Description:
4686  * The function sets different link parameters provided by the user onto
4687  * the NIC.
4688  * Return value:
4689  * 0 on success.
4690 */
4691
4692 static int s2io_ethtool_sset(struct net_device *dev,
4693                              struct ethtool_cmd *info)
4694 {
4695         struct s2io_nic *sp = dev->priv;
4696         if ((info->autoneg == AUTONEG_ENABLE) ||
4697             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4698                 return -EINVAL;
4699         else {
4700                 s2io_close(sp->dev);
4701                 s2io_open(sp->dev);
4702         }
4703
4704         return 0;
4705 }
4706
4707 /**
4708  * s2io_ethtol_gset - Return link specific information.
4709  * @sp : private member of the device structure, pointer to the
4710  *      s2io_nic structure.
4711  * @info : pointer to the structure with parameters given by ethtool
4712  * to return link information.
4713  * Description:
4714  * Returns link specific information like speed, duplex etc.. to ethtool.
4715  * Return value :
4716  * return 0 on success.
4717  */
4718
4719 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4720 {
4721         struct s2io_nic *sp = dev->priv;
4722         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4723         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4724         info->port = PORT_FIBRE;
4725         /* info->transceiver?? TODO */
4726
4727         if (netif_carrier_ok(sp->dev)) {
4728                 info->speed = 10000;
4729                 info->duplex = DUPLEX_FULL;
4730         } else {
4731                 info->speed = -1;
4732                 info->duplex = -1;
4733         }
4734
4735         info->autoneg = AUTONEG_DISABLE;
4736         return 0;
4737 }
4738
4739 /**
4740  * s2io_ethtool_gdrvinfo - Returns driver specific information.
4741  * @sp : private member of the device structure, which is a pointer to the
4742  * s2io_nic structure.
4743  * @info : pointer to the structure with parameters given by ethtool to
4744  * return driver information.
4745  * Description:
4746  * Returns driver specefic information like name, version etc.. to ethtool.
4747  * Return value:
4748  *  void
4749  */
4750
static void s2io_ethtool_gdrvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *info)
{
	struct s2io_nic *sp = dev->priv;

	/* NOTE(review): strncpy does not guarantee NUL-termination when the
	 * source fills the buffer; the constant strings here are short, but
	 * consider a termination-guaranteeing copy — TODO confirm */
	strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
	strncpy(info->version, s2io_driver_version, sizeof(info->version));
	strncpy(info->fw_version, "", sizeof(info->fw_version));
	strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
	info->regdump_len = XENA_REG_SPACE;
	info->eedump_len = XENA_EEPROM_SPACE;
	info->testinfo_len = S2IO_TEST_LEN;

	/* Stat count differs between the Xframe I and II adapters */
	if (sp->device_type == XFRAME_I_DEVICE)
		info->n_stats = XFRAME_I_STAT_LEN;
	else
		info->n_stats = XFRAME_II_STAT_LEN;
}
4769
4770 /**
4771  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
4772  *  @sp: private member of the device structure, which is a pointer to the
4773  *  s2io_nic structure.
4774  *  @regs : pointer to the structure with parameters given by ethtool for
4775  *  dumping the registers.
4776  *  @reg_space: The input argumnet into which all the registers are dumped.
4777  *  Description:
4778  *  Dumps the entire register space of xFrame NIC into the user given
4779  *  buffer area.
4780  * Return value :
4781  * void .
4782 */
4783
4784 static void s2io_ethtool_gregs(struct net_device *dev,
4785                                struct ethtool_regs *regs, void *space)
4786 {
4787         int i;
4788         u64 reg;
4789         u8 *reg_space = (u8 *) space;
4790         struct s2io_nic *sp = dev->priv;
4791
4792         regs->len = XENA_REG_SPACE;
4793         regs->version = sp->pdev->subsystem_device;
4794
4795         for (i = 0; i < regs->len; i += 8) {
4796                 reg = readq(sp->bar0 + i);
4797                 memcpy((reg_space + i), &reg, 8);
4798         }
4799 }
4800
4801 /**
4802  *  s2io_phy_id  - timer function that alternates adapter LED.
4803  *  @data : address of the private member of the device structure, which
4804  *  is a pointer to the s2io_nic structure, provided as an u32.
4805  * Description: This is actually the timer function that alternates the
4806  * adapter LED bit of the adapter control bit to set/reset every time on
4807  * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
4808  *  once every second.
4809 */
4810 static void s2io_phy_id(unsigned long data)
4811 {
4812         struct s2io_nic *sp = (struct s2io_nic *) data;
4813         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4814         u64 val64 = 0;
4815         u16 subid;
4816
4817         subid = sp->pdev->subsystem_device;
4818         if ((sp->device_type == XFRAME_II_DEVICE) ||
4819                    ((subid & 0xFF) >= 0x07)) {
4820                 val64 = readq(&bar0->gpio_control);
4821                 val64 ^= GPIO_CTRL_GPIO_0;
4822                 writeq(val64, &bar0->gpio_control);
4823         } else {
4824                 val64 = readq(&bar0->adapter_control);
4825                 val64 ^= ADAPTER_LED_ON;
4826                 writeq(val64, &bar0->adapter_control);
4827         }
4828
4829         mod_timer(&sp->id_timer, jiffies + HZ / 2);
4830 }
4831
4832 /**
4833  * s2io_ethtool_idnic - To physically identify the nic on the system.
4834  * @sp : private member of the device structure, which is a pointer to the
4835  * s2io_nic structure.
4836  * @id : pointer to the structure with identification parameters given by
4837  * ethtool.
4838  * Description: Used to physically identify the NIC on the system.
4839  * The Link LED will blink for a time specified by the user for
4840  * identification.
4841  * NOTE: The Link has to be Up to be able to blink the LED. Hence
4842  * identification is possible only if it's link is up.
4843  * Return value:
4844  * int , returns 0 on success
4845  */
4846
static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
{
	u64 val64 = 0, last_gpio_ctrl_val;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u16 subid;

	subid = sp->pdev->subsystem_device;
	/* Remember the current LED register state so it can be restored */
	last_gpio_ctrl_val = readq(&bar0->gpio_control);
	/* Older Xframe I boards can only blink while the adapter is enabled */
	if ((sp->device_type == XFRAME_I_DEVICE) &&
		((subid & 0xFF) < 0x07)) {
		val64 = readq(&bar0->adapter_control);
		if (!(val64 & ADAPTER_CNTL_EN)) {
			printk(KERN_ERR
			       "Adapter Link down, cannot blink LED\n");
			return -EFAULT;
		}
	}
	/* Lazily initialize the blink timer on first use */
	if (sp->id_timer.function == NULL) {
		init_timer(&sp->id_timer);
		sp->id_timer.function = s2io_phy_id;
		sp->id_timer.data = (unsigned long) sp;
	}
	mod_timer(&sp->id_timer, jiffies);
	/* Blink for the user-requested number of seconds (0 = default) */
	if (data)
		msleep_interruptible(data * HZ);
	else
		msleep_interruptible(MAX_FLICKER_TIME);
	del_timer_sync(&sp->id_timer);

	/* Restore the original LED state on boards whose link LED is
	 * driven by the same register */
	if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
		writeq(last_gpio_ctrl_val, &bar0->gpio_control);
		last_gpio_ctrl_val = readq(&bar0->gpio_control);
	}

	return 0;
}
4884
4885 static void s2io_ethtool_gringparam(struct net_device *dev,
4886                                     struct ethtool_ringparam *ering)
4887 {
4888         struct s2io_nic *sp = dev->priv;
4889         int i,tx_desc_count=0,rx_desc_count=0;
4890
4891         if (sp->rxd_mode == RXD_MODE_1)
4892                 ering->rx_max_pending = MAX_RX_DESC_1;
4893         else if (sp->rxd_mode == RXD_MODE_3B)
4894                 ering->rx_max_pending = MAX_RX_DESC_2;
4895
4896         ering->tx_max_pending = MAX_TX_DESC;
4897         for (i = 0 ; i < sp->config.tx_fifo_num ; i++) 
4898                 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
4899         
4900         DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
4901         ering->tx_pending = tx_desc_count;
4902         rx_desc_count = 0;
4903         for (i = 0 ; i < sp->config.rx_ring_num ; i++) 
4904                 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
4905
4906         ering->rx_pending = rx_desc_count;
4907
4908         ering->rx_mini_max_pending = 0;
4909         ering->rx_mini_pending = 0;
4910         if(sp->rxd_mode == RXD_MODE_1)
4911                 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
4912         else if (sp->rxd_mode == RXD_MODE_3B)
4913                 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
4914         ering->rx_jumbo_pending = rx_desc_count;
4915 }
4916
4917 /**
4918  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
4919  * @sp : private member of the device structure, which is a pointer to the
4920  *      s2io_nic structure.
4921  * @ep : pointer to the structure with pause parameters given by ethtool.
4922  * Description:
4923  * Returns the Pause frame generation and reception capability of the NIC.
4924  * Return value:
4925  *  void
4926  */
4927 static void s2io_ethtool_getpause_data(struct net_device *dev,
4928                                        struct ethtool_pauseparam *ep)
4929 {
4930         u64 val64;
4931         struct s2io_nic *sp = dev->priv;
4932         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4933
4934         val64 = readq(&bar0->rmac_pause_cfg);
4935         if (val64 & RMAC_PAUSE_GEN_ENABLE)
4936                 ep->tx_pause = TRUE;
4937         if (val64 & RMAC_PAUSE_RX_ENABLE)
4938                 ep->rx_pause = TRUE;
4939         ep->autoneg = FALSE;
4940 }
4941
4942 /**
4943  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
4944  * @sp : private member of the device structure, which is a pointer to the
4945  *      s2io_nic structure.
4946  * @ep : pointer to the structure with pause parameters given by ethtool.
4947  * Description:
4948  * It can be used to set or reset Pause frame generation or reception
4949  * support of the NIC.
4950  * Return value:
4951  * int, returns 0 on Success
4952  */
4953
4954 static int s2io_ethtool_setpause_data(struct net_device *dev,
4955                                struct ethtool_pauseparam *ep)
4956 {
4957         u64 val64;
4958         struct s2io_nic *sp = dev->priv;
4959         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4960
4961         val64 = readq(&bar0->rmac_pause_cfg);
4962         if (ep->tx_pause)
4963                 val64 |= RMAC_PAUSE_GEN_ENABLE;
4964         else
4965                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4966         if (ep->rx_pause)
4967                 val64 |= RMAC_PAUSE_RX_ENABLE;
4968         else
4969                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
4970         writeq(val64, &bar0->rmac_pause_cfg);
4971         return 0;
4972 }
4973
4974 /**
4975  * read_eeprom - reads 4 bytes of data from user given offset.
4976  * @sp : private member of the device structure, which is a pointer to the
4977  *      s2io_nic structure.
4978  * @off : offset at which the data must be written
4979  * @data : Its an output parameter where the data read at the given
4980  *      offset is stored.
4981  * Description:
4982  * Will read 4 bytes of data from the user given offset and return the
4983  * read data.
4984  * NOTE: Will allow to read only part of the EEPROM visible through the
4985  *   I2C bus.
4986  * Return value:
4987  *  -1 on failure and 0 on success.
4988  */
4989
#define S2IO_DEV_ID             5
static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I: the EEPROM sits behind the I2C controller */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll up to 5 times (50ms apart) for the transfer to end */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II: the EEPROM sits behind the SPI controller instead */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Setting the REQ bit in a second write starts the command */
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				/* Only the low 3 bytes of spi_data are valid */
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff;
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5040
5041 /**
5042  *  write_eeprom - actually writes the relevant part of the data value.
5043  *  @sp : private member of the device structure, which is a pointer to the
5044  *       s2io_nic structure.
5045  *  @off : offset at which the data must be written
5046  *  @data : The data that is to be written
5047  *  @cnt : Number of bytes of the data that are actually to be written into
5048  *  the Eeprom. (max of 3)
5049  * Description:
5050  *  Actually writes the relevant part of the data value into the Eeprom
5051  *  through the I2C bus.
5052  * Return value:
5053  *  0 on success, -1 on failure.
5054  */
5055
static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I: write through the I2C controller */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll up to 5 times (50ms apart) for completion; success
		 * only when the controller did not NACK */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II: write through the SPI controller */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* A byte count of 8 is encoded as 0 in the SPI_CONTROL
		 * BYTECNT field */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Setting the REQ bit in a second write starts the command */
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5105 static void s2io_vpd_read(struct s2io_nic *nic)
5106 {
5107         u8 *vpd_data;
5108         u8 data;
5109         int i=0, cnt, fail = 0;
5110         int vpd_addr = 0x80;
5111
5112         if (nic->device_type == XFRAME_II_DEVICE) {
5113                 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5114                 vpd_addr = 0x80;
5115         }
5116         else {
5117                 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5118                 vpd_addr = 0x50;
5119         }
5120         strcpy(nic->serial_num, "NOT AVAILABLE");
5121
5122         vpd_data = kmalloc(256, GFP_KERNEL);
5123         if (!vpd_data) {
5124                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5125                 return;
5126         }
5127         nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5128
5129         for (i = 0; i < 256; i +=4 ) {
5130                 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5131                 pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5132                 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5133                 for (cnt = 0; cnt <5; cnt++) {
5134                         msleep(2);
5135                         pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5136                         if (data == 0x80)
5137                                 break;
5138                 }
5139                 if (cnt >= 5) {
5140                         DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5141                         fail = 1;
5142                         break;
5143                 }
5144                 pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5145                                       (u32 *)&vpd_data[i]);
5146         }
5147
5148         if(!fail) {
5149                 /* read serial number of adapter */
5150                 for (cnt = 0; cnt < 256; cnt++) {
5151                 if ((vpd_data[cnt] == 'S') &&
5152                         (vpd_data[cnt+1] == 'N') &&
5153                         (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5154                                 memset(nic->serial_num, 0, VPD_STRING_LEN);
5155                                 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5156                                         vpd_data[cnt+2]);
5157                                 break;
5158                         }
5159                 }
5160         }
5161
5162         if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5163                 memset(nic->product_name, 0, vpd_data[1]);
5164                 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5165         }
5166         kfree(vpd_data);
5167         nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5168 }
5169
5170 /**
5171  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
5173  *  @eeprom : pointer to the user level structure provided by ethtool,
5174  *  containing all relevant information.
5175  *  @data_buf : user defined value to be written into Eeprom.
5176  *  Description: Reads the values stored in the Eeprom at given offset
5177  *  for a given length. Stores these values int the input argument data
5178  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5179  *  Return value:
5180  *  int  0 on success
5181  */
5182
5183 static int s2io_ethtool_geeprom(struct net_device *dev,
5184                          struct ethtool_eeprom *eeprom, u8 * data_buf)
5185 {
5186         u32 i, valid;
5187         u64 data;
5188         struct s2io_nic *sp = dev->priv;
5189
5190         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5191
5192         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5193                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5194
5195         for (i = 0; i < eeprom->len; i += 4) {
5196                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5197                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5198                         return -EFAULT;
5199                 }
5200                 valid = INV(data);
5201                 memcpy((data_buf + i), &valid, 4);
5202         }
5203         return 0;
5204 }
5205
5206 /**
5207  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5208  *  @sp : private member of the device structure, which is a pointer to the
5209  *  s2io_nic structure.
5210  *  @eeprom : pointer to the user level structure provided by ethtool,
5211  *  containing all relevant information.
5212  *  @data_buf ; user defined value to be written into Eeprom.
5213  *  Description:
5214  *  Tries to write the user provided value in the Eeprom, at the offset
5215  *  given by the user.
5216  *  Return value:
5217  *  0 on success, -EFAULT on failure.
5218  */
5219
5220 static int s2io_ethtool_seeprom(struct net_device *dev,
5221                                 struct ethtool_eeprom *eeprom,
5222                                 u8 * data_buf)
5223 {
5224         int len = eeprom->len, cnt = 0;
5225         u64 valid = 0, data;
5226         struct s2io_nic *sp = dev->priv;
5227
5228         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5229                 DBG_PRINT(ERR_DBG,
5230                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5231                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5232                           eeprom->magic);
5233                 return -EFAULT;
5234         }
5235
5236         while (len) {
5237                 data = (u32) data_buf[cnt] & 0x000000FF;
5238                 if (data) {
5239                         valid = (u32) (data << 24);
5240                 } else
5241                         valid = data;
5242
5243                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5244                         DBG_PRINT(ERR_DBG,
5245                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5246                         DBG_PRINT(ERR_DBG,
5247                                   "write into the specified offset\n");
5248                         return -EFAULT;
5249                 }
5250                 cnt++;
5251                 len--;
5252         }
5253
5254         return 0;
5255 }
5256
5257 /**
5258  * s2io_register_test - reads and writes into all clock domains.
5259  * @sp : private member of the device structure, which is a pointer to the
5260  * s2io_nic structure.
 * @data : variable that returns the result of each of the tests conducted
 * by the driver.
5263  * Description:
5264  * Read and write into all clock domains. The NIC has 3 clock domains,
5265  * see that registers in all the three regions are accessible.
5266  * Return value:
5267  * 0 on success.
5268  */
5269
5270 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5271 {
5272         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5273         u64 val64 = 0, exp_val;
5274         int fail = 0;
5275
5276         val64 = readq(&bar0->pif_rd_swapper_fb);
5277         if (val64 != 0x123456789abcdefULL) {
5278                 fail = 1;
5279                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5280         }
5281
5282         val64 = readq(&bar0->rmac_pause_cfg);
5283         if (val64 != 0xc000ffff00000000ULL) {
5284                 fail = 1;
5285                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5286         }
5287
5288         val64 = readq(&bar0->rx_queue_cfg);
5289         if (sp->device_type == XFRAME_II_DEVICE)
5290                 exp_val = 0x0404040404040404ULL;
5291         else
5292                 exp_val = 0x0808080808080808ULL;
5293         if (val64 != exp_val) {
5294                 fail = 1;
5295                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5296         }
5297
5298         val64 = readq(&bar0->xgxs_efifo_cfg);
5299         if (val64 != 0x000000001923141EULL) {
5300                 fail = 1;
5301                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5302         }
5303
5304         val64 = 0x5A5A5A5A5A5A5A5AULL;
5305         writeq(val64, &bar0->xmsi_data);
5306         val64 = readq(&bar0->xmsi_data);
5307         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5308                 fail = 1;
5309                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5310         }
5311
5312         val64 = 0xA5A5A5A5A5A5A5A5ULL;
5313         writeq(val64, &bar0->xmsi_data);
5314         val64 = readq(&bar0->xmsi_data);
5315         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5316                 fail = 1;
5317                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5318         }
5319
5320         *data = fail;
5321         return fail;
5322 }
5323
5324 /**
5325  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5326  * @sp : private member of the device structure, which is a pointer to the
5327  * s2io_nic structure.
5328  * @data:variable that returns the result of each of the test conducted by
5329  * the driver.
5330  * Description:
5331  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5332  * register.
5333  * Return value:
5334  * 0 on success.
5335  */
5336
5337 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5338 {
5339         int fail = 0;
5340         u64 ret_data, org_4F0, org_7F0;
5341         u8 saved_4F0 = 0, saved_7F0 = 0;
5342         struct net_device *dev = sp->dev;
5343
5344         /* Test Write Error at offset 0 */
5345         /* Note that SPI interface allows write access to all areas
5346          * of EEPROM. Hence doing all negative testing only for Xframe I.
5347          */
5348         if (sp->device_type == XFRAME_I_DEVICE)
5349                 if (!write_eeprom(sp, 0, 0, 3))
5350                         fail = 1;
5351
5352         /* Save current values at offsets 0x4F0 and 0x7F0 */
5353         if (!read_eeprom(sp, 0x4F0, &org_4F0))
5354                 saved_4F0 = 1;
5355         if (!read_eeprom(sp, 0x7F0, &org_7F0))
5356                 saved_7F0 = 1;
5357
5358         /* Test Write at offset 4f0 */
5359         if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5360                 fail = 1;
5361         if (read_eeprom(sp, 0x4F0, &ret_data))
5362                 fail = 1;
5363
5364         if (ret_data != 0x012345) {
5365                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5366                         "Data written %llx Data read %llx\n",
5367                         dev->name, (unsigned long long)0x12345,
5368                         (unsigned long long)ret_data);
5369                 fail = 1;
5370         }
5371
5372         /* Reset the EEPROM data go FFFF */
5373         write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5374
5375         /* Test Write Request Error at offset 0x7c */
5376         if (sp->device_type == XFRAME_I_DEVICE)
5377                 if (!write_eeprom(sp, 0x07C, 0, 3))
5378                         fail = 1;
5379
5380         /* Test Write Request at offset 0x7f0 */
5381         if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5382                 fail = 1;
5383         if (read_eeprom(sp, 0x7F0, &ret_data))
5384                 fail = 1;
5385
5386         if (ret_data != 0x012345) {
5387                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5388                         "Data written %llx Data read %llx\n",
5389                         dev->name, (unsigned long long)0x12345,
5390                         (unsigned long long)ret_data);
5391                 fail = 1;
5392         }
5393
5394         /* Reset the EEPROM data go FFFF */
5395         write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5396
5397         if (sp->device_type == XFRAME_I_DEVICE) {
5398                 /* Test Write Error at offset 0x80 */
5399                 if (!write_eeprom(sp, 0x080, 0, 3))
5400                         fail = 1;
5401
5402                 /* Test Write Error at offset 0xfc */
5403                 if (!write_eeprom(sp, 0x0FC, 0, 3))
5404                         fail = 1;
5405
5406                 /* Test Write Error at offset 0x100 */
5407                 if (!write_eeprom(sp, 0x100, 0, 3))
5408                         fail = 1;
5409
5410                 /* Test Write Error at offset 4ec */
5411                 if (!write_eeprom(sp, 0x4EC, 0, 3))
5412                         fail = 1;
5413         }
5414
5415         /* Restore values at offsets 0x4F0 and 0x7F0 */
5416         if (saved_4F0)
5417                 write_eeprom(sp, 0x4F0, org_4F0, 3);
5418         if (saved_7F0)
5419                 write_eeprom(sp, 0x7F0, org_7F0, 3);
5420
5421         *data = fail;
5422         return fail;
5423 }
5424
5425 /**
5426  * s2io_bist_test - invokes the MemBist test of the card .
5427  * @sp : private member of the device structure, which is a pointer to the
5428  * s2io_nic structure.
5429  * @data:variable that returns the result of each of the test conducted by
5430  * the driver.
5431  * Description:
5432  * This invokes the MemBist test of the card. We give around
5433  * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
5435  * Return value:
5436  * 0 on success and -1 on failure.
5437  */
5438
5439 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5440 {
5441         u8 bist = 0;
5442         int cnt = 0, ret = -1;
5443
5444         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5445         bist |= PCI_BIST_START;
5446         pci_write_config_word(sp->pdev, PCI_BIST, bist);
5447
5448         while (cnt < 20) {
5449                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5450                 if (!(bist & PCI_BIST_START)) {
5451                         *data = (bist & PCI_BIST_CODE_MASK);
5452                         ret = 0;
5453                         break;
5454                 }
5455                 msleep(100);
5456                 cnt++;
5457         }
5458
5459         return ret;
5460 }
5461
5462 /**
 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
5465  * s2io_nic structure.
5466  * @data: variable that returns the result of each of the test conducted by
5467  * the driver.
5468  * Description:
5469  * The function verifies the link state of the NIC and updates the input
5470  * argument 'data' appropriately.
5471  * Return value:
5472  * 0 on success.
5473  */
5474
5475 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5476 {
5477         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5478         u64 val64;
5479
5480         val64 = readq(&bar0->adapter_status);
5481         if(!(LINK_IS_UP(val64)))
5482                 *data = 1;
5483         else
5484                 *data = 0;
5485
5486         return *data;
5487 }
5488
5489 /**
5490  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5491  * @sp - private member of the device structure, which is a pointer to the
5492  * s2io_nic structure.
5493  * @data - variable that returns the result of each of the test
5494  * conducted by the driver.
5495  * Description:
5496  *  This is one of the offline test that tests the read and write
5497  *  access to the RldRam chip on the NIC.
5498  * Return value:
5499  *  0 on success.
5500  */
5501
5502 static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
5503 {
5504         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5505         u64 val64;
5506         int cnt, iteration = 0, test_fail = 0;
5507
5508         val64 = readq(&bar0->adapter_control);
5509         val64 &= ~ADAPTER_ECC_EN;
5510         writeq(val64, &bar0->adapter_control);
5511
5512         val64 = readq(&bar0->mc_rldram_test_ctrl);
5513         val64 |= MC_RLDRAM_TEST_MODE;
5514         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5515
5516         val64 = readq(&bar0->mc_rldram_mrs);
5517         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5518         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5519
5520         val64 |= MC_RLDRAM_MRS_ENABLE;
5521         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5522
5523         while (iteration < 2) {
5524                 val64 = 0x55555555aaaa0000ULL;
5525                 if (iteration == 1) {
5526                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
5527                 }
5528                 writeq(val64, &bar0->mc_rldram_test_d0);
5529
5530                 val64 = 0xaaaa5a5555550000ULL;
5531                 if (iteration == 1) {
5532                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
5533                 }
5534                 writeq(val64, &bar0->mc_rldram_test_d1);
5535
5536                 val64 = 0x55aaaaaaaa5a0000ULL;
5537                 if (iteration == 1) {
5538                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
5539                 }
5540                 writeq(val64, &bar0->mc_rldram_test_d2);
5541
5542                 val64 = (u64) (0x0000003ffffe0100ULL);
5543                 writeq(val64, &bar0->mc_rldram_test_add);
5544
5545                 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5546                         MC_RLDRAM_TEST_GO;
5547                 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5548
5549                 for (cnt = 0; cnt < 5; cnt++) {
5550                         val64 = readq(&bar0->mc_rldram_test_ctrl);
5551                         if (val64 & MC_RLDRAM_TEST_DONE)
5552                                 break;
5553                         msleep(200);
5554                 }
5555
5556                 if (cnt == 5)
5557                         break;
5558
5559                 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5560                 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5561
5562                 for (cnt = 0; cnt < 5; cnt++) {
5563                         val64 = readq(&bar0->mc_rldram_test_ctrl);
5564                         if (val64 & MC_RLDRAM_TEST_DONE)
5565                                 break;
5566                         msleep(500);
5567                 }
5568
5569                 if (cnt == 5)
5570                         break;
5571
5572                 val64 = readq(&bar0->mc_rldram_test_ctrl);
5573                 if (!(val64 & MC_RLDRAM_TEST_PASS))
5574                         test_fail = 1;
5575
5576                 iteration++;
5577         }
5578
5579         *data = test_fail;
5580
5581         /* Bring the adapter out of test mode */
5582         SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5583
5584         return test_fail;
5585 }
5586
5587 /**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of card.
5589  *  @sp : private member of the device structure, which is a pointer to the
5590  *  s2io_nic structure.
5591  *  @ethtest : pointer to a ethtool command specific structure that will be
5592  *  returned to the user.
5593  *  @data : variable that returns the result of each of the test
5594  * conducted by the driver.
5595  * Description:
5596  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
5597  *  the health of the card.
5598  * Return value:
5599  *  void
5600  */
5601
5602 static void s2io_ethtool_test(struct net_device *dev,
5603                               struct ethtool_test *ethtest,
5604                               uint64_t * data)
5605 {
5606         struct s2io_nic *sp = dev->priv;
5607         int orig_state = netif_running(sp->dev);
5608
5609         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5610                 /* Offline Tests. */
5611                 if (orig_state)
5612                         s2io_close(sp->dev);
5613
5614                 if (s2io_register_test(sp, &data[0]))
5615                         ethtest->flags |= ETH_TEST_FL_FAILED;
5616
5617                 s2io_reset(sp);
5618
5619                 if (s2io_rldram_test(sp, &data[3]))
5620                         ethtest->flags |= ETH_TEST_FL_FAILED;
5621
5622                 s2io_reset(sp);
5623
5624                 if (s2io_eeprom_test(sp, &data[1]))
5625                         ethtest->flags |= ETH_TEST_FL_FAILED;
5626
5627                 if (s2io_bist_test(sp, &data[4]))
5628                         ethtest->flags |= ETH_TEST_FL_FAILED;
5629
5630                 if (orig_state)
5631                         s2io_open(sp->dev);
5632
5633                 data[2] = 0;
5634         } else {
5635                 /* Online Tests. */
5636                 if (!orig_state) {
5637                         DBG_PRINT(ERR_DBG,
5638                                   "%s: is not up, cannot run test\n",
5639                                   dev->name);
5640                         data[0] = -1;
5641                         data[1] = -1;
5642                         data[2] = -1;
5643                         data[3] = -1;
5644                         data[4] = -1;
5645                 }
5646
5647                 if (s2io_link_test(sp, &data[2]))
5648                         ethtest->flags |= ETH_TEST_FL_FAILED;
5649
5650                 data[0] = 0;
5651                 data[1] = 0;
5652                 data[3] = 0;
5653                 data[4] = 0;
5654         }
5655 }
5656
5657 static void s2io_get_ethtool_stats(struct net_device *dev,
5658                                    struct ethtool_stats *estats,
5659                                    u64 * tmp_stats)
5660 {
5661         int i = 0;
5662         struct s2io_nic *sp = dev->priv;
5663         struct stat_block *stat_info = sp->mac_control.stats_info;
5664
5665         s2io_updt_stats(sp);
5666         tmp_stats[i++] =
5667                 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
5668                 le32_to_cpu(stat_info->tmac_frms);
5669         tmp_stats[i++] =
5670                 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5671                 le32_to_cpu(stat_info->tmac_data_octets);
5672         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
5673         tmp_stats[i++] =
5674                 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5675                 le32_to_cpu(stat_info->tmac_mcst_frms);
5676         tmp_stats[i++] =
5677                 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5678                 le32_to_cpu(stat_info->tmac_bcst_frms);
5679         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
5680         tmp_stats[i++] =
5681                 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5682                 le32_to_cpu(stat_info->tmac_ttl_octets);
5683         tmp_stats[i++] =
5684                 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5685                 le32_to_cpu(stat_info->tmac_ucst_frms);
5686         tmp_stats[i++] =
5687                 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5688                 le32_to_cpu(stat_info->tmac_nucst_frms);
5689         tmp_stats[i++] =
5690                 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5691                 le32_to_cpu(stat_info->tmac_any_err_frms);
5692         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
5693         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5694         tmp_stats[i++] =
5695                 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5696                 le32_to_cpu(stat_info->tmac_vld_ip);
5697         tmp_stats[i++] =
5698                 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5699                 le32_to_cpu(stat_info->tmac_drop_ip);
5700         tmp_stats[i++] =
5701                 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5702                 le32_to_cpu(stat_info->tmac_icmp);
5703         tmp_stats[i++] =
5704                 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5705                 le32_to_cpu(stat_info->tmac_rst_tcp);
5706         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5707         tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5708                 le32_to_cpu(stat_info->tmac_udp);
5709         tmp_stats[i++] =
5710                 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5711                 le32_to_cpu(stat_info->rmac_vld_frms);
5712         tmp_stats[i++] =
5713                 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5714                 le32_to_cpu(stat_info->rmac_data_octets);
5715         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5716         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5717         tmp_stats[i++] =
5718                 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5719                 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5720         tmp_stats[i++] =
5721                 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5722                 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
5723         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
5724         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
5725         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5726         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
5727         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
5728         tmp_stats[i++] =
5729                 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
5730                 le32_to_cpu(stat_info->rmac_ttl_octets);
5731         tmp_stats[i++] =
5732                 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
5733                 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
5734         tmp_stats[i++] =
5735                 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
5736                  << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
5737         tmp_stats[i++] =
5738                 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5739                 le32_to_cpu(stat_info->rmac_discarded_frms);
5740         tmp_stats[i++] =
5741                 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
5742                  << 32 | le32_to_cpu(stat_info->rmac_drop_events);
5743         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
5744         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
5745         tmp_stats[i++] =
5746                 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5747                 le32_to_cpu(stat_info->rmac_usized_frms);
5748         tmp_stats[i++] =
5749                 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
5750                 le32_to_cpu(stat_info->rmac_osized_frms);
5751         tmp_stats[i++] =
5752                 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
5753                 le32_to_cpu(stat_info->rmac_frag_frms);
5754         tmp_stats[i++] =
5755                 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
5756                 le32_to_cpu(stat_info->rmac_jabber_frms);
5757         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
5758         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
5759         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
5760         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
5761         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
5762         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
5763         tmp_stats[i++] =
5764                 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
5765                 le32_to_cpu(stat_info->rmac_ip);
5766         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
5767         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
5768         tmp_stats[i++] =
5769                 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
5770                 le32_to_cpu(stat_info->rmac_drop_ip);
5771         tmp_stats[i++] =
5772                 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
5773                 le32_to_cpu(stat_info->rmac_icmp);
5774         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
5775         tmp_stats[i++] =
5776                 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
5777                 le32_to_cpu(stat_info->rmac_udp);
5778         tmp_stats[i++] =
5779                 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
5780                 le32_to_cpu(stat_info->rmac_err_drp_udp);
5781         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
5782         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
5783         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
5784         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
5785         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
5786         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
5787         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
5788         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
5789         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
5790         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
5791         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
5792         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
5793         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
5794         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
5795         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
5796         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
5797         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
5798         tmp_stats[i++] =
5799                 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
5800                 le32_to_cpu(stat_info->rmac_pause_cnt);
5801         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
5802         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
5803         tmp_stats[i++] =
5804                 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
5805                 le32_to_cpu(stat_info->rmac_accepted_ip);
5806         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
5807         tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
5808         tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
5809         tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
5810         tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
5811         tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
5812         tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
5813         tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
5814         tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
5815         tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
5816         tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
5817         tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
5818         tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
5819         tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
5820         tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
5821         tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
5822         tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
5823         tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
5824         tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
5825
5826         /* Enhanced statistics exist only for Hercules */
5827         if(sp->device_type == XFRAME_II_DEVICE) {
5828                 tmp_stats[i++] =
5829                                 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
5830                 tmp_stats[i++] =
5831                                 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
5832                 tmp_stats[i++] =
5833                                 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
5834                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
5835                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
5836                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
5837                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
5838                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
5839                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
5840                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
5841                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
5842                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
5843                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
5844                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
5845                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
5846                 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
5847         }
5848
5849         tmp_stats[i++] = 0;
5850         tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5851         tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
5852         tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
5853         tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
5854         tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
5855         tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
5856         tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
5857         tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
5858         tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
5859         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
5860         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
5861         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
5862         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
5863         tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
5864         tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
5865         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
5866         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
5867         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
5868         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
5869         tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
5870         tmp_stats[i++] = stat_info->sw_stat.sending_both;
5871         tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
5872         tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
5873         if (stat_info->sw_stat.num_aggregations) {
5874                 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
5875                 int count = 0;
5876                 /*
5877                  * Since 64-bit divide does not work on all platforms,
5878                  * do repeated subtraction.
5879                  */
5880                 while (tmp >= stat_info->sw_stat.num_aggregations) {
5881                         tmp -= stat_info->sw_stat.num_aggregations;
5882                         count++;
5883                 }
5884                 tmp_stats[i++] = count;
5885         }
5886         else
5887                 tmp_stats[i++] = 0;
5888         tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
5889         tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
5890         tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
5891         tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
5892         tmp_stats[i++] = stat_info->sw_stat.mem_freed;
5893         tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
5894         tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
5895         tmp_stats[i++] = stat_info->sw_stat.link_up_time;
5896         tmp_stats[i++] = stat_info->sw_stat.link_down_time;
5897
5898         tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
5899         tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
5900         tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
5901         tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
5902         tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
5903
5904         tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
5905         tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
5906         tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
5907         tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
5908         tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
5909         tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
5910         tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
5911         tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
5912         tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
5913 }
5914
5915 static int s2io_ethtool_get_regs_len(struct net_device *dev)
5916 {
5917         return (XENA_REG_SPACE);
5918 }
5919
5920
5921 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5922 {
5923         struct s2io_nic *sp = dev->priv;
5924
5925         return (sp->rx_csum);
5926 }
5927
5928 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5929 {
5930         struct s2io_nic *sp = dev->priv;
5931
5932         if (data)
5933                 sp->rx_csum = 1;
5934         else
5935                 sp->rx_csum = 0;
5936
5937         return 0;
5938 }
5939
5940 static int s2io_get_eeprom_len(struct net_device *dev)
5941 {
5942         return (XENA_EEPROM_SPACE);
5943 }
5944
5945 static int s2io_ethtool_self_test_count(struct net_device *dev)
5946 {
5947         return (S2IO_TEST_LEN);
5948 }
5949
5950 static void s2io_ethtool_get_strings(struct net_device *dev,
5951                                      u32 stringset, u8 * data)
5952 {
5953         int stat_size = 0;
5954         struct s2io_nic *sp = dev->priv;
5955
5956         switch (stringset) {
5957         case ETH_SS_TEST:
5958                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5959                 break;
5960         case ETH_SS_STATS:
5961                 stat_size = sizeof(ethtool_xena_stats_keys);
5962                 memcpy(data, &ethtool_xena_stats_keys,stat_size);
5963                 if(sp->device_type == XFRAME_II_DEVICE) {
5964                         memcpy(data + stat_size,
5965                                 &ethtool_enhanced_stats_keys,
5966                                 sizeof(ethtool_enhanced_stats_keys));
5967                         stat_size += sizeof(ethtool_enhanced_stats_keys);
5968                 }
5969
5970                 memcpy(data + stat_size, &ethtool_driver_stats_keys,
5971                         sizeof(ethtool_driver_stats_keys));
5972         }
5973 }
5974 static int s2io_ethtool_get_stats_count(struct net_device *dev)
5975 {
5976         struct s2io_nic *sp = dev->priv;
5977         int stat_count = 0;
5978         switch(sp->device_type) {
5979         case XFRAME_I_DEVICE:
5980                 stat_count = XFRAME_I_STAT_LEN;
5981         break;
5982
5983         case XFRAME_II_DEVICE:
5984                 stat_count = XFRAME_II_STAT_LEN;
5985         break;
5986         }
5987
5988         return stat_count;
5989 }
5990
5991 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5992 {
5993         if (data)
5994                 dev->features |= NETIF_F_IP_CSUM;
5995         else
5996                 dev->features &= ~NETIF_F_IP_CSUM;
5997
5998         return 0;
5999 }
6000
6001 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6002 {
6003         return (dev->features & NETIF_F_TSO) != 0;
6004 }
6005 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6006 {
6007         if (data)
6008                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6009         else
6010                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6011
6012         return 0;
6013 }
6014
/*
 * ethtool operations table registered for every s2io net_device.
 * Entries with an s2io_* handler are implemented in this file; the
 * ethtool_op_* entries use the generic kernel helpers.
 */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = s2io_ethtool_op_get_tso,
	.set_tso = s2io_ethtool_op_set_tso,
	.get_ufo = ethtool_op_get_ufo,
	.set_ufo = ethtool_op_set_ufo,
	.self_test_count = s2io_ethtool_self_test_count,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.phys_id = s2io_ethtool_idnic,
	.get_stats_count = s2io_ethtool_get_stats_count,
	.get_ethtool_stats = s2io_get_ethtool_stats
};
6045
6046 /**
6047  *  s2io_ioctl - Entry point for the Ioctl
6048  *  @dev :  Device pointer.
6049  *  @ifr :  An IOCTL specefic structure, that can contain a pointer to
6050  *  a proprietary structure used to pass information to the driver.
6051  *  @cmd :  This is used to distinguish between the different commands that
6052  *  can be passed to the IOCTL functions.
6053  *  Description:
6054  *  Currently there are no special functionality supported in IOCTL, hence
6055  *  function always return EOPNOTSUPPORTED
6056  */
6057
6058 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6059 {
6060         return -EOPNOTSUPP;
6061 }
6062
6063 /**
6064  *  s2io_change_mtu - entry point to change MTU size for the device.
6065  *   @dev : device pointer.
6066  *   @new_mtu : the new MTU size for the device.
6067  *   Description: A driver entry point to change MTU size for the device.
6068  *   Before changing the MTU the device must be stopped.
6069  *  Return value:
6070  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6071  *   file on failure.
6072  */
6073
6074 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6075 {
6076         struct s2io_nic *sp = dev->priv;
6077
6078         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6079                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6080                           dev->name);
6081                 return -EPERM;
6082         }
6083
6084         dev->mtu = new_mtu;
6085         if (netif_running(dev)) {
6086                 s2io_card_down(sp);
6087                 netif_stop_queue(dev);
6088                 if (s2io_card_up(sp)) {
6089                         DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6090                                   __FUNCTION__);
6091                 }
6092                 if (netif_queue_stopped(dev))
6093                         netif_wake_queue(dev);
6094         } else { /* Device is down */
6095                 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6096                 u64 val64 = new_mtu;
6097
6098                 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6099         }
6100
6101         return 0;
6102 }
6103
6104 /**
6105  *  s2io_tasklet - Bottom half of the ISR.
6106  *  @dev_adr : address of the device structure in dma_addr_t format.
6107  *  Description:
6108  *  This is the tasklet or the bottom half of the ISR. This is
6109  *  an extension of the ISR which is scheduled by the scheduler to be run
6110  *  when the load on the CPU is low. All low priority tasks of the ISR can
6111  *  be pushed into the tasklet. For now the tasklet is used only to
6112  *  replenish the Rx buffers in the Rx buffer descriptors.
6113  *  Return value:
6114  *  void.
6115  */
6116
6117 static void s2io_tasklet(unsigned long dev_addr)
6118 {
6119         struct net_device *dev = (struct net_device *) dev_addr;
6120         struct s2io_nic *sp = dev->priv;
6121         int i, ret;
6122         struct mac_info *mac_control;
6123         struct config_param *config;
6124
6125         mac_control = &sp->mac_control;
6126         config = &sp->config;
6127
6128         if (!TASKLET_IN_USE) {
6129                 for (i = 0; i < config->rx_ring_num; i++) {
6130                         ret = fill_rx_buffers(sp, i);
6131                         if (ret == -ENOMEM) {
6132                                 DBG_PRINT(INFO_DBG, "%s: Out of ",
6133                                           dev->name);
6134                                 DBG_PRINT(INFO_DBG, "memory in tasklet\n");
6135                                 break;
6136                         } else if (ret == -EFILL) {
6137                                 DBG_PRINT(INFO_DBG,
6138                                           "%s: Rx Ring %d is full\n",
6139                                           dev->name, i);
6140                                 break;
6141                         }
6142                 }
6143                 clear_bit(0, (&sp->tasklet_status));
6144         }
6145 }
6146
6147 /**
6148  * s2io_set_link - Set the LInk status
6149  * @data: long pointer to device private structue
6150  * Description: Sets the link status for the adapter
6151  */
6152
6153 static void s2io_set_link(struct work_struct *work)
6154 {
6155         struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
6156         struct net_device *dev = nic->dev;
6157         struct XENA_dev_config __iomem *bar0 = nic->bar0;
6158         register u64 val64;
6159         u16 subid;
6160
6161         rtnl_lock();
6162
6163         if (!netif_running(dev))
6164                 goto out_unlock;
6165
6166         if (test_and_set_bit(0, &(nic->link_state))) {
6167                 /* The card is being reset, no point doing anything */
6168                 goto out_unlock;
6169         }
6170
6171         subid = nic->pdev->subsystem_device;
6172         if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6173                 /*
6174                  * Allow a small delay for the NICs self initiated
6175                  * cleanup to complete.
6176                  */
6177                 msleep(100);
6178         }
6179
6180         val64 = readq(&bar0->adapter_status);
6181         if (LINK_IS_UP(val64)) {
6182                 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6183                         if (verify_xena_quiescence(nic)) {
6184                                 val64 = readq(&bar0->adapter_control);
6185                                 val64 |= ADAPTER_CNTL_EN;
6186                                 writeq(val64, &bar0->adapter_control);
6187                                 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6188                                         nic->device_type, subid)) {
6189                                         val64 = readq(&bar0->gpio_control);
6190                                         val64 |= GPIO_CTRL_GPIO_0;
6191                                         writeq(val64, &bar0->gpio_control);
6192                                         val64 = readq(&bar0->gpio_control);
6193                                 } else {
6194                                         val64 |= ADAPTER_LED_ON;
6195                                         writeq(val64, &bar0->adapter_control);
6196                                 }
6197                                 nic->device_enabled_once = TRUE;
6198                         } else {
6199                                 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6200                                 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6201                                 netif_stop_queue(dev);
6202                         }
6203                 }
6204                 val64 = readq(&bar0->adapter_status);
6205                 if (!LINK_IS_UP(val64)) {
6206                         DBG_PRINT(ERR_DBG, "%s:", dev->name);
6207                         DBG_PRINT(ERR_DBG, " Link down after enabling ");
6208                         DBG_PRINT(ERR_DBG, "device \n");
6209                 } else
6210                         s2io_link(nic, LINK_UP);
6211         } else {
6212                 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6213                                                       subid)) {
6214                         val64 = readq(&bar0->gpio_control);
6215                         val64 &= ~GPIO_CTRL_GPIO_0;
6216                         writeq(val64, &bar0->gpio_control);
6217                         val64 = readq(&bar0->gpio_control);
6218                 }
6219                 s2io_link(nic, LINK_DOWN);
6220         }
6221         clear_bit(0, &(nic->link_state));
6222
6223 out_unlock:
6224         rtnl_unlock();
6225 }
6226
/*
 * set_rxd_buffer_pointer - (re)attach DMA-mapped buffers to one RxD.
 * @sp:    device private structure
 * @rxdp:  descriptor to fill
 * @ba:    buffer-address bookkeeping for 3-buffer mode (unused in 1-buf mode)
 * @skb:   in/out: the skb to reuse, or NULL to allocate a fresh one; a
 *         newly allocated skb is returned here for reuse on subsequent RxDs
 * @temp0..temp2: in/out cached DMA addresses matching *skb's mappings
 * @size:  skb allocation size
 *
 * Only descriptors whose Host_Control is 0 (i.e. not currently owning an
 * skb) are touched. When *skb is non-NULL the previously cached mappings
 * are simply written back into the descriptor; otherwise a new skb is
 * allocated and mapped. Returns 0 on success, -ENOMEM on allocation or
 * DMA-mapping failure (note: callers must compare against -ENOMEM).
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				struct buffAdd *ba,
				struct sk_buff **skb, u64 *temp0, u64 *temp1,
				u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frame are not going to be processed,
			 * using same mapped address for the Rxd
			 * buffer pointer
			 */
			rxdp1->Buffer0_ptr = *temp0;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated 
				+= (*skb)->truesize;
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, (*skb)->data,
					size - NET_IP_ALIGN,
					PCI_DMA_FROMDEVICE);
			/* 0 or DMA_ERROR_CODE signals a failed mapping */
			if( (rxdp1->Buffer0_ptr == 0) ||
				(rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
		if (*skb) {
			/* Reuse the mappings cached from a previous call. */
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated 
				+= (*skb)->truesize;
			/* Buffer-2 carries the packet payload. */
			rxdp3->Buffer2_ptr = *temp2 =
				pci_map_single(sp->pdev, (*skb)->data,
					       dev->mtu + 4,
					       PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer2_ptr == 0) ||
				(rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
				goto memalloc_failed;
			}
			/* Buffer-0 holds the frame header. */
			rxdp3->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
						PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer0_ptr == 0) ||
				(rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
				/* Unwind the Buffer-2 mapping made above. */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			rxdp3->Buffer1_ptr = *temp1 =
				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer1_ptr == 0) ||
				(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
				/* Unwind both earlier mappings. */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer0_ptr,
					BUF0_LEN, PCI_DMA_FROMDEVICE);
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
		}
	}
	return 0;
	/* Common DMA-mapping failure path: drop the skb and account it. */
	memalloc_failed:
		stats->pci_map_fail_cnt++;
		stats->mem_freed += (*skb)->truesize;
		dev_kfree_skb(*skb);
		return -ENOMEM;
}
6334
6335 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6336                                 int size)
6337 {
6338         struct net_device *dev = sp->dev;
6339         if (sp->rxd_mode == RXD_MODE_1) {
6340                 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6341         } else if (sp->rxd_mode == RXD_MODE_3B) {
6342                 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6343                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6344                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6345         }
6346 }
6347
6348 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6349 {
6350         int i, j, k, blk_cnt = 0, size;
6351         struct mac_info * mac_control = &sp->mac_control;
6352         struct config_param *config = &sp->config;
6353         struct net_device *dev = sp->dev;
6354         struct RxD_t *rxdp = NULL;
6355         struct sk_buff *skb = NULL;
6356         struct buffAdd *ba = NULL;
6357         u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6358
6359         /* Calculate the size based on ring mode */
6360         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6361                 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6362         if (sp->rxd_mode == RXD_MODE_1)
6363                 size += NET_IP_ALIGN;
6364         else if (sp->rxd_mode == RXD_MODE_3B)
6365                 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6366
6367         for (i = 0; i < config->rx_ring_num; i++) {
6368                 blk_cnt = config->rx_cfg[i].num_rxd /
6369                         (rxd_count[sp->rxd_mode] +1);
6370
6371                 for (j = 0; j < blk_cnt; j++) {
6372                         for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6373                                 rxdp = mac_control->rings[i].
6374                                         rx_blocks[j].rxds[k].virt_addr;
6375                                 if(sp->rxd_mode == RXD_MODE_3B)
6376                                         ba = &mac_control->rings[i].ba[j][k];
6377                                 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6378                                                        &skb,(u64 *)&temp0_64,
6379                                                        (u64 *)&temp1_64,
6380                                                        (u64 *)&temp2_64,
6381                                                         size) == ENOMEM) {
6382                                         return 0;
6383                                 }
6384
6385                                 set_rxd_buffer_size(sp, rxdp, size);
6386                                 wmb();
6387                                 /* flip the Ownership bit to Hardware */
6388                                 rxdp->Control_1 |= RXD_OWN_XENA;
6389                         }
6390                 }
6391         }
6392         return 0;
6393
6394 }
6395
/*
 * s2io_add_isr - enable MSI-X (if configured) and register interrupt
 * handlers. Falls back to legacy INTA when MSI-X enabling fails.
 * Returns 0 on success, -1 if any request_irq() fails.
 *
 * NOTE(review): when request_irq() fails partway through the MSI-X
 * loop, vectors registered in earlier iterations are not freed before
 * returning -1 -- verify the caller performs that cleanup.
 */
static int s2io_add_isr(struct s2io_nic * sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		/* MSI-X could not be enabled: drop back to legacy INTA. */
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->intr_type = INTA;
	}

	/* Store the values of the MSIX table in the struct s2io_nic structure */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->intr_type == MSI_X) {
		int i, msix_tx_cnt=0,msix_rx_cnt=0;

		/* Entry 0 is the alarm vector; loop over the in-use rest. */
		for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
			if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
				/* Tx FIFO vector */
				sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_fifo_handle, 0, sp->desc[i],
						  sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_tx_cnt++;
				}
			} else {
				/* Rx ring vector */
				sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_ring_handle, 0, sp->desc[i],
						  sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_rx_cnt++;
				}
			}
			if (err) {
				DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
					  "failed\n", dev->name, i);
				DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
				return -1;
			}
			sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
		}
		printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
		printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
	}
	if (sp->intr_type == INTA) {
		/* Single shared legacy interrupt line. */
		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
				sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
6476 static void s2io_rem_isr(struct s2io_nic * sp)
6477 {
6478         int cnt = 0;
6479         struct net_device *dev = sp->dev;
6480         struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6481
6482         if (sp->intr_type == MSI_X) {
6483                 int i;
6484                 u16 msi_control;
6485
6486                 for (i=1; (sp->s2io_entries[i].in_use ==
6487                         MSIX_REGISTERED_SUCCESS); i++) {
6488                         int vector = sp->entries[i].vector;
6489                         void *arg = sp->s2io_entries[i].arg;
6490
6491                         free_irq(vector, arg);
6492                 }
6493
6494                 kfree(sp->entries);
6495                 stats->mem_freed +=
6496                         (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
6497                 kfree(sp->s2io_entries);
6498                 stats->mem_freed +=
6499                         (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
6500                 sp->entries = NULL;
6501                 sp->s2io_entries = NULL;
6502
6503                 pci_read_config_word(sp->pdev, 0x42, &msi_control);
6504                 msi_control &= 0xFFFE; /* Disable MSI */
6505                 pci_write_config_word(sp->pdev, 0x42, msi_control);
6506
6507                 pci_disable_msix(sp->pdev);
6508         } else {
6509                 free_irq(sp->pdev->irq, dev);
6510         }
6511         /* Waiting till all Interrupt handlers are complete */
6512         cnt = 0;
6513         do {
6514                 msleep(10);
6515                 if (!atomic_read(&sp->isr_cnt))
6516                         break;
6517                 cnt++;
6518         } while(cnt < 5);
6519 }
6520
6521 static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
6522 {
6523         int cnt = 0;
6524         struct XENA_dev_config __iomem *bar0 = sp->bar0;
6525         unsigned long flags;
6526         register u64 val64 = 0;
6527
6528         del_timer_sync(&sp->alarm_timer);
6529         /* If s2io_set_link task is executing, wait till it completes. */
6530         while (test_and_set_bit(0, &(sp->link_state))) {
6531                 msleep(50);
6532         }
6533         atomic_set(&sp->card_state, CARD_DOWN);
6534
6535         /* disable Tx and Rx traffic on the NIC */
6536         if (do_io)
6537                 stop_nic(sp);
6538
6539         s2io_rem_isr(sp);
6540
6541         /* Kill tasklet. */
6542         tasklet_kill(&sp->task);
6543
6544         /* Check if the device is Quiescent and then Reset the NIC */
6545         while(do_io) {
6546                 /* As per the HW requirement we need to replenish the
6547                  * receive buffer to avoid the ring bump. Since there is
6548                  * no intention of processing the Rx frame at this pointwe are
6549                  * just settting the ownership bit of rxd in Each Rx
6550                  * ring to HW and set the appropriate buffer size
6551                  * based on the ring mode
6552                  */
6553                 rxd_owner_bit_reset(sp);
6554
6555                 val64 = readq(&bar0->adapter_status);
6556                 if (verify_xena_quiescence(sp)) {
6557                         if(verify_pcc_quiescent(sp, sp->device_enabled_once))
6558                         break;
6559                 }
6560
6561                 msleep(50);
6562                 cnt++;
6563                 if (cnt == 10) {
6564                         DBG_PRINT(ERR_DBG,
6565                                   "s2io_close:Device not Quiescent ");
6566                         DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
6567                                   (unsigned long long) val64);
6568                         break;
6569                 }
6570         }
6571         if (do_io)
6572                 s2io_reset(sp);
6573
6574         spin_lock_irqsave(&sp->tx_lock, flags);
6575         /* Free all Tx buffers */
6576         free_tx_buffers(sp);
6577         spin_unlock_irqrestore(&sp->tx_lock, flags);
6578
6579         /* Free all Rx buffers */
6580         spin_lock_irqsave(&sp->rx_lock, flags);
6581         free_rx_buffers(sp);
6582         spin_unlock_irqrestore(&sp->rx_lock, flags);
6583
6584         clear_bit(0, &(sp->link_state));
6585 }
6586
/*
 * s2io_card_down - full shutdown of the adapter.
 * Wrapper around do_s2io_card_down() with register I/O enabled (do_io=1),
 * i.e. the NIC is actually stopped, quiesced and reset, not just the
 * driver's software state.
 */
static void s2io_card_down(struct s2io_nic * sp)
{
	do_s2io_card_down(sp, 1);
}
6591
6592 static int s2io_card_up(struct s2io_nic * sp)
6593 {
6594         int i, ret = 0;
6595         struct mac_info *mac_control;
6596         struct config_param *config;
6597         struct net_device *dev = (struct net_device *) sp->dev;
6598         u16 interruptible;
6599
6600         /* Initialize the H/W I/O registers */
6601         if (init_nic(sp) != 0) {
6602                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
6603                           dev->name);
6604                 s2io_reset(sp);
6605                 return -ENODEV;
6606         }
6607
6608         /*
6609          * Initializing the Rx buffers. For now we are considering only 1
6610          * Rx ring and initializing buffers into 30 Rx blocks
6611          */
6612         mac_control = &sp->mac_control;
6613         config = &sp->config;
6614
6615         for (i = 0; i < config->rx_ring_num; i++) {
6616                 if ((ret = fill_rx_buffers(sp, i))) {
6617                         DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
6618                                   dev->name);
6619                         s2io_reset(sp);
6620                         free_rx_buffers(sp);
6621                         return -ENOMEM;
6622                 }
6623                 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
6624                           atomic_read(&sp->rx_bufs_left[i]));
6625         }
6626         /* Maintain the state prior to the open */
6627         if (sp->promisc_flg)
6628                 sp->promisc_flg = 0;
6629         if (sp->m_cast_flg) {
6630                 sp->m_cast_flg = 0;
6631                 sp->all_multi_pos= 0;
6632         }
6633
6634         /* Setting its receive mode */
6635         s2io_set_multicast(dev);
6636
6637         if (sp->lro) {
6638                 /* Initialize max aggregatable pkts per session based on MTU */
6639                 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
6640                 /* Check if we can use(if specified) user provided value */
6641                 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
6642                         sp->lro_max_aggr_per_sess = lro_max_pkts;
6643         }
6644
6645         /* Enable Rx Traffic and interrupts on the NIC */
6646         if (start_nic(sp)) {
6647                 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
6648                 s2io_reset(sp);
6649                 free_rx_buffers(sp);
6650                 return -ENODEV;
6651         }
6652
6653         /* Add interrupt service routine */
6654         if (s2io_add_isr(sp) != 0) {
6655                 if (sp->intr_type == MSI_X)
6656                         s2io_rem_isr(sp);
6657                 s2io_reset(sp);
6658                 free_rx_buffers(sp);
6659                 return -ENODEV;
6660         }
6661
6662         S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
6663
6664         /* Enable tasklet for the device */
6665         tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
6666
6667         /*  Enable select interrupts */
6668         if (sp->intr_type != INTA)
6669                 en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
6670         else {
6671                 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
6672                 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
6673                 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
6674                 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
6675         }
6676
6677
6678         atomic_set(&sp->card_state, CARD_UP);
6679         return 0;
6680 }
6681
6682 /**
6683  * s2io_restart_nic - Resets the NIC.
6684  * @data : long pointer to the device private structure
6685  * Description:
6686  * This function is scheduled to be run by the s2io_tx_watchdog
6687  * function after 0.5 secs to reset the NIC. The idea is to reduce
6688  * the run time of the watch dog routine which is run holding a
6689  * spin lock.
6690  */
6691
6692 static void s2io_restart_nic(struct work_struct *work)
6693 {
6694         struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6695         struct net_device *dev = sp->dev;
6696
6697         rtnl_lock();
6698
6699         if (!netif_running(dev))
6700                 goto out_unlock;
6701
6702         s2io_card_down(sp);
6703         if (s2io_card_up(sp)) {
6704                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6705                           dev->name);
6706         }
6707         netif_wake_queue(dev);
6708         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6709                   dev->name);
6710 out_unlock:
6711         rtnl_unlock();
6712 }
6713
6714 /**
6715  *  s2io_tx_watchdog - Watchdog for transmit side.
6716  *  @dev : Pointer to net device structure
6717  *  Description:
6718  *  This function is triggered if the Tx Queue is stopped
6719  *  for a pre-defined amount of time when the Interface is still up.
6720  *  If the Interface is jammed in such a situation, the hardware is
6721  *  reset (by s2io_close) and restarted again (by s2io_open) to
6722  *  overcome any problem that might have been caused in the hardware.
6723  *  Return value:
6724  *  void
6725  */
6726
6727 static void s2io_tx_watchdog(struct net_device *dev)
6728 {
6729         struct s2io_nic *sp = dev->priv;
6730
6731         if (netif_carrier_ok(dev)) {
6732                 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
6733                 schedule_work(&sp->rst_timer_task);
6734                 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
6735         }
6736 }
6737
/**
 *   rx_osm_handler - To perform some OS related operations on SKB.
 *   @ring_data: the ring from which this RxD was extracted (carries the
 *   owning s2io_nic and per-ring state).
 *   @rxdp: the completed receive descriptor; Host_Control holds the SKB.
 *   Description:
 *   This function is called by the Rx interrupt serivce routine to perform
 *   some OS related operations on the SKB before passing it to the upper
 *   layers. It checks the descriptor transfer code, updates error and
 *   byte/packet statistics, sets the checksum status, optionally runs the
 *   LRO aggregation state machine, and finally hands the SKB (plain,
 *   VLAN-accelerated, or via the LRO queue) to the stack.
 *   Return value:
 *   SUCCESS on the normal path; 0 when the frame is dropped because of a
 *   bad transfer code.
 */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
	struct s2io_nic *sp = ring_data->nic;
	struct net_device *dev = (struct net_device *) sp->dev;
	/* The SKB pointer was stashed in Host_Control when the buffer
	 * was posted to the ring. */
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long) rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
	struct lro *lro;
	u8 err_mask;

	skb->dev = dev;

	if (err) {
		/* Check for parity error */
		if (err & 0x1) {
			sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
		}
		/* Transfer code occupies the bits from 48 upward; bucket
		 * the error into the matching per-cause counter. */
		err_mask = err >> 48;
		switch(err_mask) {
			case 1:
				sp->mac_control.stats_info->sw_stat.
				rx_parity_err_cnt++;
			break;

			case 2:
				sp->mac_control.stats_info->sw_stat.
				rx_abort_cnt++;
			break;

			case 3:
				sp->mac_control.stats_info->sw_stat.
				rx_parity_abort_cnt++;
			break;

			case 4:
				sp->mac_control.stats_info->sw_stat.
				rx_rda_fail_cnt++;
			break;

			case 5:
				sp->mac_control.stats_info->sw_stat.
				rx_unkn_prot_cnt++;
			break;

			case 6:
				sp->mac_control.stats_info->sw_stat.
				rx_fcs_err_cnt++;
			break;

			case 7:
				sp->mac_control.stats_info->sw_stat.
				rx_buf_size_err_cnt++;
			break;

			case 8:
				sp->mac_control.stats_info->sw_stat.
				rx_rxd_corrupt_cnt++;
			break;

			case 15:
				sp->mac_control.stats_info->sw_stat.
				rx_unkn_err_cnt++;
			break;
		}
		/*
		* Drop the packet if bad transfer code. Exception being
		* 0x5, which could be due to unsupported IPv6 extension header.
		* In this case, we let stack handle the packet.
		* Note that in this case, since checksum will be incorrect,
		* stack will validate the same.
		*/
		if (err_mask != 0x5) {
			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
				dev->name, err_mask);
			sp->stats.rx_crc_errors++;
			sp->mac_control.stats_info->sw_stat.mem_freed
				+= skb->truesize;
			dev_kfree_skb(skb);
			atomic_dec(&sp->rx_bufs_left[ring_no]);
			rxdp->Host_Control = 0;
			return 0;
		}
	}

	/* Updating statistics */
	sp->stats.rx_packets++;
	rxdp->Host_Control = 0;
	if (sp->rxd_mode == RXD_MODE_1) {
		/* 1-buffer mode: the whole frame sits in buffer 0. */
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		sp->stats.rx_bytes += len;
		skb_put(skb, len);

	} else if (sp->rxd_mode == RXD_MODE_3B) {
		/* 2-buffer mode: copy the small buffer-0 header in front
		 * of the buffer-2 payload already in the SKB. */
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
		unsigned char *buff = skb_push(skb, buf0_len);

		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
		sp->stats.rx_bytes += buf0_len + buf2_len;
		memcpy(buff, ba->ba_0, buf0_len);
		skb_put(skb, buf2_len);
	}

	/* Checksum/LRO handling only for non-fragmented TCP/UDP frames
	 * when Rx checksum offload is enabled. */
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
	    (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
	    (sp->rx_csum)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (sp->lro) {
				u32 tcp_len;
				u8 *tcp;
				int ret = 0;

				/* Dispatch on the LRO state machine's
				 * verdict for this segment. */
				ret = s2io_club_tcp_session(skb->data, &tcp,
						&tcp_len, &lro, rxdp, sp);
				switch (ret) {
					case 3: /* Begin anew */
						lro->parent = skb;
						goto aggregate;
					case 1: /* Aggregate */
					{
						lro_append_pkt(sp, lro,
							skb, tcp_len);
						goto aggregate;
					}
					case 4: /* Flush session */
					{
						lro_append_pkt(sp, lro,
							skb, tcp_len);
						queue_rx_frame(lro->parent);
						clear_lro_session(lro);
						sp->mac_control.stats_info->
						    sw_stat.flush_max_pkts++;
						goto aggregate;
					}
					case 2: /* Flush both */
						lro->parent->data_len =
							lro->frags_len;
						sp->mac_control.stats_info->
						     sw_stat.sending_both++;
						queue_rx_frame(lro->parent);
						clear_lro_session(lro);
						goto send_up;
					case 0: /* sessions exceeded */
					case -1: /* non-TCP or not
						  * L2 aggregatable
						  */
					case 5: /*
						 * First pkt in session not
						 * L3/L4 aggregatable
						 */
						break;
					default:
						DBG_PRINT(ERR_DBG,
							"%s: Samadhana!!\n",
							 __FUNCTION__);
						BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}
	sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
	if (!sp->lro) {
		skb->protocol = eth_type_trans(skb, dev);
		if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
			vlan_strip_flag)) {
			/* Queueing the vlan frame to the upper layer */
			if (napi)
				vlan_hwaccel_receive_skb(skb, sp->vlgrp,
					RXD_GET_VLAN_TAG(rxdp->Control_2));
			else
				vlan_hwaccel_rx(skb, sp->vlgrp,
					RXD_GET_VLAN_TAG(rxdp->Control_2));
		} else {
			if (napi)
				netif_receive_skb(skb);
			else
				netif_rx(skb);
		}
	} else {
send_up:
		queue_rx_frame(skb);
	}
	dev->last_rx = jiffies;
aggregate:
	atomic_dec(&sp->rx_bufs_left[ring_no]);
	return SUCCESS;
}
6963
6964 /**
6965  *  s2io_link - stops/starts the Tx queue.
6966  *  @sp : private member of the device structure, which is a pointer to the
6967  *  s2io_nic structure.
6968  *  @link : inidicates whether link is UP/DOWN.
6969  *  Description:
6970  *  This function stops/starts the Tx queue depending on whether the link
6971  *  status of the NIC is is down or up. This is called by the Alarm
6972  *  interrupt handler whenever a link change interrupt comes up.
6973  *  Return value:
6974  *  void.
6975  */
6976
6977 static void s2io_link(struct s2io_nic * sp, int link)
6978 {
6979         struct net_device *dev = (struct net_device *) sp->dev;
6980
6981         if (link != sp->last_link_state) {
6982                 if (link == LINK_DOWN) {
6983                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
6984                         netif_carrier_off(dev);
6985                         if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
6986                         sp->mac_control.stats_info->sw_stat.link_up_time = 
6987                                 jiffies - sp->start_time;
6988                         sp->mac_control.stats_info->sw_stat.link_down_cnt++;
6989                 } else {
6990                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
6991                         if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
6992                         sp->mac_control.stats_info->sw_stat.link_down_time = 
6993                                 jiffies - sp->start_time;
6994                         sp->mac_control.stats_info->sw_stat.link_up_cnt++;
6995                         netif_carrier_on(dev);
6996                 }
6997         }
6998         sp->last_link_state = link;
6999         sp->start_time = jiffies;
7000 }
7001
7002 /**
7003  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7004  *  @sp : private member of the device structure, which is a pointer to the
7005  *  s2io_nic structure.
7006  *  Description:
7007  *  This function initializes a few of the PCI and PCI-X configuration registers
7008  *  with recommended values.
7009  *  Return value:
7010  *  void
7011  */
7012
7013 static void s2io_init_pci(struct s2io_nic * sp)
7014 {
7015         u16 pci_cmd = 0, pcix_cmd = 0;
7016
7017         /* Enable Data Parity Error Recovery in PCI-X command register. */
7018         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7019                              &(pcix_cmd));
7020         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7021                               (pcix_cmd | 1));
7022         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7023                              &(pcix_cmd));
7024
7025         /* Set the PErr Response bit in PCI command register. */
7026         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7027         pci_write_config_word(sp->pdev, PCI_COMMAND,
7028                               (pci_cmd | PCI_COMMAND_PARITY));
7029         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7030 }
7031
7032 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
7033 {
7034         if ( tx_fifo_num > 8) {
7035                 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
7036                          "supported\n");
7037                 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
7038                 tx_fifo_num = 8;
7039         }
7040         if ( rx_ring_num > 8) {
7041                 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
7042                          "supported\n");
7043                 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
7044                 rx_ring_num = 8;
7045         }
7046         if (*dev_intr_type != INTA)
7047                 napi = 0;
7048
7049 #ifndef CONFIG_PCI_MSI
7050         if (*dev_intr_type != INTA) {
7051                 DBG_PRINT(ERR_DBG, "s2io: This kernel does not support"
7052                           "MSI/MSI-X. Defaulting to INTA\n");
7053                 *dev_intr_type = INTA;
7054         }
7055 #else
7056         if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7057                 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7058                           "Defaulting to INTA\n");
7059                 *dev_intr_type = INTA;
7060         }
7061 #endif
7062         if ((*dev_intr_type == MSI_X) &&
7063                         ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7064                         (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7065                 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7066                                         "Defaulting to INTA\n");
7067                 *dev_intr_type = INTA;
7068         }
7069
7070         if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7071                 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7072                 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7073                 rx_ring_mode = 1;
7074         }
7075         return SUCCESS;
7076 }
7077
7078 /**
7079  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7080  * or Traffic class respectively.
7081  * @nic: device peivate variable
7082  * Description: The function configures the receive steering to
7083  * desired receive ring.
7084  * Return Value:  SUCCESS on success and
7085  * '-1' on failure (endian settings incorrect).
7086  */
7087 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7088 {
7089         struct XENA_dev_config __iomem *bar0 = nic->bar0;
7090         register u64 val64 = 0;
7091
7092         if (ds_codepoint > 63)
7093                 return FAILURE;
7094
7095         val64 = RTS_DS_MEM_DATA(ring);
7096         writeq(val64, &bar0->rts_ds_mem_data);
7097
7098         val64 = RTS_DS_MEM_CTRL_WE |
7099                 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7100                 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7101
7102         writeq(val64, &bar0->rts_ds_mem_ctrl);
7103
7104         return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7105                                 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7106                                 S2IO_BIT_RESET);
7107 }
7108
7109 /**
7110  *  s2io_init_nic - Initialization of the adapter .
7111  *  @pdev : structure containing the PCI related information of the device.
7112  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7113  *  Description:
7114  *  The function initializes an adapter identified by the pci_dec structure.
7115  *  All OS related initialization including memory and device structure and
7116  *  initlaization of the device private variable is done. Also the swapper
7117  *  control register is initialized to enable read and write into the I/O
7118  *  registers of the device.
7119  *  Return value:
7120  *  returns 0 on success and negative on failure.
7121  */
7122
7123 static int __devinit
7124 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7125 {
7126         struct s2io_nic *sp;
7127         struct net_device *dev;
7128         int i, j, ret;
7129         int dma_flag = FALSE;
7130         u32 mac_up, mac_down;
7131         u64 val64 = 0, tmp64 = 0;
7132         struct XENA_dev_config __iomem *bar0 = NULL;
7133         u16 subid;
7134         struct mac_info *mac_control;
7135         struct config_param *config;
7136         int mode;
7137         u8 dev_intr_type = intr_type;
7138
7139         if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
7140                 return ret;
7141
7142         if ((ret = pci_enable_device(pdev))) {
7143                 DBG_PRINT(ERR_DBG,
7144                           "s2io_init_nic: pci_enable_device failed\n");
7145                 return ret;
7146         }
7147
7148         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7149                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7150                 dma_flag = TRUE;
7151                 if (pci_set_consistent_dma_mask
7152                     (pdev, DMA_64BIT_MASK)) {
7153                         DBG_PRINT(ERR_DBG,
7154                                   "Unable to obtain 64bit DMA for \
7155                                         consistent allocations\n");
7156                         pci_disable_device(pdev);
7157                         return -ENOMEM;
7158                 }
7159         } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7160                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7161         } else {
7162                 pci_disable_device(pdev);
7163                 return -ENOMEM;
7164         }
7165         if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7166                 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
7167                 pci_disable_device(pdev);
7168                 return -ENODEV;
7169         }
7170
7171         dev = alloc_etherdev(sizeof(struct s2io_nic));
7172         if (dev == NULL) {
7173                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7174                 pci_disable_device(pdev);
7175                 pci_release_regions(pdev);
7176                 return -ENODEV;
7177         }
7178
7179         pci_set_master(pdev);
7180         pci_set_drvdata(pdev, dev);
7181         SET_MODULE_OWNER(dev);
7182         SET_NETDEV_DEV(dev, &pdev->dev);
7183
7184         /*  Private member variable initialized to s2io NIC structure */
7185         sp = dev->priv;
7186         memset(sp, 0, sizeof(struct s2io_nic));
7187         sp->dev = dev;
7188         sp->pdev = pdev;
7189         sp->high_dma_flag = dma_flag;
7190         sp->device_enabled_once = FALSE;
7191         if (rx_ring_mode == 1)
7192                 sp->rxd_mode = RXD_MODE_1;
7193         if (rx_ring_mode == 2)
7194                 sp->rxd_mode = RXD_MODE_3B;
7195
7196         sp->intr_type = dev_intr_type;
7197
7198         if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7199                 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7200                 sp->device_type = XFRAME_II_DEVICE;
7201         else
7202                 sp->device_type = XFRAME_I_DEVICE;
7203
7204         sp->lro = lro;
7205
7206         /* Initialize some PCI/PCI-X fields of the NIC. */
7207         s2io_init_pci(sp);
7208
7209         /*
7210          * Setting the device configuration parameters.
7211          * Most of these parameters can be specified by the user during
7212          * module insertion as they are module loadable parameters. If
7213          * these parameters are not not specified during load time, they
7214          * are initialized with default values.
7215          */
7216         mac_control = &sp->mac_control;
7217         config = &sp->config;
7218
7219         /* Tx side parameters. */
7220         config->tx_fifo_num = tx_fifo_num;
7221         for (i = 0; i < MAX_TX_FIFOS; i++) {
7222                 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7223                 config->tx_cfg[i].fifo_priority = i;
7224         }
7225
7226         /* mapping the QoS priority to the configured fifos */
7227         for (i = 0; i < MAX_TX_FIFOS; i++)
7228                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
7229
7230         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7231         for (i = 0; i < config->tx_fifo_num; i++) {
7232                 config->tx_cfg[i].f_no_snoop =
7233                     (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7234                 if (config->tx_cfg[i].fifo_len < 65) {
7235                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7236                         break;
7237                 }
7238         }
7239         /* + 2 because one Txd for skb->data and one Txd for UFO */
7240         config->max_txds = MAX_SKB_FRAGS + 2;
7241
7242         /* Rx side parameters. */
7243         config->rx_ring_num = rx_ring_num;
7244         for (i = 0; i < MAX_RX_RINGS; i++) {
7245                 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7246                     (rxd_count[sp->rxd_mode] + 1);
7247                 config->rx_cfg[i].ring_priority = i;
7248         }
7249
7250         for (i = 0; i < rx_ring_num; i++) {
7251                 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7252                 config->rx_cfg[i].f_no_snoop =
7253                     (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7254         }
7255
7256         /*  Setting Mac Control parameters */
7257         mac_control->rmac_pause_time = rmac_pause_time;
7258         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7259         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7260
7261
7262         /* Initialize Ring buffer parameters. */
7263         for (i = 0; i < config->rx_ring_num; i++)
7264                 atomic_set(&sp->rx_bufs_left[i], 0);
7265
7266         /* Initialize the number of ISRs currently running */
7267         atomic_set(&sp->isr_cnt, 0);
7268
7269         /*  initialize the shared memory used by the NIC and the host */
7270         if (init_shared_mem(sp)) {
7271                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7272                           dev->name);
7273                 ret = -ENOMEM;
7274                 goto mem_alloc_failed;
7275         }
7276
7277         sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7278                                      pci_resource_len(pdev, 0));
7279         if (!sp->bar0) {
7280                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7281                           dev->name);
7282                 ret = -ENOMEM;
7283                 goto bar0_remap_failed;
7284         }
7285
7286         sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7287                                      pci_resource_len(pdev, 2));
7288         if (!sp->bar1) {
7289                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7290                           dev->name);
7291                 ret = -ENOMEM;
7292                 goto bar1_remap_failed;
7293         }
7294
7295         dev->irq = pdev->irq;
7296         dev->base_addr = (unsigned long) sp->bar0;
7297
7298         /* Initializing the BAR1 address as the start of the FIFO pointer. */
7299         for (j = 0; j < MAX_TX_FIFOS; j++) {
7300                 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7301                     (sp->bar1 + (j * 0x00020000));
7302         }
7303
7304         /*  Driver entry points */
7305         dev->open = &s2io_open;
7306         dev->stop = &s2io_close;
7307         dev->hard_start_xmit = &s2io_xmit;
7308         dev->get_stats = &s2io_get_stats;
7309         dev->set_multicast_list = &s2io_set_multicast;
7310         dev->do_ioctl = &s2io_ioctl;
7311         dev->change_mtu = &s2io_change_mtu;
7312         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7313         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7314         dev->vlan_rx_register = s2io_vlan_rx_register;
7315
7316         /*
7317          * will use eth_mac_addr() for  dev->set_mac_address
7318          * mac address will be set every time dev->open() is called
7319          */
7320         netif_napi_add(dev, &sp->napi, s2io_poll, 32);
7321
7322 #ifdef CONFIG_NET_POLL_CONTROLLER
7323         dev->poll_controller = s2io_netpoll;
7324 #endif
7325
7326         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7327         if (sp->high_dma_flag == TRUE)
7328                 dev->features |= NETIF_F_HIGHDMA;
7329         dev->features |= NETIF_F_TSO;
7330         dev->features |= NETIF_F_TSO6;
7331         if ((sp->device_type & XFRAME_II_DEVICE) && (ufo))  {
7332                 dev->features |= NETIF_F_UFO;
7333                 dev->features |= NETIF_F_HW_CSUM;
7334         }
7335
7336         dev->tx_timeout = &s2io_tx_watchdog;
7337         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7338         INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7339         INIT_WORK(&sp->set_link_task, s2io_set_link);
7340
7341         pci_save_state(sp->pdev);
7342
7343         /* Setting swapper control on the NIC, for proper reset operation */
7344         if (s2io_set_swapper(sp)) {
7345                 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7346                           dev->name);
7347                 ret = -EAGAIN;
7348                 goto set_swap_failed;
7349         }
7350
7351         /* Verify if the Herc works on the slot its placed into */
7352         if (sp->device_type & XFRAME_II_DEVICE) {
7353                 mode = s2io_verify_pci_mode(sp);
7354                 if (mode < 0) {
7355                         DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7356                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7357                         ret = -EBADSLT;
7358                         goto set_swap_failed;
7359                 }
7360         }
7361
7362         /* Not needed for Herc */
7363         if (sp->device_type & XFRAME_I_DEVICE) {
7364                 /*
7365                  * Fix for all "FFs" MAC address problems observed on
7366                  * Alpha platforms
7367                  */
7368                 fix_mac_address(sp);
7369                 s2io_reset(sp);
7370         }
7371
7372         /*
7373          * MAC address initialization.
7374          * For now only one mac address will be read and used.
7375          */
7376         bar0 = sp->bar0;
7377         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7378             RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7379         writeq(val64, &bar0->rmac_addr_cmd_mem);
7380         wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7381                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
7382         tmp64 = readq(&bar0->rmac_addr_data0_mem);
7383         mac_down = (u32) tmp64;
7384         mac_up = (u32) (tmp64 >> 32);
7385
7386         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7387         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7388         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7389         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7390         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7391         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7392
7393         /*  Set the factory defined MAC address initially   */
7394         dev->addr_len = ETH_ALEN;
7395         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7396
7397          /* Store the values of the MSIX table in the s2io_nic structure */
7398         store_xmsi_data(sp);
7399         /* reset Nic and bring it to known state */
7400         s2io_reset(sp);
7401
7402         /*
7403          * Initialize the tasklet status and link state flags
7404          * and the card state parameter
7405          */
7406         atomic_set(&(sp->card_state), 0);
7407         sp->tasklet_status = 0;
7408         sp->link_state = 0;
7409
7410         /* Initialize spinlocks */
7411         spin_lock_init(&sp->tx_lock);
7412
7413         if (!napi)
7414                 spin_lock_init(&sp->put_lock);
7415         spin_lock_init(&sp->rx_lock);
7416
7417         /*
7418          * SXE-002: Configure link and activity LED to init state
7419          * on driver load.
7420          */
7421         subid = sp->pdev->subsystem_device;
7422         if ((subid & 0xFF) >= 0x07) {
7423                 val64 = readq(&bar0->gpio_control);
7424                 val64 |= 0x0000800000000000ULL;
7425                 writeq(val64, &bar0->gpio_control);
7426                 val64 = 0x0411040400000000ULL;
7427                 writeq(val64, (void __iomem *) bar0 + 0x2700);
7428                 val64 = readq(&bar0->gpio_control);
7429         }
7430
7431         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
7432
7433         if (register_netdev(dev)) {
7434                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7435                 ret = -ENODEV;
7436                 goto register_failed;
7437         }
7438         s2io_vpd_read(sp);
7439         DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
7440         DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7441                   sp->product_name, pdev->revision);
7442         DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7443                   s2io_driver_version);
7444         DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
7445                           "%02x:%02x:%02x:%02x:%02x:%02x", dev->name,
7446                           sp->def_mac_addr[0].mac_addr[0],
7447                           sp->def_mac_addr[0].mac_addr[1],
7448                           sp->def_mac_addr[0].mac_addr[2],
7449                           sp->def_mac_addr[0].mac_addr[3],
7450                           sp->def_mac_addr[0].mac_addr[4],
7451                           sp->def_mac_addr[0].mac_addr[5]);
7452         DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
7453         if (sp->device_type & XFRAME_II_DEVICE) {
7454                 mode = s2io_print_pci_mode(sp);
7455                 if (mode < 0) {
7456                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7457                         ret = -EBADSLT;
7458                         unregister_netdev(dev);
7459                         goto set_swap_failed;
7460                 }
7461         }
7462         switch(sp->rxd_mode) {
7463                 case RXD_MODE_1:
7464                     DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7465                                                 dev->name);
7466                     break;
7467                 case RXD_MODE_3B:
7468                     DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7469                                                 dev->name);
7470                     break;
7471         }
7472
7473         if (napi)
7474                 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7475         switch(sp->intr_type) {
7476                 case INTA:
7477                     DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7478                     break;
7479                 case MSI_X:
7480                     DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7481                     break;
7482         }
7483         if (sp->lro)
7484                 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7485                           dev->name);
7486         if (ufo)
7487                 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7488                                         " enabled\n", dev->name);
7489         /* Initialize device name */
7490         sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7491
7492         /* Initialize bimodal Interrupts */
7493         sp->config.bimodal = bimodal;
7494         if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
7495                 sp->config.bimodal = 0;
7496                 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
7497                         dev->name);
7498         }
7499
7500         /*
7501          * Make Link state as off at this point, when the Link change
7502          * interrupt comes the state will be automatically changed to
7503          * the right state.
7504          */
7505         netif_carrier_off(dev);
7506
7507         return 0;
7508
7509       register_failed:
7510       set_swap_failed:
7511         iounmap(sp->bar1);
7512       bar1_remap_failed:
7513         iounmap(sp->bar0);
7514       bar0_remap_failed:
7515       mem_alloc_failed:
7516         free_shared_mem(sp);
7517         pci_disable_device(pdev);
7518         pci_release_regions(pdev);
7519         pci_set_drvdata(pdev, NULL);
7520         free_netdev(dev);
7521
7522         return ret;
7523 }
7524
7525 /**
7526  * s2io_rem_nic - Free the PCI device
7527  * @pdev: structure containing the PCI related information of the device.
7528  * Description: This function is called by the Pci subsystem to release a
7529  * PCI device and free up all resource held up by the device. This could
7530  * be in response to a Hot plug event or when the driver is to be removed
7531  * from memory.
7532  */
7533
7534 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7535 {
7536         struct net_device *dev =
7537             (struct net_device *) pci_get_drvdata(pdev);
7538         struct s2io_nic *sp;
7539
7540         if (dev == NULL) {
7541                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7542                 return;
7543         }
7544
7545         flush_scheduled_work();
7546
7547         sp = dev->priv;
7548         unregister_netdev(dev);
7549
7550         free_shared_mem(sp);
7551         iounmap(sp->bar0);
7552         iounmap(sp->bar1);
7553         pci_release_regions(pdev);
7554         pci_set_drvdata(pdev, NULL);
7555         free_netdev(dev);
7556         pci_disable_device(pdev);
7557 }
7558
7559 /**
7560  * s2io_starter - Entry point for the driver
7561  * Description: This function is the entry point for the driver. It verifies
7562  * the module loadable parameters and initializes PCI configuration space.
7563  */
7564
7565 int __init s2io_starter(void)
7566 {
7567         return pci_register_driver(&s2io_driver);
7568 }
7569
/**
 * s2io_closer - Cleanup routine for the driver
 * Description: Module exit point.  It unregisters the PCI driver; the
 * PCI core then invokes s2io_rem_nic() for each bound device.
 */

static __exit void s2io_closer(void)
{
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}
7580
/* Module loader hooks: driver load and unload entry points. */
module_init(s2io_starter);
module_exit(s2io_closer);
7583
7584 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7585                 struct tcphdr **tcp, struct RxD_t *rxdp)
7586 {
7587         int ip_off;
7588         u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7589
7590         if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7591                 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7592                           __FUNCTION__);
7593                 return -1;
7594         }
7595
7596         /* TODO:
7597          * By default the VLAN field in the MAC is stripped by the card, if this
7598          * feature is turned off in rx_pa_cfg register, then the ip_off field
7599          * has to be shifted by a further 2 bytes
7600          */
7601         switch (l2_type) {
7602                 case 0: /* DIX type */
7603                 case 4: /* DIX type with VLAN */
7604                         ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7605                         break;
7606                 /* LLC, SNAP etc are considered non-mergeable */
7607                 default:
7608                         return -1;
7609         }
7610
7611         *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7612         ip_len = (u8)((*ip)->ihl);
7613         ip_len <<= 2;
7614         *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7615
7616         return 0;
7617 }
7618
7619 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7620                                   struct tcphdr *tcp)
7621 {
7622         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7623         if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7624            (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7625                 return -1;
7626         return 0;
7627 }
7628
7629 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7630 {
7631         return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
7632 }
7633
7634 static void initiate_new_session(struct lro *lro, u8 *l2h,
7635                      struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7636 {
7637         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7638         lro->l2h = l2h;
7639         lro->iph = ip;
7640         lro->tcph = tcp;
7641         lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7642         lro->tcp_ack = ntohl(tcp->ack_seq);
7643         lro->sg_num = 1;
7644         lro->total_len = ntohs(ip->tot_len);
7645         lro->frags_len = 0;
7646         /*
7647          * check if we saw TCP timestamp. Other consistency checks have
7648          * already been done.
7649          */
7650         if (tcp->doff == 8) {
7651                 u32 *ptr;
7652                 ptr = (u32 *)(tcp+1);
7653                 lro->saw_ts = 1;
7654                 lro->cur_tsval = *(ptr+1);
7655                 lro->cur_tsecr = *(ptr+2);
7656         }
7657         lro->in_use = 1;
7658 }
7659
7660 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7661 {
7662         struct iphdr *ip = lro->iph;
7663         struct tcphdr *tcp = lro->tcph;
7664         __sum16 nchk;
7665         struct stat_block *statinfo = sp->mac_control.stats_info;
7666         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7667
7668         /* Update L3 header */
7669         ip->tot_len = htons(lro->total_len);
7670         ip->check = 0;
7671         nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7672         ip->check = nchk;
7673
7674         /* Update L4 header */
7675         tcp->ack_seq = lro->tcp_ack;
7676         tcp->window = lro->window;
7677
7678         /* Update tsecr field if this session has timestamps enabled */
7679         if (lro->saw_ts) {
7680                 u32 *ptr = (u32 *)(tcp + 1);
7681                 *(ptr+2) = lro->cur_tsecr;
7682         }
7683
7684         /* Update counters required for calculation of
7685          * average no. of packets aggregated.
7686          */
7687         statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7688         statinfo->sw_stat.num_aggregations++;
7689 }
7690
7691 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7692                 struct tcphdr *tcp, u32 l4_pyld)
7693 {
7694         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7695         lro->total_len += l4_pyld;
7696         lro->frags_len += l4_pyld;
7697         lro->tcp_next_seq += l4_pyld;
7698         lro->sg_num++;
7699
7700         /* Update ack seq no. and window ad(from this pkt) in LRO object */
7701         lro->tcp_ack = tcp->ack_seq;
7702         lro->window = tcp->window;
7703
7704         if (lro->saw_ts) {
7705                 u32 *ptr;
7706                 /* Update tsecr and tsval from this packet */
7707                 ptr = (u32 *) (tcp + 1);
7708                 lro->cur_tsval = *(ptr + 1);
7709                 lro->cur_tsecr = *(ptr + 2);
7710         }
7711 }
7712
7713 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7714                                     struct tcphdr *tcp, u32 tcp_pyld_len)
7715 {
7716         u8 *ptr;
7717
7718         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7719
7720         if (!tcp_pyld_len) {
7721                 /* Runt frame or a pure ack */
7722                 return -1;
7723         }
7724
7725         if (ip->ihl != 5) /* IP has options */
7726                 return -1;
7727
7728         /* If we see CE codepoint in IP header, packet is not mergeable */
7729         if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7730                 return -1;
7731
7732         /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7733         if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
7734                                     tcp->ece || tcp->cwr || !tcp->ack) {
7735                 /*
7736                  * Currently recognize only the ack control word and
7737                  * any other control field being set would result in
7738                  * flushing the LRO session
7739                  */
7740                 return -1;
7741         }
7742
7743         /*
7744          * Allow only one TCP timestamp option. Don't aggregate if
7745          * any other options are detected.
7746          */
7747         if (tcp->doff != 5 && tcp->doff != 8)
7748                 return -1;
7749
7750         if (tcp->doff == 8) {
7751                 ptr = (u8 *)(tcp + 1);
7752                 while (*ptr == TCPOPT_NOP)
7753                         ptr++;
7754                 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
7755                         return -1;
7756
7757                 /* Ensure timestamp value increases monotonically */
7758                 if (l_lro)
7759                         if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
7760                                 return -1;
7761
7762                 /* timestamp echo reply should be non-zero */
7763                 if (*((u32 *)(ptr+6)) == 0)
7764                         return -1;
7765         }
7766
7767         return 0;
7768 }
7769
/*
 * Classify a received TCP segment for LRO and update session state.
 * On return, *tcp/*tcp_len describe the segment's TCP header/payload
 * and *lro points at the session involved (where one applies).
 *
 * Return codes assigned below:
 *   0 - all LRO sessions in use, packet goes up unaggregated (*lro NULL)
 *   1 - segment aggregated into an existing session
 *   2 - session must be flushed (out-of-order seq, or not mergeable)
 *   3 - a new session was initiated with this segment
 *   4 - aggregated, and the session hit max aggregation; flush it
 *   5 - segment not L3/L4 mergeable; send it up without a session
 * A non-zero value from check_L2_lro_capable() is returned unchanged.
 */
static int
s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
		      struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;

	/* Reject frames that are not mergeable at L2 before any L3/L4 work. */
	if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
					 rxdp))) {
		DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
			  ip->saddr, ip->daddr);
	} else {
		return ret;
	}

	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* First pass: look for an in-use session matching this flow. */
	for (i=0; i<MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &sp->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			/* A sequence gap means the session can't absorb
			 * this segment; flush what was gathered so far. */
			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
					  "0x%x, actual 0x%x\n", __FUNCTION__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				sp->mac_control.stats_info->
				   sw_stat.outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
			return 5;
		}

		/* Second pass: claim a free session slot for this flow. */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &sp->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
			  __FUNCTION__);
		*lro = NULL;
		return ret;
	}

	/* Act on the classification decided above. */
	switch (ret) {
		case 3:
			initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
			break;
		case 2:
			/* Prepare the head segment's headers for the flush. */
			update_L3L4_header(sp, *lro);
			break;
		case 1:
			aggregate_new_rx(*lro, ip, tcph, *tcp_len);
			if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
				update_L3L4_header(sp, *lro);
				ret = 4; /* Flush the LRO */
			}
			break;
		default:
			DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
				__FUNCTION__);
			break;
	}

	return ret;
}
7865
7866 static void clear_lro_session(struct lro *lro)
7867 {
7868         static u16 lro_struct_size = sizeof(struct lro);
7869
7870         memset(lro, 0, lro_struct_size);
7871 }
7872
7873 static void queue_rx_frame(struct sk_buff *skb)
7874 {
7875         struct net_device *dev = skb->dev;
7876
7877         skb->protocol = eth_type_trans(skb, dev);
7878         if (napi)
7879                 netif_receive_skb(skb);
7880         else
7881                 netif_rx(skb);
7882 }
7883
7884 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
7885                            struct sk_buff *skb,
7886                            u32 tcp_len)
7887 {
7888         struct sk_buff *first = lro->parent;
7889
7890         first->len += tcp_len;
7891         first->data_len = lro->frags_len;
7892         skb_pull(skb, (skb->len - tcp_len));
7893         if (skb_shinfo(first)->frag_list)
7894                 lro->last_frag->next = skb;
7895         else
7896                 skb_shinfo(first)->frag_list = skb;
7897         first->truesize += skb->truesize;
7898         lro->last_frag = skb;
7899         sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
7900         return;
7901 }
7902
7903 /**
7904  * s2io_io_error_detected - called when PCI error is detected
7905  * @pdev: Pointer to PCI device
7906  * @state: The current pci connection state
7907  *
7908  * This function is called after a PCI bus error affecting
7909  * this device has been detected.
7910  */
7911 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
7912                                                pci_channel_state_t state)
7913 {
7914         struct net_device *netdev = pci_get_drvdata(pdev);
7915         struct s2io_nic *sp = netdev->priv;
7916
7917         netif_device_detach(netdev);
7918
7919         if (netif_running(netdev)) {
7920                 /* Bring down the card, while avoiding PCI I/O */
7921                 do_s2io_card_down(sp, 0);
7922         }
7923         pci_disable_device(pdev);
7924
7925         return PCI_ERS_RESULT_NEED_RESET;
7926 }
7927
7928 /**
7929  * s2io_io_slot_reset - called after the pci bus has been reset.
7930  * @pdev: Pointer to PCI device
7931  *
7932  * Restart the card from scratch, as if from a cold-boot.
7933  * At this point, the card has exprienced a hard reset,
7934  * followed by fixups by BIOS, and has its config space
7935  * set up identically to what it was at cold boot.
7936  */
7937 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
7938 {
7939         struct net_device *netdev = pci_get_drvdata(pdev);
7940         struct s2io_nic *sp = netdev->priv;
7941
7942         if (pci_enable_device(pdev)) {
7943                 printk(KERN_ERR "s2io: "
7944                        "Cannot re-enable PCI device after reset.\n");
7945                 return PCI_ERS_RESULT_DISCONNECT;
7946         }
7947
7948         pci_set_master(pdev);
7949         s2io_reset(sp);
7950
7951         return PCI_ERS_RESULT_RECOVERED;
7952 }
7953
7954 /**
7955  * s2io_io_resume - called when traffic can start flowing again.
7956  * @pdev: Pointer to PCI device
7957  *
7958  * This callback is called when the error recovery driver tells
7959  * us that its OK to resume normal operation.
7960  */
7961 static void s2io_io_resume(struct pci_dev *pdev)
7962 {
7963         struct net_device *netdev = pci_get_drvdata(pdev);
7964         struct s2io_nic *sp = netdev->priv;
7965
7966         if (netif_running(netdev)) {
7967                 if (s2io_card_up(sp)) {
7968                         printk(KERN_ERR "s2io: "
7969                                "Can't bring device back up after reset.\n");
7970                         return;
7971                 }
7972
7973                 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
7974                         s2io_card_down(sp);
7975                         printk(KERN_ERR "s2io: "
7976                                "Can't resetore mac addr after reset.\n");
7977                         return;
7978                 }
7979         }
7980
7981         netif_device_attach(netdev);
7982         netif_wake_queue(netdev);
7983 }