staging: et131x: Convert rest of pci memory management to dma api
[pandora-kernel.git] / drivers / staging / et131x / et131x.c
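This commit converts the driver's remaining pci_alloc_consistent()/pci_free_consistent()
usage to the generic DMA API. A minimal sketch of the replacement pattern
(illustrative only; "bufsize" is a placeholder, and the ring fields are taken
from the fbr_lookup struct below):

	-	fbr->ring_virtaddr = pci_alloc_consistent(adapter->pdev, bufsize,
	-						   &fbr->ring_physaddr);
	+	fbr->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, bufsize,
	+						&fbr->ring_physaddr, GFP_KERNEL);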
/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *   http://www.agere.com
 *
 * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com>
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software.  Using this
 * software indicates your acceptance of these terms and conditions.  If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following Disclaimer as comments in the code as
 *    well as in the documentation and/or other materials provided with the
 *    distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following Disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/phy.h>

#include "et131x.h"

MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver "
                   "for the ET1310 by Agere Systems");

/* EEPROM defines */
#define MAX_NUM_REGISTER_POLLS          1000
#define MAX_NUM_WRITE_RETRIES           2

/* MAC defines */
#define COUNTER_WRAP_16_BIT 0x10000
#define COUNTER_WRAP_12_BIT 0x1000

/* PCI defines */
#define INTERNAL_MEM_SIZE       0x400   /* 1024 of internal memory */
#define INTERNAL_MEM_RX_OFFSET  0x1FF   /* 50%   Tx, 50%   Rx */

/* ISR defines */
/*
 * For interrupts, normal running is:
 *       rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
 *       watchdog_interrupt & txdma_xfer_done
 *
 * In both cases, when flow control is enabled for either Tx or bi-directional
 * traffic, we additionally enable rx_fbr0_low and rx_fbr1_low, so we know
 * when the buffer rings are running low.
 */
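/* In these masks a set bit disables (masks) the corresponding interrupt
 * source; INT_MASK_DISABLE therefore masks every source.
 */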
#define INT_MASK_DISABLE            0xffffffff

/* NOTE: Masking out MAC_STAT Interrupt for now...
 * #define INT_MASK_ENABLE             0xfff6bf17
 * #define INT_MASK_ENABLE_NO_FLOW     0xfff6bfd7
 */
#define INT_MASK_ENABLE             0xfffebf17
#define INT_MASK_ENABLE_NO_FLOW     0xfffebfd7

/* General defines */
/* Packet and header sizes */
#define NIC_MIN_PACKET_SIZE     60

/* Multicast list size */
#define NIC_MAX_MCAST_LIST      128

/* Supported Filters */
#define ET131X_PACKET_TYPE_DIRECTED             0x0001
#define ET131X_PACKET_TYPE_MULTICAST            0x0002
#define ET131X_PACKET_TYPE_BROADCAST            0x0004
#define ET131X_PACKET_TYPE_PROMISCUOUS          0x0008
#define ET131X_PACKET_TYPE_ALL_MULTICAST        0x0010

/* Tx Timeout */
#define ET131X_TX_TIMEOUT       (1 * HZ)
#define NIC_SEND_HANG_THRESHOLD 0

/* MP_TCB flags */
#define fMP_DEST_MULTI                  0x00000001
#define fMP_DEST_BROAD                  0x00000002

/* MP_ADAPTER flags */
#define fMP_ADAPTER_RECV_LOOKASIDE      0x00000004
#define fMP_ADAPTER_INTERRUPT_IN_USE    0x00000008
#define fMP_ADAPTER_SECONDARY           0x00000010

/* MP_SHARED flags */
#define fMP_ADAPTER_SHUTDOWN            0x00100000
#define fMP_ADAPTER_LOWER_POWER         0x00200000

#define fMP_ADAPTER_NON_RECOVER_ERROR   0x00800000
#define fMP_ADAPTER_RESET_IN_PROGRESS   0x01000000
#define fMP_ADAPTER_NO_CABLE            0x02000000
#define fMP_ADAPTER_HARDWARE_ERROR      0x04000000
#define fMP_ADAPTER_REMOVE_IN_PROGRESS  0x08000000
#define fMP_ADAPTER_HALT_IN_PROGRESS    0x10000000

#define fMP_ADAPTER_FAIL_SEND_MASK      0x3ff00000
#define fMP_ADAPTER_NOT_READY_MASK      0x3ff00000

/* Some offsets in PCI config space that are actually used. */
#define ET1310_PCI_MAX_PYLD             0x4C
#define ET1310_PCI_MAC_ADDRESS          0xA4
#define ET1310_PCI_EEPROM_STATUS        0xB2
#define ET1310_PCI_ACK_NACK             0xC0
#define ET1310_PCI_REPLAY               0xC2
#define ET1310_PCI_L0L1LATENCY          0xCF

/* PCI Product IDs */
#define ET131X_PCI_DEVICE_ID_GIG        0xED00  /* ET1310 1000 Base-T 8 */
#define ET131X_PCI_DEVICE_ID_FAST       0xED01  /* ET1310 100  Base-T */

/* Define order of magnitude converter */
#define NANO_IN_A_MICRO 1000

#define PARM_RX_NUM_BUFS_DEF    4
#define PARM_RX_TIME_INT_DEF    10
#define PARM_RX_MEM_END_DEF     0x2bc
#define PARM_TX_TIME_INT_DEF    40
#define PARM_TX_NUM_BUFS_DEF    4
#define PARM_DMA_CACHE_DEF      0

/* RX defines */
#define USE_FBR0 1

#define FBR_CHUNKS 32

#define MAX_DESC_PER_RING_RX         1024

/* number of RFDs - default and min */
#ifdef USE_FBR0
#define RFD_LOW_WATER_MARK      40
#define NIC_DEFAULT_NUM_RFD     1024
#define NUM_FBRS                2
#else
#define RFD_LOW_WATER_MARK      20
#define NIC_DEFAULT_NUM_RFD     256
#define NUM_FBRS                1
#endif

#define NIC_MIN_NUM_RFD         64

#define NUM_PACKETS_HANDLED     256

#define ALCATEL_BAD_STATUS      0xe47f0000
#define ALCATEL_MULTICAST_PKT   0x01000000
#define ALCATEL_BROADCAST_PKT   0x02000000

/* typedefs for Free Buffer Descriptors */
struct fbr_desc {
        u32 addr_lo;
        u32 addr_hi;
        u32 word2;              /* Bits 10-31 reserved, 0-9 descriptor */
};

/* Packet Status Ring Descriptors
 *
 * Word 0:
 *
 * top 16 bits are from the Alcatel Status Word as enumerated in
 * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
 *
 * 0: hp                        hash pass
 * 1: ipa                       IP checksum assist
 * 2: ipp                       IP checksum pass
 * 3: tcpa                      TCP checksum assist
 * 4: tcpp                      TCP checksum pass
 * 5: wol                       WOL Event
 * 6: rxmac_error               RXMAC Error Indicator
 * 7: drop                      Drop packet
 * 8: ft                        Frame Truncated
 * 9: jp                        Jumbo Packet
 * 10: vp                       VLAN Packet
 * 11-15: unused
 * 16: asw_prev_pkt_dropped     e.g. IFG too small on previous
 * 17: asw_RX_DV_event          short receive event detected
 * 18: asw_false_carrier_event  bad carrier since last good packet
 * 19: asw_code_err             one or more nibbles signalled as errors
 * 20: asw_CRC_err              CRC error
 * 21: asw_len_chk_err          frame length field incorrect
 * 22: asw_too_long             frame length > 1518 bytes
 * 23: asw_OK                   valid CRC + no code error
 * 24: asw_multicast            has a multicast address
 * 25: asw_broadcast            has a broadcast address
 * 26: asw_dribble_nibble       spurious bits after EOP
 * 27: asw_control_frame        is a control frame
 * 28: asw_pause_frame          is a pause frame
 * 29: asw_unsupported_op       unsupported OP code
 * 30: asw_VLAN_tag             VLAN tag detected
 * 31: asw_long_evt             Rx long event
 *
 * Word 1:
 * 0-15: length                 length in bytes
 * 16-25: bi                    Buffer Index
 * 26-27: ri                    Ring Index
 * 28-31: reserved
 */

struct pkt_stat_desc {
        u32 word0;
        u32 word1;
};
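
/*
 * Given the word 1 layout above, a receive handler would typically unpack
 * the descriptor like this (illustrative sketch only, not code taken from
 * this driver):
 *
 *      len    = psr->word1 & 0xFFFF;           bits 0-15: frame length
 *      bindex = (psr->word1 >> 16) & 0x3FF;    bits 16-25: buffer index
 *      rindex = (psr->word1 >> 26) & 0x3;      bits 26-27: ring index
 */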

/* Typedefs for the RX DMA status word */

/*
 * rx status word 0 holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310.  Word 0 is a 32 bit word
 * which contains the Free Buffer ring 0 and 1 available offset.
 *
 * bit 0-9 FBR1 offset
 * bit 10 Wrap flag for FBR1
 * bit 16-25 FBR0 offset
 * bit 26 Wrap flag for FBR0
 */

/*
 * RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310.  Word 3 is a 32 bit word
 * which contains the Packet Status Ring available offset.
 *
 * bit 0-15 reserved
 * bit 16-27 PSRoffset
 * bit 28 PSRwrap
 * bit 29-31 unused
 */

/*
 * struct rx_status_block is a structure representing the status of the Rx
 * DMA engine; it sits in free memory, and is pointed to by 0x101c / 0x1020
 */
struct rx_status_block {
        u32 word0;
        u32 word1;
};

/*
 * Structure for look-up table holding free buffer ring pointers, addresses
 * and state.
 */
struct fbr_lookup {
        void            *virt[MAX_DESC_PER_RING_RX];
        void            *buffer1[MAX_DESC_PER_RING_RX];
        void            *buffer2[MAX_DESC_PER_RING_RX];
        u32              bus_high[MAX_DESC_PER_RING_RX];
        u32              bus_low[MAX_DESC_PER_RING_RX];
        void            *ring_virtaddr;
        dma_addr_t       ring_physaddr;
        void            *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
        dma_addr_t       mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
        uint64_t         real_physaddr;
        uint64_t         offset;
        u32              local_full;
        u32              num_entries;
        u32              buffsize;
};

/*
 * struct rx_ring is the structure representing the adaptor's local
 * reference(s) to the rings
 *
 ******************************************************************************
 * IMPORTANT NOTE :- fbr_lookup *fbr[NUM_FBRS] uses index 0 to refer to FBR1
 *                      and index 1 to refer to FBR0
 ******************************************************************************
 */
struct rx_ring {
        struct fbr_lookup *fbr[NUM_FBRS];
        void *ps_ring_virtaddr;
        dma_addr_t ps_ring_physaddr;
        u32 local_psr_full;
        u32 psr_num_entries;

        struct rx_status_block *rx_status_block;
        dma_addr_t rx_status_bus;

        struct list_head recv_buff_pool;

        /* RECV */
        struct list_head recv_list;
        u32 num_ready_recv;

        u32 num_rfd;

        bool unfinished_receives;

        struct list_head recv_packet_pool;

        /* lookaside lists */
        struct kmem_cache *recv_lookaside;
};

/* TX defines */
/*
 * word 2 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0-15: length of packet
 * 16-27: VLAN tag
 * 28: VLAN CFI
 * 29-31: VLAN priority
 *
 * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0: last packet in the sequence
 * 1: first packet in the sequence
 * 2: interrupt the processor when this pkt sent
 * 3: Control word - no packet data
 * 4: Issue half-duplex backpressure : XON/XOFF
 * 5: send pause frame
 * 6: Tx frame has error
 * 7: append CRC
 * 8: MAC override
 * 9: pad packet
 * 10: Packet is a Huge packet
 * 11: append VLAN tag
 * 12: IP checksum assist
 * 13: TCP checksum assist
 * 14: UDP checksum assist
 */

/* struct tx_desc represents each descriptor on the ring */
struct tx_desc {
        u32 addr_hi;
        u32 addr_lo;
        u32 len_vlan;   /* control words for how to xmit the */
        u32 flags;      /* data (detailed above) */
};

/*
 * The status of the Tx DMA engine; it sits in free memory, and is pointed to
 * by 0x101c / 0x1020. This is a DMA10 type
 */

/* TCB (Transmit Control Block: Host Side) */
struct tcb {
        struct tcb *next;       /* Next entry in ring */
        u32 flags;              /* Our flags for the packet */
        u32 count;              /* Used to spot stuck/lost packets */
        u32 stale;              /* Used to spot stuck/lost packets */
        struct sk_buff *skb;    /* Network skb we are tied to */
        u32 index;              /* Ring indexes */
        u32 index_start;
};

/* Structure representing our local reference(s) to the ring */
struct tx_ring {
        /* TCB (Transmit Control Block) memory and lists */
        struct tcb *tcb_ring;

        /* List of TCBs that are ready to be used */
        struct tcb *tcb_qhead;
        struct tcb *tcb_qtail;

        /* list of TCBs that are currently being sent.  NOTE that access to
         * all three of these (including used) is controlled via the
         * TCBSendQLock.  This lock should be secured prior to incrementing /
         * decrementing used, or any queue manipulation on send_head /
         * tail
         */
        struct tcb *send_head;
        struct tcb *send_tail;
        int used;

        /* The actual descriptor ring */
        struct tx_desc *tx_desc_ring;
        dma_addr_t tx_desc_ring_pa;

        /* send_idx indicates where we last wrote to in the descriptor ring. */
        u32 send_idx;

        /* The location of the write-back status block */
        u32 *tx_status;
        dma_addr_t tx_status_pa;

        /* Packets since the last IRQ: used for interrupt coalescing */
        int since_irq;
};

/* ADAPTER defines */
/*
 * Do not change these values: if changed, then change also in respective
 * TXdma and Rxdma engines
 */
#define NUM_DESC_PER_RING_TX         512    /* TX Do not change these values */
#define NUM_TCB                      64

/*
 * These values are all superseded by registry entries to facilitate tuning.
 * Once the desired performance has been achieved, the optimal registry values
 * should be re-populated to these #defines:
 */
#define NUM_TRAFFIC_CLASSES          1

#define TX_ERROR_PERIOD             1000

#define LO_MARK_PERCENT_FOR_PSR     15
#define LO_MARK_PERCENT_FOR_RX      15

/* RFD (Receive Frame Descriptor) */
struct rfd {
        struct list_head list_node;
        struct sk_buff *skb;
        u32 len;        /* total size of receive frame */
        u16 bufferindex;
        u8 ringindex;
};

/* Flow Control */
#define FLOW_BOTH       0
#define FLOW_TXONLY     1
#define FLOW_RXONLY     2
#define FLOW_NONE       3

/* Struct to define some device statistics */
struct ce_stats {
        /* MIB II variables
         *
         * NOTE: atomic_t types are only guaranteed to store 24-bits; if we
         * MUST have 32, then we'll need another way to perform atomic
         * operations
         */
        u32             unicast_pkts_rcvd;
        atomic_t        unicast_pkts_xmtd;
        u32             multicast_pkts_rcvd;
        atomic_t        multicast_pkts_xmtd;
        u32             broadcast_pkts_rcvd;
        atomic_t        broadcast_pkts_xmtd;
        u32             rcvd_pkts_dropped;

        /* Tx Statistics. */
        u32             tx_underflows;

        u32             tx_collisions;
        u32             tx_excessive_collisions;
        u32             tx_first_collisions;
        u32             tx_late_collisions;
        u32             tx_max_pkt_errs;
        u32             tx_deferred;

        /* Rx Statistics. */
        u32             rx_overflows;

        u32             rx_length_errs;
        u32             rx_align_errs;
        u32             rx_crc_errs;
        u32             rx_code_violations;
        u32             rx_other_errs;

        u32             synchronous_iterations;
        u32             interrupt_status;
};

/* The private adapter structure */
struct et131x_adapter {
        struct net_device *netdev;
        struct pci_dev *pdev;
        struct mii_bus *mii_bus;
        struct phy_device *phydev;
        struct work_struct task;

        /* Flags that indicate current state of the adapter */
        u32 flags;

        /* local link state, to determine if a state change has occurred */
        int link;

        /* Configuration */
        u8 rom_addr[ETH_ALEN];
        u8 addr[ETH_ALEN];
        bool has_eeprom;
        u8 eeprom_data[2];

        /* Spinlocks */
        spinlock_t lock;

        spinlock_t tcb_send_qlock;
        spinlock_t tcb_ready_qlock;
        spinlock_t send_hw_lock;

        spinlock_t rcv_lock;
        spinlock_t rcv_pend_lock;
        spinlock_t fbr_lock;

        spinlock_t phy_lock;

        /* Packet Filter and look ahead size */
        u32 packet_filter;

        /* multicast list */
        u32 multicast_addr_count;
        u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];

        /* Pointer to the device's PCI register space */
        struct address_map __iomem *regs;

        /* Registry parameters */
        u8 wanted_flow;         /* Flow we want for 802.3x flow control */
        u32 registry_jumbo_packet;      /* Max supported ethernet packet size */

        /* Derived from the registry: */
        u8 flowcontrol;         /* flow control validated by the far-end */

        /* Minimize init-time */
        struct timer_list error_timer;

        /* Flag that puts the PHY into coma mode when booting up with no
         * cable plugged in after 5 seconds
         */
        u8 boot_coma;

        /* Next two used to save power information at power down. This
         * information will be used during power up to set up parts of Power
         * Management in JAGCore
         */
        u16 pdown_speed;
        u8 pdown_duplex;

        /* Tx Memory Variables */
        struct tx_ring tx_ring;

        /* Rx Memory Variables */
        struct rx_ring rx_ring;

        /* Stats */
        struct ce_stats stats;

        struct net_device_stats net_stats;
};

void et131x_error_timer_handler(unsigned long data);
void et131x_enable_interrupts(struct et131x_adapter *adapter);
void et131x_disable_interrupts(struct et131x_adapter *adapter);
void et131x_align_allocated_memory(struct et131x_adapter *adapter,
                                   u64 *phys_addr,
                                   u64 *offset, u64 mask);
void et131x_adapter_setup(struct et131x_adapter *adapter);
void et131x_soft_reset(struct et131x_adapter *adapter);
void et131x_isr_handler(struct work_struct *work);
void et1310_setup_device_for_multicast(struct et131x_adapter *adapter);
void et1310_setup_device_for_unicast(struct et131x_adapter *adapter);
void et131x_up(struct net_device *netdev);
void et131x_down(struct net_device *netdev);
struct net_device *et131x_device_alloc(void);
void et131x_enable_txrx(struct net_device *netdev);
void et131x_disable_txrx(struct net_device *netdev);
int et1310_in_phy_coma(struct et131x_adapter *adapter);
void et1310_phy_access_mii_bit(struct et131x_adapter *adapter,
                               u16 action,
                               u16 regnum, u16 bitnum, u8 *value);
int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
              u8 reg, u16 *value);
int32_t et131x_mii_write(struct et131x_adapter *adapter,
                u8 reg, u16 value);
void et131x_rx_dma_memory_free(struct et131x_adapter *adapter);
void et131x_rx_dma_disable(struct et131x_adapter *adapter);
void et131x_rx_dma_enable(struct et131x_adapter *adapter);
void et131x_reset_recv(struct et131x_adapter *adapter);
void et131x_init_send(struct et131x_adapter *adapter);
void et131x_tx_dma_enable(struct et131x_adapter *adapter);

/* EEPROM functions */

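/**
 * eeprom_wait_ready - wait for the LBCIF to become ready for I2C access
 * @pdev: our PCI device
 * @status: if non-NULL, the last LBCIF status dword read is stored here
 *
 * Returns the low status byte (reg & 0xFF) once the I2C-idle and PHY-queue-
 * available bits are both set, -EIO on a config space read failure, or
 * -ETIMEDOUT if the bits never assert within MAX_NUM_REGISTER_POLLS reads.
 */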
static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
{
        u32 reg;
        int i;

        /*
         * 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
         *    bits 7,1:0 both equal to 1, at least once after reset.
         *    Subsequent operations need only to check that bits 1:0 are equal
         *    to 1 prior to starting a single byte read/write
         */

        for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
                /* Read registers grouped in DWORD1 */
                if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
                        return -EIO;

                /* I2C idle and Phy Queue Avail both true */
                if ((reg & 0x3000) == 0x3000) {
                        if (status)
                                *status = reg;
                        return reg & 0xFF;
                }
        }
        return -ETIMEDOUT;
}


/**
 * eeprom_write - Write a byte to the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address to write
 * @data: the value to write
 *
 * Returns 0 for a successful write, -EIO otherwise.
 */
static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
{
        struct pci_dev *pdev = adapter->pdev;
        int index = 0;
        int retries;
        int err = 0;
        int i2c_wack = 0;
        int writeok = 0;
        u32 status;
        u32 val = 0;

        /*
         * For an EEPROM, an I2C single byte write is defined as a START
         * condition followed by the device address, EEPROM address, one byte
         * of data and a STOP condition.  The STOP condition will trigger the
         * EEPROM's internally timed write cycle to the nonvolatile memory.
         * All inputs are disabled during this write cycle and the EEPROM will
         * not respond to any access until the internal write is complete.
         */

        err = eeprom_wait_ready(pdev, NULL);
        if (err)
                return err;

        /*
         * 2. Write to the LBCIF Control Register:  bit 7=1, bit 6=1, bit 3=0,
         *    and bits 1:0 both =0.  Bit 5 should be set according to the
         *    type of EEPROM being accessed (1=two byte addressing, 0=one
         *    byte addressing).
         */
        if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
                        LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE))
                return -EIO;

        i2c_wack = 1;

        /* Prepare EEPROM address for Step 3 */

        for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
                /* Write the address to the LBCIF Address Register */
                if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
                        break;
                /*
                 * Write the data to the LBCIF Data Register (the I2C write
                 * will begin).
                 */
                if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
                        break;
                /*
                 * Monitor bit 1:0 of the LBCIF Status Register.  When bits
                 * 1:0 are both equal to 1, the I2C write has completed and the
                 * internal write cycle of the EEPROM is about to start.
                 * (bits 1:0 = 01 is a legal state while waiting for both to
                 * equal 1, but bits 1:0 = 10 is invalid and implies that
                 * something is broken).
                 */
                err = eeprom_wait_ready(pdev, &status);
                if (err < 0)
                        return 0;

                /*
                 * Check bit 3 of the LBCIF Status Register.  If equal to 1,
                 * an error has occurred.  Don't break here if we are revision
                 * 1: this is so we do a blind write for the load bug.
                 */
                if ((status & LBCIF_STATUS_GENERAL_ERROR)
                        && adapter->pdev->revision == 0)
                        break;

                /*
                 * Check bit 2 of the LBCIF Status Register.  If equal to 1 an
                 * ACK error has occurred on the address phase of the write.
                 */
                if (status & LBCIF_STATUS_ACK_ERROR) {
                        /*
                         * This could be due to an actual hardware failure
                         * or the EEPROM may still be in its internal write
                         * cycle from a previous write. This write operation
                         * was ignored and must be repeated later.
                         */
                        udelay(10);
                        continue;
                }

                writeok = 1;
                break;
        }

        /*
         * Set bit 6 of the LBCIF Control Register = 0.
         */
        udelay(10);

        while (i2c_wack) {
                if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
                        LBCIF_CONTROL_LBCIF_ENABLE))
                        writeok = 0;

                /* Do read until internal ACK_ERROR goes away meaning write
                 * completed
                 */
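                /* The magic masks below appear to test the LBCIF status byte
                 * held in bits 23:16 of the dword read: 0x00010000 looks like
                 * the ready bit and 0x00040000 the ACK-error bit (an
                 * assumption inferred from the polling logic, not taken from
                 * a datasheet).
                 */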
                do {
                        pci_write_config_dword(pdev,
                                               LBCIF_ADDRESS_REGISTER,
                                               addr);
                        do {
                                pci_read_config_dword(pdev,
                                        LBCIF_DATA_REGISTER, &val);
                        } while ((val & 0x00010000) == 0);
                } while (val & 0x00040000);

                if ((val & 0xFF00) != 0xC000 || index == 10000)
                        break;
                index++;
        }
        return writeok ? 0 : -EIO;
}

/**
 * eeprom_read - Read a byte from the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address from which to read
 * @pdata: a pointer to a byte in which to store the value of the read
 *
 * Returns 0 for a successful read, an error code otherwise.
 */
static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
{
        struct pci_dev *pdev = adapter->pdev;
        int err;
        u32 status;

        /*
         * A single byte read is similar to the single byte write, with the
         * exception of the data flow:
         */

        err = eeprom_wait_ready(pdev, NULL);
        if (err)
                return err;
        /*
         * Write to the LBCIF Control Register:  bit 7=1, bit 6=0, bit 3=0,
         * and bits 1:0 both =0.  Bit 5 should be set according to the type
         * of EEPROM being accessed (1=two byte addressing, 0=one byte
         * addressing).
         */
        if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
                                  LBCIF_CONTROL_LBCIF_ENABLE))
                return -EIO;
        /*
         * Write the address to the LBCIF Address Register (I2C read will
         * begin).
         */
        if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
                return -EIO;
        /*
         * Monitor bit 0 of the LBCIF Status Register.  When = 1, I2C read
         * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure
         * has occurred).
         */
        err = eeprom_wait_ready(pdev, &status);
        if (err < 0)
                return err;
        /*
         * Regardless of error status, read data byte from LBCIF Data
         * Register.
         */
        *pdata = err;
        /*
         * Check bit 2 of the LBCIF Status Register.  If = 1,
         * then an error has occurred.
         */
        return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
}

int et131x_init_eeprom(struct et131x_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        u8 eestatus;

        /* We first need to check the EEPROM Status code located at offset
         * 0xB2 of config space
         */
        pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS,
                                      &eestatus);

        /* THIS IS A WORKAROUND:
         * I need to call this function twice to get my card in a
         * LG M1 Express Dual running. I also tried a msleep before this
         * function, because I thought there could be some timing conditions,
         * but it didn't work. Calling the whole function twice also works.
         */
        if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
                dev_err(&pdev->dev,
                       "Could not read PCI config space for EEPROM Status\n");
                return -EIO;
        }

        /* Determine if the error(s) we care about are present. If they are
         * present we need to fail.
         */
        if (eestatus & 0x4C) {
                int write_failed = 0;
                if (pdev->revision == 0x01) {
                        int     i;
                        static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };

                        /* Re-write the first 4 bytes if we have an eeprom
                         * present and the revision id is 1, this fixes the
                         * corruption seen with 1310 B Silicon
                         */
                        for (i = 0; i < 3; i++)
                                if (eeprom_write(adapter, i, eedata[i]) < 0)
                                        write_failed = 1;
                }
                if (pdev->revision != 0x01 || write_failed) {
                        dev_err(&pdev->dev,
                            "Fatal EEPROM Status Error - 0x%04x\n", eestatus);

                        /* This error could mean that there was an error
                         * reading the eeprom or that the eeprom doesn't exist.
                         * We will treat each case the same and not try to
                         * gather additional information that normally would
                         * come from the eeprom, like MAC Address
                         */
                        adapter->has_eeprom = 0;
                        return -EIO;
                }
        }
        adapter->has_eeprom = 1;

        /* Read the EEPROM for information regarding LED behavior. Refer to
         * ET1310_phy.c, et131x_xcvr_init(), for its use.
         */
        eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
        eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);

        if (adapter->eeprom_data[0] != 0xcd)
                /* Disable all optional features */
                adapter->eeprom_data[1] = 0x00;

        return 0;
}

/* MAC functions */

/**
 * et1310_config_mac_regs1 - Initialize the first part of MAC regs
 * @adapter: pointer to our adapter structure
 */
void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
        struct mac_regs __iomem *macregs = &adapter->regs->mac;
        u32 station1;
        u32 station2;
        u32 ipg;

        /* First we need to reset everything.  Write to MAC configuration
         * register 1 to perform reset.
         */
        writel(0xC00F0000, &macregs->cfg1);

        /* Next let's configure the MAC Inter-packet gap register */
        ipg = 0x38005860;               /* IPG1 0x38 IPG2 0x58 B2B 0x60 */
        ipg |= 0x50 << 8;               /* ifg enforce 0x50 */
        writel(ipg, &macregs->ipg);

        /* Next let's configure the MAC Half Duplex register */
        /* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */
        writel(0x00A1F037, &macregs->hfdp);

        /* Next let's configure the MAC Interface Control register */
        writel(0, &macregs->if_ctrl);

        /* Let's move on to setting up the mii management configuration */
        writel(0x07, &macregs->mii_mgmt_cfg);   /* Clock reset 0x7 */

        /* Next let's configure the MAC Station Address register.  These
         * values are read from the EEPROM during initialization and stored
         * in the adapter structure.  We write what is stored in the adapter
         * structure to the MAC Station Address registers high and low.  This
         * station address is used for generating and checking pause control
         * packets.
         */
        station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
                   (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
        station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
                   (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
                   (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
                    adapter->addr[2];
        writel(station1, &macregs->station_addr_1);
        writel(station2, &macregs->station_addr_2);

        /* Max ethernet packet in bytes that will be passed by the MAC without
         * being truncated.  Allow the MAC to pass 4 more than our max packet
         * size.  This is 4 for the Ethernet CRC.
         *
         * Packets larger than (registry_jumbo_packet) that do not contain a
         * VLAN ID will be dropped by the Rx function.
         */
        writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);

        /* clear out MAC config reset */
        writel(0, &macregs->cfg1);
}

/**
 * et1310_config_mac_regs2 - Initialize the second part of MAC regs
 * @adapter: pointer to our adapter structure
 */
void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
        int32_t delay = 0;
        struct mac_regs __iomem *mac = &adapter->regs->mac;
        struct phy_device *phydev = adapter->phydev;
        u32 cfg1;
        u32 cfg2;
        u32 ifctrl;
        u32 ctl;

        ctl = readl(&adapter->regs->txmac.ctl);
        cfg1 = readl(&mac->cfg1);
        cfg2 = readl(&mac->cfg2);
        ifctrl = readl(&mac->if_ctrl);

        /* Set up the if mode bits */
        cfg2 &= ~0x300;
        if (phydev && phydev->speed == SPEED_1000) {
                cfg2 |= 0x200;
                /* Phy mode bit */
                ifctrl &= ~(1 << 24);
        } else {
                cfg2 |= 0x100;
                ifctrl |= (1 << 24);
        }

        /* We need to enable Rx/Tx */
        cfg1 |= CFG1_RX_ENABLE | CFG1_TX_ENABLE | CFG1_TX_FLOW;
        /* Initialize loop back to off */
        cfg1 &= ~(CFG1_LOOPBACK | CFG1_RX_FLOW);
        if (adapter->flowcontrol == FLOW_RXONLY ||
                                adapter->flowcontrol == FLOW_BOTH)
                cfg1 |= CFG1_RX_FLOW;
        writel(cfg1, &mac->cfg1);

        /* Now we need to initialize the MAC Configuration 2 register */
        /* preamble 7, check length, huge frame off, pad crc, crc enable
           full duplex off */
        cfg2 |= 0x7016;
        cfg2 &= ~0x0021;

        /* Turn on duplex if needed */
        if (phydev && phydev->duplex == DUPLEX_FULL)
                cfg2 |= 0x01;

        ifctrl &= ~(1 << 26);
        if (phydev && phydev->duplex == DUPLEX_HALF)
                ifctrl |= (1 << 26);    /* Enable ghd */

        writel(ifctrl, &mac->if_ctrl);
        writel(cfg2, &mac->cfg2);

        do {
                udelay(10);
                delay++;
                cfg1 = readl(&mac->cfg1);
        } while ((cfg1 & CFG1_WAIT) != CFG1_WAIT && delay < 100);

        if (delay == 100) {
                dev_warn(&adapter->pdev->dev,
                    "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
                        cfg1);
        }

        /* Enable txmac */
        ctl |= 0x09;    /* TX mac enable, FC disable */
        writel(ctl, &adapter->regs->txmac.ctl);

        /* Ready to start the RXDMA/TXDMA engine */
        if (adapter->flags & fMP_ADAPTER_LOWER_POWER) {
                et131x_rx_dma_enable(adapter);
                et131x_tx_dma_enable(adapter);
        }
}

void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
        struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
        struct phy_device *phydev = adapter->phydev;
        u32 sa_lo;
        u32 sa_hi = 0;
        u32 pf_ctrl = 0;

        /* Disable the MAC while it is being configured (also disable WOL) */
        writel(0x8, &rxmac->ctrl);

        /* Initialize WOL to disabled. */
        writel(0, &rxmac->crc0);
        writel(0, &rxmac->crc12);
        writel(0, &rxmac->crc34);

        /* We need to set the WOL mask0 - mask4 next.  We initialize it to
         * its default value of 0x00000000 because there are no WOL masks
         * as of this time.
         */
        writel(0, &rxmac->mask0_word0);
        writel(0, &rxmac->mask0_word1);
        writel(0, &rxmac->mask0_word2);
        writel(0, &rxmac->mask0_word3);

        writel(0, &rxmac->mask1_word0);
        writel(0, &rxmac->mask1_word1);
        writel(0, &rxmac->mask1_word2);
        writel(0, &rxmac->mask1_word3);

        writel(0, &rxmac->mask2_word0);
        writel(0, &rxmac->mask2_word1);
        writel(0, &rxmac->mask2_word2);
        writel(0, &rxmac->mask2_word3);

        writel(0, &rxmac->mask3_word0);
        writel(0, &rxmac->mask3_word1);
        writel(0, &rxmac->mask3_word2);
        writel(0, &rxmac->mask3_word3);

        writel(0, &rxmac->mask4_word0);
        writel(0, &rxmac->mask4_word1);
        writel(0, &rxmac->mask4_word2);
        writel(0, &rxmac->mask4_word3);

        /* Let's set up the WOL Source Address */
        sa_lo = (adapter->addr[2] << ET_WOL_LO_SA3_SHIFT) |
                (adapter->addr[3] << ET_WOL_LO_SA4_SHIFT) |
                (adapter->addr[4] << ET_WOL_LO_SA5_SHIFT) |
                 adapter->addr[5];
        writel(sa_lo, &rxmac->sa_lo);

        sa_hi = (u32) (adapter->addr[0] << ET_WOL_HI_SA1_SHIFT) |
                       adapter->addr[1];
        writel(sa_hi, &rxmac->sa_hi);

        /* Disable all Packet Filtering */
        writel(0, &rxmac->pf_ctrl);

        /* Let's initialize the Unicast Packet filtering address */
        if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
                et1310_setup_device_for_unicast(adapter);
                pf_ctrl |= 4;   /* Unicast filter */
        } else {
                writel(0, &rxmac->uni_pf_addr1);
                writel(0, &rxmac->uni_pf_addr2);
                writel(0, &rxmac->uni_pf_addr3);
        }

        /* Let's initialize the Multicast hash */
        if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
                pf_ctrl |= 2;   /* Multicast filter */
                et1310_setup_device_for_multicast(adapter);
        }

        /* Runt packet filtering.  Didn't work in version A silicon. */
        pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16;
        pf_ctrl |= 8;   /* Fragment filter */

        if (adapter->registry_jumbo_packet > 8192)
                /* In order to transmit jumbo packets greater than 8k, the
                 * FIFO between RxMAC and RxDMA needs to be reduced in size
                 * to (16k - Jumbo packet size).  In order to implement this,
                 * we must use "cut through" mode in the RxMAC, which chops
                 * packets down into segments which are (max_size * 16).  In
                 * this case we selected 256 bytes, since this is the size of
                 * the PCI-Express TLP's that the 1310 uses.
                 *
                 * seg_en on, fc_en off, size 0x10
                 */
                writel(0x41, &rxmac->mcif_ctrl_max_seg);
        else
                writel(0, &rxmac->mcif_ctrl_max_seg);

        /* Initialize the MCIF water marks */
        writel(0, &rxmac->mcif_water_mark);

        /* Initialize the MIF control */
        writel(0, &rxmac->mif_ctrl);

        /* Initialize the Space Available Register */
        writel(0, &rxmac->space_avail);

        /* Initialize the mif_ctrl register
         * bit 3:  Receive code error. One or more nibbles were signaled as
         *         errors during the reception of the packet.  Clear this
         *         bit in Gigabit, set it in 100Mbit.  This was derived
         *         experimentally at UNH.
         * bit 4:  Receive CRC error. The packet's CRC did not match the
         *         internally generated CRC.
         * bit 5:  Receive length check error. Indicates that frame length
         *         field value in the packet does not match the actual data
         *         byte length and is not a type field.
         * bit 16: Receive frame truncated.
         * bit 17: Drop packet enable
         */
        if (phydev && phydev->speed == SPEED_100)
                writel(0x30038, &rxmac->mif_ctrl);
        else
                writel(0x30030, &rxmac->mif_ctrl);

        /* Finally we initialize RxMac to be enabled & WOL disabled.  Packet
         * filter is always enabled since it is where the runt packets are
         * supposed to be dropped.  For version A silicon, runt packet
         * dropping doesn't work, so it is disabled in the pf_ctrl register,
         * but we still leave the packet filter on.
         */
        writel(pf_ctrl, &rxmac->pf_ctrl);
        writel(0x9, &rxmac->ctrl);
}

void et1310_config_txmac_regs(struct et131x_adapter *adapter)
{
        struct txmac_regs __iomem *txmac = &adapter->regs->txmac;

        /* We need to update the Control Frame Parameters
         * cfpt - control frame pause timer set to 64 (0x40)
         * cfep - control frame extended pause timer set to 0x0
         */
        if (adapter->flowcontrol == FLOW_NONE)
                writel(0, &txmac->cf_param);
        else
                writel(0x40, &txmac->cf_param);
}

void et1310_config_macstat_regs(struct et131x_adapter *adapter)
{
        struct macstat_regs __iomem *macstat =
                &adapter->regs->macstat;

        /* Next we need to initialize all the macstat registers to zero on
         * the device.
         */
        writel(0, &macstat->txrx_0_64_byte_frames);
        writel(0, &macstat->txrx_65_127_byte_frames);
        writel(0, &macstat->txrx_128_255_byte_frames);
        writel(0, &macstat->txrx_256_511_byte_frames);
        writel(0, &macstat->txrx_512_1023_byte_frames);
        writel(0, &macstat->txrx_1024_1518_byte_frames);
        writel(0, &macstat->txrx_1519_1522_gvln_frames);

        writel(0, &macstat->rx_bytes);
        writel(0, &macstat->rx_packets);
        writel(0, &macstat->rx_fcs_errs);
        writel(0, &macstat->rx_multicast_packets);
        writel(0, &macstat->rx_broadcast_packets);
        writel(0, &macstat->rx_control_frames);
        writel(0, &macstat->rx_pause_frames);
        writel(0, &macstat->rx_unknown_opcodes);
        writel(0, &macstat->rx_align_errs);
        writel(0, &macstat->rx_frame_len_errs);
        writel(0, &macstat->rx_code_errs);
        writel(0, &macstat->rx_carrier_sense_errs);
        writel(0, &macstat->rx_undersize_packets);
        writel(0, &macstat->rx_oversize_packets);
        writel(0, &macstat->rx_fragment_packets);
        writel(0, &macstat->rx_jabbers);
        writel(0, &macstat->rx_drops);

        writel(0, &macstat->tx_bytes);
        writel(0, &macstat->tx_packets);
        writel(0, &macstat->tx_multicast_packets);
        writel(0, &macstat->tx_broadcast_packets);
        writel(0, &macstat->tx_pause_frames);
        writel(0, &macstat->tx_deferred);
        writel(0, &macstat->tx_excessive_deferred);
        writel(0, &macstat->tx_single_collisions);
        writel(0, &macstat->tx_multiple_collisions);
        writel(0, &macstat->tx_late_collisions);
        writel(0, &macstat->tx_excessive_collisions);
        writel(0, &macstat->tx_total_collisions);
        writel(0, &macstat->tx_pause_honored_frames);
        writel(0, &macstat->tx_drops);
        writel(0, &macstat->tx_jabbers);
        writel(0, &macstat->tx_fcs_errs);
        writel(0, &macstat->tx_control_frames);
        writel(0, &macstat->tx_oversize_frames);
        writel(0, &macstat->tx_undersize_frames);
        writel(0, &macstat->tx_fragments);
        writel(0, &macstat->carry_reg1);
        writel(0, &macstat->carry_reg2);

        /* Unmask any counters that we want to track the overflow of.
         * Initially this will be all counters.  It may become clear later
         * that we do not need to track all counters.
         */
        writel(0xFFFFBE32, &macstat->carry_reg1_mask);
        writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
}

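/*
 * Summary of how the flow-control mode is resolved below from the link
 * partner's PAUSE (register 5, bit 10) and ASM_DIR (register 5, bit 11)
 * advertisement bits:
 *
 *      pause  asym     resulting flowcontrol
 *        1     1       wanted_flow
 *        1     0       FLOW_BOTH if wanted, else FLOW_NONE
 *        0     0       FLOW_NONE
 *        0     1       FLOW_RXONLY if FLOW_BOTH wanted, else FLOW_NONE
 */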
void et1310_config_flow_control(struct et131x_adapter *adapter)
{
        struct phy_device *phydev = adapter->phydev;

        if (phydev->duplex == DUPLEX_HALF) {
                adapter->flowcontrol = FLOW_NONE;
        } else {
                char remote_pause, remote_async_pause;

                et1310_phy_access_mii_bit(adapter,
                                TRUEPHY_BIT_READ, 5, 10, &remote_pause);
                et1310_phy_access_mii_bit(adapter,
                                TRUEPHY_BIT_READ, 5, 11,
                                &remote_async_pause);

                if ((remote_pause == TRUEPHY_BIT_SET) &&
                    (remote_async_pause == TRUEPHY_BIT_SET)) {
                        adapter->flowcontrol = adapter->wanted_flow;
                } else if ((remote_pause == TRUEPHY_BIT_SET) &&
                           (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
                        if (adapter->wanted_flow == FLOW_BOTH)
                                adapter->flowcontrol = FLOW_BOTH;
                        else
                                adapter->flowcontrol = FLOW_NONE;
                } else if ((remote_pause == TRUEPHY_BIT_CLEAR) &&
                           (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
                        adapter->flowcontrol = FLOW_NONE;
                } else {/* if (remote_pause == TRUEPHY_BIT_CLEAR &&
                               remote_async_pause == TRUEPHY_BIT_SET) */
                        if (adapter->wanted_flow == FLOW_BOTH)
                                adapter->flowcontrol = FLOW_RXONLY;
                        else
                                adapter->flowcontrol = FLOW_NONE;
                }
        }
}

/**
 * et1310_update_macstat_host_counters - Update the local copy of the statistics
 * @adapter: pointer to the adapter structure
 */
void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
{
        struct ce_stats *stats = &adapter->stats;
        struct macstat_regs __iomem *macstat =
                &adapter->regs->macstat;

        stats->tx_collisions           += readl(&macstat->tx_total_collisions);
        stats->tx_first_collisions     += readl(&macstat->tx_single_collisions);
        stats->tx_deferred             += readl(&macstat->tx_deferred);
        stats->tx_excessive_collisions +=
                                readl(&macstat->tx_multiple_collisions);
        stats->tx_late_collisions      += readl(&macstat->tx_late_collisions);
        stats->tx_underflows           += readl(&macstat->tx_undersize_frames);
        stats->tx_max_pkt_errs         += readl(&macstat->tx_oversize_frames);

        stats->rx_align_errs        += readl(&macstat->rx_align_errs);
        stats->rx_crc_errs          += readl(&macstat->rx_code_errs);
        stats->rcvd_pkts_dropped    += readl(&macstat->rx_drops);
        stats->rx_overflows         += readl(&macstat->rx_oversize_packets);
        stats->rx_code_violations   += readl(&macstat->rx_fcs_errs);
        stats->rx_length_errs       += readl(&macstat->rx_frame_len_errs);
        stats->rx_other_errs        += readl(&macstat->rx_fragment_packets);
}

/**
 * et1310_handle_macstat_interrupt
 * @adapter: pointer to the adapter structure
 *
 * One of the MACSTAT counters has wrapped.  Update the local copy of
 * the statistics held in the adapter structure, checking the "wrap"
 * bit for each counter.
 */
void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
{
        u32 carry_reg1;
        u32 carry_reg2;

        /* Read the interrupt bits from the register(s).  These are Clear On
         * Write.
         */
        carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
        carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);

        writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
        writel(carry_reg2, &adapter->regs->macstat.carry_reg2);

        /* We need to update the host copy of all the MAC_STAT counters.
         * For each counter, check its overflow bit.  If the overflow bit is
         * set, then increment the host version of the count by one complete
         * revolution of the counter.  This routine is called when the counter
         * block indicates that one of the counters has wrapped.
         */
        if (carry_reg1 & (1 << 14))
                adapter->stats.rx_code_violations       += COUNTER_WRAP_16_BIT;
        if (carry_reg1 & (1 << 8))
                adapter->stats.rx_align_errs    += COUNTER_WRAP_12_BIT;
        if (carry_reg1 & (1 << 7))
                adapter->stats.rx_length_errs   += COUNTER_WRAP_16_BIT;
        if (carry_reg1 & (1 << 2))
                adapter->stats.rx_other_errs    += COUNTER_WRAP_16_BIT;
        if (carry_reg1 & (1 << 6))
                adapter->stats.rx_crc_errs      += COUNTER_WRAP_16_BIT;
        if (carry_reg1 & (1 << 3))
                adapter->stats.rx_overflows     += COUNTER_WRAP_16_BIT;
        if (carry_reg1 & (1 << 0))
                adapter->stats.rcvd_pkts_dropped        += COUNTER_WRAP_16_BIT;
        if (carry_reg2 & (1 << 16))
                adapter->stats.tx_max_pkt_errs  += COUNTER_WRAP_12_BIT;
        if (carry_reg2 & (1 << 15))
                adapter->stats.tx_underflows    += COUNTER_WRAP_12_BIT;
        if (carry_reg2 & (1 << 6))
                adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
        if (carry_reg2 & (1 << 8))
                adapter->stats.tx_deferred      += COUNTER_WRAP_12_BIT;
        if (carry_reg2 & (1 << 5))
                adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
        if (carry_reg2 & (1 << 4))
                adapter->stats.tx_late_collisions       += COUNTER_WRAP_12_BIT;
        if (carry_reg2 & (1 << 2))
                adapter->stats.tx_collisions    += COUNTER_WRAP_12_BIT;
}
1385
1386 void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
1387 {
1388         struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
        u32 index;
        u32 result;
        u32 hash1 = 0;
        u32 hash2 = 0;
        u32 hash3 = 0;
        u32 hash4 = 0;
1396
        /* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
         * the multicast LIST.  If it is NOT specified (and "ALL" is not
         * specified), then we should pass NO multicast addresses to the
         * driver.
         */
1402         if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
1403                 /* Loop through our multicast array and set up the device */
                for (index = 0; index < adapter->multicast_addr_count;
                     index++) {
                        result = ether_crc(6, adapter->multicast_list[index]);
1407
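                        /* Bits 23-29 of the CRC form a 7-bit hash bucket
                         * (0-127), spread across the four 32-bit hash
                         * registers written out below */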
1408                         result = (result & 0x3F800000) >> 23;
1409
                        if (result < 32) {
                                hash1 |= (1 << result);
                        } else if (result < 64) {
                                result -= 32;
                                hash2 |= (1 << result);
                        } else if (result < 96) {
                                result -= 64;
                                hash3 |= (1 << result);
                        } else {
                                result -= 96;
                                hash4 |= (1 << result);
                        }
1422                 }
1423         }
1424
        /* Write out the new hash to the device */
        if (!et1310_in_phy_coma(adapter)) {
1428                 writel(hash1, &rxmac->multi_hash1);
1429                 writel(hash2, &rxmac->multi_hash2);
1430                 writel(hash3, &rxmac->multi_hash3);
1431                 writel(hash4, &rxmac->multi_hash4);
1432         }
1433 }
1434
1435 void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
1436 {
1437         struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1438         u32 uni_pf1;
1439         u32 uni_pf2;
1440         u32 uni_pf3;
1442
        /* Set up unicast packet filter reg 3 to be the first two octets of
         * the MAC address for both addresses
         *
         * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
         * MAC address for second address
         *
         * Set up unicast packet filter reg 1 to be the octets 2 - 5 of the
         * MAC address for first address
         */
1452         uni_pf3 = (adapter->addr[0] << ET_UNI_PF_ADDR2_1_SHIFT) |
1453                   (adapter->addr[1] << ET_UNI_PF_ADDR2_2_SHIFT) |
1454                   (adapter->addr[0] << ET_UNI_PF_ADDR1_1_SHIFT) |
1455                    adapter->addr[1];
1456
1457         uni_pf2 = (adapter->addr[2] << ET_UNI_PF_ADDR2_3_SHIFT) |
1458                   (adapter->addr[3] << ET_UNI_PF_ADDR2_4_SHIFT) |
1459                   (adapter->addr[4] << ET_UNI_PF_ADDR2_5_SHIFT) |
1460                    adapter->addr[5];
1461
1462         uni_pf1 = (adapter->addr[2] << ET_UNI_PF_ADDR1_3_SHIFT) |
1463                   (adapter->addr[3] << ET_UNI_PF_ADDR1_4_SHIFT) |
1464                   (adapter->addr[4] << ET_UNI_PF_ADDR1_5_SHIFT) |
1465                    adapter->addr[5];
1466
1468         if (!et1310_in_phy_coma(adapter)) {
1469                 writel(uni_pf1, &rxmac->uni_pf_addr1);
1470                 writel(uni_pf2, &rxmac->uni_pf_addr2);
1471                 writel(uni_pf3, &rxmac->uni_pf_addr3);
1472         }
1473 }
1474
1475 /* PHY functions */
1476
1477 int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
1478 {
1479         struct net_device *netdev = bus->priv;
1480         struct et131x_adapter *adapter = netdev_priv(netdev);
1481         u16 value;
1482         int ret;
1483
1484         ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);
1485
        if (ret < 0)
                return ret;

        return value;
1490 }
1491
1492 int et131x_mdio_write(struct mii_bus *bus, int phy_addr, int reg, u16 value)
1493 {
1494         struct net_device *netdev = bus->priv;
1495         struct et131x_adapter *adapter = netdev_priv(netdev);
1496
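        /* phy_addr is ignored here: et131x_mii_write() always addresses the
         * PHY attached to this adapter */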
1497         return et131x_mii_write(adapter, reg, value);
1498 }
1499
1500 int et131x_mdio_reset(struct mii_bus *bus)
1501 {
1502         struct net_device *netdev = bus->priv;
1503         struct et131x_adapter *adapter = netdev_priv(netdev);
1504
1505         et131x_mii_write(adapter, MII_BMCR, BMCR_RESET);
1506
1507         return 0;
1508 }
1509
1510 int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
1511 {
1512         struct phy_device *phydev = adapter->phydev;
1513
1514         if (!phydev)
1515                 return -EIO;
1516
1517         return et131x_phy_mii_read(adapter, phydev->addr, reg, value);
1518 }
1519
1520 /**
1521  * et131x_phy_mii_read - Read from the PHY through the MII Interface on the MAC
1522  * @adapter: pointer to our private adapter structure
1523  * @addr: the address of the transceiver
1524  * @reg: the register to read
1525  * @value: pointer to a 16-bit value in which the value will be stored
1526  *
1527  * Returns 0 on success, errno on failure (as defined in errno.h)
1528  */
1529 int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
1530               u8 reg, u16 *value)
1531 {
1532         struct mac_regs __iomem *mac = &adapter->regs->mac;
1533         int status = 0;
1534         u32 delay = 0;
1535         u32 mii_addr;
1536         u32 mii_cmd;
1537         u32 mii_indicator;
1538
1539         /* Save a local copy of the registers we are dealing with so we can
1540          * set them back
1541          */
1542         mii_addr = readl(&mac->mii_mgmt_addr);
1543         mii_cmd = readl(&mac->mii_mgmt_cmd);
1544
1545         /* Stop the current operation */
1546         writel(0, &mac->mii_mgmt_cmd);
1547
1548         /* Set up the register we need to read from on the correct PHY */
1549         writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
1550
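        /* Kick off the read cycle in the MII management command register */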
1551         writel(0x1, &mac->mii_mgmt_cmd);
1552
1553         do {
1554                 udelay(50);
1555                 delay++;
1556                 mii_indicator = readl(&mac->mii_mgmt_indicator);
1557         } while ((mii_indicator & MGMT_WAIT) && delay < 50);
1558
1559         /* If we hit the max delay, we could not read the register */
1560         if (delay == 50) {
                dev_warn(&adapter->pdev->dev,
                         "reg 0x%08x could not be read\n", reg);
                dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
                         mii_indicator);
1565
1566                 status = -EIO;
1567         }
1568
        /* Fetch the value that was read back; it is only meaningful if the
         * read completed above (status == 0) */
1571         *value = readl(&mac->mii_mgmt_stat) & 0xFFFF;
1572
1573         /* Stop the read operation */
1574         writel(0, &mac->mii_mgmt_cmd);
1575
1576         /* set the registers we touched back to the state at which we entered
1577          * this function
1578          */
1579         writel(mii_addr, &mac->mii_mgmt_addr);
1580         writel(mii_cmd, &mac->mii_mgmt_cmd);
1581
1582         return status;
1583 }
1584
/**
 * et131x_mii_write - Write to a PHY register through the MII interface of the MAC
 * @adapter: pointer to our private adapter structure
 * @reg: the register to write to
 * @value: 16-bit value to write
 *
 * FIXME: one caller in netdev still
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 */
1595 int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
1596 {
1597         struct mac_regs __iomem *mac = &adapter->regs->mac;
1598         struct phy_device *phydev = adapter->phydev;
1599         int status = 0;
1600         u8 addr;
1601         u32 delay = 0;
1602         u32 mii_addr;
1603         u32 mii_cmd;
1604         u32 mii_indicator;
1605
1606         if (!phydev)
1607                 return -EIO;
1608
1609         addr = phydev->addr;
1610
1611         /* Save a local copy of the registers we are dealing with so we can
1612          * set them back
1613          */
1614         mii_addr = readl(&mac->mii_mgmt_addr);
1615         mii_cmd = readl(&mac->mii_mgmt_cmd);
1616
1617         /* Stop the current operation */
1618         writel(0, &mac->mii_mgmt_cmd);
1619
1620         /* Set up the register we need to write to on the correct PHY */
1621         writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
1622
1623         /* Add the value to write to the registers to the mac */
1624         writel(value, &mac->mii_mgmt_ctrl);
1625
1626         do {
1627                 udelay(50);
1628                 delay++;
1629                 mii_indicator = readl(&mac->mii_mgmt_indicator);
1630         } while ((mii_indicator & MGMT_BUSY) && delay < 100);
1631
1632         /* If we hit the max delay, we could not write the register */
1633         if (delay == 100) {
1634                 u16 tmp;
1635
                dev_warn(&adapter->pdev->dev,
                         "reg 0x%08x could not be written\n", reg);
                dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
                         mii_indicator);
                dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
                         readl(&mac->mii_mgmt_cmd));
1642
1643                 et131x_mii_read(adapter, reg, &tmp);
1644
1645                 status = -EIO;
1646         }
1647         /* Stop the write operation */
1648         writel(0, &mac->mii_mgmt_cmd);
1649
1650         /*
1651          * set the registers we touched back to the state at which we entered
1652          * this function
1653          */
1654         writel(mii_addr, &mac->mii_mgmt_addr);
1655         writel(mii_cmd, &mac->mii_mgmt_cmd);
1656
1657         return status;
1658 }
1659
1660 /**
1661  *      et1310_phy_power_down   -       PHY power control
1662  *      @adapter: device to control
1663  *      @down: true for off/false for back on
1664  *
1665  *      one hundred, ten, one thousand megs
1666  *      How would you like to have your LAN accessed
1667  *      Can't you see that this code processed
1668  *      Phy power, phy power..
1669  */
1670 void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
1671 {
1672         u16 data;
1673
1674         et131x_mii_read(adapter, MII_BMCR, &data);
1675         data &= ~BMCR_PDOWN;
1676         if (down)
1677                 data |= BMCR_PDOWN;
1678         et131x_mii_write(adapter, MII_BMCR, data);
1679 }
1680
1681 /* Still used from _mac for BIT_READ */
1682 void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, u16 action,
1683                                u16 regnum, u16 bitnum, u8 *value)
1684 {
1685         u16 reg;
1686         u16 mask = 0x0001 << bitnum;
1687
1688         /* Read the requested register */
1689         et131x_mii_read(adapter, regnum, &reg);
1690
1691         switch (action) {
1692         case TRUEPHY_BIT_READ:
1693                 *value = (reg & mask) >> bitnum;
1694                 break;
1695
1696         case TRUEPHY_BIT_SET:
1697                 et131x_mii_write(adapter, regnum, reg | mask);
1698                 break;
1699
1700         case TRUEPHY_BIT_CLEAR:
1701                 et131x_mii_write(adapter, regnum, reg & ~mask);
1702                 break;
1703
1704         default:
1705                 break;
1706         }
1707 }
1708
1709 /**
1710  * et131x_xcvr_init - Init the phy if we are setting it into force mode
1711  * @adapter: pointer to our private adapter structure
1712  *
1713  */
1714 void et131x_xcvr_init(struct et131x_adapter *adapter)
1715 {
1716         u16 imr;
1717         u16 isr;
1718         u16 lcr2;
1719
1720         et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &isr);
1721         et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &imr);
1722
1723         /* Set the link status interrupt only.  Bad behavior when link status
1724          * and auto neg are set, we run into a nested interrupt problem
1725          */
        imr |= (ET_PHY_INT_MASK_AUTONEGSTAT |
                ET_PHY_INT_MASK_LINKSTAT |
                ET_PHY_INT_MASK_ENABLE);
1729
1730         et131x_mii_write(adapter, PHY_INTERRUPT_MASK, imr);
1731
1732         /* Set the LED behavior such that LED 1 indicates speed (off =
1733          * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
1734          * link and activity (on for link, blink off for activity).
1735          *
1736          * NOTE: Some customizations have been added here for specific
1737          * vendors; The LED behavior is now determined by vendor data in the
1738          * EEPROM. However, the above description is the default.
1739          */
1740         if ((adapter->eeprom_data[1] & 0x4) == 0) {
1741                 et131x_mii_read(adapter, PHY_LED_2, &lcr2);
1742
                lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T);
1744                 lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);
1745
1746                 if ((adapter->eeprom_data[1] & 0x8) == 0)
1747                         lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
1748                 else
1749                         lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);
1750
1751                 et131x_mii_write(adapter, PHY_LED_2, lcr2);
1752         }
1753 }
1754
1755 /* PM functions */
1756
1757 /**
1758  * et1310_in_phy_coma - check if the device is in phy coma
1759  * @adapter: pointer to our adapter structure
1760  *
1761  * Returns 0 if the device is not in phy coma, 1 if it is in phy coma
1762  */
1763 int et1310_in_phy_coma(struct et131x_adapter *adapter)
1764 {
1765         u32 pmcsr;
1766
1767         pmcsr = readl(&adapter->regs->global.pm_csr);
1768
1769         return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
1770 }
1771
1772 /**
1773  * et1310_enable_phy_coma - called when network cable is unplugged
1774  * @adapter: pointer to our adapter structure
1775  *
 * The driver receives a phy status change interrupt while in D0 and checks
 * that phy_status is down.
1778  *
1779  *          -- gate off JAGCore;
1780  *          -- set gigE PHY in Coma mode
1781  *          -- wake on phy_interrupt; Perform software reset JAGCore,
1782  *             re-initialize jagcore and gigE PHY
1783  *
1784  *      Add D0-ASPM-PhyLinkDown Support:
1785  *          -- while in D0, when there is a phy_interrupt indicating phy link
1786  *             down status, call the MPSetPhyComa routine to enter this active
1787  *             state power saving mode
1788  *          -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt
 *             indicating linkup status, call the MPDisablePhyComa routine to
1790  *             restore JAGCore and gigE PHY
1791  */
1792 void et1310_enable_phy_coma(struct et131x_adapter *adapter)
1793 {
1794         unsigned long flags;
1795         u32 pmcsr;
1796
1797         pmcsr = readl(&adapter->regs->global.pm_csr);
1798
1799         /* Save the GbE PHY speed and duplex modes. Need to restore this
1800          * when cable is plugged back in
1801          */
1802         /*
1803          * TODO - when PM is re-enabled, check if we need to
1804          * perform a similar task as this -
1805          * adapter->pdown_speed = adapter->ai_force_speed;
1806          * adapter->pdown_duplex = adapter->ai_force_duplex;
1807          */
1808
1809         /* Stop sending packets. */
1810         spin_lock_irqsave(&adapter->send_hw_lock, flags);
1811         adapter->flags |= fMP_ADAPTER_LOWER_POWER;
1812         spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
1813
1814         /* Wait for outstanding Receive packets */
1815
1816         et131x_disable_txrx(adapter->netdev);
1817
1818         /* Gate off JAGCore 3 clock domains */
1819         pmcsr &= ~ET_PMCSR_INIT;
1820         writel(pmcsr, &adapter->regs->global.pm_csr);
1821
1822         /* Program gigE PHY in to Coma mode */
1823         pmcsr |= ET_PM_PHY_SW_COMA;
1824         writel(pmcsr, &adapter->regs->global.pm_csr);
1825 }
1826
1827 /**
1828  * et1310_disable_phy_coma - Disable the Phy Coma Mode
1829  * @adapter: pointer to our adapter structure
1830  */
1831 void et1310_disable_phy_coma(struct et131x_adapter *adapter)
1832 {
1833         u32 pmcsr;
1834
1835         pmcsr = readl(&adapter->regs->global.pm_csr);
1836
1837         /* Disable phy_sw_coma register and re-enable JAGCore clocks */
1838         pmcsr |= ET_PMCSR_INIT;
1839         pmcsr &= ~ET_PM_PHY_SW_COMA;
1840         writel(pmcsr, &adapter->regs->global.pm_csr);
1841
1842         /* Restore the GbE PHY speed and duplex modes;
1843          * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
1844          */
1845         /* TODO - when PM is re-enabled, check if we need to
1846          * perform a similar task as this -
1847          * adapter->ai_force_speed = adapter->pdown_speed;
1848          * adapter->ai_force_duplex = adapter->pdown_duplex;
1849          */
1850
1851         /* Re-initialize the send structures */
1852         et131x_init_send(adapter);
1853
1854         /* Reset the RFD list and re-start RU  */
1855         et131x_reset_recv(adapter);
1856
1857         /* Bring the device back to the state it was during init prior to
1858          * autonegotiation being complete.  This way, when we get the auto-neg
1859          * complete interrupt, we can complete init by calling ConfigMacREGS2.
1860          */
1861         et131x_soft_reset(adapter);
1862
1863         /* setup et1310 as per the documentation ?? */
1864         et131x_adapter_setup(adapter);
1865
1866         /* Allow Tx to restart */
1867         adapter->flags &= ~fMP_ADAPTER_LOWER_POWER;
1868
1869         et131x_enable_txrx(adapter->netdev);
1870 }
1871
1872 /* RX functions */
1873
1874 static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
1875 {
        u32 tmp_free_buff_ring = *free_buff_ring;

        tmp_free_buff_ring++;
        /* This works for all cases where limit < 1024. The 1023 case
         * works because 1023 + 1 is 1024, which means the if condition is
         * not taken but the carry into the wrap bit toggles the wrap value
         * correctly
         */
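        /* For example, with limit = 511: bumping an index of 511 yields 512,
         * which exceeds the limit, so the index clears to 0 and the wrap bit
         * toggles
         */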
1882         if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
1883                 tmp_free_buff_ring &= ~ET_DMA10_MASK;
1884                 tmp_free_buff_ring ^= ET_DMA10_WRAP;
1885         }
1886         /* For the 1023 case */
1887         tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP);
1888         *free_buff_ring = tmp_free_buff_ring;
1889         return tmp_free_buff_ring;
1890 }
1891
1892 /**
1893  * et131x_rx_dma_memory_alloc
1894  * @adapter: pointer to our private adapter structure
1895  *
1896  * Returns 0 on success and errno on failure (as defined in errno.h)
1897  *
1898  * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
1899  * and the Packet Status Ring.
1900  */
1901 int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
1902 {
1903         u32 i, j;
1904         u32 bufsize;
1905         u32 pktstat_ringsize, fbr_chunksize;
1906         struct rx_ring *rx_ring;
1907
1908         /* Setup some convenience pointers */
1909         rx_ring = &adapter->rx_ring;
1910
        /* Alloc memory for the lookup table */
#ifdef USE_FBR0
        rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
        if (rx_ring->fbr[1] == NULL)
                return -ENOMEM;
#endif
        rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
        if (rx_ring->fbr[0] == NULL)
                return -ENOMEM;
1916
        /* The first thing we will do is configure the sizes of the buffer
         * rings. These will change based on jumbo packet support.  Larger
         * jumbo packets increase the size of each entry in FBR0, and the
         * number of entries in FBR0, while at the same time decreasing the
         * number of entries in FBR1.
         *
         * FBR1 holds "large" frames, FBR0 holds "small" frames.  If FBR1
         * entries are huge in order to accommodate a "jumbo" frame, then it
         * will have fewer entries.  Conversely, FBR0 will now be relied upon
         * to carry more "normal" frames, thus its entry size also increases
         * and the number of entries goes up too (since it now carries
         * "small" + "regular" packets).
         *
         * In this scheme, we try to maintain 512 entries between the two
         * rings. Also, FBR1 remains a constant overall size - when its entry
         * size doubles, the number of entries halves.  FBR0 increases in
         * size, however.
         */
1934
1935         if (adapter->registry_jumbo_packet < 2048) {
1936 #ifdef USE_FBR0
1937                 rx_ring->fbr[1]->buffsize = 256;
1938                 rx_ring->fbr[1]->num_entries = 512;
1939 #endif
1940                 rx_ring->fbr[0]->buffsize = 2048;
1941                 rx_ring->fbr[0]->num_entries = 512;
1942         } else if (adapter->registry_jumbo_packet < 4096) {
1943 #ifdef USE_FBR0
1944                 rx_ring->fbr[1]->buffsize = 512;
1945                 rx_ring->fbr[1]->num_entries = 1024;
1946 #endif
1947                 rx_ring->fbr[0]->buffsize = 4096;
1948                 rx_ring->fbr[0]->num_entries = 512;
1949         } else {
1950 #ifdef USE_FBR0
1951                 rx_ring->fbr[1]->buffsize = 1024;
1952                 rx_ring->fbr[1]->num_entries = 768;
1953 #endif
1954                 rx_ring->fbr[0]->buffsize = 16384;
1955                 rx_ring->fbr[0]->num_entries = 128;
1956         }
1957
#ifdef USE_FBR0
        rx_ring->psr_num_entries = rx_ring->fbr[1]->num_entries +
                                   rx_ring->fbr[0]->num_entries;
#else
        rx_ring->psr_num_entries = rx_ring->fbr[0]->num_entries;
#endif
1964
1965         /* Allocate an area of memory for Free Buffer Ring 1 */
1966         bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) + 0xfff;
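        /* The extra 0xfff leaves room to push the ring up to a 4 KB
         * boundary in the alignment fixup below */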
1967         rx_ring->fbr[0]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
1968                                         bufsize,
1969                                         &rx_ring->fbr[0]->ring_physaddr,
1970                                         GFP_KERNEL);
1971         if (!rx_ring->fbr[0]->ring_virtaddr) {
1972                 dev_err(&adapter->pdev->dev,
1973                           "Cannot alloc memory for Free Buffer Ring 1\n");
1974                 return -ENOMEM;
1975         }
1976
1977         /* Save physical address
1978          *
1979          * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
1980          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
1981          * are ever returned, make sure the high part is retrieved here
1982          * before storing the adjusted address.
1983          */
1984         rx_ring->fbr[0]->real_physaddr = rx_ring->fbr[0]->ring_physaddr;
1985
1986         /* Align Free Buffer Ring 1 on a 4K boundary */
1987         et131x_align_allocated_memory(adapter,
1988                                       &rx_ring->fbr[0]->real_physaddr,
1989                                       &rx_ring->fbr[0]->offset, 0x0FFF);
1990
1991         rx_ring->fbr[0]->ring_virtaddr =
1992                         (void *)((u8 *) rx_ring->fbr[0]->ring_virtaddr +
1993                         rx_ring->fbr[0]->offset);
1994
1995 #ifdef USE_FBR0
1996         /* Allocate an area of memory for Free Buffer Ring 0 */
1997         bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) + 0xfff;
1998         rx_ring->fbr[1]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
1999                                                 bufsize,
2000                                                 &rx_ring->fbr[1]->ring_physaddr,
2001                                                 GFP_KERNEL);
2002         if (!rx_ring->fbr[1]->ring_virtaddr) {
2003                 dev_err(&adapter->pdev->dev,
2004                           "Cannot alloc memory for Free Buffer Ring 0\n");
2005                 return -ENOMEM;
2006         }
2007
2008         /* Save physical address
2009          *
2010          * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
2011          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2012          * are ever returned, make sure the high part is retrieved here before
2013          * storing the adjusted address.
2014          */
2015         rx_ring->fbr[1]->real_physaddr = rx_ring->fbr[1]->ring_physaddr;
2016
2017         /* Align Free Buffer Ring 0 on a 4K boundary */
2018         et131x_align_allocated_memory(adapter,
2019                                       &rx_ring->fbr[1]->real_physaddr,
2020                                       &rx_ring->fbr[1]->offset, 0x0FFF);
2021
2022         rx_ring->fbr[1]->ring_virtaddr =
2023                         (void *)((u8 *) rx_ring->fbr[1]->ring_virtaddr +
2024                         rx_ring->fbr[1]->offset);
2025 #endif
2026         for (i = 0; i < (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); i++) {
2027                 u64 fbr1_offset;
2028                 u64 fbr1_tmp_physaddr;
2029                 u32 fbr1_align;
2030
2031                 /* This code allocates an area of memory big enough for N
2032                  * free buffers + (buffer_size - 1) so that the buffers can
2033                  * be aligned on 4k boundaries.  If each buffer were aligned
2034                  * to a buffer_size boundary, the effect would be to double
2035                  * the size of FBR0.  By allocating N buffers at once, we
2036                  * reduce this overhead.
2037                  */
2038                 if (rx_ring->fbr[0]->buffsize > 4096)
2039                         fbr1_align = 4096;
2040                 else
2041                         fbr1_align = rx_ring->fbr[0]->buffsize;
2042
2043                 fbr_chunksize =
2044                     (FBR_CHUNKS * rx_ring->fbr[0]->buffsize) + fbr1_align - 1;
2045                 rx_ring->fbr[0]->mem_virtaddrs[i] =
2046                     dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
2047                                          &rx_ring->fbr[0]->mem_physaddrs[i], GFP_KERNEL);
2048
2049                 if (!rx_ring->fbr[0]->mem_virtaddrs[i]) {
2050                         dev_err(&adapter->pdev->dev,
2051                                 "Could not alloc memory\n");
2052                         return -ENOMEM;
2053                 }
2054
2055                 /* See NOTE in "Save Physical Address" comment above */
2056                 fbr1_tmp_physaddr = rx_ring->fbr[0]->mem_physaddrs[i];
2057
2058                 et131x_align_allocated_memory(adapter,
2059                                               &fbr1_tmp_physaddr,
2060                                               &fbr1_offset, (fbr1_align - 1));
2061
2062                 for (j = 0; j < FBR_CHUNKS; j++) {
2063                         u32 index = (i * FBR_CHUNKS) + j;
2064
2065                         /* Save the Virtual address of this index for quick
2066                          * access later
2067                          */
2068                         rx_ring->fbr[0]->virt[index] =
2069                             (u8 *) rx_ring->fbr[0]->mem_virtaddrs[i] +
2070                             (j * rx_ring->fbr[0]->buffsize) + fbr1_offset;
2071
2072                         /* now store the physical address in the descriptor
2073                          * so the device can access it
2074                          */
2075                         rx_ring->fbr[0]->bus_high[index] =
2076                             (u32) (fbr1_tmp_physaddr >> 32);
2077                         rx_ring->fbr[0]->bus_low[index] =
2078                             (u32) fbr1_tmp_physaddr;
2079
2080                         fbr1_tmp_physaddr += rx_ring->fbr[0]->buffsize;
2081
2082                         rx_ring->fbr[0]->buffer1[index] =
2083                             rx_ring->fbr[0]->virt[index];
2084                         rx_ring->fbr[0]->buffer2[index] =
2085                             rx_ring->fbr[0]->virt[index] - 4;
2086                 }
2087         }
2088
2089 #ifdef USE_FBR0
2090         /* Same for FBR0 (if in use) */
2091         for (i = 0; i < (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); i++) {
2092                 u64 fbr0_offset;
2093                 u64 fbr0_tmp_physaddr;
2094
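                /* Allocate one extra buffer's worth of slack so that each
                 * chunk can be aligned to a buffsize boundary below */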
2095                 fbr_chunksize =
2096                     ((FBR_CHUNKS + 1) * rx_ring->fbr[1]->buffsize) - 1;
2097                 rx_ring->fbr[1]->mem_virtaddrs[i] =
2098                     dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
2099                                          &rx_ring->fbr[1]->mem_physaddrs[i], GFP_KERNEL);
2100
2101                 if (!rx_ring->fbr[1]->mem_virtaddrs[i]) {
2102                         dev_err(&adapter->pdev->dev,
2103                                 "Could not alloc memory\n");
2104                         return -ENOMEM;
2105                 }
2106
2107                 /* See NOTE in "Save Physical Address" comment above */
2108                 fbr0_tmp_physaddr = rx_ring->fbr[1]->mem_physaddrs[i];
2109
2110                 et131x_align_allocated_memory(adapter,
2111                                               &fbr0_tmp_physaddr,
2112                                               &fbr0_offset,
2113                                               rx_ring->fbr[1]->buffsize - 1);
2114
2115                 for (j = 0; j < FBR_CHUNKS; j++) {
2116                         u32 index = (i * FBR_CHUNKS) + j;
2117
2118                         rx_ring->fbr[1]->virt[index] =
2119                             (u8 *) rx_ring->fbr[1]->mem_virtaddrs[i] +
2120                             (j * rx_ring->fbr[1]->buffsize) + fbr0_offset;
2121
2122                         rx_ring->fbr[1]->bus_high[index] =
2123                             (u32) (fbr0_tmp_physaddr >> 32);
2124                         rx_ring->fbr[1]->bus_low[index] =
2125                             (u32) fbr0_tmp_physaddr;
2126
2127                         fbr0_tmp_physaddr += rx_ring->fbr[1]->buffsize;
2128
2129                         rx_ring->fbr[1]->buffer1[index] =
2130                             rx_ring->fbr[1]->virt[index];
2131                         rx_ring->fbr[1]->buffer2[index] =
2132                             rx_ring->fbr[1]->virt[index] - 4;
2133                 }
2134         }
2135 #endif
2136
2137         /* Allocate an area of memory for FIFO of Packet Status ring entries */
2138         pktstat_ringsize =
2139             sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries;
2140
2141         rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2142                                                   pktstat_ringsize,
2143                                                   &rx_ring->ps_ring_physaddr,
2144                                                   GFP_KERNEL);
2145
2146         if (!rx_ring->ps_ring_virtaddr) {
2147                 dev_err(&adapter->pdev->dev,
2148                           "Cannot alloc memory for Packet Status Ring\n");
2149                 return -ENOMEM;
2150         }
2151         printk(KERN_INFO "Packet Status Ring %lx\n",
2152             (unsigned long) rx_ring->ps_ring_physaddr);
2153
2154         /*
2155          * NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
2156          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2157          * are ever returned, make sure the high part is retrieved here before
2158          * storing the adjusted address.
2159          */
2160
2161         /* Allocate an area of memory for writeback of status information */
2162         rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
2163                                             sizeof(struct rx_status_block),
2164                                             &rx_ring->rx_status_bus,
2165                                             GFP_KERNEL);
2166         if (!rx_ring->rx_status_block) {
2167                 dev_err(&adapter->pdev->dev,
2168                           "Cannot alloc memory for Status Block\n");
2169                 return -ENOMEM;
2170         }
2171         rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;
2172         printk(KERN_INFO "PRS %lx\n", (unsigned long)rx_ring->rx_status_bus);
2173
2174         /* Recv
2175          * kmem_cache_create initializes a lookaside list. After successful
2176          * creation, nonpaged fixed-size blocks can be allocated from and
2177          * freed to the lookaside list.
2178          * RFDs will be allocated from this pool.
2179          */
        rx_ring->recv_lookaside = kmem_cache_create(adapter->netdev->name,
                                                   sizeof(struct rfd),
                                                   0,
                                                   SLAB_CACHE_DMA |
                                                   SLAB_HWCACHE_ALIGN,
                                                   NULL);
        if (!rx_ring->recv_lookaside) {
                dev_err(&adapter->pdev->dev,
                          "Cannot alloc memory for RFD lookaside cache\n");
                return -ENOMEM;
        }

        adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE;
2188
2189         /* The RFDs are going to be put on lists later on, so initialize the
2190          * lists now.
2191          */
2192         INIT_LIST_HEAD(&rx_ring->recv_list);
2193         return 0;
2194 }
2195
2196 /**
2197  * et131x_rx_dma_memory_free - Free all memory allocated within this module.
2198  * @adapter: pointer to our private adapter structure
2199  */
2200 void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
2201 {
2202         u32 index;
2203         u32 bufsize;
2204         u32 pktstat_ringsize;
2205         struct rfd *rfd;
2206         struct rx_ring *rx_ring;
2207
2208         /* Setup some convenience pointers */
2209         rx_ring = &adapter->rx_ring;
2210
2211         /* Free RFDs and associated packet descriptors */
2212         WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);
2213
2214         while (!list_empty(&rx_ring->recv_list)) {
                rfd = list_entry(rx_ring->recv_list.next,
                                 struct rfd, list_node);
2217
2218                 list_del(&rfd->list_node);
2219                 rfd->skb = NULL;
2220                 kmem_cache_free(adapter->rx_ring.recv_lookaside, rfd);
2221         }
2222
2223         /* Free Free Buffer Ring 1 */
        if (rx_ring->fbr[0] && rx_ring->fbr[0]->ring_virtaddr) {
2225                 /* First the packet memory */
2226                 for (index = 0; index <
2227                      (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); index++) {
2228                         if (rx_ring->fbr[0]->mem_virtaddrs[index]) {
2229                                 u32 fbr1_align;
2230
2231                                 if (rx_ring->fbr[0]->buffsize > 4096)
2232                                         fbr1_align = 4096;
2233                                 else
2234                                         fbr1_align = rx_ring->fbr[0]->buffsize;
2235
2236                                 bufsize =
2237                                     (rx_ring->fbr[0]->buffsize * FBR_CHUNKS) +
2238                                     fbr1_align - 1;
2239
2240                                 dma_free_coherent(&adapter->pdev->dev,
2241                                         bufsize,
2242                                         rx_ring->fbr[0]->mem_virtaddrs[index],
2243                                         rx_ring->fbr[0]->mem_physaddrs[index]);
2244
2245                                 rx_ring->fbr[0]->mem_virtaddrs[index] = NULL;
2246                         }
2247                 }
2248
2249                 /* Now the FIFO itself */
2250                 rx_ring->fbr[0]->ring_virtaddr = (void *)((u8 *)
2251                         rx_ring->fbr[0]->ring_virtaddr - rx_ring->fbr[0]->offset);
2252
2253                 bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries)
2254                                                             + 0xfff;
2255
2256                 dma_free_coherent(&adapter->pdev->dev, bufsize,
2257                                     rx_ring->fbr[0]->ring_virtaddr,
2258                                     rx_ring->fbr[0]->ring_physaddr);
2259
2260                 rx_ring->fbr[0]->ring_virtaddr = NULL;
2261         }
2262
2263 #ifdef USE_FBR0
2264         /* Now the same for Free Buffer Ring 0 */
        if (rx_ring->fbr[1] && rx_ring->fbr[1]->ring_virtaddr) {
2266                 /* First the packet memory */
2267                 for (index = 0; index <
2268                      (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); index++) {
2269                         if (rx_ring->fbr[1]->mem_virtaddrs[index]) {
2270                                 bufsize =
2271                                     (rx_ring->fbr[1]->buffsize *
2272                                      (FBR_CHUNKS + 1)) - 1;
2273
2274                                 dma_free_coherent(&adapter->pdev->dev,
2275                                         bufsize,
2276                                         rx_ring->fbr[1]->mem_virtaddrs[index],
2277                                         rx_ring->fbr[1]->mem_physaddrs[index]);
2278
2279                                 rx_ring->fbr[1]->mem_virtaddrs[index] = NULL;
2280                         }
2281                 }
2282
2283                 /* Now the FIFO itself */
2284                 rx_ring->fbr[1]->ring_virtaddr = (void *)((u8 *)
2285                         rx_ring->fbr[1]->ring_virtaddr - rx_ring->fbr[1]->offset);
2286
2287                 bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries)
2288                                                             + 0xfff;
2289
2290                 dma_free_coherent(&adapter->pdev->dev,
2291                                     bufsize,
2292                                     rx_ring->fbr[1]->ring_virtaddr,
2293                                     rx_ring->fbr[1]->ring_physaddr);
2294
2295                 rx_ring->fbr[1]->ring_virtaddr = NULL;
2296         }
2297 #endif
2298
2299         /* Free Packet Status Ring */
2300         if (rx_ring->ps_ring_virtaddr) {
2301                 pktstat_ringsize =
2302                     sizeof(struct pkt_stat_desc) *
2303                     adapter->rx_ring.psr_num_entries;
2304
2305                 dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize,
2306                                     rx_ring->ps_ring_virtaddr,
2307                                     rx_ring->ps_ring_physaddr);
2308
2309                 rx_ring->ps_ring_virtaddr = NULL;
2310         }
2311
2312         /* Free area of memory for the writeback of status information */
2313         if (rx_ring->rx_status_block) {
2314                 dma_free_coherent(&adapter->pdev->dev,
2315                         sizeof(struct rx_status_block),
2316                         rx_ring->rx_status_block, rx_ring->rx_status_bus);
2317                 rx_ring->rx_status_block = NULL;
2318         }
2319
2320         /* Free receive buffer pool */
2321
2322         /* Free receive packet pool */
2323
2324         /* Destroy the lookaside (RFD) pool */
2325         if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) {
2326                 kmem_cache_destroy(rx_ring->recv_lookaside);
2327                 adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
2328         }
2329
2330         /* Free the FBR Lookup Table */
2331 #ifdef USE_FBR0
2332         kfree(rx_ring->fbr[1]);
2333 #endif
2334
2335         kfree(rx_ring->fbr[0]);
2336
2337         /* Reset Counters */
2338         rx_ring->num_ready_recv = 0;
2339 }
2340
2341 /**
2342  * et131x_init_recv - Initialize receive data structures.
2343  * @adapter: pointer to our private adapter structure
2344  *
2345  * Returns 0 on success and errno on failure (as defined in errno.h)
2346  */
2347 int et131x_init_recv(struct et131x_adapter *adapter)
2348 {
2349         int status = -ENOMEM;
2350         struct rfd *rfd = NULL;
2351         u32 rfdct;
2352         u32 numrfd = 0;
2353         struct rx_ring *rx_ring;
2354
2355         /* Setup some convenience pointers */
2356         rx_ring = &adapter->rx_ring;
2357
2358         /* Setup each RFD */
2359         for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
                rfd = kmem_cache_alloc(rx_ring->recv_lookaside,
                                       GFP_ATOMIC | GFP_DMA);

                if (!rfd) {
                        dev_err(&adapter->pdev->dev,
                                "Couldn't alloc RFD out of kmem_cache\n");
                        status = -ENOMEM;
                        break;
                }
2369
2370                 rfd->skb = NULL;
2371
2372                 /* Add this RFD to the recv_list */
2373                 list_add_tail(&rfd->list_node, &rx_ring->recv_list);
2374
2375                 /* Increment both the available RFD's, and the total RFD's. */
2376                 rx_ring->num_ready_recv++;
2377                 numrfd++;
2378         }
2379
2380         if (numrfd > NIC_MIN_NUM_RFD)
2381                 status = 0;
2382
2383         rx_ring->num_rfd = numrfd;
2384
        /* Any RFDs already allocated sit on recv_list and are freed by
         * et131x_rx_dma_memory_free(); a failed allocation returned NULL,
         * so there is nothing to free here
         */
        if (status != 0)
                dev_err(&adapter->pdev->dev,
                        "Allocation problems in et131x_init_recv\n");
2390         return status;
2391 }
2392
2393 /**
2394  * et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
2395  * @adapter: pointer to our adapter structure
2396  */
2397 void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
2398 {
2399         struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
2400         struct rx_ring *rx_local = &adapter->rx_ring;
2401         struct fbr_desc *fbr_entry;
2402         u32 entry;
2403         u32 psr_num_des;
2404         unsigned long flags;
2405
2406         /* Halt RXDMA to perform the reconfigure.  */
2407         et131x_rx_dma_disable(adapter);
2408
2409         /* Load the completion writeback physical address
2410          *
2411          * NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
2412          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2413          * are ever returned, make sure the high part is retrieved here
2414          * before storing the adjusted address.
2415          */
2416         writel((u32) ((u64)rx_local->rx_status_bus >> 32),
2417                &rx_dma->dma_wb_base_hi);
2418         writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo);
2419
2420         memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));
2421
2422         /* Set the address and parameters of the packet status ring into the
2423          * 1310's registers
2424          */
2425         writel((u32) ((u64)rx_local->ps_ring_physaddr >> 32),
2426                &rx_dma->psr_base_hi);
2427         writel((u32) rx_local->ps_ring_physaddr, &rx_dma->psr_base_lo);
2428         writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des);
2429         writel(0, &rx_dma->psr_full_offset);
2430
2431         psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
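        /* LO_MARK_PERCENT_FOR_PSR presumably sets a low-water mark: the
         * device is told how many PSR entries may remain before it signals
         * that the ring is running low */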
2432         writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
2433                &rx_dma->psr_min_des);
2434
2435         spin_lock_irqsave(&adapter->rcv_lock, flags);
2436
2437         /* These local variables track the PSR in the adapter structure */
2438         rx_local->local_psr_full = 0;
2439
2440         /* Now's the best time to initialize FBR1 contents */
2441         fbr_entry = (struct fbr_desc *) rx_local->fbr[0]->ring_virtaddr;
2442         for (entry = 0; entry < rx_local->fbr[0]->num_entries; entry++) {
2443                 fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry];
2444                 fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry];
2445                 fbr_entry->word2 = entry;
2446                 fbr_entry++;
2447         }
2448
2449         /* Set the address and parameters of Free buffer ring 1 (and 0 if
2450          * required) into the 1310's registers
2451          */
2452         writel((u32) (rx_local->fbr[0]->real_physaddr >> 32),
2453                &rx_dma->fbr1_base_hi);
2454         writel((u32) rx_local->fbr[0]->real_physaddr, &rx_dma->fbr1_base_lo);
2455         writel(rx_local->fbr[0]->num_entries - 1, &rx_dma->fbr1_num_des);
2456         writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);
2457
2458         /* This variable tracks the free buffer ring 1 full position, so it
2459          * has to match the above.
2460          */
2461         rx_local->fbr[0]->local_full = ET_DMA10_WRAP;
2462         writel(
2463             ((rx_local->fbr[0]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
2464             &rx_dma->fbr1_min_des);
2465
2466 #ifdef USE_FBR0
2467         /* Now's the best time to initialize FBR0 contents */
2468         fbr_entry = (struct fbr_desc *) rx_local->fbr[1]->ring_virtaddr;
2469         for (entry = 0; entry < rx_local->fbr[1]->num_entries; entry++) {
2470                 fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry];
2471                 fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry];
2472                 fbr_entry->word2 = entry;
2473                 fbr_entry++;
2474         }
2475
2476         writel((u32) (rx_local->fbr[1]->real_physaddr >> 32),
2477                &rx_dma->fbr0_base_hi);
2478         writel((u32) rx_local->fbr[1]->real_physaddr, &rx_dma->fbr0_base_lo);
2479         writel(rx_local->fbr[1]->num_entries - 1, &rx_dma->fbr0_num_des);
2480         writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);
2481
2482         /* This variable tracks the free buffer ring 0 full position, so it
2483          * has to match the above.
2484          */
2485         rx_local->fbr[1]->local_full = ET_DMA10_WRAP;
2486         writel(
2487             ((rx_local->fbr[1]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
2488             &rx_dma->fbr0_min_des);
2489 #endif
2490
        /* Program the number of packets we will receive before generating an
         * interrupt.
         * For version B silicon, this value gets updated once autoneg is
         * complete.
         */
2496         writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);
2497
2498         /* The "time_done" is not working correctly to coalesce interrupts
2499          * after a given time period, but rather is giving us an interrupt
2500          * regardless of whether we have received packets.
2501          * This value gets updated once autoneg is complete.
2502          */
2503         writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);
2504
2505         spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2506 }
2507
2508 /**
2509  * et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate.
2510  * @adapter: pointer to our adapter structure
2511  */
2512 void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
2513 {
2514         struct phy_device *phydev = adapter->phydev;
2515
2516         if (!phydev)
2517                 return;
2518
        /* For version B silicon, we do not use the RxDMA timer for 10 and 100
         * Mbits/s line rates, nor do we enable RxDMA interrupt coalescing.
         */
2522         if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
2523                 writel(0, &adapter->regs->rxdma.max_pkt_time);
2524                 writel(1, &adapter->regs->rxdma.num_pkt_done);
2525         }
2526 }
2527
2528 /**
 * nic_return_rfd - Recycle an RFD and put it back onto the receive list
2530  * @adapter: pointer to our adapter
2531  * @rfd: pointer to the RFD
2532  */
2533 static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
2534 {
2535         struct rx_ring *rx_local = &adapter->rx_ring;
2536         struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
2537         u16 buff_index = rfd->bufferindex;
2538         u8 ring_index = rfd->ringindex;
2539         unsigned long flags;
2540
2541         /* We don't use any of the OOB data besides status. Otherwise, we
2542          * need to clean up OOB data
2543          */
2544         if (
2545 #ifdef USE_FBR0
2546             (ring_index == 0 && buff_index < rx_local->fbr[1]->num_entries) ||
2547 #endif
2548             (ring_index == 1 && buff_index < rx_local->fbr[0]->num_entries)) {
2549                 spin_lock_irqsave(&adapter->fbr_lock, flags);
2550
2551                 if (ring_index == 1) {
2552                         struct fbr_desc *next =
2553                             (struct fbr_desc *) (rx_local->fbr[0]->ring_virtaddr) +
2554                                          INDEX10(rx_local->fbr[0]->local_full);
2555
2556                         /* Handle the Free Buffer Ring advancement here. Write
2557                          * the PA / Buffer Index for the returned buffer into
2558                          * the oldest (next to be freed)FBR entry
2559                          */
2560                         next->addr_hi = rx_local->fbr[0]->bus_high[buff_index];
2561                         next->addr_lo = rx_local->fbr[0]->bus_low[buff_index];
2562                         next->word2 = buff_index;
2563
2564                         writel(bump_free_buff_ring(&rx_local->fbr[0]->local_full,
2565                                 rx_local->fbr[0]->num_entries - 1),
2566                                 &rx_dma->fbr1_full_offset);
2567                 }
2568 #ifdef USE_FBR0
2569                 else {
2570                         struct fbr_desc *next = (struct fbr_desc *)
2571                                 rx_local->fbr[1]->ring_virtaddr +
2572                                     INDEX10(rx_local->fbr[1]->local_full);
2573
2574                         /* Handle the Free Buffer Ring advancement here. Write
2575                          * the PA / Buffer Index for the returned buffer into
2576                          * the oldest (next to be freed) FBR entry
2577                          */
2578                         next->addr_hi = rx_local->fbr[1]->bus_high[buff_index];
2579                         next->addr_lo = rx_local->fbr[1]->bus_low[buff_index];
2580                         next->word2 = buff_index;
2581
2582                         writel(bump_free_buff_ring(
2583                                         &rx_local->fbr[1]->local_full,
2584                                         rx_local->fbr[1]->num_entries - 1),
2585                                &rx_dma->fbr0_full_offset);
2586                 }
2587 #endif
2588                 spin_unlock_irqrestore(&adapter->fbr_lock, flags);
2589         } else {
2590                 dev_err(&adapter->pdev->dev,
2591                           "%s illegal Buffer Index returned\n", __func__);
2592         }
2593
2594         /* The processing on this RFD is done, so put it back on the tail of
2595          * our list
2596          */
2597         spin_lock_irqsave(&adapter->rcv_lock, flags);
2598         list_add_tail(&rfd->list_node, &rx_local->recv_list);
2599         rx_local->num_ready_recv++;
2600         spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2601
2602         WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
2603 }
2604
2605 /**
 * et131x_rx_dma_disable - Stop Rx DMA on the ET1310
2607  * @adapter: pointer to our adapter structure
2608  */
2609 void et131x_rx_dma_disable(struct et131x_adapter *adapter)
2610 {
2611         u32 csr;
2612         /* Setup the receive dma configuration register */
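        /* 0x2001 keeps the FBR1 enable bit (0x2000) set while also setting
         * bit 0, which appears to request the halt; completion shows up in
         * the halt status bit (bit 17) checked below */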
2613         writel(0x00002001, &adapter->regs->rxdma.csr);
2614         csr = readl(&adapter->regs->rxdma.csr);
2615         if ((csr & 0x00020000) == 0) {  /* Check halt status (bit 17) */
2616                 udelay(5);
2617                 csr = readl(&adapter->regs->rxdma.csr);
2618                 if ((csr & 0x00020000) == 0)
2619                         dev_err(&adapter->pdev->dev,
2620                         "RX Dma failed to enter halt state. CSR 0x%08x\n",
2621                                 csr);
2622         }
2623 }
2624
2625 /**
 * et131x_rx_dma_enable - Restart Rx DMA on the ET1310.
2627  * @adapter: pointer to our adapter structure
2628  */
2629 void et131x_rx_dma_enable(struct et131x_adapter *adapter)
2630 {
2631         /* Setup the receive dma configuration register for normal operation */
2632         u32 csr =  0x2000;      /* FBR1 enable */
2633
2634         if (adapter->rx_ring.fbr[0]->buffsize == 4096)
2635                 csr |= 0x0800;
2636         else if (adapter->rx_ring.fbr[0]->buffsize == 8192)
2637                 csr |= 0x1000;
2638         else if (adapter->rx_ring.fbr[0]->buffsize == 16384)
2639                 csr |= 0x1800;
2640 #ifdef USE_FBR0
2641         csr |= 0x0400;          /* FBR0 enable */
2642         if (adapter->rx_ring.fbr[1]->buffsize == 256)
2643                 csr |= 0x0100;
2644         else if (adapter->rx_ring.fbr[1]->buffsize == 512)
2645                 csr |= 0x0200;
2646         else if (adapter->rx_ring.fbr[1]->buffsize == 1024)
2647                 csr |= 0x0300;
2648 #endif
2649         writel(csr, &adapter->regs->rxdma.csr);
2650
2651         csr = readl(&adapter->regs->rxdma.csr);
2652         if ((csr & 0x00020000) != 0) {
2653                 udelay(5);
2654                 csr = readl(&adapter->regs->rxdma.csr);
2655                 if ((csr & 0x00020000) != 0) {
2656                         dev_err(&adapter->pdev->dev,
2657                             "RX Dma failed to exit halt state.  CSR 0x%08x\n",
2658                                 csr);
2659                 }
2660         }
2661 }
2662
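/* The hardware's free-buffer and PSR indices are 10- or 12-bit counters with
 * a wrap bit directly above them; these helpers advance an index while
 * preserving the current wrap bit
 */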
2664 static inline void add_10bit(u32 *v, int n)
2665 {
2666         *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
2667 }
2668
2669 static inline void add_12bit(u32 *v, int n)
2670 {
2671         *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
2672 }
2673
2674 /**
2675  * nic_rx_pkts - Checks the hardware for available packets
2676  * @adapter: pointer to our adapter
2677  *
2678  * Returns rfd, a pointer to our MPRFD.
2679  *
2680  * Checks the hardware for available packets, using completion ring
2681  * If packets are available, it gets an RFD from the recv_list, attaches
2682  * the packet to it, puts the RFD in the RecvPendList, and also returns
2683  * the pointer to the RFD.
2684  */
2685 static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
2686 {
2687         struct rx_ring *rx_local = &adapter->rx_ring;
2688         struct rx_status_block *status;
2689         struct pkt_stat_desc *psr;
2690         struct rfd *rfd;
2691         u32 i;
2692         u8 *buf;
2693         unsigned long flags;
2694         struct list_head *element;
2695         u8 ring_index;
2696         u16 buff_index;
2697         u32 len;
2698         u32 word0;
2699         u32 word1;
2700
2701         /* RX Status block is written by the DMA engine prior to every
2702          * interrupt. It contains the next to be used entry in the Packet
2703          * Status Ring, and also the two Free Buffer rings.
2704          */
2705         status = rx_local->rx_status_block;
2706         word1 = status->word1 >> 16;    /* Get the useful bits */
2707
        /* Check the PSR and wrap bits; if the hardware's copy matches our
         * local copy, nothing new has been written */
2709         if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
2710                 /* Looks like this ring is not updated yet */
2711                 return NULL;
2712
2713         /* The packet status ring indicates that data is available. */
2714         psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) +
2715                         (rx_local->local_psr_full & 0xFFF);
2716
2717         /* Grab any information that is required once the PSR is
2718          * advanced, since we can no longer rely on the memory being
2719          * accurate
2720          */
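        /* PSR word1 layout, as decoded below: bits 0-15 hold the frame
         * length, bits 16-25 the buffer index, bits 26-27 the ring index */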
2721         len = psr->word1 & 0xFFFF;
2722         ring_index = (psr->word1 >> 26) & 0x03;
2723         buff_index = (psr->word1 >> 16) & 0x3FF;
2724         word0 = psr->word0;
2725
2726         /* Indicate that we have used this PSR entry. */
2727         /* FIXME wrap 12 */
2728         add_12bit(&rx_local->local_psr_full, 1);
2729         if ((rx_local->local_psr_full & 0xFFF) >
2730                                         rx_local->psr_num_entries - 1) {
2731                 /* Clear psr full and toggle the wrap bit */
2732                 rx_local->local_psr_full &=  ~0xFFF;
2733                 rx_local->local_psr_full ^= 0x1000;
2734         }
2735
2736         writel(rx_local->local_psr_full,
2737                &adapter->regs->rxdma.psr_full_offset);
2738
2739 #ifndef USE_FBR0
2740         if (ring_index != 1)
2741                 return NULL;
2742 #endif
2743
2744 #ifdef USE_FBR0
2745         if (ring_index > 1 ||
2746                 (ring_index == 0 &&
2747                 buff_index > rx_local->fbr[1]->num_entries - 1) ||
2748                 (ring_index == 1 &&
2749                 buff_index > rx_local->fbr[0]->num_entries - 1))
2750 #else
2751         if (ring_index != 1 || buff_index > rx_local->fbr[0]->num_entries - 1)
2752 #endif
2753         {
2754                 /* Illegal buffer or ring index; cannot be used by S/W */
2755                 dev_err(&adapter->pdev->dev,
2756                           "NICRxPkts PSR Entry %d indicates "
2757                           "length of %d and/or bad bi(%d)\n",
2758                           rx_local->local_psr_full & 0xFFF,
2759                           len, buff_index);
2760                 return NULL;
2761         }
2762
2763         /* Get and fill the RFD. */
2764         spin_lock_irqsave(&adapter->rcv_lock, flags);
2765
2766         element = rx_local->recv_list.next;
2767         rfd = list_entry(element, struct rfd, list_node);
2769
2770         if (rfd == NULL) {
2771                 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2772                 return NULL;
2773         }
2774
2775         list_del(&rfd->list_node);
2776         rx_local->num_ready_recv--;
2777
2778         spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2779
2780         rfd->bufferindex = buff_index;
2781         rfd->ringindex = ring_index;
2782
2783         /* In V1 silicon, there is a bug which screws up filtering of
2784          * runt packets.  Therefore runt packet filtering is disabled
2785          * in the MAC and the packets are dropped here.  They are
2786          * also counted here.
2787          */
2788         if (len < (NIC_MIN_PACKET_SIZE + 4)) {
2789                 adapter->stats.rx_other_errs++;
2790                 len = 0;
2791         }
2792
2793         if (len) {
2794                 /* Determine if this is a multicast packet coming in */
2795                 if ((word0 & ALCATEL_MULTICAST_PKT) &&
2796                     !(word0 & ALCATEL_BROADCAST_PKT)) {
2797                         /* Promiscuous mode and Multicast mode are
2798                          * not mutually exclusive as was first
2799                          * thought.  I guess Promiscuous is just
2800                          * considered a super-set of the other
2801                          * filters. Generally filter is 0x2b when in
2802                          * promiscuous mode.
2803                          */
2804                         if ((adapter->packet_filter &
2805                                         ET131X_PACKET_TYPE_MULTICAST)
2806                             && !(adapter->packet_filter &
2807                                         ET131X_PACKET_TYPE_PROMISCUOUS)
2808                             && !(adapter->packet_filter &
2809                                         ET131X_PACKET_TYPE_ALL_MULTICAST)) {
2810                                 /*
2811                                  * Note - ring_index for fbr[] array is reversed
2812                                  * 1 for FBR0 etc
2813                                  */
2814                                 buf = rx_local->fbr[(ring_index == 0 ? 1 : 0)]->
2815                                                 virt[buff_index];
2816
2817                                 /* Loop through our list to see if the
2818                                  * destination address of this packet
2819                                  * matches one in our list.
2820                                  */
2821                                 for (i = 0; i < adapter->multicast_addr_count;
2822                                      i++) {
2823                                         if (memcmp(buf,
2824                                                    adapter->multicast_list[i],
2825                                                    ETH_ALEN) == 0)
2826                                                 break;
2827                                 }
2838
2839                                 /* If our index is equal to the number
2840                                  * of Multicast addresses we have, then
2841                                  * this means we did not find this
2842                                  * packet's matching address in our
2843                                  * list.  Set the len to zero,
2844                                  * so we free our RFD when we return
2845                                  * from this function.
2846                                  */
2847                                 if (i == adapter->multicast_addr_count)
2848                                         len = 0;
2849                         }
2850
2851                         if (len > 0)
2852                                 adapter->stats.multicast_pkts_rcvd++;
2853                 } else if (word0 & ALCATEL_BROADCAST_PKT)
2854                         adapter->stats.broadcast_pkts_rcvd++;
2855                 else
2856                         /* Not sure what this counter measures in
2857                          * promiscuous mode. Perhaps we should check
2858                          * the MAC address to see if it is directed
2859                          * to us in promiscuous mode.
2860                          */
2861                         adapter->stats.unicast_pkts_rcvd++;
2862         }
2863
2864         if (len > 0) {
2865                 struct sk_buff *skb = NULL;
2866
2867                 /* rfd->len = len - 4; */
2868                 rfd->len = len;
2869
2870                 skb = dev_alloc_skb(rfd->len + 2);
2871                 if (!skb) {
2872                         dev_err(&adapter->pdev->dev,
2873                                   "Couldn't alloc an SKB for Rx\n");
2874                         return NULL;
2875                 }
2876
2877                 adapter->net_stats.rx_bytes += rfd->len;
2878
2879                 /*
2880                  * Note - ring_index for fbr[] array is reversed,
2881                  * 1 for FBR0 etc
2882                  */
2883                 memcpy(skb_put(skb, rfd->len),
2884                        rx_local->fbr[(ring_index == 0 ? 1 : 0)]->virt[buff_index],
2885                        rfd->len);
2886
2887                 skb->dev = adapter->netdev;
2888                 skb->protocol = eth_type_trans(skb, adapter->netdev);
2889                 skb->ip_summed = CHECKSUM_NONE;
2890
2891                 netif_rx(skb);
2892         } else {
2893                 rfd->len = 0;
2894         }
2895
2896         nic_return_rfd(adapter, rfd);
2897         return rfd;
2898 }
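
/* Note on the fbr[] indexing in nic_rx_pkts() above: the array order is
 * reversed with respect to the ring number the PSR reports. ring_index 0
 * (hardware FBR0, the small buffers) maps to fbr[1], and ring_index 1
 * (hardware FBR1, the large buffers) maps to fbr[0]; the
 * (ring_index == 0 ? 1 : 0) expressions encode exactly that mapping.
 */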
2899
2900 /**
2901  * et131x_reset_recv - Reset the receive list
2902  * @adapter: pointer to our adapter
2903  *
2904  * Assumption: Rcv spinlock has been acquired.
2905  */
2906 void et131x_reset_recv(struct et131x_adapter *adapter)
2907 {
2908         WARN_ON(list_empty(&adapter->rx_ring.recv_list));
2909 }
2910
2911 /**
2912  * et131x_handle_recv_interrupt - Interrupt handler for receive processing
2913  * @adapter: pointer to our adapter
2914  *
2915  * Assumption: Rcv spinlock has been acquired.
2916  */
2917 void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
2918 {
2919         struct rfd *rfd = NULL;
2920         u32 count = 0;
2921         bool done = true;
2922
2923         /* Process up to the number of available RFDs */
2924         while (count < NUM_PACKETS_HANDLED) {
2925                 if (list_empty(&adapter->rx_ring.recv_list)) {
2926                         WARN_ON(adapter->rx_ring.num_ready_recv != 0);
2927                         done = false;
2928                         break;
2929                 }
2930
2931                 rfd = nic_rx_pkts(adapter);
2932
2933                 if (rfd == NULL)
2934                         break;
2935
2936                 /* Do not receive any packets until a filter has been set.
2937                  * Do not receive any packets until we have link.
2938                  * If length is zero, return the RFD in order to advance the
2939                  * Free buffer ring.
2940                  */
2941                 if (!adapter->packet_filter ||
2942                     !netif_carrier_ok(adapter->netdev) ||
2943                     rfd->len == 0)
2944                         continue;
2945
2946                 /* Increment the number of packets we received */
2947                 adapter->net_stats.rx_packets++;
2948
2949                 /* Set the status on the packet, either resources or success */
2950                 if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) {
2951                         dev_warn(&adapter->pdev->dev,
2952                                     "RFDs are running out\n");
2953                 }
2954                 count++;
2955         }
2956
2957         if (count == NUM_PACKETS_HANDLED || !done) {
2958                 adapter->rx_ring.unfinished_receives = true;
2959                 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
2960                        &adapter->regs->global.watchdog_timer);
2961         } else
2962                 /* Watchdog timer will disable itself if appropriate. */
2963                 adapter->rx_ring.unfinished_receives = false;
2964 }
2965
2966 /* TX functions */
2967
2968 /**
2969  * et131x_tx_dma_memory_alloc
2970  * @adapter: pointer to our private adapter structure
2971  *
2972  * Returns 0 on success and errno on failure (as defined in errno.h).
2973  *
2974  * Allocates memory that will be visible both to the device and to the CPU.
2975  * The OS will pass us packets, pointers to which we will insert in the Tx
2976  * Descriptor queue. The device will read this queue to find the packets in
2977  * memory. The device will update the "status" in memory each time it xmits a
2978  * packet.
2979  */
2980 int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
2981 {
2982         int desc_size = 0;
2983         struct tx_ring *tx_ring = &adapter->tx_ring;
2984
2985         /* Allocate memory for the TCBs (Transmit Control Blocks) */
2986         adapter->tx_ring.tcb_ring =
2987                 kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
2988         if (!adapter->tx_ring.tcb_ring) {
2989                 dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
2990                 return -ENOMEM;
2991         }
2992
2993         /* Allocate enough memory for the Tx descriptor ring, and allocate
2994          * some extra so that the ring can be aligned on a 4k boundary.
2995          */
2996         desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
2997         tx_ring->tx_desc_ring = dma_alloc_coherent(&adapter->pdev->dev,
2998                                                    desc_size,
2999                                                    &tx_ring->tx_desc_ring_pa,
3000                                                    GFP_KERNEL);
3000         if (!adapter->tx_ring.tx_desc_ring) {
3001                 dev_err(&adapter->pdev->dev,
3002                                         "Cannot alloc memory for Tx Ring\n");
3003                 return -ENOMEM;
3004         }
3005
3006         /* NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
3007          * returns SAC (32-bit) addresses while the device's coherent DMA
3008          * mask is left at the 32-bit default. If DAC (64-bit) addresses
3009          * are ever returned, make sure the high part is retrieved here
3010          * before storing the adjusted address.
3011          */
3013         /* Allocate memory for the Tx status block */
3014         tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
3015                                                     sizeof(u32),
3016                                                     &tx_ring->tx_status_pa,
3017                                                     GFP_KERNEL);
3018         if (!adapter->tx_ring.tx_status) {
3019                 dev_err(&adapter->pdev->dev,
3020                                   "Cannot alloc memory for Tx status block\n");
3021                 return -ENOMEM;
3022         }
3023         return 0;
3024 }
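
/* NOTE: the extra (4096 - 1) bytes requested for the descriptor ring
 * above are historical slack for a manual 4k alignment step; no such
 * fix-up is performed any more, and dma_alloc_coherent() already
 * guarantees that both the virtual and the DMA address it returns are
 * at least page aligned.
 */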
3025
3026 /**
3027  * et131x_tx_dma_memory_free - Free all memory allocated within this module
3028  * @adapter: pointer to our private adapter structure
3031  */
3032 void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
3033 {
3034         int desc_size = 0;
3035
3036         if (adapter->tx_ring.tx_desc_ring) {
3037                 /* Free memory relating to Tx rings here */
3038                 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
3039                                                                 + 4096 - 1;
3040                 dma_free_coherent(&adapter->pdev->dev,
3041                                     desc_size,
3042                                     adapter->tx_ring.tx_desc_ring,
3043                                     adapter->tx_ring.tx_desc_ring_pa);
3044                 adapter->tx_ring.tx_desc_ring = NULL;
3045         }
3046
3047         /* Free memory for the Tx status block */
3048         if (adapter->tx_ring.tx_status) {
3049                 dma_free_coherent(&adapter->pdev->dev,
3050                                     sizeof(u32),
3051                                     adapter->tx_ring.tx_status,
3052                                     adapter->tx_ring.tx_status_pa);
3053
3054                 adapter->tx_ring.tx_status = NULL;
3055         }
3056         /* Free the memory for the tcb structures */
3057         kfree(adapter->tx_ring.tcb_ring);
3058 }
3059
3060 /**
3061  * et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
3062  * @adapter: pointer to our private adapter structure
3063  *
3064  * Configure the transmit engine with the ring buffers we have created
3065  * and prepare it for use.
3066  */
3067 void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
3068 {
3069         struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
3070
3071         /* Load the hardware with the start of the transmit descriptor ring. */
3072         writel((u32) ((u64)adapter->tx_ring.tx_desc_ring_pa >> 32),
3073                &txdma->pr_base_hi);
3074         writel((u32) adapter->tx_ring.tx_desc_ring_pa,
3075                &txdma->pr_base_lo);
3076
3077         /* Initialise the transmit DMA engine */
3078         writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
3079
3080         /* Load the completion writeback physical address */
3081         writel((u32)((u64)adapter->tx_ring.tx_status_pa >> 32),
3082                                                 &txdma->dma_wb_base_hi);
3083         writel((u32)adapter->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);
3084
3085         *adapter->tx_ring.tx_status = 0;
3086
3087         writel(0, &txdma->service_request);
3088         adapter->tx_ring.send_idx = 0;
3089 }
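
/* The open-coded shifts above split a dma_addr_t across a _hi/_lo
 * register pair. upper_32_bits()/lower_32_bits() from <linux/kernel.h>
 * would express the same operation more idiomatically, e.g.:
 *
 *   writel(upper_32_bits(adapter->tx_ring.tx_desc_ring_pa),
 *          &txdma->pr_base_hi);
 *   writel(lower_32_bits(adapter->tx_ring.tx_desc_ring_pa),
 *          &txdma->pr_base_lo);
 */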
3090
3091 /**
3092  * et131x_tx_dma_disable - Stop Tx DMA on the ET1310
3093  * @adapter: pointer to our adapter structure
3094  */
3095 void et131x_tx_dma_disable(struct et131x_adapter *adapter)
3096 {
3097         /* Set up the transmit dma configuration register */
3098         writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT,
3099                                         &adapter->regs->txdma.csr);
3100 }
3101
3102 /**
3103  * et131x_tx_dma_enable - Re-start Tx DMA on the ET1310.
3104  * @adapter: pointer to our adapter structure
3105  *
3106  * Mainly used after a return to the D0 (full-power) state from a lower state.
3107  */
3108 void et131x_tx_dma_enable(struct et131x_adapter *adapter)
3109 {
3110         /* Setup the transmit dma configuration register for normal
3111          * operation
3112          */
3113         writel(ET_TXDMA_SNGL_EPKT | (PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
3114                                         &adapter->regs->txdma.csr);
3115 }
3116
3117 /**
3118  * et131x_init_send - Initialize send data structures
3119  * @adapter: pointer to our private adapter structure
3120  */
3121 void et131x_init_send(struct et131x_adapter *adapter)
3122 {
3123         struct tcb *tcb;
3124         u32 ct;
3125         struct tx_ring *tx_ring;
3126
3127         /* Setup some convenience pointers */
3128         tx_ring = &adapter->tx_ring;
3129         tcb = adapter->tx_ring.tcb_ring;
3130
3131         tx_ring->tcb_qhead = tcb;
3132
3133         memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
3134
3135         /* Go through and set up each TCB */
3136         for (ct = 0; ct < NUM_TCB; ct++, tcb++)
3137                 /* Set the link pointer in HW TCB to the next TCB in the
3138                  * chain
3139                  */
3140                 tcb->next = tcb + 1;
3141
3142         /* Set the tail pointer */
3143         tcb--;
3144         tx_ring->tcb_qtail = tcb;
3145         tcb->next = NULL;
3146         /* Curr send queue should now be empty */
3147         tx_ring->send_head = NULL;
3148         tx_ring->send_tail = NULL;
3149 }
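
/* After et131x_init_send() the free TCBs form a NULL-terminated singly
 * linked list threaded through tcb->next:
 *
 *   tcb_qhead -> tcb[0] -> tcb[1] -> ... -> tcb[NUM_TCB - 1] -> NULL
 *                                           ^-- tcb_qtail
 *
 * send_packet() pops TCBs from the head and free_send_packet() pushes
 * recycled ones back on the tail.
 */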
3150
3151 /**
3152  * nic_send_packet - NIC specific send handler for version B silicon.
3153  * @adapter: pointer to our adapter
3154  * @tcb: pointer to struct tcb
3155  *
3156  * Returns 0 or errno.
3157  */
3158 static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
3159 {
3160         u32 i;
3161         struct tx_desc desc[24];        /* 24 x 16 byte */
3162         u32 frag = 0;
3163         u32 thiscopy, remainder;
3164         struct sk_buff *skb = tcb->skb;
3165         u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
3166         struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
3167         unsigned long flags;
3168         struct phy_device *phydev = adapter->phydev;
3169
3170         /* Part of the optimization of this send routine restricts us to
3171          * sending 24 fragments at a pass.  In practice we should never see
3172          * more than 5 fragments.
3173          *
3174          * NOTE: An older version of this function could handle any number
3175          * of fragments, although less efficiently; it could be restored if
3176          * that is ever needed.
3177          */
3178         if (nr_frags > 23)
3179                 return -EIO;
3180
3181         memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
3182
3183         for (i = 0; i < nr_frags; i++) {
3184                 /* If there is something in this element, lets get a
3185                  * descriptor from the ring and get the necessary data
3186                  */
3187                 if (i == 0) {
3188                         /* If the fragments are smaller than a standard MTU,
3189                          * then map them to a single descriptor in the Tx
3190                          * Desc ring. However, if they're larger, as is
3191                          * possible with support for jumbo packets, then
3192                          * split them each across 2 descriptors.
3193                          *
3194                          * This will work until we determine why the hardware
3195                          * doesn't seem to like large fragments.
3196                          */
3197                         if ((skb->len - skb->data_len) <= 1514) {
3198                                 desc[frag].addr_hi = 0;
3199                                 /* The low 16 bits are length; the high bits
3200                                    are vlan and currently unused, so zero */
3201                                 desc[frag].len_vlan =
3202                                         skb->len - skb->data_len;
3203
3204                                 /* NOTE: Here, the dma_addr_t returned from
3205                                  * dma_map_single() is implicitly cast as a
3206                                  * u32. Although dma_addr_t can be
3207                                  * 64-bit, the address returned by
3208                                  * dma_map_single() is always 32-bit
3209                                  * addressable (as defined by the pci/dma
3210                                  * subsystem)
3211                                  */
3212                                 desc[frag++].addr_lo =
3213                                     dma_map_single(&adapter->pdev->dev,
3214                                                    skb->data,
3215                                                    skb->len -
3216                                                    skb->data_len,
3217                                                    DMA_TO_DEVICE);
3218                         } else {
3219                                 desc[frag].addr_hi = 0;
3220                                 desc[frag].len_vlan =
3221                                     (skb->len - skb->data_len) / 2;
3222
3223                                 /* See the dma_addr_t NOTE above */
3231                                 desc[frag++].addr_lo =
3232                                     dma_map_single(&adapter->pdev->dev,
3233                                                    skb->data,
3234                                                    ((skb->len -
3235                                                      skb->data_len) / 2),
3236                                                    DMA_TO_DEVICE);
3237                                 desc[frag].addr_hi = 0;
3238
3239                                 desc[frag].len_vlan =
3240                                     (skb->len - skb->data_len) / 2;
3241
3242                                 /* See the dma_addr_t NOTE above */
3250                                 desc[frag++].addr_lo =
3251                                     dma_map_single(&adapter->pdev->dev,
3252                                                    skb->data +
3253                                                    ((skb->len -
3254                                                      skb->data_len) / 2),
3255                                                    ((skb->len -
3256                                                      skb->data_len) / 2),
3257                                                    DMA_TO_DEVICE);
3258                         }
3259                 } else {
3260                         desc[frag].addr_hi = 0;
3261                         desc[frag].len_vlan =
3262                                         frags[i - 1].size;
3263
3264                         /* NOTE: Here, the dma_addr_t returned from
3265                          * dma_map_page() is implicitly cast as a u32.
3266                          * Although dma_addr_t can be 64-bit, the address
3267                          * returned by dma_map_page() is always 32-bit
3268                          * addressable (as defined by the pci/dma subsystem)
3269                          */
3270                         desc[frag++].addr_lo =
3271                             dma_map_page(&adapter->pdev->dev,
3272                                          frags[i - 1].page,
3273                                          frags[i - 1].page_offset,
3274                                          frags[i - 1].size,
3275                                          DMA_TO_DEVICE);
3276                 }
3277         }
3278
3279         if (frag == 0)
3280                 return -EIO;
3281
3282         if (phydev && phydev->speed == SPEED_1000) {
3283                 if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
3284                         /* Last element & Interrupt flag */
3285                         desc[frag - 1].flags = 0x5;
3286                         adapter->tx_ring.since_irq = 0;
3287                 } else { /* Last element */
3288                         desc[frag - 1].flags = 0x1;
3289                 }
3290         } else
3291                 desc[frag - 1].flags = 0x5;
3292
3293         desc[0].flags |= 2;     /* First element flag */
3294
3295         tcb->index_start = adapter->tx_ring.send_idx;
3296         tcb->stale = 0;
3297
3298         spin_lock_irqsave(&adapter->send_hw_lock, flags);
3299
3300         thiscopy = NUM_DESC_PER_RING_TX -
3301                                 INDEX10(adapter->tx_ring.send_idx);
3302
3303         if (thiscopy >= frag) {
3304                 remainder = 0;
3305                 thiscopy = frag;
3306         } else {
3307                 remainder = frag - thiscopy;
3308         }
3309
3310         memcpy(adapter->tx_ring.tx_desc_ring +
3311                INDEX10(adapter->tx_ring.send_idx), desc,
3312                sizeof(struct tx_desc) * thiscopy);
3313
3314         add_10bit(&adapter->tx_ring.send_idx, thiscopy);
3315
3316         if (INDEX10(adapter->tx_ring.send_idx) == 0 ||
3317                   INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
3318                 adapter->tx_ring.send_idx &= ~ET_DMA10_MASK;
3319                 adapter->tx_ring.send_idx ^= ET_DMA10_WRAP;
3320         }
3321
3322         if (remainder) {
3323                 memcpy(adapter->tx_ring.tx_desc_ring,
3324                        desc + thiscopy,
3325                        sizeof(struct tx_desc) * remainder);
3326
3327                 add_10bit(&adapter->tx_ring.send_idx, remainder);
3328         }
3329
3330         if (INDEX10(adapter->tx_ring.send_idx) == 0) {
3331                 if (adapter->tx_ring.send_idx)
3332                         tcb->index = NUM_DESC_PER_RING_TX - 1;
3333                 else
3334                         tcb->index = ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
3335         } else
3336                 tcb->index = adapter->tx_ring.send_idx - 1;
3337
3338         spin_lock(&adapter->tcb_send_qlock);
3339
3340         if (adapter->tx_ring.send_tail)
3341                 adapter->tx_ring.send_tail->next = tcb;
3342         else
3343                 adapter->tx_ring.send_head = tcb;
3344
3345         adapter->tx_ring.send_tail = tcb;
3346
3347         WARN_ON(tcb->next != NULL);
3348
3349         adapter->tx_ring.used++;
3350
3351         spin_unlock(&adapter->tcb_send_qlock);
3352
3353         /* Write the new write pointer back to the device. */
3354         writel(adapter->tx_ring.send_idx,
3355                &adapter->regs->txdma.service_request);
3356
3357         /* For Gig only, we use Tx Interrupt coalescing.  Enable the software
3358          * timer to wake us up if this packet isn't followed by N more.
3359          */
3360         if (phydev && phydev->speed == SPEED_1000) {
3361                 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
3362                        &adapter->regs->global.watchdog_timer);
3363         }
3364         spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
3365
3366         return 0;
3367 }
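
/* Descriptor flag bits as used by nic_send_packet() above (inferred
 * from this code rather than the datasheet):
 *
 *   0x1  last descriptor of the packet
 *   0x2  first descriptor of the packet
 *   0x4  raise an interrupt on completion
 *
 * so flags == 0x5 marks "last + interrupt", used when gigabit interrupt
 * coalescing expires and in all non-gigabit cases.
 */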
3368
3369 /**
3370  * send_packet - Do the work to send a packet
3371  * @skb: the packet(s) to send
3372  * @adapter: a pointer to the device's private adapter structure
3373  *
3374  * Return 0 in almost all cases; non-zero value in extreme hard failure only.
3375  *
3376  * Assumption: Send spinlock has been acquired
3377  */
3378 static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
3379 {
3380         int status;
3381         struct tcb *tcb = NULL;
3382         u16 *shbufva;
3383         unsigned long flags;
3384
3385         /* All packets must have at least a MAC address and a protocol type */
3386         if (skb->len < ETH_HLEN)
3387                 return -EIO;
3388
3389         /* Get a TCB for this packet */
3390         spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3391
3392         tcb = adapter->tx_ring.tcb_qhead;
3393
3394         if (tcb == NULL) {
3395                 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3396                 return -ENOMEM;
3397         }
3398
3399         adapter->tx_ring.tcb_qhead = tcb->next;
3400
3401         if (adapter->tx_ring.tcb_qhead == NULL)
3402                 adapter->tx_ring.tcb_qtail = NULL;
3403
3404         spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3405
3406         tcb->skb = skb;
3407
3408         if (skb->data != NULL && skb->len - skb->data_len >= 6) {
3409                 shbufva = (u16 *) skb->data;
3410
3411                 if ((shbufva[0] == 0xffff) &&
3412                     (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
3413                         tcb->flags |= fMP_DEST_BROAD;
3414                 } else if ((shbufva[0] & 0x3) == 0x0001) {
3415                         tcb->flags |=  fMP_DEST_MULTI;
3416                 }
3417         }
3418
3419         tcb->next = NULL;
3420
3421         /* Call the NIC specific send handler. */
3422         status = nic_send_packet(adapter, tcb);
3423
3424         if (status != 0) {
3425                 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3426
3427                 if (adapter->tx_ring.tcb_qtail)
3428                         adapter->tx_ring.tcb_qtail->next = tcb;
3429                 else
3430                         /* Apparently ready Q is empty. */
3431                         adapter->tx_ring.tcb_qhead = tcb;
3432
3433                 adapter->tx_ring.tcb_qtail = tcb;
3434                 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3435                 return status;
3436         }
3437         WARN_ON(adapter->tx_ring.used > NUM_TCB);
3438         return 0;
3439 }
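
/* The shbufva checks in send_packet() above classify the destination
 * from the first six bytes of the frame: three 0xffff words are the
 * broadcast address ff:ff:ff:ff:ff:ff, while the multicast test keys on
 * the group bit, the least-significant bit of the first octet, read here
 * from the low byte of a u16 (assuming little-endian byte order).
 */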
3440
3441 /**
3442  * et131x_send_packets - This function is called by the OS to send packets
3443  * @skb: the packet(s) to send
3444  * @netdev: device on which to TX the above packet(s)
3445  *
3446  * Return 0 in almost all cases; non-zero value in extreme hard failure only
3447  */
3448 int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
3449 {
3450         int status = 0;
3451         struct et131x_adapter *adapter = netdev_priv(netdev);
3452
3453         /* Send these packets
3454          *
3455          * NOTE: The Linux Tx entry point is only given one packet at a time
3456          * to Tx, so the PacketCount and its array are not used here
3457          */
3458
3459         /* TCB is not available */
3460         if (adapter->tx_ring.used >= NUM_TCB) {
3461                 /* NOTE: If there's an error on send, no need to queue the
3462                  * packet under Linux; if we just send an error up to the
3463                  * netif layer, it will resend the skb to us.
3464                  */
3465                 status = -ENOMEM;
3466         } else {
3467                 /* We need to see if the link is up; if it's not, make the
3468                  * netif layer think we're good and drop the packet
3469                  */
3470                 if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
3471                                         !netif_carrier_ok(netdev)) {
3472                         dev_kfree_skb_any(skb);
3473                         skb = NULL;
3474
3475                         adapter->net_stats.tx_dropped++;
3476                 } else {
3477                         status = send_packet(skb, adapter);
3478                         if (status != 0 && status != -ENOMEM) {
3479                                 /* On any other error, make netif think we're
3480                                  * OK and drop the packet
3481                                  */
3482                                 dev_kfree_skb_any(skb);
3483                                 skb = NULL;
3484                                 adapter->net_stats.tx_dropped++;
3485                         }
3486                 }
3487         }
3488         return status;
3489 }
3490
3491 /**
3492  * free_send_packet - Recycle a struct tcb
3493  * @adapter: pointer to our adapter
3494  * @tcb: pointer to struct tcb
3495  *
3496  * Complete the packet if necessary
3497  * Assumption - Send spinlock has been acquired
3498  */
3499 static inline void free_send_packet(struct et131x_adapter *adapter,
3500                                                 struct tcb *tcb)
3501 {
3502         unsigned long flags;
3503         struct tx_desc *desc = NULL;
3504         struct net_device_stats *stats = &adapter->net_stats;
3505
3506         if (tcb->flags & fMP_DEST_BROAD)
3507                 atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
3508         else if (tcb->flags & fMP_DEST_MULTI)
3509                 atomic_inc(&adapter->stats.multicast_pkts_xmtd);
3510         else
3511                 atomic_inc(&adapter->stats.unicast_pkts_xmtd);
3512
3513         if (tcb->skb) {
3514                 stats->tx_bytes += tcb->skb->len;
3515
3516                 /* Iterate through the TX descriptors on the ring
3517                  * corresponding to this packet and unmap the fragments
3518                  * they point to
3519                  */
3520                 do {
3521                         desc = (struct tx_desc *)
3522                                     (adapter->tx_ring.tx_desc_ring +
3523                                                 INDEX10(tcb->index_start));
3524
3525                         dma_unmap_single(&adapter->pdev->dev,
3526                                          desc->addr_lo,
3527                                          desc->len_vlan, DMA_TO_DEVICE);
3528
3529                         add_10bit(&tcb->index_start, 1);
3530                         if (INDEX10(tcb->index_start) >=
3531                                                         NUM_DESC_PER_RING_TX) {
3532                                 tcb->index_start &= ~ET_DMA10_MASK;
3533                                 tcb->index_start ^= ET_DMA10_WRAP;
3534                         }
3535                 } while (desc != (adapter->tx_ring.tx_desc_ring +
3536                                 INDEX10(tcb->index)));
3537
3538                 dev_kfree_skb_any(tcb->skb);
3539         }
3540
3541         memset(tcb, 0, sizeof(struct tcb));
3542
3543         /* Add the TCB to the Ready Q */
3544         spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3545
3546         adapter->net_stats.tx_packets++;
3547
3548         if (adapter->tx_ring.tcb_qtail)
3549                 adapter->tx_ring.tcb_qtail->next = tcb;
3550         else
3551                 /* Apparently ready Q is empty. */
3552                 adapter->tx_ring.tcb_qhead = tcb;
3553
3554         adapter->tx_ring.tcb_qtail = tcb;
3555
3556         spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3557         WARN_ON(adapter->tx_ring.used < 0);
3558 }
3559
3560 /**
3561  * et131x_free_busy_send_packets - Free and complete the stopped active sends
3562  * @adapter: pointer to our adapter
3563  *
3564  * Assumption - Send spinlock has been acquired
3565  */
3566 void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
3567 {
3568         struct tcb *tcb;
3569         unsigned long flags;
3570         u32 freed = 0;
3571
3572         /* Any packets being sent? Check the first TCB on the send list */
3573         spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3574
3575         tcb = adapter->tx_ring.send_head;
3576
3577         while (tcb != NULL && freed < NUM_TCB) {
3578                 struct tcb *next = tcb->next;
3579
3580                 adapter->tx_ring.send_head = next;
3581
3582                 if (next == NULL)
3583                         adapter->tx_ring.send_tail = NULL;
3584
3585                 adapter->tx_ring.used--;
3586
3587                 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3588
3589                 freed++;
3590                 free_send_packet(adapter, tcb);
3591
3592                 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3593
3594                 tcb = adapter->tx_ring.send_head;
3595         }
3596
3597         WARN_ON(freed == NUM_TCB);
3598
3599         spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3600
3601         adapter->tx_ring.used = 0;
3602 }
3603
3604 /**
3605  * et131x_handle_send_interrupt - Interrupt handler for send processing
3606  * @adapter: pointer to our adapter
3607  *
3608  * Re-claim the send resources, complete sends and get more to send from
3609  * the send wait queue.
3610  *
3611  * Assumption - Send spinlock has been acquired
3612  */
3613 void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
3614 {
3615         unsigned long flags;
3616         u32 serviced;
3617         struct tcb *tcb;
3618         u32 index;
3619
3620         serviced = readl(&adapter->regs->txdma.new_service_complete);
3621         index = INDEX10(serviced);
3622
3623         /* Has the ring wrapped?  Process any descriptors that do not have
3624          * the same "wrap" indicator as the current completion indicator
3625          */
3626         spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3627
3628         tcb = adapter->tx_ring.send_head;
3629
3630         while (tcb &&
3631                ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
3632                index < INDEX10(tcb->index)) {
3633                 adapter->tx_ring.used--;
3634                 adapter->tx_ring.send_head = tcb->next;
3635                 if (tcb->next == NULL)
3636                         adapter->tx_ring.send_tail = NULL;
3637
3638                 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3639                 free_send_packet(adapter, tcb);
3640                 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3641
3642                 /* Go to the next packet */
3643                 tcb = adapter->tx_ring.send_head;
3644         }
3645         while (tcb &&
3646                !((serviced ^ tcb->index) & ET_DMA10_WRAP)
3647                && index > (tcb->index & ET_DMA10_MASK)) {
3648                 adapter->tx_ring.used--;
3649                 adapter->tx_ring.send_head = tcb->next;
3650                 if (tcb->next == NULL)
3651                         adapter->tx_ring.send_tail = NULL;
3652
3653                 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3654                 free_send_packet(adapter, tcb);
3655                 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3656
3657                 /* Go to the next packet */
3658                 tcb = adapter->tx_ring.send_head;
3659         }
3660
3661         /* Wake up the queue when we hit a low-water mark */
3662         if (adapter->tx_ring.used <= NUM_TCB / 3)
3663                 netif_wake_queue(adapter->netdev);
3664
3665         spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3666 }
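
/* The two completion loops above are the two halves of one circular,
 * 10-bit comparison: the first reaps TCBs whose wrap bit differs from
 * the serviced pointer (the hardware has already wrapped past them),
 * the second reaps same-wrap TCBs whose index is below the serviced
 * index. Together they cover every descriptor the hardware has
 * completed.
 */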
3667
3668 /* ETHTOOL functions */
3669
3670 static int et131x_get_settings(struct net_device *netdev,
3671                                struct ethtool_cmd *cmd)
3672 {
3673         struct et131x_adapter *adapter = netdev_priv(netdev);
3674
3675         return phy_ethtool_gset(adapter->phydev, cmd);
3676 }
3677
3678 static int et131x_set_settings(struct net_device *netdev,
3679                                struct ethtool_cmd *cmd)
3680 {
3681         struct et131x_adapter *adapter = netdev_priv(netdev);
3682
3683         return phy_ethtool_sset(adapter->phydev, cmd);
3684 }
3685
3686 static int et131x_get_regs_len(struct net_device *netdev)
3687 {
3688 #define ET131X_REGS_LEN 256
3689         return ET131X_REGS_LEN * sizeof(u32);
3690 }
3691
3692 static void et131x_get_regs(struct net_device *netdev,
3693                             struct ethtool_regs *regs, void *regs_data)
3694 {
3695         struct et131x_adapter *adapter = netdev_priv(netdev);
3696         struct address_map __iomem *aregs = adapter->regs;
3697         u32 *regs_buff = regs_data;
3698         u32 num = 0;
3699
3700         memset(regs_data, 0, et131x_get_regs_len(netdev));
3701
3702         regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
3703                         adapter->pdev->device;
3704
3705         /* PHY regs */
3706         et131x_mii_read(adapter, MII_BMCR, (u16 *)&regs_buff[num++]);
3707         et131x_mii_read(adapter, MII_BMSR, (u16 *)&regs_buff[num++]);
3708         et131x_mii_read(adapter, MII_PHYSID1, (u16 *)&regs_buff[num++]);
3709         et131x_mii_read(adapter, MII_PHYSID2, (u16 *)&regs_buff[num++]);
3710         et131x_mii_read(adapter, MII_ADVERTISE, (u16 *)&regs_buff[num++]);
3711         et131x_mii_read(adapter, MII_LPA, (u16 *)&regs_buff[num++]);
3712         et131x_mii_read(adapter, MII_EXPANSION, (u16 *)&regs_buff[num++]);
3713         /* Autoneg next page transmit reg */
3714         et131x_mii_read(adapter, 0x07, (u16 *)&regs_buff[num++]);
3715         /* Link partner next page reg */
3716         et131x_mii_read(adapter, 0x08, (u16 *)&regs_buff[num++]);
3717         et131x_mii_read(adapter, MII_CTRL1000, (u16 *)&regs_buff[num++]);
3718         et131x_mii_read(adapter, MII_STAT1000, (u16 *)&regs_buff[num++]);
3719         et131x_mii_read(adapter, MII_ESTATUS, (u16 *)&regs_buff[num++]);
3720         et131x_mii_read(adapter, PHY_INDEX_REG, (u16 *)&regs_buff[num++]);
3721         et131x_mii_read(adapter, PHY_DATA_REG, (u16 *)&regs_buff[num++]);
3722         et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3723                         (u16 *)&regs_buff[num++]);
3724         et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL,
3725                         (u16 *)&regs_buff[num++]);
3726         et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL+1,
3727                         (u16 *)&regs_buff[num++]);
3728         et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL,
3729                         (u16 *)&regs_buff[num++]);
3730         et131x_mii_read(adapter, PHY_CONFIG, (u16 *)&regs_buff[num++]);
3731         et131x_mii_read(adapter, PHY_PHY_CONTROL, (u16 *)&regs_buff[num++]);
3732         et131x_mii_read(adapter, PHY_INTERRUPT_MASK, (u16 *)&regs_buff[num++]);
3733         et131x_mii_read(adapter, PHY_INTERRUPT_STATUS,
3734                         (u16 *)&regs_buff[num++]);
3735         et131x_mii_read(adapter, PHY_PHY_STATUS, (u16 *)&regs_buff[num++]);
3736         et131x_mii_read(adapter, PHY_LED_1, (u16 *)&regs_buff[num++]);
3737         et131x_mii_read(adapter, PHY_LED_2, (u16 *)&regs_buff[num++]);
3738
3739         /* Global regs */
3740         regs_buff[num++] = readl(&aregs->global.txq_start_addr);
3741         regs_buff[num++] = readl(&aregs->global.txq_end_addr);
3742         regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
3743         regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
3744         regs_buff[num++] = readl(&aregs->global.pm_csr);
3745         regs_buff[num++] = adapter->stats.interrupt_status;
3746         regs_buff[num++] = readl(&aregs->global.int_mask);
3747         regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
3748         regs_buff[num++] = readl(&aregs->global.int_status_alias);
3749         regs_buff[num++] = readl(&aregs->global.sw_reset);
3750         regs_buff[num++] = readl(&aregs->global.slv_timer);
3751         regs_buff[num++] = readl(&aregs->global.msi_config);
3752         regs_buff[num++] = readl(&aregs->global.loopback);
3753         regs_buff[num++] = readl(&aregs->global.watchdog_timer);
3754
3755         /* TXDMA regs */
3756         regs_buff[num++] = readl(&aregs->txdma.csr);
3757         regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
3758         regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
3759         regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
3760         regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
3761         regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
3762         regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
3763         regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
3764         regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
3765         regs_buff[num++] = readl(&aregs->txdma.service_request);
3766         regs_buff[num++] = readl(&aregs->txdma.service_complete);
3767         regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
3768         regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
3769         regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
3770         regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
3771         regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
3772         regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
3773         regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
3774         regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
3775         regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
3776         regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
3777         regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
3778         regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
3779         regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
3780         regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
3781         regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);
3782
3783         /* RXDMA regs */
3784         regs_buff[num++] = readl(&aregs->rxdma.csr);
3785         regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
3786         regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
3787         regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
3788         regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
3789         regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
3790         regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
3791         regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
3792         regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
3793         regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
3794         regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
3795         regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
3796         regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
3797         regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
3798         regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
3799         regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
3800         regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
3801         regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
3802         regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
3803         regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
3804         regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
3805         regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
3806         regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
3807         regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
3808         regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
3809         regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
3810         regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
3811         regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
3812         regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
3813 }
3814
3815 #define ET131X_DRVINFO_LEN 32 /* value from ethtool.h */
3816 static void et131x_get_drvinfo(struct net_device *netdev,
3817                                struct ethtool_drvinfo *info)
3818 {
3819         struct et131x_adapter *adapter = netdev_priv(netdev);
3820
3821         strncpy(info->driver, DRIVER_NAME, ET131X_DRVINFO_LEN);
3822         strncpy(info->version, DRIVER_VERSION, ET131X_DRVINFO_LEN);
3823         strncpy(info->bus_info, pci_name(adapter->pdev), ET131X_DRVINFO_LEN);
3824 }
3825
3826 static const struct ethtool_ops et131x_ethtool_ops = {
3827         .get_settings   = et131x_get_settings,
3828         .set_settings   = et131x_set_settings,
3829         .get_drvinfo    = et131x_get_drvinfo,
3830         .get_regs_len   = et131x_get_regs_len,
3831         .get_regs       = et131x_get_regs,
3832         .get_link = ethtool_op_get_link,
3833 };
3834
3835 void et131x_set_ethtool_ops(struct net_device *netdev)
3836 {
3837         SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops);
3838 }
3839
3840 /* PCI functions */
3841
3842 /**
3843  * et131x_hwaddr_init - set up the MAC Address on the ET1310
3844  * @adapter: pointer to our private adapter structure
3845  */
3846 void et131x_hwaddr_init(struct et131x_adapter *adapter)
3847 {
3848         /* If we have our default MAC address from init and no MAC address
3849          * from the EEPROM, then we need to generate the last octet and set
3850          * it on the device
3851          */
3852         if (adapter->rom_addr[0] == 0x00 &&
3853             adapter->rom_addr[1] == 0x00 &&
3854             adapter->rom_addr[2] == 0x00 &&
3855             adapter->rom_addr[3] == 0x00 &&
3856             adapter->rom_addr[4] == 0x00 &&
3857             adapter->rom_addr[5] == 0x00) {
3858                 /*
3859                  * We need to randomly generate the last octet so we
3860                  * decrease our chances of setting the mac address to
3861                  * the same as another one of our cards in the system
3862                  */
3863                 get_random_bytes(&adapter->addr[5], 1);
3864                 /*
3865                  * We have the default value in the register we are
3866                  * working with so we need to copy the current
3867                  * address into the permanent address
3868                  */
3869                 memcpy(adapter->rom_addr,
3870                         adapter->addr, ETH_ALEN);
3871         } else {
3872                 /* We do not have an override address, so set the
3873                  * current address to the permanent address and add
3874                  * it to the device
3875                  */
3876                 memcpy(adapter->addr,
3877                        adapter->rom_addr, ETH_ALEN);
3878         }
3879 }
3880
3881 /**
3882  * et131x_pci_init - initial PCI setup
3883  * @adapter: pointer to our private adapter structure
3884  * @pdev: our PCI device
3885  *
3886  * Perform the initial setup of PCI registers and if possible initialise
3887  * the MAC address. At this point the I/O registers have yet to be mapped
3888  */
3889 static int et131x_pci_init(struct et131x_adapter *adapter,
3890                                                 struct pci_dev *pdev)
3891 {
3892         int i;
3893         u8 max_payload;
3894         u8 read_size_reg;
3895
3896         if (et131x_init_eeprom(adapter) < 0)
3897                 return -EIO;
3898
3899         /* Let's set up the PORT LOGIC Register.  First we need to know what
3900          * the max_payload_size is
3901          */
3902         if (pci_read_config_byte(pdev, ET1310_PCI_MAX_PYLD, &max_payload)) {
3903                 dev_err(&pdev->dev,
3904                     "Could not read PCI config space for Max Payload Size\n");
3905                 return -EIO;
3906         }
3907
3908         /* Program the Ack/Nak latency and replay timers */
3909         max_payload &= 0x07;    /* Only the lower 3 bits are valid */
3910
3911         if (max_payload < 2) {
3912                 static const u16 acknak[2] = { 0x76, 0xD0 };
3913                 static const u16 replay[2] = { 0x1E0, 0x2ED };
3914
3915                 if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
3916                                                acknak[max_payload])) {
3917                         dev_err(&pdev->dev,
3918                           "Could not write PCI config space for ACK/NAK\n");
3919                         return -EIO;
3920                 }
3921                 if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
3922                                                replay[max_payload])) {
3923                         dev_err(&pdev->dev,
3924                           "Could not write PCI config space for Replay Timer\n");
3925                         return -EIO;
3926                 }
3927         }
3928
3929         /* l0s and l1 latency timers.  We are using default values.
3930          * Representing 001 for L0s and 010 for L1
3931          */
3932         if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
3933                 dev_err(&pdev->dev,
3934                   "Could not write PCI config space for Latency Timers\n");
3935                 return -EIO;
3936         }
3937
3938         /* Change the max read size to 2k */
3939         if (pci_read_config_byte(pdev, 0x51, &read_size_reg)) {
3940                 dev_err(&pdev->dev,
3941                         "Could not read PCI config space for Max read size\n");
3942                 return -EIO;
3943         }
3944
3945         read_size_reg &= 0x8f;
3946         read_size_reg |= 0x40;
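        /* With bits 6:4 cleared above, 0x40 selects the 100b encoding in
         * the max read size field, i.e. 2048 bytes (assuming the standard
         * PCIe encoding, 000b = 128 bytes doubling up to 101b = 4096).
         */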
3947
3948         if (pci_write_config_byte(pdev, 0x51, read_size_reg)) {
3949                 dev_err(&pdev->dev,
3950                       "Could not write PCI config space for Max read size\n");
3951                 return -EIO;
3952         }
3953
3954         /* Get MAC address from config space if an eeprom exists, otherwise
3955          * the MAC address there will not be valid
3956          */
3957         if (!adapter->has_eeprom) {
3958                 et131x_hwaddr_init(adapter);
3959                 return 0;
3960         }
3961
3962         for (i = 0; i < ETH_ALEN; i++) {
3963                 if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
3964                                         adapter->rom_addr + i)) {
3965                         dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
3966                         return -EIO;
3967                 }
3968         }
3969         memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
3970         return 0;
3971 }
3972
3973 /**
3974  * et131x_error_timer_handler
3975  * @data: timer-specific variable; here a pointer to our adapter structure
3976  *
3977  * The routine called when the error timer expires, to track the number of
3978  * recurring errors.
3979  */
3980 void et131x_error_timer_handler(unsigned long data)
3981 {
3982         struct et131x_adapter *adapter = (struct et131x_adapter *) data;
3983         struct phy_device *phydev = adapter->phydev;
3984
3985         if (et1310_in_phy_coma(adapter)) {
3986                 /* Bring the device immediately out of coma, to
3987                  * prevent it from sleeping indefinitely, this
3988                  * mechanism could be improved! */
3989                 et1310_disable_phy_coma(adapter);
3990                 adapter->boot_coma = 20;
3991         } else {
3992                 et1310_update_macstat_host_counters(adapter);
3993         }
3994
3995         if (!phydev->link && adapter->boot_coma < 11)
3996                 adapter->boot_coma++;
3997
3998         if (adapter->boot_coma == 10) {
3999                 if (!phydev->link) {
4000                         if (!et1310_in_phy_coma(adapter)) {
4001                                 /* NOTE - This was originally a 'sync with
4002                                  *  interrupt'. How to do that under Linux?
4003                                  */
4004                                 et131x_enable_interrupts(adapter);
4005                                 et1310_enable_phy_coma(adapter);
4006                         }
4007                 }
4008         }
4009
4010         /* This is a periodic timer, so reschedule */
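             /* (TX_ERROR_PERIOD is in milliseconds; '* HZ / 1000' converts
              * that period into jiffies)
              */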
4011         mod_timer(&adapter->error_timer, jiffies +
4012                                           TX_ERROR_PERIOD * HZ / 1000);
4013 }
4014
4015 /**
4016  * et131x_configure_global_regs - configure JAGCore global regs
4017  * @adapter: pointer to our adapter structure
4018  *
4019  * Used to configure the global registers on the JAGCore
4020  */
4021 void et131x_configure_global_regs(struct et131x_adapter *adapter)
4022 {
4023         struct global_regs __iomem *regs = &adapter->regs->global;
4024
4025         writel(0, &regs->rxq_start_addr);
4026         writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);
4027
4028         if (adapter->registry_jumbo_packet < 2048) {
4029                 /* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
4030                  * block of RAM that the driver can split between Tx
4031                  * and Rx as it desires.  Our default is to split it
4032                  * 50/50:
4033                  */
4034                 writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
4035                 writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
4036         } else if (adapter->registry_jumbo_packet < 8192) {
4037                 /* For jumbo packets > 2k but < 8k, split 50-50. */
4038                 writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
4039                 writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
4040         } else {
4041                 /* 9216 is the only packet size greater than 8k that
4042                  * is available. The Tx buffer has to be big enough
4043                  * for one whole packet on the Tx side. We'll make
4044                  * the Tx 9408, and give the rest to Rx
4045                  */
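                     /* Taking the 1k (0x400) sixteen-byte-word block from the
                      * comment above as given: 0x400 - 0x1b4 = 0x24c words =
                      * 9408 bytes for Tx, leaving 0x1b4 words (6976 bytes)
                      * for Rx.
                      */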
4046                 writel(0x01b3, &regs->rxq_end_addr);
4047                 writel(0x01b4, &regs->txq_start_addr);
4048         }
4049
4050         /* Initialize the loopback register. Disable all loopbacks. */
4051         writel(0, &regs->loopback);
4052
4053         /* MSI Register */
4054         writel(0, &regs->msi_config);
4055
4056         /* By default, disable the watchdog timer.  It will be enabled when
4057          * a packet is queued.
4058          */
4059         writel(0, &regs->watchdog_timer);
4060 }
4061
4062 /**
4063  * et131x_adapter_setup - Set the adapter up as per the ET1310 documentation
4064  * @adapter: pointer to our private adapter structure
4065  *
4066  * Configures every hardware block and powers up the PHY; returns nothing.
4067  */
4068 void et131x_adapter_setup(struct et131x_adapter *adapter)
4069 {
4070         /* Configure the JAGCore */
4071         et131x_configure_global_regs(adapter);
4072
4073         et1310_config_mac_regs1(adapter);
4074
4075         /* Configure the MMC registers */
4076         /* All we need to do is initialize the Memory Control Register */
4077         writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);
4078
4079         et1310_config_rxmac_regs(adapter);
4080         et1310_config_txmac_regs(adapter);
4081
4082         et131x_config_rx_dma_regs(adapter);
4083         et131x_config_tx_dma_regs(adapter);
4084
4085         et1310_config_macstat_regs(adapter);
4086
4087         et1310_phy_power_down(adapter, 0);
4088         et131x_xcvr_init(adapter);
4089 }
4090
4091 /**
4092  * et131x_soft_reset - Issue a complete soft reset to the ET1310 hardware
4093  * @adapter: pointer to our private adapter structure
4094  */
4095 void et131x_soft_reset(struct et131x_adapter *adapter)
4096 {
4097         /* Disable MAC Core */
4098         writel(0xc00f0000, &adapter->regs->mac.cfg1);
4099
4100         /* Set everything to a reset value */
4101         writel(0x7F, &adapter->regs->global.sw_reset);
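             /* 0x7F sets the low seven reset bits, presumably one per JAGCore
              * block (tx/rx dma, tx/rx mac, mac, mac_stat, mmc); bit meanings
              * assumed from the register name - check the ET1310 datasheet
              * before relying on them.
              */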
4102         writel(0x000f0000, &adapter->regs->mac.cfg1);
4103         writel(0x00000000, &adapter->regs->mac.cfg1);
4104 }
4105
4106 /**
4107  * et131x_align_allocated_memory - Align allocated memory on a given boundary
4108  * @adapter: pointer to our adapter structure (currently unused here)
4109  * @phys_addr: in/out pointer to the physical address to align
4110  * @offset: out pointer receiving the adjustment, for fixing up virt addrs
4111  * @mask: alignment mask, e.g. 0x0fff to align on a 4k boundary
4112  */
4113 void et131x_align_allocated_memory(struct et131x_adapter *adapter,
4114                                    uint64_t *phys_addr,
4115                                    uint64_t *offset, uint64_t mask)
4116 {
4117         uint64_t new_addr;
4118
4119         *offset = 0;
4120
4121         new_addr = *phys_addr & ~mask;
4122
4123         if (new_addr != *phys_addr) {
4124                 /* Move to next aligned block */
4125                 new_addr += mask + 1;
4126                 /* Return offset for adjusting virt addr */
4127                 *offset = new_addr - *phys_addr;
4128                 /* Return new physical address */
4129                 *phys_addr = new_addr;
4130         }
4131 }
4132
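     /* Usage sketch for the helper above (illustrative only - 'size' is a
      * placeholder, not driver code). Over-allocate by one alignment unit,
      * then shift the physical and virtual addresses together:
      *
      *      u64 phys, offset;
      *      dma_addr_t pa;
      *      void *va = dma_alloc_coherent(&adapter->pdev->dev,
      *                                    size + 0x1000, &pa, GFP_KERNEL);
      *      phys = pa;
      *      et131x_align_allocated_memory(adapter, &phys, &offset, 0x0fff);
      *      va += offset;   (the virt addr moves by the same amount)
      */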
4133 /**
4134  * et131x_adapter_memory_alloc - allocate the Tx/Rx DMA memory blocks
4135  * @adapter: pointer to our private adapter structure
4136  *
4137  * Returns 0 on success, errno on failure (as defined in errno.h).
4138  *
4139  * Allocate all the memory blocks for send, receive and others.
4140  */
4141 int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
4142 {
4143         int status;
4144
4145         /* Allocate memory for the Tx Ring */
4146         status = et131x_tx_dma_memory_alloc(adapter);
4147         if (status != 0) {
4148                 dev_err(&adapter->pdev->dev,
4149                           "et131x_tx_dma_memory_alloc FAILED\n");
4150                 return status;
4151         }
4152         /* Receive buffer memory allocation */
4153         status = et131x_rx_dma_memory_alloc(adapter);
4154         if (status != 0) {
4155                 dev_err(&adapter->pdev->dev,
4156                           "et131x_rx_dma_memory_alloc FAILED\n");
4157                 et131x_tx_dma_memory_free(adapter);
4158                 return status;
4159         }
4160
4161         /* Init receive data structures */
4162         status = et131x_init_recv(adapter);
4163         if (status != 0) {
4164                 dev_err(&adapter->pdev->dev,
4165                         "et131x_init_recv FAILED\n");
4166                 et131x_tx_dma_memory_free(adapter);
4167                 et131x_rx_dma_memory_free(adapter);
4168         }
4169         return status;
4170 }
4171
4172 /**
4173  * et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
4174  * @adapter: pointer to our private adapter structure
4175  */
4176 void et131x_adapter_memory_free(struct et131x_adapter *adapter)
4177 {
4178         /* Free DMA memory */
4179         et131x_tx_dma_memory_free(adapter);
4180         et131x_rx_dma_memory_free(adapter);
4181 }
4182
4183 static void et131x_adjust_link(struct net_device *netdev)
4184 {
4185         struct et131x_adapter *adapter = netdev_priv(netdev);
4186         struct  phy_device *phydev = adapter->phydev;
4187
4188         if (netif_carrier_ok(netdev)) {
4189                 adapter->boot_coma = 20;
4190
4191                 if (phydev && phydev->speed == SPEED_10) {
4192                         /*
4193                          * NOTE - Is there a way to query this without
4194                          * TruePHY?
4195                          * && TRU_QueryCoreType(adapter->hTruePhy, 0)==
4196                          * EMI_TRUEPHY_A13O) {
4197                          */
4198                         u16 register18;
4199
4200                         et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
4201                                          &register18);
4202                         et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4203                                          register18 | 0x4);
4204                         et131x_mii_write(adapter, PHY_INDEX_REG,
4205                                          register18 | 0x8402);
4206                         et131x_mii_write(adapter, PHY_DATA_REG,
4207                                          register18 | 511);
4208                         et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4209                                          register18);
4210                 }
4211
4212                 et1310_config_flow_control(adapter);
4213
4214                 if (phydev && phydev->speed == SPEED_1000 &&
4215                                 adapter->registry_jumbo_packet > 2048) {
4216                         u16 reg;
4217
4218                         et131x_mii_read(adapter, PHY_CONFIG, &reg);
4219                         reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
4220                         reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
4221                         et131x_mii_write(adapter, PHY_CONFIG, reg);
4222                 }
4223
4224                 et131x_set_rx_dma_timer(adapter);
4225                 et1310_config_mac_regs2(adapter);
4226         }
4227
4228         if (phydev && phydev->link != adapter->link) {
4229                 /*
4230                  * Check to see if we are in coma mode and if
4231                  * so, disable it because we will not be able
4232                  * to read PHY values until we are out.
4233                  */
4234                 if (et1310_in_phy_coma(adapter))
4235                         et1310_disable_phy_coma(adapter);
4236
4237                 if (phydev->link) {
4238                         adapter->boot_coma = 20;
4239                 } else {
4240                         dev_warn(&adapter->pdev->dev,
4241                             "Link down - cable problem?\n");
4242                         adapter->boot_coma = 0;
4243
4244                         if (phydev->speed == SPEED_10) {
4245                                 /* NOTE - Is there a way to query this without
4246                                  * TruePHY?
4247                                  * && TRU_QueryCoreType(adapter->hTruePhy, 0) ==
4248                                  * EMI_TRUEPHY_A13O)
4249                                  */
4250                                 u16 register18;
4251
4252                                 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
4253                                                  &register18);
4254                                 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4255                                                  register18 | 0x4);
4256                                 et131x_mii_write(adapter, PHY_INDEX_REG,
4257                                                  register18 | 0x8402);
4258                                 et131x_mii_write(adapter, PHY_DATA_REG,
4259                                                  register18 | 511);
4260                                 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4261                                                  register18);
4262                         }
4263
4264                         /* Free the packets being actively sent & stopped */
4265                         et131x_free_busy_send_packets(adapter);
4266
4267                         /* Re-initialize the send structures */
4268                         et131x_init_send(adapter);
4269
4270                         /* Reset the RFD list and re-start RU */
4271                         et131x_reset_recv(adapter);
4272
4273                         /*
4274                          * Bring the device back to the state it was during
4275                          * init prior to autonegotiation being complete. This
4276                          * way, when we get the auto-neg complete interrupt,
4277                          * we can complete init by calling config_mac_regs2.
4278                          */
4279                         et131x_soft_reset(adapter);
4280
4281                         /* Setup ET1310 as per the documentation */
4282                         et131x_adapter_setup(adapter);
4283
4284                         /* perform reset of tx/rx */
4285                         et131x_disable_txrx(netdev);
4286                         et131x_enable_txrx(netdev);
4287                 }
4288
4289                 adapter->link = phydev->link;
4290
4291                 phy_print_status(phydev);
4292         }
4293 }
4294
4295 static int et131x_mii_probe(struct net_device *netdev)
4296 {
4297         struct et131x_adapter *adapter = netdev_priv(netdev);
4298         struct  phy_device *phydev = NULL;
4299
4300         phydev = phy_find_first(adapter->mii_bus);
4301         if (!phydev) {
4302                 dev_err(&adapter->pdev->dev, "no PHY found\n");
4303                 return -ENODEV;
4304         }
4305
4306         phydev = phy_connect(netdev, dev_name(&phydev->dev),
4307                         &et131x_adjust_link, 0, PHY_INTERFACE_MODE_MII);
4308
4309         if (IS_ERR(phydev)) {
4310                 dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
4311                 return PTR_ERR(phydev);
4312         }
4313
4314         phydev->supported &= (SUPPORTED_10baseT_Half
4315                                 | SUPPORTED_10baseT_Full
4316                                 | SUPPORTED_100baseT_Half
4317                                 | SUPPORTED_100baseT_Full
4318                                 | SUPPORTED_Autoneg
4319                                 | SUPPORTED_MII
4320                                 | SUPPORTED_TP);
4321
4322         if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
4323                 phydev->supported |= SUPPORTED_1000baseT_Full;
4324
4325         phydev->advertising = phydev->supported;
4326         adapter->phydev = phydev;
4327
4328         dev_info(&adapter->pdev->dev, "attached PHY driver [%s] "
4329                  "(mii_bus:phy_addr=%s)\n",
4330                  phydev->drv->name, dev_name(&phydev->dev));
4331
4332         return 0;
4333 }
4334
4335 /**
4336  * et131x_adapter_init - initialize the private adapter structure
4337  * @netdev: pointer to the net_device holding our private adapter struct
4338  * @pdev: pointer to the PCI device
4339  *
4340  * Initialize the data structures for the et131x_adapter object and link
4341  * them together with the platform provided device structures.
4342  */
4343 static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
4344                 struct pci_dev *pdev)
4345 {
4346         static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
4347
4348         struct et131x_adapter *adapter;
4349
4350         /* Allocate private adapter struct and copy in relevant information */
4351         adapter = netdev_priv(netdev);
4352         adapter->pdev = pci_dev_get(pdev);
4353         adapter->netdev = netdev;
4354
4355         /* Do the same for the netdev struct */
4356         netdev->irq = pdev->irq;
4357         netdev->base_addr = pci_resource_start(pdev, 0);
4358
4359         /* Initialize spinlocks here */
4360         spin_lock_init(&adapter->lock);
4361         spin_lock_init(&adapter->tcb_send_qlock);
4362         spin_lock_init(&adapter->tcb_ready_qlock);
4363         spin_lock_init(&adapter->send_hw_lock);
4364         spin_lock_init(&adapter->rcv_lock);
4365         spin_lock_init(&adapter->rcv_pend_lock);
4366         spin_lock_init(&adapter->fbr_lock);
4367         spin_lock_init(&adapter->phy_lock);
4368
4369         adapter->registry_jumbo_packet = 1514;  /* 1514-9216 */
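             /* 1514 = 1500-byte MTU + 14-byte Ethernet header (ETH_HLEN);
              * et131x_change_mtu() preserves the same 'MTU + 14' relation.
              */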
4370
4371         /* Set the MAC address to a default */
4372         memcpy(adapter->addr, default_mac, ETH_ALEN);
4373
4374         return adapter;
4375 }
4376
4377 /**
4378  * et131x_pci_setup - Perform device initialization
4379  * @pdev: a pointer to the device's pci_dev structure
4380  * @ent: this device's entry in the pci_device_id table
4381  *
4382  * Returns 0 on success, errno on failure (as defined in errno.h)
4383  *
4384  * Registered in the pci_driver structure, this function is called when the
4385  * PCI subsystem finds a new PCI device which matches the information
4386  * contained in the pci_device_id table. This routine is the equivalent to
4387  * a device insertion routine.
4388  */
4389 static int __devinit et131x_pci_setup(struct pci_dev *pdev,
4390                                const struct pci_device_id *ent)
4391 {
4392         int result;
4393         int pm_cap;
4394         struct net_device *netdev;
4395         struct et131x_adapter *adapter;
4396         int ii;
4397
4398         result = pci_enable_device(pdev);
4399         if (result) {
4400                 dev_err(&pdev->dev, "pci_enable_device() failed\n");
4401                 goto err_out;
4402         }
4403
4404         /* Perform some basic PCI checks */
4405         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4406                 dev_err(&pdev->dev, "Can't find PCI device's base address\n");
                     result = -ENODEV;
4407                 goto err_disable;
4408         }
4409
4410         result = pci_request_regions(pdev, DRIVER_NAME);
             if (result) {
4411                 dev_err(&pdev->dev, "Can't get PCI resources\n");
4412                 goto err_disable;
4413         }
4414
4415         pci_set_master(pdev);
4416
4417         /* Query PCI for Power Mgmt Capabilities
4418          *
4419          * NOTE: Now reading PowerMgmt in another location; is this still
4420          * needed?
4421          */
4422         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
4423         if (!pm_cap) {
4424                 dev_err(&pdev->dev,
4425                           "Cannot find Power Management capabilities\n");
4426                 result = -EIO;
4427                 goto err_release_res;
4428         }
4429
4430         /* Check the DMA addressing support of this device */
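             /* (dma_set_mask() returns 0 on success, so each
              * '!dma_set_mask(...)' test below means that mask was accepted;
              * try 64-bit DMA first, then fall back to 32-bit)
              */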
4431         if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
4432                 result = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4433                 if (result) {
4434                         dev_err(&pdev->dev,
4435                           "Unable to obtain 64 bit DMA for consistent allocations\n");
4436                         goto err_release_res;
4437                 }
4438         } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
4439                 result = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
4440                 if (result) {
4441                         dev_err(&pdev->dev,
4442                           "Unable to obtain 32 bit DMA for consistent allocations\n");
4443                         goto err_release_res;
4444                 }
4445         } else {
4446                 dev_err(&pdev->dev, "No usable DMA addressing method\n");
4447                 result = -EIO;
4448                 goto err_release_res;
4449         }
4450
4451         /* Allocate netdev and private adapter structs */
4452         netdev = et131x_device_alloc();
4453         if (!netdev) {
4454                 dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
4455                 result = -ENOMEM;
4456                 goto err_release_res;
4457         }
4458
4459         SET_NETDEV_DEV(netdev, &pdev->dev);
4460         et131x_set_ethtool_ops(netdev);
4461
4462         adapter = et131x_adapter_init(netdev, pdev);
4463
4464         /* Initialise the PCI setup for the device */
4465         et131x_pci_init(adapter, pdev);
4466
4467         /* Map the bus-relative registers to system virtual memory */
4468         adapter->regs = pci_ioremap_bar(pdev, 0);
4469         if (!adapter->regs) {
4470                 dev_err(&pdev->dev, "Cannot map device registers\n");
4471                 result = -ENOMEM;
4472                 goto err_free_dev;
4473         }
4474
4475         /* If Phy COMA mode was enabled when we went down, disable it here. */
4476         writel(ET_PMCSR_INIT,  &adapter->regs->global.pm_csr);
4477
4478         /* Issue a global reset to the et1310 */
4479         et131x_soft_reset(adapter);
4480
4481         /* Disable all interrupts (paranoid) */
4482         et131x_disable_interrupts(adapter);
4483
4484         /* Allocate DMA memory */
4485         result = et131x_adapter_memory_alloc(adapter);
4486         if (result) {
4487                 dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
4488                 goto err_iounmap;
4489         }
4490
4491         /* Init send data structures */
4492         et131x_init_send(adapter);
4493
4494         /* Set up the task structure for the ISR's deferred handler */
4495         INIT_WORK(&adapter->task, et131x_isr_handler);
4496
4497         /* Copy address into the net_device struct */
4498         memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
4499
4500         /* Init variable for counting how long we do not have link status */
4501         adapter->boot_coma = 0;
4502         et1310_disable_phy_coma(adapter);
4503
4504         /* Setup the mii_bus struct */
4505         adapter->mii_bus = mdiobus_alloc();
4506         if (!adapter->mii_bus) {
4507                 dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
                     result = -ENOMEM;
4508                 goto err_mem_free;
4509         }
4510
4511         adapter->mii_bus->name = "et131x_eth_mii";
4512         snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
4513                 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
4514         adapter->mii_bus->priv = netdev;
4515         adapter->mii_bus->read = et131x_mdio_read;
4516         adapter->mii_bus->write = et131x_mdio_write;
4517         adapter->mii_bus->reset = et131x_mdio_reset;
4518         adapter->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
4519         if (!adapter->mii_bus->irq) {
4520                 dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
                     result = -ENOMEM;
4521                 goto err_mdio_free;
4522         }
4523
4524         for (ii = 0; ii < PHY_MAX_ADDR; ii++)
4525                 adapter->mii_bus->irq[ii] = PHY_POLL;
4526
4527         result = mdiobus_register(adapter->mii_bus);
             if (result) {
4528                 dev_err(&pdev->dev, "failed to register MII bus\n");
                     /* err_mdio_free below frees the bus; freeing it here as
                      * well caused a use-after-free and double free */
4529                 goto err_mdio_free_irq;
4530         }
4532
4533         result = et131x_mii_probe(netdev);
             if (result) {
4534                 dev_err(&pdev->dev, "failed to probe MII bus\n");
4535                 goto err_mdio_unregister;
4536         }
4537
4538         /* Setup et1310 as per the documentation */
4539         et131x_adapter_setup(adapter);
4540
4541         /* We can enable interrupts now
4542          *
4543          *  NOTE - Because registration of interrupt handler is done in the
4544          *         device's open(), defer enabling device interrupts to that
4545          *         point
4546          */
4547
4548         /* Register the net_device struct with the Linux network layer */
4549         result = register_netdev(netdev);
4550         if (result != 0) {
4551                 dev_err(&pdev->dev, "register_netdev() failed\n");
4552                 goto err_mdio_unregister;
4553         }
4554
4555         /* Register the net_device struct with the PCI subsystem. Save a copy
4556          * of the PCI config space for this device now that the device has
4557          * been initialized, just in case it needs to be quickly restored.
4558          */
4559         pci_set_drvdata(pdev, netdev);
4560         pci_save_state(adapter->pdev);
4561
4562         return result;
4563
4564 err_mdio_unregister:
4565         mdiobus_unregister(adapter->mii_bus);
4566 err_mdio_free_irq:
4567         kfree(adapter->mii_bus->irq);
4568 err_mdio_free:
4569         mdiobus_free(adapter->mii_bus);
4570 err_mem_free:
4571         et131x_adapter_memory_free(adapter);
4572 err_iounmap:
4573         iounmap(adapter->regs);
4574 err_free_dev:
4575         pci_dev_put(pdev);
4576         free_netdev(netdev);
4577 err_release_res:
4578         pci_release_regions(pdev);
4579 err_disable:
4580         pci_disable_device(pdev);
4581 err_out:
4582         return result;
4583 }
4584
4585 /**
4586  * et131x_pci_remove - handle removal of the device from the PCI subsystem
4587  * @pdev: a pointer to the device's pci_dev structure
4588  *
4589  * Registered in the pci_driver structure, this function is called when the
4590  * PCI subsystem detects that a PCI device which matches the information
4591  * contained in the pci_device_id table has been removed.
4592  */
4593 static void __devexit et131x_pci_remove(struct pci_dev *pdev)
4594 {
4595         struct net_device *netdev = pci_get_drvdata(pdev);
4596         struct et131x_adapter *adapter = netdev_priv(netdev);
4597
4598         unregister_netdev(netdev);
4599         mdiobus_unregister(adapter->mii_bus);
4600         kfree(adapter->mii_bus->irq);
4601         mdiobus_free(adapter->mii_bus);
4602
4603         et131x_adapter_memory_free(adapter);
4604         iounmap(adapter->regs);
4605         pci_dev_put(pdev);
4606
4607         free_netdev(netdev);
4608         pci_release_regions(pdev);
4609         pci_disable_device(pdev);
4610 }
4611
4612 #ifdef CONFIG_PM_SLEEP
4613 static int et131x_suspend(struct device *dev)
4614 {
4615         struct pci_dev *pdev = to_pci_dev(dev);
4616         struct net_device *netdev = pci_get_drvdata(pdev);
4617
4618         if (netif_running(netdev)) {
4619                 netif_device_detach(netdev);
4620                 et131x_down(netdev);
4621                 pci_save_state(pdev);
4622         }
4623
4624         return 0;
4625 }
4626
4627 static int et131x_resume(struct device *dev)
4628 {
4629         struct pci_dev *pdev = to_pci_dev(dev);
4630         struct net_device *netdev = pci_get_drvdata(pdev);
4631
4632         if (netif_running(netdev)) {
4633                 pci_restore_state(pdev);
4634                 et131x_up(netdev);
4635                 netif_device_attach(netdev);
4636         }
4637
4638         return 0;
4639 }
4640
4641 static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
4642 #define ET131X_PM_OPS (&et131x_pm_ops)
4643 #else
4644 #define ET131X_PM_OPS NULL
4645 #endif
4646
4647 static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
4648         { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
4649         { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
4650         {0,}
4651 };
4652 MODULE_DEVICE_TABLE(pci, et131x_pci_table);
4653
4654 static struct pci_driver et131x_driver = {
4655         .name           = DRIVER_NAME,
4656         .id_table       = et131x_pci_table,
4657         .probe          = et131x_pci_setup,
4658         .remove         = __devexit_p(et131x_pci_remove),
4659         .driver.pm      = ET131X_PM_OPS,
4660 };
4661
4662 /**
4663  * et131x_init_module - The "main" entry point called on driver initialization
4664  *
4665  * Returns 0 on success, errno on failure (as defined in errno.h)
4666  */
4667 static int __init et131x_init_module(void)
4668 {
4669         return pci_register_driver(&et131x_driver);
4670 }
4671
4672 /**
4673  * et131x_cleanup_module - The entry point called on driver cleanup
4674  */
4675 static void __exit et131x_cleanup_module(void)
4676 {
4677         pci_unregister_driver(&et131x_driver);
4678 }
4679
4680 module_init(et131x_init_module);
4681 module_exit(et131x_cleanup_module);
4682
4683 /* ISR functions */
4684
4685 /**
4686  * et131x_enable_interrupts - enable interrupts
4687  * @adapter: et131x device
4688  *
4689  * Enable the appropriate interrupts on the ET131x according to our
4690  * configuration
4691  */
4692 void et131x_enable_interrupts(struct et131x_adapter *adapter)
4693 {
4694         u32 mask;
4695
4696         /* Enable all global interrupts */
4697         if (adapter->flowcontrol == FLOW_TXONLY ||
4698                             adapter->flowcontrol == FLOW_BOTH)
4699                 mask = INT_MASK_ENABLE;
4700         else
4701                 mask = INT_MASK_ENABLE_NO_FLOW;
4702
4703         writel(mask, &adapter->regs->global.int_mask);
4704 }
4705
4706 /**
4707  * et131x_disable_interrupts - disable interrupts
4708  * @adapter: et131x device
4709  *
4710  * Block all interrupts from the et131x device at the device itself
4711  */
4712 void et131x_disable_interrupts(struct et131x_adapter *adapter)
4713 {
4714         /* Disable all global interrupts */
4715         writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
4716 }
4717
4719 /**
4720  * et131x_isr - The Interrupt Service Routine for the driver.
4721  * @irq: the IRQ on which the interrupt was received.
4722  * @dev_id: device-specific info (here a pointer to a net_device struct)
4723  *
4724  * Returns a value indicating if the interrupt was handled.
4725  */
4726 irqreturn_t et131x_isr(int irq, void *dev_id)
4727 {
4728         bool handled = true;
4729         struct net_device *netdev = (struct net_device *)dev_id;
4730         struct et131x_adapter *adapter = NULL;
4731         u32 status;
4732
4733         if (!netif_device_present(netdev)) {
4734                 handled = false;
4735                 goto out;
4736         }
4737
4738         adapter = netdev_priv(netdev);
4739
4740         /* If the adapter is in low power state, then it should not
4741          * recognize any interrupt
4742          */
4743
4744         /* Disable Device Interrupts */
4745         et131x_disable_interrupts(adapter);
4746
4747         /* Get a copy of the value in the interrupt status register
4748          * so we can process the interrupting section
4749          */
4750         status = readl(&adapter->regs->global.int_status);
4751
4752         if (adapter->flowcontrol == FLOW_TXONLY ||
4753             adapter->flowcontrol == FLOW_BOTH) {
4754                 status &= ~INT_MASK_ENABLE;
4755         } else {
4756                 status &= ~INT_MASK_ENABLE_NO_FLOW;
4757         }
4758
4759         /* Make sure this is our interrupt */
4760         if (!status) {
4761                 handled = false;
4762                 et131x_enable_interrupts(adapter);
4763                 goto out;
4764         }
4765
4766         /* This is our interrupt, so process accordingly */
4767
4768         if (status & ET_INTR_WATCHDOG) {
4769                 struct tcb *tcb = adapter->tx_ring.send_head;
4770
4771                 if (tcb)
4772                         if (++tcb->stale > 1)
4773                                 status |= ET_INTR_TXDMA_ISR;
4774
4775                 if (adapter->rx_ring.unfinished_receives)
4776                         status |= ET_INTR_RXDMA_XFR_DONE;
4777                 else if (tcb == NULL)
4778                         writel(0, &adapter->regs->global.watchdog_timer);
4779
4780                 status &= ~ET_INTR_WATCHDOG;
4781         }
4782
4783         if (status == 0) {
4784                 /* This interrupt has in some way been "handled" by
4785                  * the ISR. Either it was a spurious Rx interrupt, or
4786                  * it was a Tx interrupt that has been filtered by
4787                  * the ISR.
4788                  */
4789                 et131x_enable_interrupts(adapter);
4790                 goto out;
4791         }
4792
4793         /* We need to save the interrupt status value for use in our
4794          * DPC. We will clear the software copy of that in that
4795          * routine.
4796          */
4797         adapter->stats.interrupt_status = status;
4798
4799         /* Schedule the ISR handler as a bottom-half task in the
4800          * kernel's tq_immediate queue, and mark the queue for
4801          * execution
4802          */
4803         schedule_work(&adapter->task);
4804 out:
4805         return IRQ_RETVAL(handled);
4806 }
4807
4808 /**
4809  * et131x_isr_handler - The ISR handler
4810  * @work: pointer to the work_struct embedded in our adapter structure
4811  *
4812  * scheduled to run in a deferred context by the ISR. This is where the ISR's
4813  * work actually gets done.
4814  */
4815 void et131x_isr_handler(struct work_struct *work)
4816 {
4817         struct et131x_adapter *adapter =
4818                 container_of(work, struct et131x_adapter, task);
4819         u32 status = adapter->stats.interrupt_status;
4820         struct address_map __iomem *iomem = adapter->regs;
4821
4822         /*
4823          * These first two are by far the most common.  Once handled, we clear
4824          * their two bits in the status word.  If the word is now zero, we
4825          * exit.
4826          */
4827         /* Handle all the completed Transmit interrupts */
4828         if (status & ET_INTR_TXDMA_ISR)
4829                 et131x_handle_send_interrupt(adapter);
4830
4831         /* Handle all the completed Receives interrupts */
4832         if (status & ET_INTR_RXDMA_XFR_DONE)
4833                 et131x_handle_recv_interrupt(adapter);
4834
4835         /* Mask off the two bits handled above (0xffffffd7 == ~0x28) */
             status &= ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE);
4836
4837         if (status) {
4838                 /* Handle the TXDMA Error interrupt */
4839                 if (status & ET_INTR_TXDMA_ERR) {
4840                         u32 txdma_err;
4841
4842                         /* Following read also clears the register (COR) */
4843                         txdma_err = readl(&iomem->txdma.tx_dma_error);
4844
4845                         dev_warn(&adapter->pdev->dev,
4846                                     "TXDMA_ERR interrupt, error = 0x%08x\n",
4847                                     txdma_err);
4848                 }
4849
4850                 /* Handle Free Buffer Ring 0 and 1 Low interrupt */
4851                 if (status &
4852                     (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
4853                         /*
4854                          * This indicates the number of unused buffers in
4855                          * RXDMA free buffer ring 0 is <= the limit you
4856                          * programmed. Free buffer resources need to be
4857                          * returned.  Free buffers are consumed as packets
4858                          * are passed from the network to the host. The host
4859                          * becomes aware of the packets from the contents of
4860                          * the packet status ring. This ring is queried when
4861                          * the packet done interrupt occurs. Packets are then
4862                          * passed to the OS. When the OS is done with the
4863                          * packets the resources can be returned to the
4864                          * ET1310 for re-use. This interrupt is one method of
4865                          * returning resources.
4866                          */
4867
4868                         /* If the user has flow control on, then we will
4869                          * send a pause packet, otherwise just exit
4870                          */
4871                         if (adapter->flowcontrol == FLOW_TXONLY ||
4872                             adapter->flowcontrol == FLOW_BOTH) {
4873                                 u32 pm_csr;
4874
4875                                 /* Tell the device to send a pause packet via
4876                                  * the back pressure register (bp req  and
4877                                  * bp xon/xoff)
4878                                  */
4879                                 pm_csr = readl(&iomem->global.pm_csr);
4880                                 if (!et1310_in_phy_coma(adapter))
4881                                         writel(3, &iomem->txmac.bp_ctrl);
4882                         }
4883                 }
4884
4885                 /* Handle Packet Status Ring Low Interrupt */
4886                 if (status & ET_INTR_RXDMA_STAT_LOW) {
4887
4888                         /*
4889                          * Same idea as with the two Free Buffer Rings.
4890                          * Packets going from the network to the host each
4891                          * consume a free buffer resource and a packet status
4892                          * resource.  These resources are passed to the OS.
4893                          * When the OS is done with the resources, they need
4894                          * to be returned to the ET1310. This is one method
4895                          * of returning the resources.
4896                          */
4897                 }
4898
4899                 /* Handle RXDMA Error Interrupt */
4900                 if (status & ET_INTR_RXDMA_ERR) {
4901                         /*
4902                          * The rxdma_error interrupt is sent when a time-out
4903                          * on a request issued by the JAGCore has occurred or
4904                          * a completion is returned with an un-successful
4905                          * status.  In both cases the request is considered
4906                          * complete. The JAGCore will automatically re-try the
4907                          * request in question. Normally information on events
4908                          * like these are sent to the host using the "Advanced
4909                          * Error Reporting" capability. This interrupt is
4910                          * another way of getting similar information. The
4911                          * only thing required is to clear the interrupt by
4912                          * reading the ISR in the global resources. The
4913                          * JAGCore will do a re-try on the request.  Normally
4914                          * you should never see this interrupt. If you start
4915                          * to see this interrupt occurring frequently then
4916                          * something bad has occurred. A reset might be the
4917                          * thing to do.
4918                          */
4919                         /* TRAP();*/
4920
4921                         dev_warn(&adapter->pdev->dev,
4922                                     "RxDMA_ERR interrupt, error %x\n",
4923                                     readl(&iomem->txmac.tx_test));
4924                 }
4925
4926                 /* Handle the Wake on LAN Event */
4927                 if (status & ET_INTR_WOL) {
4928                         /*
4929                          * This is a secondary interrupt for wake on LAN.
4930                          * The driver should never see this, if it does,
4931                          * something serious is wrong. We will TRAP the
4932                          * message when we are in DBG mode, otherwise we
4933                          * will ignore it.
4934                          */
4935                         dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
4936                 }
4937
4938                 /* Let's move on to the TxMac */
4939                 if (status & ET_INTR_TXMAC) {
4940                         u32 err = readl(&iomem->txmac.err);
4941
4942                         /*
4943                          * When any of the errors occur and TXMAC generates
4944                          * an interrupt to report these errors, it usually
4945                          * means that TXMAC has detected an error in the data
4946                          * stream retrieved from the on-chip Tx Q. All of
4947                          * these errors are catastrophic and TXMAC won't be
4948                          * able to recover data when these errors occur.  In
4949                          * a nutshell, the whole Tx path will have to be reset
4950                          * and re-configured afterwards.
4951                          */
4952                         dev_warn(&adapter->pdev->dev,
4953                                     "TXMAC interrupt, error 0x%08x\n",
4954                                     err);
4955
4956                         /* If we are debugging, we want to see this error,
4957                          * otherwise we just want the device to be reset and
4958                          * continue
4959                          */
4960                 }
4961
4962                 /* Handle RXMAC Interrupt */
4963                 if (status & ET_INTR_RXMAC) {
4964                         /*
4965                          * These interrupts are catastrophic to the device,
4966                          * what we need to do is disable the interrupts and
4967                          * set the flag to cause us to reset so we can solve
4968                          * this issue.
4969                          */
4970                         /* MP_SET_FLAG( adapter,
4971                                                 fMP_ADAPTER_HARDWARE_ERROR); */
4972
4973                         dev_warn(&adapter->pdev->dev,
4974                           "RXMAC interrupt, error 0x%08x.  Requesting reset\n",
4975                                     readl(&iomem->rxmac.err_reg));
4976
4977                         dev_warn(&adapter->pdev->dev,
4978                                     "Enable 0x%08x, Diag 0x%08x\n",
4979                                     readl(&iomem->rxmac.ctrl),
4980                                     readl(&iomem->rxmac.rxq_diag));
4981
4982                         /*
4983                          * If we are debugging, we want to see this error,
4984                          * otherwise we just want the device to be reset and
4985                          * continue
4986                          */
4987                 }
4988
4989                 /* Handle MAC_STAT Interrupt */
4990                 if (status & ET_INTR_MAC_STAT) {
4991                         /*
4992                          * This means at least one of the un-masked counters
4993                          * in the MAC_STAT block has rolled over.  Use this
4994                          * to maintain the top, software managed bits of the
4995                          * counter(s).
4996                          */
4997                         et1310_handle_macstat_interrupt(adapter);
4998                 }
4999
5000                 /* Handle SLV Timeout Interrupt */
5001                 if (status & ET_INTR_SLV_TIMEOUT) {
5002                         /*
5003                          * This means a timeout has occurred on a read or
5004                          * write request to one of the JAGCore registers. The
5005                          * Global Resources block has terminated the request
5006                          * and on a read request, returned a "fake" value.
5007                          * The most likely reasons are: Bad Address or the
5008                          * addressed module is in a power-down state and
5009                          * can't respond.
5010                          */
5011                 }
5012         }
5013         et131x_enable_interrupts(adapter);
5014 }
5015
5016 /* NETDEV functions */
5017
5018 /**
5019  * et131x_stats - Return the current device statistics.
5020  * @netdev: device whose stats are being queried
5021  *
5022  * Returns a pointer to the updated net_device_stats structure
5023  */
5024 static struct net_device_stats *et131x_stats(struct net_device *netdev)
5025 {
5026         struct et131x_adapter *adapter = netdev_priv(netdev);
5027         struct net_device_stats *stats = &adapter->net_stats;
5028         struct ce_stats *devstat = &adapter->stats;
5029
5030         stats->rx_errors = devstat->rx_length_errs +
5031                            devstat->rx_align_errs +
5032                            devstat->rx_crc_errs +
5033                            devstat->rx_code_violations +
5034                            devstat->rx_other_errs;
5035         stats->tx_errors = devstat->tx_max_pkt_errs;
5036         stats->multicast = devstat->multicast_pkts_rcvd;
5037         stats->collisions = devstat->tx_collisions;
5038
5039         stats->rx_length_errors = devstat->rx_length_errs;
5040         stats->rx_over_errors = devstat->rx_overflows;
5041         stats->rx_crc_errors = devstat->rx_crc_errs;
5042
5043         /* NOTE: These stats don't have corresponding values in CE_STATS,
5044          * so we're going to have to update these directly from within the
5045          * TX/RX code
5046          */
5047         /* stats->rx_bytes            = 20; devstat->; */
5048         /* stats->tx_bytes            = 20;  devstat->; */
5049         /* stats->rx_dropped          = devstat->; */
5050         /* stats->tx_dropped          = devstat->; */
5051
5052         /*  NOTE: Not used, can't find analogous statistics */
5053         /* stats->rx_frame_errors     = devstat->; */
5054         /* stats->rx_fifo_errors      = devstat->; */
5055         /* stats->rx_missed_errors    = devstat->; */
5056
5057         /* stats->tx_aborted_errors   = devstat->; */
5058         /* stats->tx_carrier_errors   = devstat->; */
5059         /* stats->tx_fifo_errors      = devstat->; */
5060         /* stats->tx_heartbeat_errors = devstat->; */
5061         /* stats->tx_window_errors    = devstat->; */
5062         return stats;
5063 }
5064
5065 /**
5066  * et131x_enable_txrx - Enable tx/rx queues
5067  * @netdev: device to be enabled
5068  */
5069 void et131x_enable_txrx(struct net_device *netdev)
5070 {
5071         struct et131x_adapter *adapter = netdev_priv(netdev);
5072
5073         /* Enable the Tx and Rx DMA engines (if not already enabled) */
5074         et131x_rx_dma_enable(adapter);
5075         et131x_tx_dma_enable(adapter);
5076
5077         /* Enable device interrupts */
5078         if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)
5079                 et131x_enable_interrupts(adapter);
5080
5081         /* We're ready to move some data, so start the queue */
5082         netif_start_queue(netdev);
5083 }
5084
5085 /**
5086  * et131x_disable_txrx - Disable tx/rx queues
5087  * @netdev: device to be disabled
5088  */
5089 void et131x_disable_txrx(struct net_device *netdev)
5090 {
5091         struct et131x_adapter *adapter = netdev_priv(netdev);
5092
5093         /* First thing is to stop the queue */
5094         netif_stop_queue(netdev);
5095
5096         /* Stop the Tx and Rx DMA engines */
5097         et131x_rx_dma_disable(adapter);
5098         et131x_tx_dma_disable(adapter);
5099
5100         /* Disable device interrupts */
5101         et131x_disable_interrupts(adapter);
5102 }
5103
5104 /**
5105  * et131x_up - Bring up a device for use.
5106  * @netdev: device to be opened
5107  */
5108 void et131x_up(struct net_device *netdev)
5109 {
5110         struct et131x_adapter *adapter = netdev_priv(netdev);
5111
5112         et131x_enable_txrx(netdev);
5113         phy_start(adapter->phydev);
5114 }
5115
5116 /**
5117  * et131x_open - Open the device for use.
5118  * @netdev: device to be opened
5119  *
5120  * Returns 0 on success, errno on failure (as defined in errno.h)
5121  */
5122 int et131x_open(struct net_device *netdev)
5123 {
5124         int result = 0;
5125         struct et131x_adapter *adapter = netdev_priv(netdev);
5126
5127         /* Start the timer to track NIC errors */
5128         init_timer(&adapter->error_timer);
5129         adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
5130         adapter->error_timer.function = et131x_error_timer_handler;
5131         adapter->error_timer.data = (unsigned long)adapter;
5132         add_timer(&adapter->error_timer);
5133
5134         /* Register our IRQ */
5135         result = request_irq(netdev->irq, et131x_isr, IRQF_SHARED,
5136                                         netdev->name, netdev);
5137         if (result) {
5138                 dev_err(&adapter->pdev->dev, "could not register IRQ %d\n",
5139                         netdev->irq);
                     /* don't leave the error timer running on a failed open */
                     del_timer_sync(&adapter->error_timer);
5140                 return result;
5141         }
5142
5143         adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE;
5144
5145         et131x_up(netdev);
5146
5147         return result;
5148 }
5149
5150 /**
5151  * et131x_down - Bring down the device
5152  * @netdev: device to be brought down
5153  */
5154 void et131x_down(struct net_device *netdev)
5155 {
5156         struct et131x_adapter *adapter = netdev_priv(netdev);
5157
5158         /* Save the timestamp for the TX watchdog, prevent a timeout */
5159         netdev->trans_start = jiffies;
5160
5161         phy_stop(adapter->phydev);
5162         et131x_disable_txrx(netdev);
5163 }
5164
5165 /**
5166  * et131x_close - Close the device
5167  * @netdev: device to be closed
5168  *
5169  * Returns 0 on success, errno on failure (as defined in errno.h)
5170  */
5171 int et131x_close(struct net_device *netdev)
5172 {
5173         struct et131x_adapter *adapter = netdev_priv(netdev);
5174
5175         et131x_down(netdev);
5176
5177         adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE;
5178         free_irq(netdev->irq, netdev);
5179
5180         /* Stop the error timer */
5181         return del_timer_sync(&adapter->error_timer);
5182 }
5183
5184 /**
5185  * et131x_ioctl - The I/O Control handler for the driver
5186  * @netdev: device on which the control request is being made
5187  * @reqbuf: a pointer to the IOCTL request buffer
5188  * @cmd: the IOCTL command code
5189  *
5190  * Returns 0 on success, errno on failure (as defined in errno.h)
5191  */
5192 static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, int cmd)
5193 {
5194         struct et131x_adapter *adapter = netdev_priv(netdev);
5195
5196         if (!adapter->phydev)
5197                 return -EINVAL;
5198
5199         return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
5200 }
5201
5202 /**
5203  * et131x_set_packet_filter - Configures the Rx Packet filtering on the device
5204  * @adapter: pointer to our private adapter structure
5205  *
5206  * FIXME: lot of dups with MAC code
5207  *
5208  * Returns 0 on success, errno on failure
5209  */
5210 static int et131x_set_packet_filter(struct et131x_adapter *adapter)
5211 {
5212         int status = 0;
5213         u32 filter = adapter->packet_filter;
5214         u32 ctrl;
5215         u32 pf_ctrl;
5216
5217         ctrl = readl(&adapter->regs->rxmac.ctrl);
5218         pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);
5219
5220         /* Default to disabled packet filtering.  Enable it in the individual
5221          * case statements that require the device to filter something
5222          */
5223         ctrl |= 0x04;
5224
5225         /* Set us to be in promiscuous mode so we receive everything, this
5226          * is also true when we get a packet filter of 0
5227          */
5228         if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
5229                 pf_ctrl &= ~7;  /* Clear filter bits */
5230         else {
5231                 /*
5232                  * Set us up with Multicast packet filtering.  Three cases are
5233                  * possible - (1) we have a multi-cast list, (2) we receive ALL
5234                  * multicast entries or (3) we receive none.
5235                  */
5236                 if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
5237                         pf_ctrl &= ~2;  /* Multicast filter bit */
5238                 else {
5239                         et1310_setup_device_for_multicast(adapter);
5240                         pf_ctrl |= 2;
5241                         ctrl &= ~0x04;
5242                 }
5243
5244                 /* Set us up with Unicast packet filtering */
5245                 if (filter & ET131X_PACKET_TYPE_DIRECTED) {
5246                         et1310_setup_device_for_unicast(adapter);
5247                         pf_ctrl |= 4;
5248                         ctrl &= ~0x04;
5249                 }
5250
5251                 /* Set us up with Broadcast packet filtering */
5252                 if (filter & ET131X_PACKET_TYPE_BROADCAST) {
5253                         pf_ctrl |= 1;   /* Broadcast filter bit */
5254                         ctrl &= ~0x04;
5255                 } else
5256                         pf_ctrl &= ~1;
5257
5258                 /* Setup the receive mac configuration registers - Packet
5259                  * Filter control + the enable / disable for packet filter
5260                  * in the control reg.
5261                  */
5262                 writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
5263                 writel(ctrl, &adapter->regs->rxmac.ctrl);
5264         }
5265         return status;
5266 }
5267
5268 /**
5269  * et131x_multicast - The handler to configure multicasting on the interface
5270  * @netdev: a pointer to a net_device struct representing the device
5271  */
5272 static void et131x_multicast(struct net_device *netdev)
5273 {
5274         struct et131x_adapter *adapter = netdev_priv(netdev);
5275         u32 packet_filter = 0;
5276         unsigned long flags;
5277         struct netdev_hw_addr *ha;
5278         int i;
5279
5280         spin_lock_irqsave(&adapter->lock, flags);
5281
5282         /* Before we modify the platform-independent filter flags, store them
5283          * locally. This allows us to determine if anything's changed and if
5284          * we even need to bother the hardware
5285          */
5286         packet_filter = adapter->packet_filter;
5287
5288         /* Clear the 'multicast' flag locally; because we only have a single
5289          * flag to check multicast, and multiple multicast addresses can be
5290          * set, this is the easiest way to determine if more than one
5291          * multicast address is being set.
5292          */
5293         packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
5294
5295         /* Check the net_device flags and set the device independent flags
5296          * accordingly
5297          */
5298
5299         if (netdev->flags & IFF_PROMISC)
5300                 adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
5301         else
5302                 adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
5303
5304         if (netdev->flags & IFF_ALLMULTI)
5305                 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
5306
5307         if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST)
5308                 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
5309
5310         if (netdev_mc_count(netdev) < 1) {
5311                 adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
5312                 adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
5313         } else
5314                 adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
5315
5316         /* Set values in the private adapter struct */
5317         i = 0;
5318         netdev_for_each_mc_addr(ha, netdev) {
5319                 if (i == NIC_MAX_MCAST_LIST)
5320                         break;
5321                 memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN);
5322         }
5323         adapter->multicast_addr_count = i;
5324
5325         /* Are the new flags different from the previous ones? If not, then no
5326          * action is required
5327          *
5328          * NOTE - This block will always update the multicast_list with the
5329          *        hardware, even if the addresses aren't the same.
5330          */
5331         if (packet_filter != adapter->packet_filter) {
5332                 /* Call the device's filter function */
5333                 et131x_set_packet_filter(adapter);
5334         }
5335         spin_unlock_irqrestore(&adapter->lock, flags);
5336 }
5337
5338 /**
5339  * et131x_tx - The handler to tx a packet on the device
5340  * @skb: data to be Tx'd
5341  * @netdev: device on which data is to be Tx'd
5342  *
5343  * Returns NETDEV_TX_OK on success; NETDEV_TX_BUSY when Tx resources are low
5344  */
5345 static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
5346 {
5347         int status = 0;
5348         struct et131x_adapter *adapter = netdev_priv(netdev);
5349
5350         /* stop the queue if it's getting full */
5351         if (adapter->tx_ring.used >= NUM_TCB - 1 &&
                 !netif_queue_stopped(netdev))
5352                 netif_stop_queue(netdev);
5353
5354         /* Save the timestamp for the TX timeout watchdog */
5355         netdev->trans_start = jiffies;
5356
5357         /* Call the device-specific data Tx routine */
5358         status = et131x_send_packets(skb, netdev);
5359
5360         /* Check status and manage the netif queue if necessary */
5361         if (status != 0) {
5362                 if (status == -ENOMEM)
5363                         status = NETDEV_TX_BUSY;
5364                 else
5365                         status = NETDEV_TX_OK;
5367         }
5368         return status;
5369 }

/**
 * et131x_tx_timeout - Timeout handler
 * @netdev: a pointer to a net_device struct representing the device
 *
 * The handler called when a Tx request times out. The timeout period is
 * specified by the 'watchdog_timeo' element in the net_device structure (see
 * et131x_device_alloc() to see how this value is set).
 */
static void et131x_tx_timeout(struct net_device *netdev)
{
        struct et131x_adapter *adapter = netdev_priv(netdev);
        struct tcb *tcb;
        unsigned long flags;

        /* If the device is closed, ignore the timeout */
        if (!(adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE))
                return;

        /* Any nonrecoverable hardware error?
         * Checks adapter->flags for any failure in phy reading
         */
        if (adapter->flags & fMP_ADAPTER_NON_RECOVER_ERROR)
                return;

        /* Hardware failure? */
        if (adapter->flags & fMP_ADAPTER_HARDWARE_ERROR) {
                dev_err(&adapter->pdev->dev, "hardware error - reset\n");
                return;
        }

        /* Is send stuck? */
        spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

        tcb = adapter->tx_ring.send_head;

        if (tcb != NULL) {
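                /* count increments once per watchdog timeout while the same
                 * TCB remains at the head of the send queue; exceeding
                 * NIC_SEND_HANG_THRESHOLD is treated as a stuck transmitter.
                 */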
                tcb->count++;

                if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
                        spin_unlock_irqrestore(&adapter->tcb_send_qlock,
                                               flags);

                        dev_warn(&adapter->pdev->dev,
                                "Send stuck - reset.  tcb->index %x, flags 0x%08x\n",
                                tcb->index,
                                tcb->flags);

                        adapter->net_stats.tx_errors++;

                        /* perform reset of tx/rx */
                        et131x_disable_txrx(netdev);
                        et131x_enable_txrx(netdev);
                        return;
                }
        }

        spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}

/**
 * et131x_change_mtu - The handler called to change the MTU for the device
 * @netdev: device whose MTU is to be changed
 * @new_mtu: the desired MTU
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
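 *
 * Reached via dev_set_mtu() from the network core, e.g. a userspace
 * "ip link set eth0 mtu 9000" (the device name is illustrative).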
 */
static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
{
        int result = 0;
        struct et131x_adapter *adapter = netdev_priv(netdev);

        /* Make sure the requested MTU is valid */
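        /* 64 is the minimum Ethernet frame size; 9216 is the largest jumbo
         * frame this driver configures the hardware for (limits taken as
         * given by the original validation).
         */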
        if (new_mtu < 64 || new_mtu > 9216)
                return -EINVAL;

        et131x_disable_txrx(netdev);
        et131x_handle_send_interrupt(adapter);
        et131x_handle_recv_interrupt(adapter);

        /* Set the new MTU */
        netdev->mtu = new_mtu;

        /* Free Rx DMA memory */
        et131x_adapter_memory_free(adapter);

        /* Set the config parameter for Jumbo Packet support */
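        /* The +14 is ETH_HLEN, so the setting appears to cover the Ethernet
         * header as well as the MTU-sized payload.
         */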
        adapter->registry_jumbo_packet = new_mtu + 14;
        et131x_soft_reset(adapter);

        /* Alloc and init Rx DMA memory */
        result = et131x_adapter_memory_alloc(adapter);
        if (result != 0) {
                dev_warn(&adapter->pdev->dev,
                        "Change MTU failed; couldn't re-alloc DMA memory\n");
                return result;
        }

        et131x_init_send(adapter);

        et131x_hwaddr_init(adapter);
        memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);

        /* Init the device with the new settings */
        et131x_adapter_setup(adapter);

        et131x_enable_txrx(netdev);

        return result;
}

/**
 * et131x_set_mac_addr - handler to change the MAC address for the device
 * @netdev: device whose MAC is to be changed
 * @new_mac: the desired MAC address
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 *
 * IMPLEMENTED BY : blux http://berndlux.de 22.01.2007 21:14
 */
static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
{
        int result = 0;
        struct et131x_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *address = new_mac;

        if (adapter == NULL)
                return -ENODEV;

        /* Make sure the requested MAC is valid */
        if (!is_valid_ether_addr(address->sa_data))
                return -EINVAL;

        et131x_disable_txrx(netdev);
        et131x_handle_send_interrupt(adapter);
        et131x_handle_recv_interrupt(adapter);

        /* Set the new MAC address in the net_device */
        memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len);

        printk(KERN_INFO "%s: Setting MAC address to %pM\n",
                        netdev->name, netdev->dev_addr);
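
        /* As written, the new address only reaches the hardware through the
         * full re-init path below: free the DMA state, soft-reset, re-alloc,
         * then re-run adapter setup with the updated dev_addr.
         */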

        /* Free Rx DMA memory */
        et131x_adapter_memory_free(adapter);

        et131x_soft_reset(adapter);

        /* Alloc and init Rx DMA memory */
        result = et131x_adapter_memory_alloc(adapter);
        if (result != 0) {
                dev_err(&adapter->pdev->dev,
                        "Change MAC failed; couldn't re-alloc DMA memory\n");
                return result;
        }

        et131x_init_send(adapter);

        et131x_hwaddr_init(adapter);

        /* Init the device with the new settings */
        et131x_adapter_setup(adapter);

        et131x_enable_txrx(netdev);

        return result;
}

static const struct net_device_ops et131x_netdev_ops = {
        .ndo_open               = et131x_open,
        .ndo_stop               = et131x_close,
        .ndo_start_xmit         = et131x_tx,
        .ndo_set_multicast_list = et131x_multicast,
        .ndo_tx_timeout         = et131x_tx_timeout,
        .ndo_change_mtu         = et131x_change_mtu,
        .ndo_set_mac_address    = et131x_set_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_get_stats          = et131x_stats,
        .ndo_do_ioctl           = et131x_ioctl,
};
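
/* Note: .ndo_set_multicast_list is the hook used by kernels of this era;
 * later kernels folded it into .ndo_set_rx_mode.
 */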

/**
 * et131x_device_alloc
 *
 * Returns pointer to the allocated and initialized net_device struct for
 * this device.
 *
 * Create instances of net_device and et131x_adapter for the new adapter
 * and register the device's entry points in the net_device structure.
 */
struct net_device *et131x_device_alloc(void)
{
        struct net_device *netdev;

        /* Alloc net_device and adapter structs */
        netdev = alloc_etherdev(sizeof(struct et131x_adapter));

        if (!netdev) {
                printk(KERN_ERR "et131x: Alloc of net_device struct failed\n");
                return NULL;
        }

        /*
         * Setup the function registration table (and other data) for a
         * net_device
         */
        netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
        netdev->netdev_ops     = &et131x_netdev_ops;

        /* Poll? */
        /* netdev->poll               = &et131x_poll; */
        /* netdev->poll_controller    = &et131x_poll_controller; */
        return netdev;
}
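
/* Callers are expected to register_netdev() the returned device and
 * free_netdev() it on teardown, per the usual alloc_etherdev() contract.
 */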