 * 10/100/1000 Base-T Ethernet Driver for the ET1310 and ET131x series MACs
 * Copyright © 2005 Agere Systems Inc.
 * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com>
 *------------------------------------------------------------------------------
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software. Using this
 * software indicates your acceptance of these terms and conditions. If you do
 * not agree with these terms and conditions, do not use the software.
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 * . Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following Disclaimer as comments in the code as
 *   well as in the documentation and/or other materials provided with the
 * . Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following Disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *   may be used to endorse or promote products derived from this software
 *   without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/system.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/phy.h>
MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver "
		   "for the ET1310 by Agere Systems");
#define MAX_NUM_REGISTER_POLLS	1000
#define MAX_NUM_WRITE_RETRIES	2

#define COUNTER_WRAP_16_BIT	0x10000
#define COUNTER_WRAP_12_BIT	0x1000

#define INTERNAL_MEM_SIZE	0x400	/* 1024 of internal memory */
#define INTERNAL_MEM_RX_OFFSET	0x1FF	/* 50% Tx, 50% Rx */
/* For interrupts, normal running is:
 *       rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
 *       watchdog_interrupt & txdma_xfer_done
 *
 * In both cases, when flow control is enabled for either Tx or bi-direction,
 * we additionally enable rx_fbr0_low and rx_fbr1_low, so we know when the
 * buffer rings are running low.
 */
#define INT_MASK_DISABLE		0xffffffff

/* NOTE: Masking out MAC_STAT Interrupt for now...
 * #define INT_MASK_ENABLE		0xfff6bf17
 * #define INT_MASK_ENABLE_NO_FLOW	0xfff6bfd7
 */
#define INT_MASK_ENABLE			0xfffebf17
#define INT_MASK_ENABLE_NO_FLOW		0xfffebfd7
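/* Illustrative sketch (not part of the original driver): choosing between
 * the two enable masks above based on whether we can send pause frames,
 * i.e. Tx-only or bi-directional flow control, as the comment describes.
 * The original code performs this selection inline.
 */
static inline u32 et131x_select_int_mask(bool tx_flow_enabled)
{
	return tx_flow_enabled ? INT_MASK_ENABLE : INT_MASK_ENABLE_NO_FLOW;
}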
/* General defines */
/* Packet and header sizes */
#define NIC_MIN_PACKET_SIZE	60

/* Multicast list size */
#define NIC_MAX_MCAST_LIST	128

/* Supported Filters */
#define ET131X_PACKET_TYPE_DIRECTED		0x0001
#define ET131X_PACKET_TYPE_MULTICAST		0x0002
#define ET131X_PACKET_TYPE_BROADCAST		0x0004
#define ET131X_PACKET_TYPE_PROMISCUOUS		0x0008
#define ET131X_PACKET_TYPE_ALL_MULTICAST	0x0010

#define ET131X_TX_TIMEOUT	(1 * HZ)
#define NIC_SEND_HANG_THRESHOLD	0

#define fMP_DEST_MULTI			0x00000001
#define fMP_DEST_BROAD			0x00000002

/* MP_ADAPTER flags */
#define fMP_ADAPTER_RECV_LOOKASIDE	0x00000004
#define fMP_ADAPTER_INTERRUPT_IN_USE	0x00000008

/* MP_SHARED flags */
#define fMP_ADAPTER_LOWER_POWER		0x00200000

#define fMP_ADAPTER_NON_RECOVER_ERROR	0x00800000
#define fMP_ADAPTER_HARDWARE_ERROR	0x04000000

#define fMP_ADAPTER_FAIL_SEND_MASK	0x3ff00000
/* Some offsets in PCI config space that are actually used. */
#define ET1310_PCI_MAX_PYLD		0x4C
#define ET1310_PCI_MAC_ADDRESS		0xA4
#define ET1310_PCI_EEPROM_STATUS	0xB2
#define ET1310_PCI_ACK_NACK		0xC0
#define ET1310_PCI_REPLAY		0xC2
#define ET1310_PCI_L0L1LATENCY		0xCF

/* PCI Product IDs */
#define ET131X_PCI_DEVICE_ID_GIG	0xED00	/* ET1310 1000 Base-T 8 */
#define ET131X_PCI_DEVICE_ID_FAST	0xED01	/* ET1310 100 Base-T */

/* Define order of magnitude converter */
#define NANO_IN_A_MICRO	1000

#define PARM_RX_NUM_BUFS_DEF	4
#define PARM_RX_TIME_INT_DEF	10
#define PARM_RX_MEM_END_DEF	0x2bc
#define PARM_TX_TIME_INT_DEF	40
#define PARM_TX_NUM_BUFS_DEF	4
#define PARM_DMA_CACHE_DEF	0
#define FBR_CHUNKS		32

#define MAX_DESC_PER_RING_RX	1024

/* number of RFDs - default and min */
#ifdef USE_FBR0
#define RFD_LOW_WATER_MARK	40
#define NIC_DEFAULT_NUM_RFD	1024
#else
#define RFD_LOW_WATER_MARK	20
#define NIC_DEFAULT_NUM_RFD	256
#endif

#define NIC_MIN_NUM_RFD		64

#define NUM_PACKETS_HANDLED	256

#define ALCATEL_MULTICAST_PKT	0x01000000
#define ALCATEL_BROADCAST_PKT	0x02000000
/* typedefs for Free Buffer Descriptors */
	u32 word2;	/* Bits 10-31 reserved, 0-9 descriptor */

/* Packet Status Ring Descriptors
 *
 * top 16 bits are from the Alcatel Status Word as enumerated in
 * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
 *
 * 1: ipa			IP checksum assist
 * 2: ipp			IP checksum pass
 * 3: tcpa			TCP checksum assist
 * 4: tcpp			TCP checksum pass
 * 6: rxmac_error		RXMAC Error Indicator
 * 7: drop			Drop packet
 * 8: ft			Frame Truncated
 * 16: asw_prev_pkt_dropped	e.g. IFG too small on previous
 * 17: asw_RX_DV_event		short receive event detected
 * 18: asw_false_carrier_event	bad carrier since last good packet
 * 19: asw_code_err		one or more nibbles signalled as errors
 * 20: asw_CRC_err		CRC error
 * 21: asw_len_chk_err		frame length field incorrect
 * 22: asw_too_long		frame length > 1518 bytes
 * 23: asw_OK			valid CRC + no code error
 * 24: asw_multicast		has a multicast address
 * 25: asw_broadcast		has a broadcast address
 * 26: asw_dribble_nibble	spurious bits after EOP
 * 27: asw_control_frame	is a control frame
 * 28: asw_pause_frame		is a pause frame
 * 29: asw_unsupported_op	unsupported OP code
 * 30: asw_VLAN_tag		VLAN tag detected
 * 31: asw_long_evt		Rx long event
 *
 * 0-15: length			length in bytes
 * 16-25: bi			Buffer Index
 * 26-27: ri			Ring Index
 */
struct pkt_stat_desc {
/* Typedefs for the RX DMA status word */

/*
 * rx status word 0 holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word
 * which contains the Free Buffer ring 0 and 1 available offset.
 *
 * bit 0-9 FBR1 offset
 * bit 10 Wrap flag for FBR1
 * bit 16-25 FBR0 offset
 * bit 26 Wrap flag for FBR0
 */

/*
 * RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310. Word 3 is a 32 bit word
 * which contains the Packet Status Ring available offset.
 *
 * bit 16-27 PSRoffset
 */

/*
 * struct rx_status_block is a structure representing the status of the Rx
 * DMA engine. It sits in free memory, and is pointed to by 0x101c / 0x1020.
 */
struct rx_status_block {
/*
 * Structure for look-up table holding free buffer ring pointers, addresses
 */
	void		*virt[MAX_DESC_PER_RING_RX];
	void		*buffer1[MAX_DESC_PER_RING_RX];
	void		*buffer2[MAX_DESC_PER_RING_RX];
	u32		 bus_high[MAX_DESC_PER_RING_RX];
	u32		 bus_low[MAX_DESC_PER_RING_RX];
	dma_addr_t	 ring_physaddr;
	void		*mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	dma_addr_t	 mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	uint64_t	 real_physaddr;
/*
 * struct rx_ring is the structure representing the adaptor's local
 * reference(s) to the rings
 *
 ******************************************************************************
 * IMPORTANT NOTE :- fbr_lookup *fbr[NUM_FBRS] uses index 0 to refer to FBR1
 * and index 1 to refer to FBR0
 ******************************************************************************
 */
	struct fbr_lookup *fbr[NUM_FBRS];
	void *ps_ring_virtaddr;
	dma_addr_t ps_ring_physaddr;
	struct rx_status_block *rx_status_block;
	dma_addr_t rx_status_bus;
	struct list_head recv_list;
	bool unfinished_receives;
	struct list_head recv_packet_pool;

	/* lookaside lists */
	struct kmem_cache *recv_lookaside;
/*
 * word 2 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0-15: length of packet
 * 29-31: VLAN priority
 *
 * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0: last packet in the sequence
 * 1: first packet in the sequence
 * 2: interrupt the processor when this pkt sent
 * 3: Control word - no packet data
 * 4: Issue half-duplex backpressure : XON/XOFF
 * 5: send pause frame
 * 6: Tx frame has error
 * 10: Packet is a Huge packet
 * 11: append VLAN tag
 * 12: IP checksum assist
 * 13: TCP checksum assist
 * 14: UDP checksum assist
 */

/* struct tx_desc represents each descriptor on the ring */
	u32 len_vlan;	/* control words how to xmit the */
	u32 flags;	/* data (detailed above) */

/*
 * The status of the Tx DMA engine; it sits in free memory, and is pointed to
 * by 0x101c / 0x1020. This is a DMA10 type.
 */

/* TCB (Transmit Control Block: Host Side) */
	struct tcb *next;	/* Next entry in ring */
	u32 flags;		/* Our flags for the packet */
	u32 count;		/* Used to spot stuck/lost packets */
	u32 stale;		/* Used to spot stuck/lost packets */
	struct sk_buff *skb;	/* Network skb we are tied to */
	u32 index;		/* Ring indexes */
/* Structure representing our local reference(s) to the ring */
	/* TCB (Transmit Control Block) memory and lists */
	struct tcb *tcb_ring;

	/* List of TCBs that are ready to be used */
	struct tcb *tcb_qhead;
	struct tcb *tcb_qtail;

	/* list of TCBs that are currently being sent. NOTE that access to all
	 * three of these (including used) are controlled via the
	 * TCBSendQLock. This lock should be secured prior to incrementing /
	 * decrementing used, or any queue manipulation on send_head /
	 */
	struct tcb *send_head;
	struct tcb *send_tail;

	/* The actual descriptor ring */
	struct tx_desc *tx_desc_ring;
	dma_addr_t tx_desc_ring_pa;

	/* send_idx indicates where we last wrote to in the descriptor ring. */

	/* The location of the write-back status block */
	dma_addr_t tx_status_pa;

	/* Packets since the last IRQ: used for interrupt coalescing */
/* ADAPTER defines */
/*
 * Do not change these values: if changed, then change also in respective
 * TXdma and Rxdma engines
 */
#define NUM_DESC_PER_RING_TX	512	/* TX Do not change these values */

/*
 * These values are all superseded by registry entries to facilitate tuning.
 * Once the desired performance has been achieved, the optimal registry values
 * should be re-populated to these #defines:
 */
#define TX_ERROR_PERIOD		1000

#define LO_MARK_PERCENT_FOR_PSR	15
#define LO_MARK_PERCENT_FOR_RX	15

/* RFD (Receive Frame Descriptor) */
	struct list_head list_node;
	u32 len;	/* total size of receive frame */

#define FLOW_TXONLY	1
#define FLOW_RXONLY	2
/* Struct to define some device statistics */
	/*
	 * NOTE: atomic_t types are only guaranteed to store 24-bits; if we
	 * MUST have 32, then we'll need another way to perform atomic
	 */
	u32 unicast_pkts_rcvd;
	atomic_t unicast_pkts_xmtd;
	u32 multicast_pkts_rcvd;
	atomic_t multicast_pkts_xmtd;
	u32 broadcast_pkts_rcvd;
	atomic_t broadcast_pkts_xmtd;
	u32 rcvd_pkts_dropped;

	u32 tx_excessive_collisions;
	u32 tx_first_collisions;
	u32 tx_late_collisions;

	u32 rx_code_violations;

	u32 synchronous_iterations;
	u32 interrupt_status;
/* The private adapter structure */
struct et131x_adapter {
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct mii_bus *mii_bus;
	struct phy_device *phydev;
	struct work_struct task;

	/* Flags that indicate current state of the adapter */

	/* local link state, to determine if a state change has occurred */

	u8 rom_addr[ETH_ALEN];

	spinlock_t tcb_send_qlock;
	spinlock_t tcb_ready_qlock;
	spinlock_t send_hw_lock;

	spinlock_t rcv_pend_lock;

	/* Packet Filter and look ahead size */

	u32 multicast_addr_count;
	u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];

	/* Pointer to the device's PCI register space */
	struct address_map __iomem *regs;

	/* Registry parameters */
	u8 wanted_flow;		/* Flow we want for 802.3x flow control */
	u32 registry_jumbo_packet;	/* Max supported ethernet packet size */

	/* Derived from the registry: */
	u8 flowcontrol;		/* flow control validated by the far-end */

	/* Minimize init-time */
	struct timer_list error_timer;

	/* variable used to put the phy into coma mode when booting up with
	 * no cable plugged in after 5 seconds
	 */

	/* Next two used to save power information at power down. This
	 * information will be used during power up to set up parts of Power
	 * Management in JAGCore
	 */

	/* Tx Memory Variables */
	struct tx_ring tx_ring;

	/* Rx Memory Variables */
	struct rx_ring rx_ring;

	struct ce_stats stats;

	struct net_device_stats net_stats;
};
void et131x_error_timer_handler(unsigned long data);
void et131x_enable_interrupts(struct et131x_adapter *adapter);
void et131x_disable_interrupts(struct et131x_adapter *adapter);
void et131x_align_allocated_memory(struct et131x_adapter *adapter,
				   u64 *phys_addr,
				   u64 *offset, u64 mask);
void et131x_adapter_setup(struct et131x_adapter *adapter);
void et131x_soft_reset(struct et131x_adapter *adapter);
void et131x_isr_handler(struct work_struct *work);
void et1310_setup_device_for_multicast(struct et131x_adapter *adapter);
void et1310_setup_device_for_unicast(struct et131x_adapter *adapter);
void et131x_up(struct net_device *netdev);
void et131x_down(struct net_device *netdev);
struct net_device *et131x_device_alloc(void);
void et131x_enable_txrx(struct net_device *netdev);
void et131x_disable_txrx(struct net_device *netdev);
int et1310_in_phy_coma(struct et131x_adapter *adapter);
void et1310_phy_access_mii_bit(struct et131x_adapter *adapter,
			       u16 regnum, u16 bitnum, u8 *value);
int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
			u8 reg, u16 *value);
int32_t et131x_mii_write(struct et131x_adapter *adapter,
			 u8 reg, u16 value);
void et131x_rx_dma_memory_free(struct et131x_adapter *adapter);
void et131x_rx_dma_disable(struct et131x_adapter *adapter);
void et131x_rx_dma_enable(struct et131x_adapter *adapter);
void et131x_init_send(struct et131x_adapter *adapter);
void et131x_tx_dma_enable(struct et131x_adapter *adapter);
/* EEPROM functions */

static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
{
	/*
	 * 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
	 *    bits 7,1:0 both equal to 1, at least once after reset.
	 *    Subsequent operations need only to check that bits 1:0 are equal
	 *    to 1 prior to starting a single byte read/write
	 */
	for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
		/* Read registers grouped in DWORD1 */
		if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))

		/* I2C idle and Phy Queue Avail both true */
		if ((reg & 0x3000) == 0x3000) {
/**
 * eeprom_write - Write a byte to the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address to write to
 * @data: the value to write
 *
 * Returns 0 for a successful write, -EIO on failure.
 */
static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
{
	struct pci_dev *pdev = adapter->pdev;

	/*
	 * For an EEPROM, an I2C single byte write is defined as a START
	 * condition followed by the device address, EEPROM address, one byte
	 * of data and a STOP condition. The STOP condition will trigger the
	 * EEPROM's internally timed write cycle to the nonvolatile memory.
	 * All inputs are disabled during this write cycle and the EEPROM will
	 * not respond to any access until the internal write is complete.
	 */
	err = eeprom_wait_ready(pdev, NULL);
	/*
	 * 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0,
	 *    and bits 1:0 both =0. Bit 5 should be set according to the
	 *    type of EEPROM being accessed (1=two byte addressing, 0=one
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
			LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE))

	/* Prepare EEPROM address for Step 3 */

	for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
		/* Write the address to the LBCIF Address Register */
		if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))

		/*
		 * Write the data to the LBCIF Data Register (the I2C write
		 */
		if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))

		/*
		 * Monitor bits 1:0 of the LBCIF Status Register. When bits
		 * 1:0 are both equal to 1, the I2C write has completed and the
		 * internal write cycle of the EEPROM is about to start.
		 * (bits 1:0 = 01 is a legal state while waiting for both to
		 * equal 1, but bits 1:0 = 10 is invalid and implies that
		 * something is broken).
		 */
		err = eeprom_wait_ready(pdev, &status);

		/*
		 * Check bit 3 of the LBCIF Status Register. If equal to 1,
		 * an error has occurred. Don't break here if we are revision
		 * 1, this is so we do a blind write for load bug.
		 */
		if ((status & LBCIF_STATUS_GENERAL_ERROR)
		    && adapter->pdev->revision == 0)

		/*
		 * Check bit 2 of the LBCIF Status Register. If equal to 1 an
		 * ACK error has occurred on the address phase of the write.
		 * This could be due to an actual hardware failure or the
		 * EEPROM may still be in its internal write cycle from a
		 * previous write. This write operation was ignored and must be
		 */
		if (status & LBCIF_STATUS_ACK_ERROR) {
			/*
			 * This could be due to an actual hardware failure
			 * or the EEPROM may still be in its internal write
			 * cycle from a previous write. This write operation
			 * was ignored and must be repeated later.
			 */

	/*
	 * Set bit 6 of the LBCIF Control Register = 0.
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE))

	/* Do read until internal ACK_ERROR goes away meaning write
	 */
			pci_write_config_dword(pdev,
					       LBCIF_ADDRESS_REGISTER,
			pci_read_config_dword(pdev,
					      LBCIF_DATA_REGISTER, &val);
		} while ((val & 0x00010000) == 0);
	} while (val & 0x00040000);

	if ((val & 0xFF00) != 0xC000 || index == 10000)

	return writeok ? 0 : -EIO;
}
/**
 * eeprom_read - Read a byte from the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address from which to read
 * @pdata: a pointer to a byte in which to store the value of the read
 *
 * Returns 0 for a successful read, -EIO on failure.
 */
static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
{
	struct pci_dev *pdev = adapter->pdev;

	/*
	 * A single byte read is similar to the single byte write, with the
	 * exception of the data flow:
	 */
	err = eeprom_wait_ready(pdev, NULL);

	/*
	 * Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0,
	 * and bits 1:0 both =0. Bit 5 should be set according to the type
	 * of EEPROM being accessed (1=two byte addressing, 0=one byte
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE))

	/*
	 * Write the address to the LBCIF Address Register (I2C read will
	 */
	if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))

	/*
	 * Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read
	 * is complete. (if bit 1 = 1 and bit 0 stays = 0, a hardware failure
	 */
	err = eeprom_wait_ready(pdev, &status);

	/*
	 * Regardless of error status, read data byte from LBCIF Data
	 */

	/*
	 * Check bit 2 of the LBCIF Status Register. If = 1,
	 * then an error has occurred.
	 */
	return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
}
int et131x_init_eeprom(struct et131x_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	/* We first need to check the EEPROM Status code located at offset
	 * 0xB2 of config space
	 */
	pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS,

	/* THIS IS A WORKAROUND:
	 * I need to call this function twice to get my card in a
	 * LG M1 Express Dual running. I tried also a msleep before this
	 * function, because I thought there could be some timing conditions,
	 * but it didn't work. Calling the whole function twice also works.
	 */
	if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
			"Could not read PCI config space for EEPROM Status\n");

	/* Determine if the error(s) we care about are present. If they are
	 * present we need to fail.
	 */
	if (eestatus & 0x4C) {
		int write_failed = 0;
		if (pdev->revision == 0x01) {
			static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };

			/* Re-write the first 4 bytes if we have an eeprom
			 * present and the revision id is 1, this fixes the
			 * corruption seen with 1310 B Silicon
			 */
			for (i = 0; i < 3; i++)
				if (eeprom_write(adapter, i, eedata[i]) < 0)

		if (pdev->revision != 0x01 || write_failed) {
				"Fatal EEPROM Status Error - 0x%04x\n", eestatus);

			/* This error could mean that there was an error
			 * reading the eeprom or that the eeprom doesn't exist.
			 * We will treat each case the same and not try to
			 * gather additional information that normally would
			 * come from the eeprom, like MAC Address
			 */
			adapter->has_eeprom = 0;

	adapter->has_eeprom = 1;

	/* Read the EEPROM for information regarding LED behavior. Refer to
	 * ET1310_phy.c, et131x_xcvr_init(), for its use.
	 */
	eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
	eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);

	if (adapter->eeprom_data[0] != 0xcd)
		/* Disable all optional features */
		adapter->eeprom_data[1] = 0x00;
}
/**
 * et1310_config_mac_regs1 - Initialize the first part of MAC regs
 * @adapter: pointer to our adapter structure
 */
void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
	struct mac_regs __iomem *macregs = &adapter->regs->mac;

	/* First we need to reset everything. Write to MAC configuration
	 * register 1 to perform reset.
	 */
	writel(0xC00F0000, &macregs->cfg1);

	/* Next lets configure the MAC Inter-packet gap register */
	ipg = 0x38005860;	/* IPG1 0x38 IPG2 0x58 B2B 0x60 */
	ipg |= 0x50 << 8;	/* ifg enforce 0x50 */
	writel(ipg, &macregs->ipg);

	/* Next lets configure the MAC Half Duplex register */
	/* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */
	writel(0x00A1F037, &macregs->hfdp);

	/* Next lets configure the MAC Interface Control register */
	writel(0, &macregs->if_ctrl);

	/* Let's move on to setting up the mii management configuration */
	writel(0x07, &macregs->mii_mgmt_cfg);	/* Clock reset 0x7 */

	/* Next lets configure the MAC Station Address register. These
	 * values are read from the EEPROM during initialization and stored
	 * in the adapter structure. We write what is stored in the adapter
	 * structure to the MAC Station Address registers high and low. This
	 * station address is used for generating and checking pause control
	 */
	station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
		   (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
	station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
		   (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
		   (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
	writel(station1, &macregs->station_addr_1);
	writel(station2, &macregs->station_addr_2);

	/* Max ethernet packet in bytes that will be passed by the mac without
	 * being truncated. Allow the MAC to pass 4 more than our max packet
	 * size. This is 4 for the Ethernet CRC.
	 *
	 * Packets larger than (registry_jumbo_packet) that do not contain a
	 * VLAN ID will be dropped by the Rx function.
	 */
	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);

	/* clear out MAC config reset */
	writel(0, &macregs->cfg1);
}
/**
 * et1310_config_mac_regs2 - Initialize the second part of MAC regs
 * @adapter: pointer to our adapter structure
 */
void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;

	ctl = readl(&adapter->regs->txmac.ctl);
	cfg1 = readl(&mac->cfg1);
	cfg2 = readl(&mac->cfg2);
	ifctrl = readl(&mac->if_ctrl);

	/* Set up the if mode bits */
	if (phydev && phydev->speed == SPEED_1000) {
		ifctrl &= ~(1 << 24);

	/* We need to enable Rx/Tx */
	cfg1 |= CFG1_RX_ENABLE | CFG1_TX_ENABLE | CFG1_TX_FLOW;
	/* Initialize loop back to off */
	cfg1 &= ~(CFG1_LOOPBACK | CFG1_RX_FLOW);
	if (adapter->flowcontrol == FLOW_RXONLY ||
	    adapter->flowcontrol == FLOW_BOTH)
		cfg1 |= CFG1_RX_FLOW;
	writel(cfg1, &mac->cfg1);

	/* Now we need to initialize the MAC Configuration 2 register */
	/* preamble 7, check length, huge frame off, pad crc, crc enable */

	/* Turn on duplex if needed */
	if (phydev && phydev->duplex == DUPLEX_FULL)
		ifctrl &= ~(1 << 26);
	if (phydev && phydev->duplex == DUPLEX_HALF)
		ifctrl |= (1 << 26);	/* Enable ghd */

	writel(ifctrl, &mac->if_ctrl);
	writel(cfg2, &mac->cfg2);

		cfg1 = readl(&mac->cfg1);
	} while ((cfg1 & CFG1_WAIT) != CFG1_WAIT && delay < 100);

		dev_warn(&adapter->pdev->dev,
			 "Syncd bits did not respond correctly cfg1 word 0x%08x\n",

	ctl |= 0x09;	/* TX mac enable, FC disable */
	writel(ctl, &adapter->regs->txmac.ctl);

	/* Ready to start the RXDMA/TXDMA engine */
	if (adapter->flags & fMP_ADAPTER_LOWER_POWER) {
		et131x_rx_dma_enable(adapter);
		et131x_tx_dma_enable(adapter);
	}
}
void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	struct phy_device *phydev = adapter->phydev;

	/* Disable the MAC while it is being configured (also disable WOL) */
	writel(0x8, &rxmac->ctrl);

	/* Initialize WOL to disabled. */
	writel(0, &rxmac->crc0);
	writel(0, &rxmac->crc12);
	writel(0, &rxmac->crc34);

	/* We need to set the WOL mask0 - mask4 next. We initialize it to
	 * its default Values of 0x00000000 because there are no WOL masks
	 */
	writel(0, &rxmac->mask0_word0);
	writel(0, &rxmac->mask0_word1);
	writel(0, &rxmac->mask0_word2);
	writel(0, &rxmac->mask0_word3);

	writel(0, &rxmac->mask1_word0);
	writel(0, &rxmac->mask1_word1);
	writel(0, &rxmac->mask1_word2);
	writel(0, &rxmac->mask1_word3);

	writel(0, &rxmac->mask2_word0);
	writel(0, &rxmac->mask2_word1);
	writel(0, &rxmac->mask2_word2);
	writel(0, &rxmac->mask2_word3);

	writel(0, &rxmac->mask3_word0);
	writel(0, &rxmac->mask3_word1);
	writel(0, &rxmac->mask3_word2);
	writel(0, &rxmac->mask3_word3);

	writel(0, &rxmac->mask4_word0);
	writel(0, &rxmac->mask4_word1);
	writel(0, &rxmac->mask4_word2);
	writel(0, &rxmac->mask4_word3);

	/* Lets setup the WOL Source Address */
	sa_lo = (adapter->addr[2] << ET_WOL_LO_SA3_SHIFT) |
		(adapter->addr[3] << ET_WOL_LO_SA4_SHIFT) |
		(adapter->addr[4] << ET_WOL_LO_SA5_SHIFT) |
	writel(sa_lo, &rxmac->sa_lo);

	sa_hi = (u32) (adapter->addr[0] << ET_WOL_HI_SA1_SHIFT) |
	writel(sa_hi, &rxmac->sa_hi);

	/* Disable all Packet Filtering */
	writel(0, &rxmac->pf_ctrl);

	/* Let's initialize the Unicast Packet filtering address */
	if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
		et1310_setup_device_for_unicast(adapter);
		pf_ctrl |= 4;	/* Unicast filter */
	} else {
		writel(0, &rxmac->uni_pf_addr1);
		writel(0, &rxmac->uni_pf_addr2);
		writel(0, &rxmac->uni_pf_addr3);
	}

	/* Let's initialize the Multicast hash */
	if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
		pf_ctrl |= 2;	/* Multicast filter */
		et1310_setup_device_for_multicast(adapter);
	}

	/* Runt packet filtering. Didn't work in version A silicon. */
	pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16;
	pf_ctrl |= 8;	/* Fragment filter */

	if (adapter->registry_jumbo_packet > 8192)
		/* In order to transmit jumbo packets greater than 8k, the
		 * FIFO between RxMAC and RxDMA needs to be reduced in size
		 * to (16k - Jumbo packet size). In order to implement this,
		 * we must use "cut through" mode in the RxMAC, which chops
		 * packets down into segments which are (max_size * 16). In
		 * this case we selected 256 bytes, since this is the size of
		 * the PCI-Express TLP's that the 1310 uses.
		 *
		 * seg_en on, fc_en off, size 0x10
		 */
		writel(0x41, &rxmac->mcif_ctrl_max_seg);
	else
		writel(0, &rxmac->mcif_ctrl_max_seg);
	/* Initialize the MCIF water marks */
	writel(0, &rxmac->mcif_water_mark);

	/* Initialize the MIF control */
	writel(0, &rxmac->mif_ctrl);

	/* Initialize the Space Available Register */
	writel(0, &rxmac->space_avail);

	/* Initialize the mif_ctrl register
	 * bit 3:  Receive code error. One or more nibbles were signalled as
	 *         errors during the reception of the packet. Clear this
	 *         bit in Gigabit, set it in 100Mbit. This was derived
	 *         experimentally at UNH.
	 * bit 4:  Receive CRC error. The packet's CRC did not match the
	 *         internally generated CRC.
	 * bit 5:  Receive length check error. Indicates that frame length
	 *         field value in the packet does not match the actual data
	 *         byte length and is not a type field.
	 * bit 16: Receive frame truncated.
	 * bit 17: Drop packet enable
	 */
	if (phydev && phydev->speed == SPEED_100)
		writel(0x30038, &rxmac->mif_ctrl);
	else
		writel(0x30030, &rxmac->mif_ctrl);

	/* Finally we initialize RxMac to be enabled & WOL disabled. Packet
	 * filter is always enabled since it is where the runt packets are
	 * supposed to be dropped. For version A silicon, runt packet
	 * dropping doesn't work, so it is disabled in the pf_ctrl register,
	 * but we still leave the packet filter on.
	 */
	writel(pf_ctrl, &rxmac->pf_ctrl);
	writel(0x9, &rxmac->ctrl);
}
void et1310_config_txmac_regs(struct et131x_adapter *adapter)
{
	struct txmac_regs __iomem *txmac = &adapter->regs->txmac;

	/* We need to update the Control Frame Parameters
	 * cfpt - control frame pause timer set to 64 (0x40)
	 * cfep - control frame extended pause timer set to 0x0
	 */
	if (adapter->flowcontrol == FLOW_NONE)
		writel(0, &txmac->cf_param);
	else
		writel(0x40, &txmac->cf_param);
}
void et1310_config_macstat_regs(struct et131x_adapter *adapter)
{
	struct macstat_regs __iomem *macstat =
		&adapter->regs->macstat;

	/* Next we need to initialize all the macstat registers to zero on
	 */
	writel(0, &macstat->txrx_0_64_byte_frames);
	writel(0, &macstat->txrx_65_127_byte_frames);
	writel(0, &macstat->txrx_128_255_byte_frames);
	writel(0, &macstat->txrx_256_511_byte_frames);
	writel(0, &macstat->txrx_512_1023_byte_frames);
	writel(0, &macstat->txrx_1024_1518_byte_frames);
	writel(0, &macstat->txrx_1519_1522_gvln_frames);

	writel(0, &macstat->rx_bytes);
	writel(0, &macstat->rx_packets);
	writel(0, &macstat->rx_fcs_errs);
	writel(0, &macstat->rx_multicast_packets);
	writel(0, &macstat->rx_broadcast_packets);
	writel(0, &macstat->rx_control_frames);
	writel(0, &macstat->rx_pause_frames);
	writel(0, &macstat->rx_unknown_opcodes);
	writel(0, &macstat->rx_align_errs);
	writel(0, &macstat->rx_frame_len_errs);
	writel(0, &macstat->rx_code_errs);
	writel(0, &macstat->rx_carrier_sense_errs);
	writel(0, &macstat->rx_undersize_packets);
	writel(0, &macstat->rx_oversize_packets);
	writel(0, &macstat->rx_fragment_packets);
	writel(0, &macstat->rx_jabbers);
	writel(0, &macstat->rx_drops);

	writel(0, &macstat->tx_bytes);
	writel(0, &macstat->tx_packets);
	writel(0, &macstat->tx_multicast_packets);
	writel(0, &macstat->tx_broadcast_packets);
	writel(0, &macstat->tx_pause_frames);
	writel(0, &macstat->tx_deferred);
	writel(0, &macstat->tx_excessive_deferred);
	writel(0, &macstat->tx_single_collisions);
	writel(0, &macstat->tx_multiple_collisions);
	writel(0, &macstat->tx_late_collisions);
	writel(0, &macstat->tx_excessive_collisions);
	writel(0, &macstat->tx_total_collisions);
	writel(0, &macstat->tx_pause_honored_frames);
	writel(0, &macstat->tx_drops);
	writel(0, &macstat->tx_jabbers);
	writel(0, &macstat->tx_fcs_errs);
	writel(0, &macstat->tx_control_frames);
	writel(0, &macstat->tx_oversize_frames);
	writel(0, &macstat->tx_undersize_frames);
	writel(0, &macstat->tx_fragments);
	writel(0, &macstat->carry_reg1);
	writel(0, &macstat->carry_reg2);

	/* Unmask any counters that we want to track the overflow of.
	 * Initially this will be all counters. It may become clear later
	 * that we do not need to track all counters.
	 */
	writel(0xFFFFBE32, &macstat->carry_reg1_mask);
	writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
}
void et1310_config_flow_control(struct et131x_adapter *adapter)
{
	struct phy_device *phydev = adapter->phydev;

	if (phydev->duplex == DUPLEX_HALF) {
		adapter->flowcontrol = FLOW_NONE;
	} else {
		char remote_pause, remote_async_pause;

		et1310_phy_access_mii_bit(adapter,
					  TRUEPHY_BIT_READ, 5, 10,
					  &remote_pause);
		et1310_phy_access_mii_bit(adapter,
					  TRUEPHY_BIT_READ, 5, 11,
					  &remote_async_pause);

		if ((remote_pause == TRUEPHY_BIT_SET) &&
		    (remote_async_pause == TRUEPHY_BIT_SET)) {
			adapter->flowcontrol = adapter->wanted_flow;
		} else if ((remote_pause == TRUEPHY_BIT_SET) &&
			   (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flowcontrol = FLOW_BOTH;
			else
				adapter->flowcontrol = FLOW_NONE;
		} else if ((remote_pause == TRUEPHY_BIT_CLEAR) &&
			   (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
			adapter->flowcontrol = FLOW_NONE;
		} else {	/* if (remote_pause == TRUEPHY_CLEAR_BIT &&
				   remote_async_pause == TRUEPHY_SET_BIT) */
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flowcontrol = FLOW_RXONLY;
			else
				adapter->flowcontrol = FLOW_NONE;
		}
	}
}
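/* Illustrative summary (derived from the logic above, not in the original
 * driver) of how the link partner's pause bits resolve the local setting:
 *
 *	remote_pause	remote_async_pause	wanted_flow	flowcontrol
 *	set		set			any		wanted_flow
 *	set		clear			FLOW_BOTH	FLOW_BOTH
 *	set		clear			other		FLOW_NONE
 *	clear		clear			any		FLOW_NONE
 *	clear		set			FLOW_BOTH	FLOW_RXONLY
 *	clear		set			other		FLOW_NONE
 */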
/**
 * et1310_update_macstat_host_counters - Update the local copy of the statistics
 * @adapter: pointer to the adapter structure
 */
void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
{
	struct ce_stats *stats = &adapter->stats;
	struct macstat_regs __iomem *macstat =
		&adapter->regs->macstat;

	stats->tx_collisions += readl(&macstat->tx_total_collisions);
	stats->tx_first_collisions += readl(&macstat->tx_single_collisions);
	stats->tx_deferred += readl(&macstat->tx_deferred);
	stats->tx_excessive_collisions +=
		readl(&macstat->tx_multiple_collisions);
	stats->tx_late_collisions += readl(&macstat->tx_late_collisions);
	stats->tx_underflows += readl(&macstat->tx_undersize_frames);
	stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames);

	stats->rx_align_errs += readl(&macstat->rx_align_errs);
	stats->rx_crc_errs += readl(&macstat->rx_code_errs);
	stats->rcvd_pkts_dropped += readl(&macstat->rx_drops);
	stats->rx_overflows += readl(&macstat->rx_oversize_packets);
	stats->rx_code_violations += readl(&macstat->rx_fcs_errs);
	stats->rx_length_errs += readl(&macstat->rx_frame_len_errs);
	stats->rx_other_errs += readl(&macstat->rx_fragment_packets);
}
/**
 * et1310_handle_macstat_interrupt
 * @adapter: pointer to the adapter structure
 *
 * One of the MACSTAT counters has wrapped. Update the local copy of
 * the statistics held in the adapter structure, checking the "wrap"
 * bit for each counter.
 */
void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
{
	/* Read the interrupt bits from the register(s). These are Clear On
	 */
	carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
	carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);

	writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
	writel(carry_reg2, &adapter->regs->macstat.carry_reg2);

	/* We need to update the host copy of all the MAC_STAT counters.
	 * For each counter, check its overflow bit. If the overflow bit is
	 * set, then increment the host version of the count by one complete
	 * revolution of the counter. This routine is called when the counter
	 * block indicates that one of the counters has wrapped.
	 */
	if (carry_reg1 & (1 << 14))
		adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 8))
		adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT;
	if (carry_reg1 & (1 << 7))
		adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 2))
		adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 6))
		adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 3))
		adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 0))
		adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT;
	if (carry_reg2 & (1 << 16))
		adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 15))
		adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 6))
		adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 8))
		adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 5))
		adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 4))
		adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 2))
		adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
}
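/* Worked example (illustrative, not in the original driver): the RX code
 * violation counter is 16 bits wide, so when its carry bit (carry_reg1
 * bit 14) is set, the hardware count has rolled over from 0xFFFF to 0,
 * and the host copy above is advanced by one full revolution, i.e.
 * adapter->stats.rx_code_violations grows by COUNTER_WRAP_16_BIT (0x10000).
 */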
void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;

	/* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
	 * the multi-cast LIST. If it is NOT specified, (and "ALL" is not
	 * specified) then we should pass NO multi-cast addresses to the
	 */
	if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
		/* Loop through our multicast array and set up the device */
		for (nIndex = 0; nIndex < adapter->multicast_addr_count;
			result = ether_crc(6, adapter->multicast_list[nIndex]);

			result = (result & 0x3F800000) >> 23;

			if (result < 32) {
				hash1 |= (1 << result);
			} else if ((31 < result) && (result < 64)) {
				result -= 32;
				hash2 |= (1 << result);
			} else if ((63 < result) && (result < 96)) {
				result -= 64;
				hash3 |= (1 << result);
			} else {
				result -= 96;
				hash4 |= (1 << result);
			}

	/* Write out the new hash to the device */
	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(hash1, &rxmac->multi_hash1);
		writel(hash2, &rxmac->multi_hash2);
		writel(hash3, &rxmac->multi_hash3);
		writel(hash4, &rxmac->multi_hash4);
	}
}
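/* Worked example (illustrative, not in the original driver): ether_crc()
 * returns the 32-bit CRC of the 6-byte address, and bits 23-29 of that
 * CRC (values 0-127) select one bit across the four 32-bit hash registers.
 * If, say, (result & 0x3F800000) >> 23 == 70, then bit (70 - 64) = 6 of
 * multi_hash3 is set, and received multicast frames hashing to bucket 70
 * pass the filter.
 */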
void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;

	/* Set up unicast packet filter reg 3 to be the first two octets of
	 * the MAC address for both addresses
	 *
	 * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
	 * MAC address for second address
	 *
	 * Set up unicast packet filter reg 3 to be the octets 2 - 5 of the
	 * MAC address for first address
	 */
	uni_pf3 = (adapter->addr[0] << ET_UNI_PF_ADDR2_1_SHIFT) |
		  (adapter->addr[1] << ET_UNI_PF_ADDR2_2_SHIFT) |
		  (adapter->addr[0] << ET_UNI_PF_ADDR1_1_SHIFT) |

	uni_pf2 = (adapter->addr[2] << ET_UNI_PF_ADDR2_3_SHIFT) |
		  (adapter->addr[3] << ET_UNI_PF_ADDR2_4_SHIFT) |
		  (adapter->addr[4] << ET_UNI_PF_ADDR2_5_SHIFT) |

	uni_pf1 = (adapter->addr[2] << ET_UNI_PF_ADDR1_3_SHIFT) |
		  (adapter->addr[3] << ET_UNI_PF_ADDR1_4_SHIFT) |
		  (adapter->addr[4] << ET_UNI_PF_ADDR1_5_SHIFT) |

	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(uni_pf1, &rxmac->uni_pf_addr1);
		writel(uni_pf2, &rxmac->uni_pf_addr2);
		writel(uni_pf3, &rxmac->uni_pf_addr3);
	}
}
int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);

int et131x_mdio_write(struct mii_bus *bus, int phy_addr, int reg, u16 value)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return et131x_mii_write(adapter, reg, value);
}

int et131x_mdio_reset(struct mii_bus *bus)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_mii_write(adapter, MII_BMCR, BMCR_RESET);
int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
{
	struct phy_device *phydev = adapter->phydev;

	return et131x_phy_mii_read(adapter, phydev->addr, reg, value);
}

/**
 * et131x_phy_mii_read - Read from the PHY through the MII Interface on the MAC
 * @adapter: pointer to our private adapter structure
 * @addr: the address of the transceiver
 * @reg: the register to read
 * @value: pointer to a 16-bit value in which the value will be stored
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 */
int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
			u8 reg, u16 *value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;

	/* Save a local copy of the registers we are dealing with so we can
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to read from on the correct PHY */
	writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	writel(0x1, &mac->mii_mgmt_cmd);

		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & MGMT_WAIT) && delay < 50);

	/* If we hit the max delay, we could not read the register */
		dev_warn(&adapter->pdev->dev,
			 "reg 0x%08x could not be read\n", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",

	/* If we hit here we were able to read the register and we need to
	 * return the value to the caller */
	*value = readl(&mac->mii_mgmt_stat) & 0xFFFF;

	/* Stop the read operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* set the registers we touched back to the state at which we entered
	 */
	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);
}
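/* Usage sketch (illustrative, not in the original driver): reading the
 * bound PHY's basic mode control register through the helpers above.
 * The wrapper name below is hypothetical.
 */
static int et131x_example_read_bmcr(struct et131x_adapter *adapter, u16 *bmcr)
{
	/* et131x_mii_read() supplies phydev->addr and delegates to
	 * et131x_phy_mii_read()
	 */
	return et131x_mii_read(adapter, MII_BMCR, bmcr);
}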
/**
 * et131x_mii_write - Write to a PHY register through the MII interface of the MAC
 * @adapter: pointer to our private adapter structure
 * @reg: the register to write
 * @value: 16-bit value to write
 *
 * FIXME: one caller in netdev still
 *
 * Return 0 on success, errno on failure (as defined in errno.h)
 */
int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;

	addr = phydev->addr;

	/* Save a local copy of the registers we are dealing with so we can
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to write to on the correct PHY */
	writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	/* Add the value to write to the registers to the mac */
	writel(value, &mac->mii_mgmt_ctrl);

		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & MGMT_BUSY) && delay < 100);

	/* If we hit the max delay, we could not write the register */
		dev_warn(&adapter->pdev->dev,
			 "reg 0x%08x could not be written", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
		dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
			 readl(&mac->mii_mgmt_cmd));

		et131x_mii_read(adapter, reg, &tmp);

	/* Stop the write operation */
	writel(0, &mac->mii_mgmt_cmd);

	/*
	 * set the registers we touched back to the state at which we entered
	 */
	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);
/**
 * et1310_phy_power_down - PHY power control
 * @adapter: device to control
 * @down: true for off/false for back on
 *
 * one hundred, ten, one thousand megs
 * How would you like to have your LAN accessed
 * Can't you see that this code processed
 * Phy power, phy power..
 */
void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
{
	et131x_mii_read(adapter, MII_BMCR, &data);
	data &= ~BMCR_PDOWN;
	if (down)
		data |= BMCR_PDOWN;
	et131x_mii_write(adapter, MII_BMCR, data);
}

/* Still used from _mac for BIT_READ */
void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, u16 action,
			       u16 regnum, u16 bitnum, u8 *value)
{
	u16 mask = 0x0001 << bitnum;

	/* Read the requested register */
	et131x_mii_read(adapter, regnum, &reg);

	switch (action) {
	case TRUEPHY_BIT_READ:
		*value = (reg & mask) >> bitnum;
		break;

	case TRUEPHY_BIT_SET:
		et131x_mii_write(adapter, regnum, reg | mask);
		break;

	case TRUEPHY_BIT_CLEAR:
		et131x_mii_write(adapter, regnum, reg & ~mask);
		break;
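	/* Usage sketch (illustrative): this is how the flow control code
	 * above samples the link partner's PAUSE bit (register 5, bit 10):
	 *
	 *	u8 remote_pause;
	 *
	 *	et1310_phy_access_mii_bit(adapter, TRUEPHY_BIT_READ, 5, 10,
	 *				  &remote_pause);
	 */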
/**
 * et131x_xcvr_init - Init the phy if we are setting it into force mode
 * @adapter: pointer to our private adapter structure
 */
void et131x_xcvr_init(struct et131x_adapter *adapter)
{
	et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &isr);
	et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &imr);

	/* Set the link status interrupt only. Bad behavior when link status
	 * and auto neg are set, we run into a nested interrupt problem
	 */
	imr |= (ET_PHY_INT_MASK_AUTONEGSTAT &
		ET_PHY_INT_MASK_LINKSTAT &
		ET_PHY_INT_MASK_ENABLE);

	et131x_mii_write(adapter, PHY_INTERRUPT_MASK, imr);

	/* Set the LED behavior such that LED 1 indicates speed (off =
	 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
	 * link and activity (on for link, blink off for activity).
	 *
	 * NOTE: Some customizations have been added here for specific
	 * vendors; The LED behavior is now determined by vendor data in the
	 * EEPROM. However, the above description is the default.
	 */
	if ((adapter->eeprom_data[1] & 0x4) == 0) {
		et131x_mii_read(adapter, PHY_LED_2, &lcr2);

		lcr2 &= (ET_LED2_LED_100TX & ET_LED2_LED_1000T);
		lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);

		if ((adapter->eeprom_data[1] & 0x8) == 0)
			lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
		else
			lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);

		et131x_mii_write(adapter, PHY_LED_2, lcr2);
	}
}
/**
 * et1310_in_phy_coma - check if the device is in phy coma
 * @adapter: pointer to our adapter structure
 *
 * Returns 0 if the device is not in phy coma, 1 if it is in phy coma
 */
int et1310_in_phy_coma(struct et131x_adapter *adapter)
{
	pmcsr = readl(&adapter->regs->global.pm_csr);

	return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
}
/**
 * et1310_enable_phy_coma - called when network cable is unplugged
 * @adapter: pointer to our adapter structure
 *
 * The driver receives a phy status change interrupt while in D0 and checks
 * that phy_status is down.
 *
 * -- gate off JAGCore;
 * -- set gigE PHY in Coma mode
 * -- wake on phy_interrupt; Perform software reset JAGCore,
 *    re-initialize jagcore and gigE PHY
 *
 * Add D0-ASPM-PhyLinkDown Support:
 * -- while in D0, when there is a phy_interrupt indicating phy link
 *    down status, call the MPSetPhyComa routine to enter this active
 *    state power saving mode
 * -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt
 *    indicating linkup status, call the MPDisablePhyComa routine to
 *    restore JAGCore and gigE PHY
 */
void et1310_enable_phy_coma(struct et131x_adapter *adapter)
{
	unsigned long flags;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	/* Save the GbE PHY speed and duplex modes. Need to restore this
	 * when cable is plugged back in
	 */

	/*
	 * TODO - when PM is re-enabled, check if we need to
	 * perform a similar task as this -
	 * adapter->pdown_speed = adapter->ai_force_speed;
	 * adapter->pdown_duplex = adapter->ai_force_duplex;
	 */

	/* Stop sending packets. */
	spin_lock_irqsave(&adapter->send_hw_lock, flags);
	adapter->flags |= fMP_ADAPTER_LOWER_POWER;
	spin_unlock_irqrestore(&adapter->send_hw_lock, flags);

	/* Wait for outstanding Receive packets */

	et131x_disable_txrx(adapter->netdev);

	/* Gate off JAGCore 3 clock domains */
	pmcsr &= ~ET_PMCSR_INIT;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	/* Program gigE PHY in to Coma mode */
	pmcsr |= ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);
}
/**
 * et1310_disable_phy_coma - Disable the Phy Coma Mode
 * @adapter: pointer to our adapter structure
 */
void et1310_disable_phy_coma(struct et131x_adapter *adapter)
{
	pmcsr = readl(&adapter->regs->global.pm_csr);

	/* Disable phy_sw_coma register and re-enable JAGCore clocks */
	pmcsr |= ET_PMCSR_INIT;
	pmcsr &= ~ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	/* Restore the GbE PHY speed and duplex modes;
	 * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
	 */
	/* TODO - when PM is re-enabled, check if we need to
	 * perform a similar task as this -
	 * adapter->ai_force_speed = adapter->pdown_speed;
	 * adapter->ai_force_duplex = adapter->pdown_duplex;
	 */

	/* Re-initialize the send structures */
	et131x_init_send(adapter);

	/* Bring the device back to the state it was during init prior to
	 * autonegotiation being complete. This way, when we get the auto-neg
	 * complete interrupt, we can complete init by calling ConfigMacREGS2.
	 */
	et131x_soft_reset(adapter);

	/* setup et1310 as per the documentation ?? */
	et131x_adapter_setup(adapter);

	/* Allow Tx to restart */
	adapter->flags &= ~fMP_ADAPTER_LOWER_POWER;

	et131x_enable_txrx(adapter->netdev);
}
static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
{
	u32 tmp_free_buff_ring = *free_buff_ring;
	tmp_free_buff_ring++;
	/* This works for all cases where limit < 1024. The 1023 case
	 * works because 1023++ is 1024 which means the if condition is
	 * not taken but the carry of the bit into the wrap bit toggles
	 * the wrap correctly.
	 */
	if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
		tmp_free_buff_ring &= ~ET_DMA10_MASK;
		tmp_free_buff_ring ^= ET_DMA10_WRAP;
	}
	/* For the 1023 case */
	tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP);
	*free_buff_ring = tmp_free_buff_ring;
	return tmp_free_buff_ring;
}
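/* Worked example (illustrative, assuming ET_DMA10_MASK covers the low ten
 * bits and ET_DMA10_WRAP is bit 10): with limit = 1023 and an input of
 * 1023 with the wrap bit clear, the increment carries straight into the
 * wrap bit (1023 + 1 = 1024 = ET_DMA10_WRAP), so the returned value is
 * index 0 with the wrap flag toggled, exactly as the comment above says.
 */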
/**
 * et131x_rx_dma_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h)
 *
 * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
 * and the Packet Status Ring.
 */
int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	u32 pktstat_ringsize, fbr_chunksize;
	struct rx_ring *rx_ring;

	/* Setup some convenience pointers */
	rx_ring = &adapter->rx_ring;

	/* Alloc memory for the lookup table */
	rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
	rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);

	/* The first thing we will do is configure the sizes of the buffer
	 * rings. These will change based on jumbo packet support. Larger
	 * jumbo packets increase the size of each entry in FBR0, and the
	 * number of entries in FBR0, while at the same time decreasing the
	 * number of entries in FBR1.
	 *
	 * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
	 * entries are huge in order to accommodate a "jumbo" frame, then it
	 * will have less entries. Conversely, FBR1 will now be relied upon
	 * to carry more "normal" frames, thus its entry size also increases
	 * and the number of entries goes up too (since it now carries
	 * "small" + "regular" packets).
	 *
	 * In this scheme, we try to maintain 512 entries between the two
	 * rings. Also, FBR1 remains a constant size - when its size doubles
	 * the number of entries halves. FBR0 increases in size, however.
	 */
	if (adapter->registry_jumbo_packet < 2048) {
		rx_ring->fbr[1]->buffsize = 256;
		rx_ring->fbr[1]->num_entries = 512;
		rx_ring->fbr[0]->buffsize = 2048;
		rx_ring->fbr[0]->num_entries = 512;
	} else if (adapter->registry_jumbo_packet < 4096) {
		rx_ring->fbr[1]->buffsize = 512;
		rx_ring->fbr[1]->num_entries = 1024;
		rx_ring->fbr[0]->buffsize = 4096;
		rx_ring->fbr[0]->num_entries = 512;
	} else {
		rx_ring->fbr[1]->buffsize = 1024;
		rx_ring->fbr[1]->num_entries = 768;
		rx_ring->fbr[0]->buffsize = 16384;
		rx_ring->fbr[0]->num_entries = 128;
	}

#ifdef USE_FBR0
	adapter->rx_ring.psr_num_entries =
		adapter->rx_ring.fbr[1]->num_entries +
		adapter->rx_ring.fbr[0]->num_entries;
#else
	adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr[0]->num_entries;
#endif
	/* Allocate an area of memory for Free Buffer Ring 1 */
	bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) + 0xfff;
	rx_ring->fbr[0]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
					bufsize,
					&rx_ring->fbr[0]->ring_physaddr,
					GFP_KERNEL);
	if (!rx_ring->fbr[0]->ring_virtaddr) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Free Buffer Ring 1\n");

	/* Save physical address
	 *
	 * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here
	 * before storing the adjusted address.
	 */
	rx_ring->fbr[0]->real_physaddr = rx_ring->fbr[0]->ring_physaddr;

	/* Align Free Buffer Ring 1 on a 4K boundary */
	et131x_align_allocated_memory(adapter,
				      &rx_ring->fbr[0]->real_physaddr,
				      &rx_ring->fbr[0]->offset, 0x0FFF);

	rx_ring->fbr[0]->ring_virtaddr =
		(void *)((u8 *) rx_ring->fbr[0]->ring_virtaddr +
			 rx_ring->fbr[0]->offset);

	/* Allocate an area of memory for Free Buffer Ring 0 */
	bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) + 0xfff;
	rx_ring->fbr[1]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
					bufsize,
					&rx_ring->fbr[1]->ring_physaddr,
					GFP_KERNEL);
	if (!rx_ring->fbr[1]->ring_virtaddr) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Free Buffer Ring 0\n");

	/* Save physical address
	 *
	 * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */
	rx_ring->fbr[1]->real_physaddr = rx_ring->fbr[1]->ring_physaddr;

	/* Align Free Buffer Ring 0 on a 4K boundary */
	et131x_align_allocated_memory(adapter,
				      &rx_ring->fbr[1]->real_physaddr,
				      &rx_ring->fbr[1]->offset, 0x0FFF);

	rx_ring->fbr[1]->ring_virtaddr =
		(void *)((u8 *) rx_ring->fbr[1]->ring_virtaddr +
			 rx_ring->fbr[1]->offset);
2010 for (i = 0; i < (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); i++) {
2012 u64 fbr1_tmp_physaddr;
2015 /* This code allocates an area of memory big enough for N
2016 * free buffers + (buffer_size - 1) so that the buffers can
2017 * be aligned on 4k boundaries. If each buffer were aligned
2018 * to a buffer_size boundary, the effect would be to double
2019 * the size of FBR0. By allocating N buffers at once, we
2020 * reduce this overhead.
2022 if (rx_ring->fbr[0]->buffsize > 4096)
2023 fbr1_align = 4096;
2024 else
2025 fbr1_align = rx_ring->fbr[0]->buffsize;
2027 fbr_chunksize =
2028 (FBR_CHUNKS * rx_ring->fbr[0]->buffsize) + fbr1_align - 1;
2029 rx_ring->fbr[0]->mem_virtaddrs[i] =
2030 dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
2031 &rx_ring->fbr[0]->mem_physaddrs[i], GFP_KERNEL);
2033 if (!rx_ring->fbr[0]->mem_virtaddrs[i]) {
2034 dev_err(&adapter->pdev->dev,
2035 "Could not alloc memory\n");
2039 /* See NOTE in "Save Physical Address" comment above */
2040 fbr1_tmp_physaddr = rx_ring->fbr[0]->mem_physaddrs[i];
2042 et131x_align_allocated_memory(adapter,
2043 &fbr1_tmp_physaddr,
2044 &fbr1_offset, (fbr1_align - 1));
2046 for (j = 0; j < FBR_CHUNKS; j++) {
2047 u32 index = (i * FBR_CHUNKS) + j;
2049 /* Save the Virtual address of this index for quick
2050 * access later
2051 */
2052 rx_ring->fbr[0]->virt[index] =
2053 (u8 *) rx_ring->fbr[0]->mem_virtaddrs[i] +
2054 (j * rx_ring->fbr[0]->buffsize) + fbr1_offset;
2056 /* now store the physical address in the descriptor
2057 * so the device can access it
2059 rx_ring->fbr[0]->bus_high[index] =
2060 (u32) (fbr1_tmp_physaddr >> 32);
2061 rx_ring->fbr[0]->bus_low[index] =
2062 (u32) fbr1_tmp_physaddr;
2064 fbr1_tmp_physaddr += rx_ring->fbr[0]->buffsize;
2066 rx_ring->fbr[0]->buffer1[index] =
2067 rx_ring->fbr[0]->virt[index];
2068 rx_ring->fbr[0]->buffer2[index] =
2069 rx_ring->fbr[0]->virt[index] - 4;
2074 /* Same for FBR0 (if in use) */
2075 for (i = 0; i < (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); i++) {
2077 u64 fbr0_tmp_physaddr;
2079 fbr_chunksize =
2080 ((FBR_CHUNKS + 1) * rx_ring->fbr[1]->buffsize) - 1;
2081 rx_ring->fbr[1]->mem_virtaddrs[i] =
2082 dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
2083 &rx_ring->fbr[1]->mem_physaddrs[i], GFP_KERNEL);
2085 if (!rx_ring->fbr[1]->mem_virtaddrs[i]) {
2086 dev_err(&adapter->pdev->dev,
2087 "Could not alloc memory\n");
2091 /* See NOTE in "Save Physical Address" comment above */
2092 fbr0_tmp_physaddr = rx_ring->fbr[1]->mem_physaddrs[i];
2094 et131x_align_allocated_memory(adapter,
2095 &fbr0_tmp_physaddr,
2096 &fbr0_offset,
2097 rx_ring->fbr[1]->buffsize - 1);
2099 for (j = 0; j < FBR_CHUNKS; j++) {
2100 u32 index = (i * FBR_CHUNKS) + j;
2102 rx_ring->fbr[1]->virt[index] =
2103 (u8 *) rx_ring->fbr[1]->mem_virtaddrs[i] +
2104 (j * rx_ring->fbr[1]->buffsize) + fbr0_offset;
2106 rx_ring->fbr[1]->bus_high[index] =
2107 (u32) (fbr0_tmp_physaddr >> 32);
2108 rx_ring->fbr[1]->bus_low[index] =
2109 (u32) fbr0_tmp_physaddr;
2111 fbr0_tmp_physaddr += rx_ring->fbr[1]->buffsize;
2113 rx_ring->fbr[1]->buffer1[index] =
2114 rx_ring->fbr[1]->virt[index];
2115 rx_ring->fbr[1]->buffer2[index] =
2116 rx_ring->fbr[1]->virt[index] - 4;
2121 /* Allocate an area of memory for FIFO of Packet Status ring entries */
2122 pktstat_ringsize =
2123 sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries;
2125 rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2126 pktstat_ringsize,
2127 &rx_ring->ps_ring_physaddr,
2128 GFP_KERNEL);
2130 if (!rx_ring->ps_ring_virtaddr) {
2131 dev_err(&adapter->pdev->dev,
2132 "Cannot alloc memory for Packet Status Ring\n");
2135 printk(KERN_INFO "Packet Status Ring %lx\n",
2136 (unsigned long) rx_ring->ps_ring_physaddr);
2138 /*
2139 * NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
2140 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2141 * are ever returned, make sure the high part is retrieved here before
2142 * storing the adjusted address.
2143 */
2145 /* Allocate an area of memory for writeback of status information */
2146 rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
2147 sizeof(struct rx_status_block),
2148 &rx_ring->rx_status_bus,
2149 GFP_KERNEL);
2150 if (!rx_ring->rx_status_block) {
2151 dev_err(&adapter->pdev->dev,
2152 "Cannot alloc memory for Status Block\n");
2155 rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;
2156 printk(KERN_INFO "PRS %lx\n", (unsigned long)rx_ring->rx_status_bus);
2159 * kmem_cache_create() sets up a slab cache (the "lookaside" pool
2160 * referenced below). After successful creation, fixed-size blocks can
2161 * be allocated from and freed back to the cache.
2162 * RFDs will be allocated from this pool.
2164 rx_ring->recv_lookaside = kmem_cache_create(adapter->netdev->name,
2165 sizeof(struct rfd),
2166 0,
2167 SLAB_CACHE_DMA |
2168 SLAB_HWCACHE_ALIGN,
2170 NULL);
2171 adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE;
2173 /* The RFDs are going to be put on lists later on, so initialize the
2174 * lists ahead of time.
2175 */
2176 INIT_LIST_HEAD(&rx_ring->recv_list);
2181 * et131x_rx_dma_memory_free - Free all memory allocated within this module.
2182 * @adapter: pointer to our private adapter structure
2184 void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
2188 u32 pktstat_ringsize;
2190 struct rx_ring *rx_ring;
2192 /* Setup some convenience pointers */
2193 rx_ring = &adapter->rx_ring;
2195 /* Free RFDs and associated packet descriptors */
2196 WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);
2198 while (!list_empty(&rx_ring->recv_list)) {
2199 rfd = (struct rfd *) list_entry(rx_ring->recv_list.next,
2200 struct rfd, list_node);
2202 list_del(&rfd->list_node);
2204 kmem_cache_free(adapter->rx_ring.recv_lookaside, rfd);
2207 /* Free Free Buffer Ring 1 */
2208 if (rx_ring->fbr[0]->ring_virtaddr) {
2209 /* First the packet memory */
2210 for (index = 0; index <
2211 (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); index++) {
2212 if (rx_ring->fbr[0]->mem_virtaddrs[index]) {
2215 if (rx_ring->fbr[0]->buffsize > 4096)
2216 fbr1_align = 4096;
2217 else
2218 fbr1_align = rx_ring->fbr[0]->buffsize;
2220 fbr_chunksize =
2221 (rx_ring->fbr[0]->buffsize * FBR_CHUNKS) +
2222 fbr1_align - 1;
2224 dma_free_coherent(&adapter->pdev->dev,
2225 fbr_chunksize,
2226 rx_ring->fbr[0]->mem_virtaddrs[index],
2227 rx_ring->fbr[0]->mem_physaddrs[index]);
2229 rx_ring->fbr[0]->mem_virtaddrs[index] = NULL;
2233 /* Now the FIFO itself */
2234 rx_ring->fbr[0]->ring_virtaddr = (void *)((u8 *)
2235 rx_ring->fbr[0]->ring_virtaddr - rx_ring->fbr[0]->offset);
2237 bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries)
2238 + 0xfff;
2240 dma_free_coherent(&adapter->pdev->dev, bufsize,
2241 rx_ring->fbr[0]->ring_virtaddr,
2242 rx_ring->fbr[0]->ring_physaddr);
2244 rx_ring->fbr[0]->ring_virtaddr = NULL;
2248 /* Now the same for Free Buffer Ring 0 */
2249 if (rx_ring->fbr[1]->ring_virtaddr) {
2250 /* First the packet memory */
2251 for (index = 0; index <
2252 (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); index++) {
2253 if (rx_ring->fbr[1]->mem_virtaddrs[index]) {
2254 fbr_chunksize =
2255 (rx_ring->fbr[1]->buffsize *
2256 (FBR_CHUNKS + 1)) - 1;
2258 dma_free_coherent(&adapter->pdev->dev,
2259 fbr_chunksize,
2260 rx_ring->fbr[1]->mem_virtaddrs[index],
2261 rx_ring->fbr[1]->mem_physaddrs[index]);
2263 rx_ring->fbr[1]->mem_virtaddrs[index] = NULL;
2267 /* Now the FIFO itself */
2268 rx_ring->fbr[1]->ring_virtaddr = (void *)((u8 *)
2269 rx_ring->fbr[1]->ring_virtaddr - rx_ring->fbr[1]->offset);
2271 bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries)
2272 + 0xfff;
2274 dma_free_coherent(&adapter->pdev->dev,
2275 bufsize,
2276 rx_ring->fbr[1]->ring_virtaddr,
2277 rx_ring->fbr[1]->ring_physaddr);
2279 rx_ring->fbr[1]->ring_virtaddr = NULL;
2283 /* Free Packet Status Ring */
2284 if (rx_ring->ps_ring_virtaddr) {
2285 pktstat_ringsize =
2286 sizeof(struct pkt_stat_desc) *
2287 adapter->rx_ring.psr_num_entries;
2289 dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize,
2290 rx_ring->ps_ring_virtaddr,
2291 rx_ring->ps_ring_physaddr);
2293 rx_ring->ps_ring_virtaddr = NULL;
2296 /* Free area of memory for the writeback of status information */
2297 if (rx_ring->rx_status_block) {
2298 dma_free_coherent(&adapter->pdev->dev,
2299 sizeof(struct rx_status_block),
2300 rx_ring->rx_status_block, rx_ring->rx_status_bus);
2301 rx_ring->rx_status_block = NULL;
2304 /* Destroy the lookaside (RFD) pool */
2305 if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) {
2306 kmem_cache_destroy(rx_ring->recv_lookaside);
2307 adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
2310 /* Free the FBR Lookup Table */
2312 kfree(rx_ring->fbr[1]);
2315 kfree(rx_ring->fbr[0]);
2317 /* Reset Counters */
2318 rx_ring->num_ready_recv = 0;
2322 * et131x_init_recv - Initialize receive data structures.
2323 * @adapter: pointer to our private adapter structure
2325 * Returns 0 on success and errno on failure (as defined in errno.h)
2327 int et131x_init_recv(struct et131x_adapter *adapter)
2329 int status = -ENOMEM;
2330 struct rfd *rfd = NULL;
2333 struct rx_ring *rx_ring;
2335 /* Setup some convenience pointers */
2336 rx_ring = &adapter->rx_ring;
2338 /* Setup each RFD */
2339 for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
2340 rfd = kmem_cache_alloc(rx_ring->recv_lookaside,
2341 GFP_ATOMIC | GFP_DMA);
2344 dev_err(&adapter->pdev->dev,
2345 "Couldn't alloc RFD out of kmem_cache\n");
2352 /* Add this RFD to the recv_list */
2353 list_add_tail(&rfd->list_node, &rx_ring->recv_list);
2355 /* Increment both the available RFD's, and the total RFD's. */
2356 rx_ring->num_ready_recv++;
2357 numrfd++;
2358 }
2360 if (numrfd > NIC_MIN_NUM_RFD)
2361 status = 0;
2363 rx_ring->num_rfd = numrfd;
2365 if (status != 0) {
2366 kmem_cache_free(rx_ring->recv_lookaside, rfd);
2367 dev_err(&adapter->pdev->dev,
2368 "Allocation problems in et131x_init_recv\n");
2369 }
2371 return status;
2374 * et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
2375 * @adapter: pointer to our adapter structure
2377 void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
2379 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
2380 struct rx_ring *rx_local = &adapter->rx_ring;
2381 struct fbr_desc *fbr_entry;
2384 unsigned long flags;
2386 /* Halt RXDMA to perform the reconfigure. */
2387 et131x_rx_dma_disable(adapter);
2389 /* Load the completion writeback physical address
2391 * NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
2392 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2393 * are ever returned, make sure the high part is retrieved here
2394 * before storing the adjusted address.
2395 */
2396 writel((u32) ((u64)rx_local->rx_status_bus >> 32),
2397 &rx_dma->dma_wb_base_hi);
2398 writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo);
2400 memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));
2402 /* Set the address and parameters of the packet status ring into the
2405 writel((u32) ((u64)rx_local->ps_ring_physaddr >> 32),
2406 &rx_dma->psr_base_hi);
2407 writel((u32) rx_local->ps_ring_physaddr, &rx_dma->psr_base_lo);
2408 writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des);
2409 writel(0, &rx_dma->psr_full_offset);
2411 psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
2412 writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
2413 &rx_dma->psr_min_des);
2415 spin_lock_irqsave(&adapter->rcv_lock, flags);
2417 /* These local variables track the PSR in the adapter structure */
2418 rx_local->local_psr_full = 0;
2420 /* Now's the best time to initialize FBR1 contents */
2421 fbr_entry = (struct fbr_desc *) rx_local->fbr[0]->ring_virtaddr;
2422 for (entry = 0; entry < rx_local->fbr[0]->num_entries; entry++) {
2423 fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry];
2424 fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry];
2425 fbr_entry->word2 = entry;
2426 fbr_entry++;
2427 }
2429 /* Set the address and parameters of Free buffer ring 1 (and 0 if
2430 * required) into the 1310's registers
2432 writel((u32) (rx_local->fbr[0]->real_physaddr >> 32),
2433 &rx_dma->fbr1_base_hi);
2434 writel((u32) rx_local->fbr[0]->real_physaddr, &rx_dma->fbr1_base_lo);
2435 writel(rx_local->fbr[0]->num_entries - 1, &rx_dma->fbr1_num_des);
2436 writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);
2438 /* This variable tracks the free buffer ring 1 full position, so it
2439 * has to match the above.
2440 */
2441 rx_local->fbr[0]->local_full = ET_DMA10_WRAP;
2442 writel(
2443 ((rx_local->fbr[0]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
2444 &rx_dma->fbr1_min_des);
2446 #ifdef USE_FBR0
2447 /* Now's the best time to initialize FBR0 contents */
2448 fbr_entry = (struct fbr_desc *) rx_local->fbr[1]->ring_virtaddr;
2449 for (entry = 0; entry < rx_local->fbr[1]->num_entries; entry++) {
2450 fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry];
2451 fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry];
2452 fbr_entry->word2 = entry;
2453 fbr_entry++;
2454 }
2456 writel((u32) (rx_local->fbr[1]->real_physaddr >> 32),
2457 &rx_dma->fbr0_base_hi);
2458 writel((u32) rx_local->fbr[1]->real_physaddr, &rx_dma->fbr0_base_lo);
2459 writel(rx_local->fbr[1]->num_entries - 1, &rx_dma->fbr0_num_des);
2460 writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);
2462 /* This variable tracks the free buffer ring 0 full position, so it
2463 * has to match the above.
2464 */
2465 rx_local->fbr[1]->local_full = ET_DMA10_WRAP;
2466 writel(
2467 ((rx_local->fbr[1]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
2468 &rx_dma->fbr0_min_des);
2469 #endif
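/* Worked example of the min-descriptor watermarks programmed above,
 * assuming LO_MARK_PERCENT_FOR_RX is 15: with 1024 FBR0 entries,
 * ((1024 * 15) / 100) - 1 = 152, so the device signals "minimum
 * descriptors" once roughly 15% of the free buffers remain. The actual
 * percentage is whatever LO_MARK_PERCENT_FOR_RX is defined to elsewhere.
 */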
2471 /* Program the number of packets we will receive before generating an
2472 * interrupt.
2473 * For version B silicon, this value gets updated once autoneg is
2474 * complete.
2475 */
2476 writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);
2478 /* The "time_done" is not working correctly to coalesce interrupts
2479 * after a given time period, but rather is giving us an interrupt
2480 * regardless of whether we have received packets.
2481 * This value gets updated once autoneg is complete.
2482 */
2483 writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);
2485 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2489 * et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate.
2490 * @adapter: pointer to our adapter structure
2492 void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
2494 struct phy_device *phydev = adapter->phydev;
2499 /* For version B silicon, we do not use the RxDMA timer for 10 and 100
2500 * Mbit/s line rates, nor do we enable any RxDMA interrupt coalescing.
2502 if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
2503 writel(0, &adapter->regs->rxdma.max_pkt_time);
2504 writel(1, &adapter->regs->rxdma.num_pkt_done);
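/* No else branch is needed above: at gigabit rates the coalescing values
 * programmed by et131x_config_rx_dma_regs() (PARM_RX_NUM_BUFS_DEF packets,
 * PARM_RX_TIME_INT_DEF time units) are simply left in place.
 */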
2509 * nic_return_rfd - Recycle an RFD and put it back onto the receive list
2510 * @adapter: pointer to our adapter
2511 * @rfd: pointer to the RFD
2513 static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
2515 struct rx_ring *rx_local = &adapter->rx_ring;
2516 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
2517 u16 buff_index = rfd->bufferindex;
2518 u8 ring_index = rfd->ringindex;
2519 unsigned long flags;
2521 /* We don't use any of the OOB data besides status; otherwise we
2522 * would need to clean up the OOB data here.
2523 */
2525 if (
2526 (ring_index == 0 && buff_index < rx_local->fbr[1]->num_entries) ||
2528 (ring_index == 1 && buff_index < rx_local->fbr[0]->num_entries)) {
2529 spin_lock_irqsave(&adapter->fbr_lock, flags);
2531 if (ring_index == 1) {
2532 struct fbr_desc *next =
2533 (struct fbr_desc *) (rx_local->fbr[0]->ring_virtaddr) +
2534 INDEX10(rx_local->fbr[0]->local_full);
2536 /* Handle the Free Buffer Ring advancement here. Write
2537 * the PA / Buffer Index for the returned buffer into
2538 * the oldest (next to be freed) FBR entry
2540 next->addr_hi = rx_local->fbr[0]->bus_high[buff_index];
2541 next->addr_lo = rx_local->fbr[0]->bus_low[buff_index];
2542 next->word2 = buff_index;
2544 writel(bump_free_buff_ring(&rx_local->fbr[0]->local_full,
2545 rx_local->fbr[0]->num_entries - 1),
2546 &rx_dma->fbr1_full_offset);
2550 struct fbr_desc *next = (struct fbr_desc *)
2551 rx_local->fbr[1]->ring_virtaddr +
2552 INDEX10(rx_local->fbr[1]->local_full);
2554 /* Handle the Free Buffer Ring advancement here. Write
2555 * the PA / Buffer Index for the returned buffer into
2556 * the oldest (next to be freed) FBR entry
2558 next->addr_hi = rx_local->fbr[1]->bus_high[buff_index];
2559 next->addr_lo = rx_local->fbr[1]->bus_low[buff_index];
2560 next->word2 = buff_index;
2562 writel(bump_free_buff_ring(
2563 &rx_local->fbr[1]->local_full,
2564 rx_local->fbr[1]->num_entries - 1),
2565 &rx_dma->fbr0_full_offset);
2568 spin_unlock_irqrestore(&adapter->fbr_lock, flags);
2570 dev_err(&adapter->pdev->dev,
2571 "%s illegal Buffer Index returned\n", __func__);
2574 /* The processing on this RFD is done, so put it back on the tail of
2577 spin_lock_irqsave(&adapter->rcv_lock, flags);
2578 list_add_tail(&rfd->list_node, &rx_local->recv_list);
2579 rx_local->num_ready_recv++;
2580 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2582 WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
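/* bump_free_buff_ring(), used above, is defined elsewhere in the driver.
 * A minimal sketch of the idea, assuming the 10-bit index + wrap-bit
 * convention described at add_10bit() below:
 *
 *	static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
 *	{
 *		u32 tmp = *free_buff_ring + 1;
 *
 *		if ((tmp & ET_DMA10_MASK) > limit) {
 *			tmp &= ~ET_DMA10_MASK;      (wrap the index back to 0)
 *			tmp ^= ET_DMA10_WRAP;       (toggle the wrap bit)
 *		}
 *		*free_buff_ring = tmp & (ET_DMA10_MASK | ET_DMA10_WRAP);
 *		return *free_buff_ring;
 *	}
 */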
2586 * et131x_rx_dma_disable - Stop Rx DMA on the ET1310
2587 * @adapter: pointer to our adapter structure
2589 void et131x_rx_dma_disable(struct et131x_adapter *adapter)
2592 /* Setup the receive dma configuration register */
2593 writel(0x00002001, &adapter->regs->rxdma.csr);
2594 csr = readl(&adapter->regs->rxdma.csr);
2595 if ((csr & 0x00020000) == 0) { /* Check halt status (bit 17) */
2597 csr = readl(&adapter->regs->rxdma.csr);
2598 if ((csr & 0x00020000) == 0)
2599 dev_err(&adapter->pdev->dev,
2600 "RX Dma failed to enter halt state. CSR 0x%08x\n",
2606 * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
2607 * @adapter: pointer to our adapter structure
2609 void et131x_rx_dma_enable(struct et131x_adapter *adapter)
2611 /* Setup the receive dma configuration register for normal operation */
2612 u32 csr = 0x2000; /* FBR1 enable */
2614 if (adapter->rx_ring.fbr[0]->buffsize == 4096)
2615 csr |= 0x0800;
2616 else if (adapter->rx_ring.fbr[0]->buffsize == 8192)
2617 csr |= 0x1000;
2618 else if (adapter->rx_ring.fbr[0]->buffsize == 16384)
2619 csr |= 0x1800;
2620 #ifdef USE_FBR0
2621 csr |= 0x0400; /* FBR0 enable */
2622 if (adapter->rx_ring.fbr[1]->buffsize == 256)
2623 csr |= 0x0100;
2624 else if (adapter->rx_ring.fbr[1]->buffsize == 512)
2625 csr |= 0x0200;
2626 else if (adapter->rx_ring.fbr[1]->buffsize == 1024)
2627 csr |= 0x0300;
2628 #endif
2629 writel(csr, &adapter->regs->rxdma.csr);
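/* Example, derived from the sizing code earlier in this file: with the
 * default non-jumbo configuration of 2048-byte FBR1 buffers and 256-byte
 * FBR0 buffers, none of the FBR1 size bits are set, so the final value
 * would be 0x2000 | 0x0400 | 0x0100 = 0x2500.
 */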
2631 csr = readl(&adapter->regs->rxdma.csr);
2632 if ((csr & 0x00020000) != 0) {
2634 csr = readl(&adapter->regs->rxdma.csr);
2635 if ((csr & 0x00020000) != 0) {
2636 dev_err(&adapter->pdev->dev,
2637 "RX Dma failed to exit halt state. CSR 0x%08x\n",
2644 static inline void add_10bit(u32 *v, int n)
2646 *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
2649 static inline void add_12bit(u32 *v, int n)
2651 *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
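/* Both helpers implement "index plus wrap bit" arithmetic: the low bits
 * hold the ring index and the single bit above them (ET_DMA10_WRAP /
 * ET_DMA12_WRAP) is preserved across the addition. Illustrative example,
 * assuming INDEX10(x) == (x & 0x3FF) and ET_DMA10_WRAP == 0x400:
 *
 *	u32 v = 0x7FF;         (index 0x3FF, wrap bit set)
 *	add_10bit(&v, 1);      (v is now 0x400: index 0, wrap bit kept)
 *
 * Note the wrap bit is only preserved here; toggling it when the index
 * passes the end of the ring is done explicitly by the callers.
 */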
2655 * nic_rx_pkts - Checks the hardware for available packets
2656 * @adapter: pointer to our adapter
2658 * Returns rfd, a pointer to our MPRFD.
2660 * Checks the hardware for available packets, using the completion ring.
2661 * If packets are available, it gets an RFD from the recv_list, attaches
2662 * the packet to it, puts the RFD in the RecvPendList, and also returns
2663 * the pointer to the RFD.
2665 static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
2667 struct rx_ring *rx_local = &adapter->rx_ring;
2668 struct rx_status_block *status;
2669 struct pkt_stat_desc *psr;
2673 unsigned long flags;
2674 struct list_head *element;
2681 /* RX Status block is written by the DMA engine prior to every
2682 * interrupt. It contains the next to be used entry in the Packet
2683 * Status Ring, and also the two Free Buffer rings.
2685 status = rx_local->rx_status_block;
2686 word1 = status->word1 >> 16; /* Get the useful bits */
2688 /* Check the PSR and wrap bits do not match */
2689 if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
2690 /* Looks like this ring is not updated yet */
2691 return NULL;
2693 /* The packet status ring indicates that data is available. */
2694 psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) +
2695 (rx_local->local_psr_full & 0xFFF);
2697 /* Grab any information that is required once the PSR is
2698 * advanced, since we can no longer rely on the memory being
2701 len = psr->word1 & 0xFFFF;
2702 ring_index = (psr->word1 >> 26) & 0x03;
2703 buff_index = (psr->word1 >> 16) & 0x3FF;
2704 word0 = psr->word0;
2706 /* Indicate that we have used this PSR entry. */
2708 add_12bit(&rx_local->local_psr_full, 1);
2709 if (
2710 (rx_local->local_psr_full & 0xFFF) > rx_local->psr_num_entries - 1) {
2711 /* Clear psr full and toggle the wrap bit */
2712 rx_local->local_psr_full &= ~0xFFF;
2713 rx_local->local_psr_full ^= 0x1000;
2716 writel(rx_local->local_psr_full,
2717 &adapter->regs->rxdma.psr_full_offset);
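/* Example of the wrap handling above: with 1024 PSR entries,
 * local_psr_full counts 0x0000..0x03FF, then the low 12 bits are cleared
 * and bit 12 is toggled, so 0x03FF advances to 0x1000 rather than 0x0400.
 * Comparing wrap bits is what lets driver and hardware tell a completely
 * full ring from a completely empty one when the indices are equal.
 */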
2719 #ifndef USE_FBR0
2720 if (ring_index != 1)
2721 return NULL;
2722 #endif
2724 #ifdef USE_FBR0
2725 if (ring_index > 1 ||
2726 (ring_index == 0 &&
2727 buff_index > rx_local->fbr[1]->num_entries - 1) ||
2728 (ring_index == 1 &&
2729 buff_index > rx_local->fbr[0]->num_entries - 1))
2730 #else
2731 if (ring_index != 1 || buff_index > rx_local->fbr[0]->num_entries - 1)
2732 #endif
2734 /* Illegal buffer or ring index cannot be used by S/W */
2735 dev_err(&adapter->pdev->dev,
2736 "NICRxPkts PSR Entry %d indicates "
2737 "length of %d and/or bad bi(%d)\n",
2738 rx_local->local_psr_full & 0xFFF,
2743 /* Get and fill the RFD. */
2744 spin_lock_irqsave(&adapter->rcv_lock, flags);
2747 element = rx_local->recv_list.next;
2748 rfd = (struct rfd *) list_entry(element, struct rfd, list_node);
2750 if (rfd == NULL) {
2751 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2752 return NULL;
2753 }
2755 list_del(&rfd->list_node);
2756 rx_local->num_ready_recv--;
2758 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2760 rfd->bufferindex = buff_index;
2761 rfd->ringindex = ring_index;
2763 /* In V1 silicon, there is a bug which screws up filtering of
2764 * runt packets. Therefore runt packet filtering is disabled
2765 * in the MAC and the packets are dropped here. They are
2766 * also counted here.
2768 if (len < (NIC_MIN_PACKET_SIZE + 4)) {
2769 adapter->stats.rx_other_errs++;
2770 len = 0;
2771 }
2773 if (len) {
2774 /* Determine if this is a multicast packet coming in */
2775 if ((word0 & ALCATEL_MULTICAST_PKT) &&
2776 !(word0 & ALCATEL_BROADCAST_PKT)) {
2777 /* Promiscuous mode and Multicast mode are
2778 * not mutually exclusive as was first
2779 * thought. I guess Promiscuous is just
2780 * considered a super-set of the other
2781 * filters. Generally, filter is 0x2b when in
2782 * promiscuous mode.
2783 */
2784 if ((adapter->packet_filter &
2785 ET131X_PACKET_TYPE_MULTICAST)
2786 && !(adapter->packet_filter &
2787 ET131X_PACKET_TYPE_PROMISCUOUS)
2788 && !(adapter->packet_filter &
2789 ET131X_PACKET_TYPE_ALL_MULTICAST)) {
2790 /*
2791 * Note - ring_index for fbr[] array is reversed
2792 * 1 for FBR0 etc
2793 */
2794 buf = rx_local->fbr[(ring_index == 0 ? 1 : 0)]->
2795 virt[buff_index];
2797 /* Loop through our list to see if the
2798 * destination address of this packet
2799 * matches one in our list.
2801 for (i = 0; i < adapter->multicast_addr_count;
2802 i++) {
2803 if (buf[0] ==
2804 adapter->multicast_list[i][0]
2805 && buf[1] ==
2806 adapter->multicast_list[i][1]
2807 && buf[2] ==
2808 adapter->multicast_list[i][2]
2809 && buf[3] ==
2810 adapter->multicast_list[i][3]
2811 && buf[4] ==
2812 adapter->multicast_list[i][4]
2813 && buf[5] ==
2814 adapter->multicast_list[i][5]) {
2815 break;
2816 }
2817 }
2819 /* If our index is equal to the number
2820 * of Multicast addresses we have, then
2821 * this means we did not find this
2822 * packet's matching address in our
2823 * list. Set the len to zero,
2824 * so we free our RFD when we return
2825 * from this function.
2827 if (i == adapter->multicast_addr_count)
2828 len = 0;
2829 }
2831 if (len > 0)
2832 adapter->stats.multicast_pkts_rcvd++;
2833 } else if (word0 & ALCATEL_BROADCAST_PKT)
2834 adapter->stats.broadcast_pkts_rcvd++;
2835 else
2836 /* Not sure what this counter measures in
2837 * promiscuous mode. Perhaps we should check
2838 * the MAC address to see if it is directed
2839 * to us in promiscuous mode.
2841 adapter->stats.unicast_pkts_rcvd++;
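/* The byte-by-byte comparison in the matching loop above is equivalent
 * to the following (illustrative only):
 *
 *	for (i = 0; i < adapter->multicast_addr_count; i++)
 *		if (!memcmp(buf, adapter->multicast_list[i], ETH_ALEN))
 *			break;
 */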
2845 struct sk_buff *skb = NULL;
2847 /*rfd->len = len - 4; */
2848 rfd->len = len;
2850 skb = dev_alloc_skb(rfd->len + 2);
2852 dev_err(&adapter->pdev->dev,
2853 "Couldn't alloc an SKB for Rx\n");
2857 adapter->net_stats.rx_bytes += rfd->len;
2859 /*
2860 * Note - ring_index for fbr[] array is reversed,
2861 * 1 for FBR0 etc
2862 */
2863 memcpy(skb_put(skb, rfd->len),
2864 rx_local->fbr[(ring_index == 0 ? 1 : 0)]->virt[buff_index],
2865 rfd->len);
2867 skb->dev = adapter->netdev;
2868 skb->protocol = eth_type_trans(skb, adapter->netdev);
2869 skb->ip_summed = CHECKSUM_NONE;
2871 netif_rx(skb);
2872 } else {
2873 rfd->len = 0;
2874 }
2876 nic_return_rfd(adapter, rfd);
2881 * et131x_handle_recv_interrupt - Interrupt handler for receive processing
2882 * @adapter: pointer to our adapter
2884 * Assumption: Rcv spinlock has been acquired.
2886 void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
2888 struct rfd *rfd = NULL;
2892 /* Process up to available RFD's */
2893 while (count < NUM_PACKETS_HANDLED) {
2894 if (list_empty(&adapter->rx_ring.recv_list)) {
2895 WARN_ON(adapter->rx_ring.num_ready_recv != 0);
2896 done = false;
2897 break;
2898 }
2900 rfd = nic_rx_pkts(adapter);
2902 if (rfd == NULL)
2903 break;
2905 /* Do not receive any packets until a filter has been set.
2906 * Do not receive any packets until we have link.
2907 * If length is zero, return the RFD in order to advance the
2910 if (!adapter->packet_filter ||
2911 !netif_carrier_ok(adapter->netdev) ||
2912 rfd->len == 0)
2913 continue;
2915 /* Increment the number of packets we received */
2916 adapter->net_stats.rx_packets++;
2918 /* Set the status on the packet, either resources or success */
2919 if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) {
2920 dev_warn(&adapter->pdev->dev,
2921 "RFD's are running out\n");
2926 if (count == NUM_PACKETS_HANDLED || !done) {
2927 adapter->rx_ring.unfinished_receives = true;
2928 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
2929 &adapter->regs->global.watchdog_timer);
2931 /* Watchdog timer will disable itself if appropriate. */
2932 adapter->rx_ring.unfinished_receives = false;
2938 * et131x_tx_dma_memory_alloc
2939 * @adapter: pointer to our private adapter structure
2941 * Returns 0 on success and errno on failure (as defined in errno.h).
2943 * Allocates memory that will be visible both to the device and to the CPU.
2944 * The OS will pass us packets, pointers to which we will insert in the Tx
2945 * Descriptor queue. The device will read this queue to find the packets in
2946 * memory. The device will update the "status" in memory each time it xmits a
2947 * packet.
2948 */
2949 int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
2952 struct tx_ring *tx_ring = &adapter->tx_ring;
2954 /* Allocate memory for the TCB's (Transmit Control Block) */
2955 adapter->tx_ring.tcb_ring =
2956 kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
2957 if (!adapter->tx_ring.tcb_ring) {
2958 dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
2962 /* Allocate enough memory for the Tx descriptor ring, and allocate
2963 * some extra so that the ring can be aligned on a 4k boundary.
2965 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
2966 tx_ring->tx_desc_ring =
2967 (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev, desc_size,
2968 &tx_ring->tx_desc_ring_pa, GFP_KERNEL);
2969 if (!adapter->tx_ring.tx_desc_ring) {
2970 dev_err(&adapter->pdev->dev,
2971 "Cannot alloc memory for Tx Ring\n");
2975 /* Save physical address
2977 * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
2978 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2979 * are ever returned, make sure the high part is retrieved here before
2980 * storing the adjusted address.
2981 */
2982 /* Allocate memory for the Tx status block */
2983 tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
2984 sizeof(u32),
2985 &tx_ring->tx_status_pa,
2986 GFP_KERNEL);
2987 if (!adapter->tx_ring.tx_status) {
2988 dev_err(&adapter->pdev->dev,
2989 "Cannot alloc memory for Tx status block\n");
2996 * et131x_tx_dma_memory_free - Free all memory allocated within this module
2997 * @adapter: pointer to our private adapter structure
2999 * Frees the Tx descriptor ring, the Tx status block and the TCB array.
3001 void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
3005 if (adapter->tx_ring.tx_desc_ring) {
3006 /* Free memory relating to Tx rings here */
3007 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
3008 + 4096 - 1;
3009 dma_free_coherent(&adapter->pdev->dev,
3010 desc_size,
3011 adapter->tx_ring.tx_desc_ring,
3012 adapter->tx_ring.tx_desc_ring_pa);
3013 adapter->tx_ring.tx_desc_ring = NULL;
3016 /* Free memory for the Tx status block */
3017 if (adapter->tx_ring.tx_status) {
3018 dma_free_coherent(&adapter->pdev->dev,
3019 sizeof(u32),
3020 adapter->tx_ring.tx_status,
3021 adapter->tx_ring.tx_status_pa);
3023 adapter->tx_ring.tx_status = NULL;
3025 /* Free the memory for the tcb structures */
3026 kfree(adapter->tx_ring.tcb_ring);
3030 * et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
3031 * @adapter: pointer to our private adapter structure
3033 * Configure the transmit engine with the ring buffers we have created
3034 * and prepare it for use.
3036 void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
3038 struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
3040 /* Load the hardware with the start of the transmit descriptor ring. */
3041 writel((u32) ((u64)adapter->tx_ring.tx_desc_ring_pa >> 32),
3042 &txdma->pr_base_hi);
3043 writel((u32) adapter->tx_ring.tx_desc_ring_pa,
3044 &txdma->pr_base_lo);
3046 /* Initialise the transmit DMA engine */
3047 writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
3049 /* Load the completion writeback physical address */
3050 writel((u32)((u64)adapter->tx_ring.tx_status_pa >> 32),
3051 &txdma->dma_wb_base_hi);
3052 writel((u32)adapter->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);
3054 *adapter->tx_ring.tx_status = 0;
3056 writel(0, &txdma->service_request);
3057 adapter->tx_ring.send_idx = 0;
3061 * et131x_tx_dma_disable - Stop Tx DMA on the ET1310
3062 * @adapter: pointer to our adapter structure
3064 void et131x_tx_dma_disable(struct et131x_adapter *adapter)
3066 /* Setup the transmit dma configuration register */
3067 writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
3068 &adapter->regs->txdma.csr);
3072 * et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310.
3073 * @adapter: pointer to our adapter structure
3075 * Mainly used after a return to the D0 (full-power) state from a lower state.
3077 void et131x_tx_dma_enable(struct et131x_adapter *adapter)
3079 /* Setup the transmit dma configuration register for normal
3082 writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
3083 &adapter->regs->txdma.csr);
3087 * et131x_init_send - Initialize send data structures
3088 * @adapter: pointer to our private adapter structure
3090 void et131x_init_send(struct et131x_adapter *adapter)
3094 struct tx_ring *tx_ring;
3096 /* Setup some convenience pointers */
3097 tx_ring = &adapter->tx_ring;
3098 tcb = adapter->tx_ring.tcb_ring;
3100 tx_ring->tcb_qhead = tcb;
3102 memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
3104 /* Go through and set up each TCB */
3105 for (ct = 0; ct++ < NUM_TCB; tcb++)
3106 /* Set the link pointer in HW TCB to the next TCB in the
3109 tcb->next = tcb + 1;
3111 /* Set the tail pointer */
3112 tcb--;
3113 tx_ring->tcb_qtail = tcb;
3114 tcb->next = NULL;
3115 /* Current send queue should now be empty */
3116 tx_ring->send_head = NULL;
3117 tx_ring->send_tail = NULL;
3121 * nic_send_packet - NIC specific send handler for version B silicon.
3122 * @adapter: pointer to our adapter
3123 * @tcb: pointer to struct tcb
3125 * Returns 0 or errno.
3127 static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
3130 struct tx_desc desc[24]; /* 24 x 16 byte */
3132 u32 thiscopy, remainder;
3133 struct sk_buff *skb = tcb->skb;
3134 u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
3135 struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
3136 unsigned long flags;
3137 struct phy_device *phydev = adapter->phydev;
3139 /* Part of the optimizations of this send routine restrict us to
3140 * sending 24 fragments at a pass. In practice we should never see
3141 * more than 5 fragments.
3143 * NOTE: The older version of this function (below) can handle any
3144 * number of fragments. If needed, we can call this function,
3145 * although it is less efficient.
3146 */
3150 memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
3152 for (i = 0; i < nr_frags; i++) {
3153 /* If there is something in this element, lets get a
3154 * descriptor from the ring and get the necessary data
3155 */
3156 if (i == 0) {
3157 /* If the fragments are smaller than a standard MTU,
3158 * then map them to a single descriptor in the Tx
3159 * Desc ring. However, if they're larger, as is
3160 * possible with support for jumbo packets, then
3161 * split them each across 2 descriptors.
3163 * This will work until we determine why the hardware
3164 * doesn't seem to like large fragments.
3166 if ((skb->len - skb->data_len) <= 1514) {
3167 desc[frag].addr_hi = 0;
3168 /* Low 16bits are length, high is vlan and
3169 unused currently so zero */
3170 desc[frag].len_vlan =
3171 skb->len - skb->data_len;
3173 /* NOTE: Here, the dma_addr_t returned from
3174 * dma_map_single() is implicitly cast as a
3175 * u32. Although dma_addr_t can be
3176 * 64-bit, the address returned by
3177 * dma_map_single() is always 32-bit
3178 * addressable (as defined by the pci/dma
3181 desc[frag++].addr_lo =
3182 dma_map_single(&adapter->pdev->dev,
3183 skb->data,
3184 skb->len -
3185 skb->data_len,
3186 DMA_TO_DEVICE);
3187 } else {
3188 desc[frag].addr_hi = 0;
3189 desc[frag].len_vlan =
3190 (skb->len - skb->data_len) / 2;
3192 /* NOTE: Here, the dma_addr_t returned from
3193 * dma_map_single() is implicitly cast as a
3194 * u32. Although dma_addr_t can be
3195 * 64-bit, the address returned by
3196 * dma_map_single() is always 32-bit
3197 * addressable (as defined by the pci/dma
3200 desc[frag++].addr_lo =
3201 dma_map_single(&adapter->pdev->dev,
3202 skb->data,
3203 ((skb->len -
3204 skb->data_len) / 2),
3205 DMA_TO_DEVICE);
3206 desc[frag].addr_hi = 0;
3208 desc[frag].len_vlan =
3209 (skb->len - skb->data_len) / 2;
3211 /* NOTE: Here, the dma_addr_t returned from
3212 * dma_map_single() is implicitly cast as a
3213 * u32. Although dma_addr_t can be
3214 * 64-bit, the address returned by
3215 * dma_map_single() is always 32-bit
3216 * addressable (as defined by the pci/dma
3219 desc[frag++].addr_lo =
3220 dma_map_single(&adapter->pdev->dev,
3221 skb->data +
3222 ((skb->len -
3223 skb->data_len) / 2),
3224 ((skb->len -
3225 skb->data_len) / 2),
3226 DMA_TO_DEVICE);
3227 }
3228 } else {
3229 desc[frag].addr_hi = 0;
3230 desc[frag].len_vlan =
3231 frags[i - 1].size;
3233 /* NOTE: Here, the dma_addr_t returned from
3234 * dma_map_page() is implicitly cast as a u32.
3235 * Although dma_addr_t can be 64-bit, the address
3236 * returned by dma_map_page() is always 32-bit
3237 * addressable (as defined by the pci/dma subsystem)
3239 desc[frag++].addr_lo =
3240 dma_map_page(&adapter->pdev->dev,
3241 frags[i - 1].page,
3242 frags[i - 1].page_offset,
3243 frags[i - 1].size,
3244 DMA_TO_DEVICE);
3245 }
3246 }
3251 if (phydev && phydev->speed == SPEED_1000) {
3252 if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
3253 /* Last element & Interrupt flag */
3254 desc[frag - 1].flags = 0x5;
3255 adapter->tx_ring.since_irq = 0;
3256 } else { /* Last element */
3257 desc[frag - 1].flags = 0x1;
3258 }
3259 } else
3260 desc[frag - 1].flags = 0x5;
3262 desc[0].flags |= 2; /* First element flag */
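/* Inferred from the assignments above (not from hardware documentation):
 * in desc.flags, bit 0 marks the last fragment of a packet, bit 1 the
 * first fragment, and bit 2 requests an interrupt on completion, so 0x5
 * means "last fragment, interrupt me" and 0x3 would describe a
 * single-fragment packet with no completion interrupt.
 */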
3264 tcb->index_start = adapter->tx_ring.send_idx;
3267 spin_lock_irqsave(&adapter->send_hw_lock, flags);
3269 thiscopy = NUM_DESC_PER_RING_TX -
3270 INDEX10(adapter->tx_ring.send_idx);
3272 if (thiscopy >= frag) {
3273 thiscopy = frag;
3274 remainder = 0;
3275 } else {
3276 remainder = frag - thiscopy;
3277 }
3279 memcpy(adapter->tx_ring.tx_desc_ring +
3280 INDEX10(adapter->tx_ring.send_idx), desc,
3281 sizeof(struct tx_desc) * thiscopy);
3283 add_10bit(&adapter->tx_ring.send_idx, thiscopy);
3285 if (INDEX10(adapter->tx_ring.send_idx) == 0 ||
3286 INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
3287 adapter->tx_ring.send_idx &= ~ET_DMA10_MASK;
3288 adapter->tx_ring.send_idx ^= ET_DMA10_WRAP;
3291 if (remainder) {
3292 memcpy(adapter->tx_ring.tx_desc_ring,
3293 desc + thiscopy,
3294 sizeof(struct tx_desc) * remainder);
3296 add_10bit(&adapter->tx_ring.send_idx, remainder);
3297 }
3299 if (INDEX10(adapter->tx_ring.send_idx) == 0) {
3300 if (adapter->tx_ring.send_idx)
3301 tcb->index = NUM_DESC_PER_RING_TX - 1;
3302 else
3303 tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
3304 } else
3305 tcb->index = adapter->tx_ring.send_idx - 1;
3307 spin_lock(&adapter->tcb_send_qlock);
3309 if (adapter->tx_ring.send_tail)
3310 adapter->tx_ring.send_tail->next = tcb;
3311 else
3312 adapter->tx_ring.send_head = tcb;
3314 adapter->tx_ring.send_tail = tcb;
3316 WARN_ON(tcb->next != NULL);
3318 adapter->tx_ring.used++;
3320 spin_unlock(&adapter->tcb_send_qlock);
3322 /* Write the new write pointer back to the device. */
3323 writel(adapter->tx_ring.send_idx,
3324 &adapter->regs->txdma.service_request);
3326 /* For Gig only, we use Tx Interrupt coalescing. Enable the software
3327 * timer to wake us up if this packet isn't followed by N more.
3329 if (phydev && phydev->speed == SPEED_1000) {
3330 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
3331 &adapter->regs->global.watchdog_timer);
3333 spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
3339 * send_packet - Do the work to send a packet
3340 * @skb: the packet(s) to send
3341 * @adapter: a pointer to the device's private adapter structure
3343 * Return 0 in almost all cases; non-zero value in extreme hard failure only.
3345 * Assumption: Send spinlock has been acquired
3347 static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
3350 struct tcb *tcb = NULL;
3352 unsigned long flags;
3354 /* All packets must have at least a MAC address and a protocol type */
3355 if (skb->len < ETH_HLEN)
3356 return -EIO;
3358 /* Get a TCB for this packet */
3359 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3361 tcb = adapter->tx_ring.tcb_qhead;
3363 if (tcb == NULL) {
3364 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3365 return -ENOMEM;
3366 }
3368 adapter->tx_ring.tcb_qhead = tcb->next;
3370 if (adapter->tx_ring.tcb_qhead == NULL)
3371 adapter->tx_ring.tcb_qtail = NULL;
3373 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3377 if (skb->data != NULL && skb->len - skb->data_len >= 6) {
3378 shbufva = (u16 *) skb->data;
3380 if ((shbufva[0] == 0xffff) &&
3381 (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
3382 tcb->flags |= fMP_DEST_BROAD;
3383 } else if ((shbufva[0] & 0x3) == 0x0001) {
3384 tcb->flags |= fMP_DEST_MULTI;
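/* shbufva points at the destination MAC address as three 16-bit words:
 * all-ones means broadcast, while the second test requires the multicast
 * bit (bit 0 of the first octet) to be set and, because of the 0x3 mask,
 * the locally-administered bit to be clear.
 */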
3390 /* Call the NIC specific send handler. */
3391 status = nic_send_packet(adapter, tcb);
3394 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3396 if (adapter->tx_ring.tcb_qtail)
3397 adapter->tx_ring.tcb_qtail->next = tcb;
3398 else
3399 /* Apparently ready Q is empty. */
3400 adapter->tx_ring.tcb_qhead = tcb;
3402 adapter->tx_ring.tcb_qtail = tcb;
3403 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3406 WARN_ON(adapter->tx_ring.used > NUM_TCB);
3411 * et131x_send_packets - This function is called by the OS to send packets
3412 * @skb: the packet(s) to send
3413 * @netdev: device on which to TX the above packet(s)
3415 * Return 0 in almost all cases; non-zero value in extreme hard failure only
3417 int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
3420 struct et131x_adapter *adapter = netdev_priv(netdev);
3422 /* Send these packets
3424 * NOTE: The Linux Tx entry point is only given one packet at a time
3425 * to Tx, so the PacketCount and its associated array are not used here.
3428 /* TCB is not available */
3429 if (adapter->tx_ring.used >= NUM_TCB) {
3430 /* NOTE: If there's an error on send, no need to queue the
3431 * packet under Linux; if we just send an error up to the
3432 * netif layer, it will resend the skb to us.
3436 /* We need to see if the link is up; if it's not, make the
3437 * netif layer think we're good and drop the packet
3439 if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
3440 !netif_carrier_ok(netdev)) {
3441 dev_kfree_skb_any(skb);
3442 skb = NULL;
3444 adapter->net_stats.tx_dropped++;
3445 } else {
3446 status = send_packet(skb, adapter);
3447 if (status != 0 && status != -ENOMEM) {
3448 /* On any other error, make netif think we're
3449 * OK and drop the packet
3451 dev_kfree_skb_any(skb);
3452 skb = NULL;
3453 adapter->net_stats.tx_dropped++;
3461 * free_send_packet - Recycle a struct tcb
3462 * @adapter: pointer to our adapter
3463 * @tcb: pointer to struct tcb
3465 * Complete the packet if necessary
3466 * Assumption - Send spinlock has been acquired
3468 static inline void free_send_packet(struct et131x_adapter *adapter,
3471 unsigned long flags;
3472 struct tx_desc *desc = NULL;
3473 struct net_device_stats *stats = &adapter->net_stats;
3475 if (tcb->flags & fMP_DEST_BROAD)
3476 atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
3477 else if (tcb->flags & fMP_DEST_MULTI)
3478 atomic_inc(&adapter->stats.multicast_pkts_xmtd);
3480 atomic_inc(&adapter->stats.unicast_pkts_xmtd);
3482 if (tcb->skb) {
3483 stats->tx_bytes += tcb->skb->len;
3485 /* Iterate through the TX descriptors on the ring
3486 * corresponding to this packet and unmap the fragments
3487 */
3489 do {
3490 desc = (struct tx_desc *)
3491 (adapter->tx_ring.tx_desc_ring +
3492 INDEX10(tcb->index_start));
3494 dma_unmap_single(&adapter->pdev->dev,
3495 desc->addr_lo,
3496 desc->len_vlan, DMA_TO_DEVICE);
3498 add_10bit(&tcb->index_start, 1);
3499 if (INDEX10(tcb->index_start) >=
3500 NUM_DESC_PER_RING_TX) {
3501 tcb->index_start &= ~ET_DMA10_MASK;
3502 tcb->index_start ^= ET_DMA10_WRAP;
3504 } while (desc != (adapter->tx_ring.tx_desc_ring +
3505 INDEX10(tcb->index)));
3507 dev_kfree_skb_any(tcb->skb);
3510 memset(tcb, 0, sizeof(struct tcb));
3512 /* Add the TCB to the Ready Q */
3513 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3515 adapter->net_stats.tx_packets++;
3517 if (adapter->tx_ring.tcb_qtail)
3518 adapter->tx_ring.tcb_qtail->next = tcb;
3519 else
3520 /* Apparently ready Q is empty. */
3521 adapter->tx_ring.tcb_qhead = tcb;
3523 adapter->tx_ring.tcb_qtail = tcb;
3525 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3526 WARN_ON(adapter->tx_ring.used < 0);
3530 * et131x_free_busy_send_packets - Free and complete the stopped active sends
3531 * @adapter: pointer to our adapter
3533 * Assumption - Send spinlock has been acquired
3535 void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
3538 unsigned long flags;
3541 /* Any packets being sent? Check the first TCB on the send list */
3542 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3544 tcb = adapter->tx_ring.send_head;
3546 while (tcb != NULL && freed < NUM_TCB) {
3547 struct tcb *next = tcb->next;
3549 adapter->tx_ring.send_head = next;
3551 if (next == NULL)
3552 adapter->tx_ring.send_tail = NULL;
3554 adapter->tx_ring.used--;
3556 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3559 free_send_packet(adapter, tcb);
3561 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3563 tcb = adapter->tx_ring.send_head;
3566 WARN_ON(freed == NUM_TCB);
3568 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3570 adapter->tx_ring.used = 0;
3574 * et131x_handle_send_interrupt - Interrupt handler for sending processing
3575 * @adapter: pointer to our adapter
3577 * Re-claim the send resources, complete sends and get more to send from
3578 * the send wait queue.
3580 * Assumption - Send spinlock has been acquired
3582 void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
3584 unsigned long flags;
3589 serviced = readl(&adapter->regs->txdma.new_service_complete);
3590 index = INDEX10(serviced);
3592 /* Has the ring wrapped? Process any descriptors that do not have
3593 * the same "wrap" indicator as the current completion indicator
3595 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3597 tcb = adapter->tx_ring.send_head;
3599 while (tcb &&
3600 ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
3601 index < INDEX10(tcb->index)) {
3602 adapter->tx_ring.used--;
3603 adapter->tx_ring.send_head = tcb->next;
3604 if (tcb->next == NULL)
3605 adapter->tx_ring.send_tail = NULL;
3607 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3608 free_send_packet(adapter, tcb);
3609 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3611 /* Goto the next packet */
3612 tcb = adapter->tx_ring.send_head;
3614 while (tcb &&
3615 !((serviced ^ tcb->index) & ET_DMA10_WRAP)
3616 && index > (tcb->index & ET_DMA10_MASK)) {
3617 adapter->tx_ring.used--;
3618 adapter->tx_ring.send_head = tcb->next;
3619 if (tcb->next == NULL)
3620 adapter->tx_ring.send_tail = NULL;
3622 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3623 free_send_packet(adapter, tcb);
3624 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3626 /* Goto the next packet */
3627 tcb = adapter->tx_ring.send_head;
3630 /* Wake up the queue when we hit a low-water mark */
3631 if (adapter->tx_ring.used <= NUM_TCB / 3)
3632 netif_wake_queue(adapter->netdev);
3634 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3637 /* ETHTOOL functions */
3639 static int et131x_get_settings(struct net_device *netdev,
3640 struct ethtool_cmd *cmd)
3642 struct et131x_adapter *adapter = netdev_priv(netdev);
3644 return phy_ethtool_gset(adapter->phydev, cmd);
3647 static int et131x_set_settings(struct net_device *netdev,
3648 struct ethtool_cmd *cmd)
3650 struct et131x_adapter *adapter = netdev_priv(netdev);
3652 return phy_ethtool_sset(adapter->phydev, cmd);
3655 static int et131x_get_regs_len(struct net_device *netdev)
3657 #define ET131X_REGS_LEN 256
3658 return ET131X_REGS_LEN * sizeof(u32);
3661 static void et131x_get_regs(struct net_device *netdev,
3662 struct ethtool_regs *regs, void *regs_data)
3664 struct et131x_adapter *adapter = netdev_priv(netdev);
3665 struct address_map __iomem *aregs = adapter->regs;
3666 u32 *regs_buff = regs_data;
3669 memset(regs_data, 0, et131x_get_regs_len(netdev));
3671 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
3672 adapter->pdev->device;
3675 et131x_mii_read(adapter, MII_BMCR, (u16 *)&regs_buff[num++]);
3676 et131x_mii_read(adapter, MII_BMSR, (u16 *)&regs_buff[num++]);
3677 et131x_mii_read(adapter, MII_PHYSID1, (u16 *)&regs_buff[num++]);
3678 et131x_mii_read(adapter, MII_PHYSID2, (u16 *)&regs_buff[num++]);
3679 et131x_mii_read(adapter, MII_ADVERTISE, (u16 *)&regs_buff[num++]);
3680 et131x_mii_read(adapter, MII_LPA, (u16 *)&regs_buff[num++]);
3681 et131x_mii_read(adapter, MII_EXPANSION, (u16 *)&regs_buff[num++]);
3682 /* Autoneg next page transmit reg */
3683 et131x_mii_read(adapter, 0x07, (u16 *)&regs_buff[num++]);
3684 /* Link partner next page reg */
3685 et131x_mii_read(adapter, 0x08, (u16 *)&regs_buff[num++]);
3686 et131x_mii_read(adapter, MII_CTRL1000, (u16 *)&regs_buff[num++]);
3687 et131x_mii_read(adapter, MII_STAT1000, (u16 *)&regs_buff[num++]);
3688 et131x_mii_read(adapter, MII_ESTATUS, (u16 *)&regs_buff[num++]);
3689 et131x_mii_read(adapter, PHY_INDEX_REG, (u16 *)&regs_buff[num++]);
3690 et131x_mii_read(adapter, PHY_DATA_REG, (u16 *)&regs_buff[num++]);
3691 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3692 (u16 *)&regs_buff[num++]);
3693 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL,
3694 (u16 *)&regs_buff[num++]);
3695 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL+1,
3696 (u16 *)&regs_buff[num++]);
3697 et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL,
3698 (u16 *)&regs_buff[num++]);
3699 et131x_mii_read(adapter, PHY_CONFIG, (u16 *)&regs_buff[num++]);
3700 et131x_mii_read(adapter, PHY_PHY_CONTROL, (u16 *)&regs_buff[num++]);
3701 et131x_mii_read(adapter, PHY_INTERRUPT_MASK, (u16 *)&regs_buff[num++]);
3702 et131x_mii_read(adapter, PHY_INTERRUPT_STATUS,
3703 (u16 *)&regs_buff[num++]);
3704 et131x_mii_read(adapter, PHY_PHY_STATUS, (u16 *)&regs_buff[num++]);
3705 et131x_mii_read(adapter, PHY_LED_1, (u16 *)&regs_buff[num++]);
3706 et131x_mii_read(adapter, PHY_LED_2, (u16 *)&regs_buff[num++]);
3709 regs_buff[num++] = readl(&aregs->global.txq_start_addr);
3710 regs_buff[num++] = readl(&aregs->global.txq_end_addr);
3711 regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
3712 regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
3713 regs_buff[num++] = readl(&aregs->global.pm_csr);
3714 regs_buff[num++] = adapter->stats.interrupt_status;
3715 regs_buff[num++] = readl(&aregs->global.int_mask);
3716 regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
3717 regs_buff[num++] = readl(&aregs->global.int_status_alias);
3718 regs_buff[num++] = readl(&aregs->global.sw_reset);
3719 regs_buff[num++] = readl(&aregs->global.slv_timer);
3720 regs_buff[num++] = readl(&aregs->global.msi_config);
3721 regs_buff[num++] = readl(&aregs->global.loopback);
3722 regs_buff[num++] = readl(&aregs->global.watchdog_timer);
3725 regs_buff[num++] = readl(&aregs->txdma.csr);
3726 regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
3727 regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
3728 regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
3729 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
3730 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
3731 regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
3732 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
3733 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
3734 regs_buff[num++] = readl(&aregs->txdma.service_request);
3735 regs_buff[num++] = readl(&aregs->txdma.service_complete);
3736 regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
3737 regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
3738 regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
3739 regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
3740 regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
3741 regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
3742 regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
3743 regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
3744 regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
3745 regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
3746 regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
3747 regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
3748 regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
3749 regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
3750 regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);
3753 regs_buff[num++] = readl(&aregs->rxdma.csr);
3754 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
3755 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
3756 regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
3757 regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
3758 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
3759 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
3760 regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
3761 regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
3762 regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
3763 regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
3764 regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
3765 regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
3766 regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
3767 regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
3768 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
3769 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
3770 regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
3771 regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
3772 regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
3773 regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
3774 regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
3775 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
3776 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
3777 regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
3778 regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
3779 regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
3780 regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
3781 regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
3784 #define ET131X_DRVINFO_LEN 32 /* value from ethtool.h */
3785 static void et131x_get_drvinfo(struct net_device *netdev,
3786 struct ethtool_drvinfo *info)
3788 struct et131x_adapter *adapter = netdev_priv(netdev);
3790 strncpy(info->driver, DRIVER_NAME, ET131X_DRVINFO_LEN);
3791 strncpy(info->version, DRIVER_VERSION, ET131X_DRVINFO_LEN);
3792 strncpy(info->bus_info, pci_name(adapter->pdev), ET131X_DRVINFO_LEN);
3795 static struct ethtool_ops et131x_ethtool_ops = {
3796 .get_settings = et131x_get_settings,
3797 .set_settings = et131x_set_settings,
3798 .get_drvinfo = et131x_get_drvinfo,
3799 .get_regs_len = et131x_get_regs_len,
3800 .get_regs = et131x_get_regs,
3801 .get_link = ethtool_op_get_link,
3804 void et131x_set_ethtool_ops(struct net_device *netdev)
3806 SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops);
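/* The register snapshot assembled in et131x_get_regs() (PHY registers
 * first, then the global, txdma and rxdma blocks) is what userspace sees
 * when running, for example, "ethtool -d ethX" against this interface.
 */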
3812 * et131x_hwaddr_init - set up the MAC Address on the ET1310
3813 * @adapter: pointer to our private adapter structure
3815 void et131x_hwaddr_init(struct et131x_adapter *adapter)
3817 /* If we have our default MAC address from init and no MAC address from
3818 * EEPROM, then we need to generate the last octet and set it on the
3819 * device.
3820 */
3821 if (adapter->rom_addr[0] == 0x00 &&
3822 adapter->rom_addr[1] == 0x00 &&
3823 adapter->rom_addr[2] == 0x00 &&
3824 adapter->rom_addr[3] == 0x00 &&
3825 adapter->rom_addr[4] == 0x00 &&
3826 adapter->rom_addr[5] == 0x00) {
3828 * We need to randomly generate the last octet so we
3829 * decrease our chances of setting the MAC address to the
3830 * same as another one of our cards in the system.
3832 get_random_bytes(&adapter->addr[5], 1);
3834 * We have the default value in the register we are
3835 * working with, so we need to copy the current
3836 * address into the permanent address.
3838 memcpy(adapter->rom_addr,
3839 adapter->addr, ETH_ALEN);
3841 /* We do not have an override address, so set the
3842 * current address to the permanent address and add
3843 * it to the device.
3844 */
3845 memcpy(adapter->addr,
3846 adapter->rom_addr, ETH_ALEN);
3851 * et131x_pci_init - initial PCI setup
3852 * @adapter: pointer to our private adapter structure
3853 * @pdev: our PCI device
3855 * Perform the initial setup of PCI registers and if possible initialise
3856 * the MAC address. At this point the I/O registers have yet to be mapped
3858 static int et131x_pci_init(struct et131x_adapter *adapter,
3859 struct pci_dev *pdev)
3865 if (et131x_init_eeprom(adapter) < 0)
3868 /* Let's set up the PORT LOGIC Register. First we need to know what
3869 * the max_payload_size is
3871 if (pci_read_config_byte(pdev, ET1310_PCI_MAX_PYLD, &max_payload)) {
3873 "Could not read PCI config space for Max Payload Size\n");
3877 /* Program the Ack/Nak latency and replay timers */
3878 max_payload &= 0x07; /* Only the lower 3 bits are valid */
3880 if (max_payload < 2) {
3881 static const u16 acknak[2] = { 0x76, 0xD0 };
3882 static const u16 replay[2] = { 0x1E0, 0x2ED };
3884 if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
3885 acknak[max_payload])) {
3887 "Could not write PCI config space for ACK/NAK\n");
3890 if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
3891 replay[max_payload])) {
3893 "Could not write PCI config space for Replay Timer\n");
3898 /* l0s and l1 latency timers. We are using default values.
3899 * Representing 001 for L0s and 010 for L1
3901 if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
3903 "Could not write PCI config space for Latency Timers\n");
3907 /* Change the max read size to 2k */
3908 if (pci_read_config_byte(pdev, 0x51, &read_size_reg)) {
3910 "Could not read PCI config space for Max read size\n");
3914 read_size_reg &= 0x8f;
3915 read_size_reg |= 0x40;
3917 if (pci_write_config_byte(pdev, 0x51, read_size_reg)) {
3919 "Could not write PCI config space for Max read size\n");
3923 /* Get MAC address from config space if an eeprom exists, otherwise
3924 * the MAC address there will not be valid
3926 if (!adapter->has_eeprom) {
3927 et131x_hwaddr_init(adapter);
3928 return 0;
3929 }
3931 for (i = 0; i < ETH_ALEN; i++) {
3932 if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
3933 adapter->rom_addr + i)) {
3934 dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
3938 memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
3943 * et131x_error_timer_handler
3944 * @data: timer-specific variable; here a pointer to our adapter structure
3946 * The routine called when the error timer expires, to track the number of
3947 * recurring errors.
3949 void et131x_error_timer_handler(unsigned long data)
3951 struct et131x_adapter *adapter = (struct et131x_adapter *) data;
3952 struct phy_device *phydev = adapter->phydev;
3954 if (et1310_in_phy_coma(adapter)) {
3955 /* Bring the device immediately out of coma, to
3956 * prevent it from sleeping indefinitely, this
3957 * mechanism could be improved! */
3958 et1310_disable_phy_coma(adapter);
3959 adapter->boot_coma = 20;
3961 et1310_update_macstat_host_counters(adapter);
3964 if (!phydev->link && adapter->boot_coma < 11)
3965 adapter->boot_coma++;
3967 if (adapter->boot_coma == 10) {
3968 if (!phydev->link) {
3969 if (!et1310_in_phy_coma(adapter)) {
3970 /* NOTE - This was originally a 'sync with
3971 * interrupt'. How to do that under Linux?
3973 et131x_enable_interrupts(adapter);
3974 et1310_enable_phy_coma(adapter);
3979 /* This is a periodic timer, so reschedule */
3980 mod_timer(&adapter->error_timer, jiffies +
3981 TX_ERROR_PERIOD * HZ / 1000);
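/* TX_ERROR_PERIOD * HZ / 1000 converts a period in milliseconds to
 * jiffies; e.g. a (hypothetical) TX_ERROR_PERIOD of 1000 would rearm the
 * timer one second from now regardless of the kernel's HZ setting.
 */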
3985 * et131x_configure_global_regs - configure JAGCore global regs
3986 * @adapter: pointer to our adapter structure
3988 * Used to configure the global registers on the JAGCore
3990 void et131x_configure_global_regs(struct et131x_adapter *adapter)
3992 struct global_regs __iomem *regs = &adapter->regs->global;
3994 writel(0, &regs->rxq_start_addr);
3995 writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);
3997 if (adapter->registry_jumbo_packet < 2048) {
3998 /* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
3999 * block of RAM that the driver can split between Tx
4000 * and Rx as it desires. Our default is to split it
4001 * 50/50:
4002 */
4003 writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
4004 writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
4005 } else if (adapter->registry_jumbo_packet < 8192) {
4006 /* For jumbo packets > 2k but < 8k, split 50-50. */
4007 writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
4008 writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
4010 /* 9216 is the only packet size greater than 8k that
4011 * is available. The Tx buffer has to be big enough
4012 * for one whole packet on the Tx side. We'll make
4013 * the Tx 9408, and give the rest to Rx
4015 writel(0x01b3, &regs->rxq_end_addr);
4016 writel(0x01b4, &regs->txq_start_addr);
	/* Initialize the loopback register. Disable all loopbacks. */
	writel(0, &regs->loopback);

	writel(0, &regs->msi_config);

	/* By default, disable the watchdog timer.  It will be enabled when
	 * a packet is queued.
	 */
	writel(0, &regs->watchdog_timer);
}
/**
 * et131x_adapter_setup - Set the adapter up as per cassini+ documentation
 * @adapter: pointer to our private adapter structure
 */
void et131x_adapter_setup(struct et131x_adapter *adapter)
{
	/* Configure the JAGCore */
	et131x_configure_global_regs(adapter);

	et1310_config_mac_regs1(adapter);

	/* Configure the MMC registers */
	/* All we need to do is initialize the Memory Control Register */
	writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);

	et1310_config_rxmac_regs(adapter);
	et1310_config_txmac_regs(adapter);

	et131x_config_rx_dma_regs(adapter);
	et131x_config_tx_dma_regs(adapter);

	et1310_config_macstat_regs(adapter);

	et1310_phy_power_down(adapter, 0);
	et131x_xcvr_init(adapter);
}
/**
 * et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310
 * @adapter: pointer to our private adapter structure
 */
void et131x_soft_reset(struct et131x_adapter *adapter)
{
	/* Disable MAC Core */
	writel(0xc00f0000, &adapter->regs->mac.cfg1);

	/* Set everything to a reset value */
	writel(0x7F, &adapter->regs->global.sw_reset);
	writel(0x000f0000, &adapter->regs->mac.cfg1);
	writel(0x00000000, &adapter->regs->mac.cfg1);
}
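/* Note on the magic values above (assumption, from the register names rather
 * than a datasheet): 0x7F written to global.sw_reset asserts the individual
 * reset bits of the seven JAGCore sub-blocks, while the mac.cfg1 sequence
 * first asserts the MAC soft-reset bits (0xc00f0000), then leaves only the
 * Tx/Rx function resets asserted (0x000f0000), and finally releases
 * everything (0x00000000).
 */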
/**
 * et131x_align_allocated_memory - Align allocated memory on a given boundary
 * @adapter: pointer to our adapter structure
 * @phys_addr: pointer to Physical address
 * @offset: pointer to the offset variable
 * @mask: correct mask
 */
void et131x_align_allocated_memory(struct et131x_adapter *adapter,
				   uint64_t *phys_addr,
				   uint64_t *offset, uint64_t mask)
{
	uint64_t new_addr;

	*offset = 0;

	new_addr = *phys_addr & ~mask;

	if (new_addr != *phys_addr) {
		/* Move to next aligned block */
		new_addr += mask + 1;
		/* Return offset for adjusting virt addr */
		*offset = new_addr - *phys_addr;
		/* Return new physical address */
		*phys_addr = new_addr;
	}
}
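/* Worked example: with mask = 0xFFF (4k alignment) and *phys_addr = 0x10234,
 * new_addr is first computed as 0x10000, which differs from *phys_addr, so
 * it becomes 0x11000; *offset is returned as 0x11000 - 0x10234 = 0xdcc, the
 * amount the caller must also add to the corresponding virtual address.
 */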
/**
 * et131x_adapter_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success, errno on failure (as defined in errno.h).
 *
 * Allocate all the memory blocks for send, receive and others.
 */
int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
{
	int status;

	/* Allocate memory for the Tx Ring */
	status = et131x_tx_dma_memory_alloc(adapter);
	if (status != 0) {
		dev_err(&adapter->pdev->dev,
			"et131x_tx_dma_memory_alloc FAILED\n");
		return status;
	}

	/* Receive buffer memory allocation */
	status = et131x_rx_dma_memory_alloc(adapter);
	if (status != 0) {
		dev_err(&adapter->pdev->dev,
			"et131x_rx_dma_memory_alloc FAILED\n");
		et131x_tx_dma_memory_free(adapter);
		return status;
	}

	/* Init receive data structures */
	status = et131x_init_recv(adapter);
	if (status != 0) {
		dev_err(&adapter->pdev->dev, "et131x_init_recv FAILED\n");
		et131x_tx_dma_memory_free(adapter);
		et131x_rx_dma_memory_free(adapter);
	}
	return status;
}
/**
 * et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
 * @adapter: pointer to our private adapter structure
 */
void et131x_adapter_memory_free(struct et131x_adapter *adapter)
{
	/* Free DMA memory */
	et131x_tx_dma_memory_free(adapter);
	et131x_rx_dma_memory_free(adapter);
}
static void et131x_adjust_link(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = adapter->phydev;

	if (netif_carrier_ok(netdev)) {
		adapter->boot_coma = 20;

		if (phydev && phydev->speed == SPEED_10) {
			/*
			 * NOTE - Is there a way to query this without
			 * TruePHY?
			 * && TRU_QueryCoreType(adapter->hTruePhy, 0) ==
			 * EMI_TRUEPHY_A13O)
			 */
			u16 register18;

			et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
					&register18);
			et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
					 register18 | 0x4);
			et131x_mii_write(adapter, PHY_INDEX_REG,
					 register18 | 0x8402);
			et131x_mii_write(adapter, PHY_DATA_REG,
					 register18 | 511);
			et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
					 register18);
		}

		et1310_config_flow_control(adapter);

		if (phydev && phydev->speed == SPEED_1000 &&
		    adapter->registry_jumbo_packet > 2048) {
			u16 reg;

			et131x_mii_read(adapter, PHY_CONFIG, &reg);
			reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
			reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
			et131x_mii_write(adapter, PHY_CONFIG, reg);
		}

		et131x_set_rx_dma_timer(adapter);
		et1310_config_mac_regs2(adapter);
	}

	if (phydev && phydev->link != adapter->link) {
		/*
		 * Check to see if we are in coma mode and if
		 * so, disable it because we will not be able
		 * to read PHY values until we are out.
		 */
		if (et1310_in_phy_coma(adapter))
			et1310_disable_phy_coma(adapter);

		if (phydev->link) {
			adapter->boot_coma = 20;
		} else {
			dev_warn(&adapter->pdev->dev,
				 "Link down - cable problem ?\n");
			adapter->boot_coma = 0;

			if (phydev->speed == SPEED_10) {
				/* NOTE - Is there a way to query this without
				 * TruePHY?
				 * && TRU_QueryCoreType(adapter->hTruePhy, 0) ==
				 * EMI_TRUEPHY_A13O)
				 */
				u16 register18;

				et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
						&register18);
				et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
						 register18 | 0x4);
				et131x_mii_write(adapter, PHY_INDEX_REG,
						 register18 | 0x8402);
				et131x_mii_write(adapter, PHY_DATA_REG,
						 register18 | 511);
				et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
						 register18);
			}

			/* Free the packets being actively sent & stopped */
			et131x_free_busy_send_packets(adapter);

			/* Re-initialize the send structures */
			et131x_init_send(adapter);

			/*
			 * Bring the device back to the state it was during
			 * init prior to autonegotiation being complete. This
			 * way, when we get the auto-neg complete interrupt,
			 * we can complete init by calling config_mac_regs2.
			 */
			et131x_soft_reset(adapter);

			/* Setup ET1310 as per the documentation */
			et131x_adapter_setup(adapter);

			/* perform reset of tx/rx */
			et131x_disable_txrx(netdev);
			et131x_enable_txrx(netdev);
		}

		adapter->link = phydev->link;

		phy_print_status(phydev);
	}
}
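/* Note on the SPEED_10 sequences above (our reading; the TruePHY references
 * were commented out by the original authors): PHY_INDEX_REG/PHY_DATA_REG
 * appear to form an indexed window into vendor-specific registers of the
 * on-board PHY, and the same errata write sequence is applied both when a
 * 10Mb/s link comes up and when it drops.
 */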
static int et131x_mii_probe(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = NULL;

	phydev = phy_find_first(adapter->mii_bus);
	if (!phydev) {
		dev_err(&adapter->pdev->dev, "no PHY found\n");
		return -ENODEV;
	}

	phydev = phy_connect(netdev, dev_name(&phydev->dev),
			     &et131x_adjust_link, 0, PHY_INTERFACE_MODE_MII);

	if (IS_ERR(phydev)) {
		dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	phydev->supported &= (SUPPORTED_10baseT_Half
			      | SUPPORTED_10baseT_Full
			      | SUPPORTED_100baseT_Half
			      | SUPPORTED_100baseT_Full
			      | SUPPORTED_Autoneg
			      | SUPPORTED_MII
			      | SUPPORTED_TP);

	if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
		phydev->supported |= SUPPORTED_1000baseT_Full;

	phydev->advertising = phydev->supported;
	adapter->phydev = phydev;

	dev_info(&adapter->pdev->dev, "attached PHY driver [%s] "
		 "(mii_bus:phy_addr=%s)\n",
		 phydev->drv->name, dev_name(&phydev->dev));

	return 0;
}
/**
 * et131x_adapter_init
 * @adapter: pointer to the private adapter struct
 * @pdev: pointer to the PCI device
 *
 * Initialize the data structures for the et131x_adapter object and link
 * them together with the platform provided device structures.
 */
static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
						  struct pci_dev *pdev)
{
	static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };

	struct et131x_adapter *adapter;

	/* Allocate private adapter struct and copy in relevant information */
	adapter = netdev_priv(netdev);
	adapter->pdev = pci_dev_get(pdev);
	adapter->netdev = netdev;

	/* Do the same for the netdev struct */
	netdev->irq = pdev->irq;
	netdev->base_addr = pci_resource_start(pdev, 0);

	/* Initialize spinlocks here */
	spin_lock_init(&adapter->lock);
	spin_lock_init(&adapter->tcb_send_qlock);
	spin_lock_init(&adapter->tcb_ready_qlock);
	spin_lock_init(&adapter->send_hw_lock);
	spin_lock_init(&adapter->rcv_lock);
	spin_lock_init(&adapter->rcv_pend_lock);
	spin_lock_init(&adapter->fbr_lock);
	spin_lock_init(&adapter->phy_lock);

	adapter->registry_jumbo_packet = 1514;	/* 1514-9216 */

	/* Set the MAC address to a default */
	memcpy(adapter->addr, default_mac, ETH_ALEN);

	return adapter;
}
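/* Note on the default MAC in et131x_adapter_init(): the 00:05:3d prefix is
 * an Agere Systems OUI, and the value is only a placeholder; et131x_pci_init()
 * replaces adapter->addr with the EEPROM / PCI-config address when one is
 * available.
 */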
/**
 * et131x_pci_setup - Perform device initialization
 * @pdev: a pointer to the device's pci_dev structure
 * @ent: this device's entry in the pci_device_id table
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 *
 * Registered in the pci_driver structure, this function is called when the
 * PCI subsystem finds a new PCI device which matches the information
 * contained in the pci_device_id table. This routine is the equivalent to
 * a device insertion routine.
 */
static int __devinit et131x_pci_setup(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	int result;
	int pm_cap;
	int ii;
	struct net_device *netdev;
	struct et131x_adapter *adapter;

	result = pci_enable_device(pdev);
	if (result) {
		dev_err(&pdev->dev, "pci_enable_device() failed\n");
		goto err_out;
	}

	/* Perform some basic PCI checks */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Can't find PCI device's base address\n");
		result = -ENODEV;
		goto err_disable;
	}

	result = pci_request_regions(pdev, DRIVER_NAME);
	if (result) {
		dev_err(&pdev->dev, "Can't get PCI resources\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	/* Query PCI for Power Mgmt Capabilities
	 *
	 * NOTE: Now reading PowerMgmt in another location; is this still
	 * needed?
	 */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (!pm_cap) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capabilities\n");
		result = -EIO;
		goto err_release_res;
	}

	/* Check the DMA addressing support of this device */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		result = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (result) {
			dev_err(&pdev->dev,
				"Unable to obtain 64 bit DMA for consistent allocations\n");
			goto err_release_res;
		}
	} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		result = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (result) {
			dev_err(&pdev->dev,
				"Unable to obtain 32 bit DMA for consistent allocations\n");
			goto err_release_res;
		}
	} else {
		dev_err(&pdev->dev, "No usable DMA addressing method\n");
		result = -EIO;
		goto err_release_res;
	}
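	/* Reminder for readers: dma_set_mask() returns 0 on success, so the
	 * negations above mean "64-bit DMA supported, else try 32-bit, else
	 * fail".  The streaming and coherent masks are set separately, which
	 * is why dma_set_coherent_mask() is checked on its own.
	 */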
	/* Allocate netdev and private adapter structs */
	netdev = et131x_device_alloc();
	if (!netdev) {
		dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
		result = -ENOMEM;
		goto err_release_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	et131x_set_ethtool_ops(netdev);

	adapter = et131x_adapter_init(netdev, pdev);

	/* Initialise the PCI setup for the device */
	et131x_pci_init(adapter, pdev);

	/* Map the bus-relative registers to system virtual memory */
	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "Cannot map device registers\n");
		result = -ENOMEM;
		goto err_free_dev;
	}

	/* If Phy COMA mode was enabled when we went down, disable it here. */
	writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);

	/* Issue a global reset to the et1310 */
	et131x_soft_reset(adapter);

	/* Disable all interrupts (paranoid) */
	et131x_disable_interrupts(adapter);

	/* Allocate DMA memory */
	result = et131x_adapter_memory_alloc(adapter);
	if (result) {
		dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
		goto err_iounmap;
	}

	/* Init send data structures */
	et131x_init_send(adapter);

	/* Set up the task structure for the ISR's deferred handler */
	INIT_WORK(&adapter->task, et131x_isr_handler);

	/* Copy address into the net_device struct */
	memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);

	/* Init variable for counting how long we do not have link status */
	adapter->boot_coma = 0;
	et1310_disable_phy_coma(adapter);

	/* Setup the mii_bus struct */
	adapter->mii_bus = mdiobus_alloc();
	if (!adapter->mii_bus) {
		dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
		result = -ENOMEM;
		goto err_mem_free;
	}

	adapter->mii_bus->name = "et131x_eth_mii";
	snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
		 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
	adapter->mii_bus->priv = netdev;
	adapter->mii_bus->read = et131x_mdio_read;
	adapter->mii_bus->write = et131x_mdio_write;
	adapter->mii_bus->reset = et131x_mdio_reset;
	adapter->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
	if (!adapter->mii_bus->irq) {
		dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
		result = -ENOMEM;
		goto err_mdio_free;
	}

	for (ii = 0; ii < PHY_MAX_ADDR; ii++)
		adapter->mii_bus->irq[ii] = PHY_POLL;
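	/* PHY_POLL tells phylib that no PHY interrupt line is wired up, so
	 * the PHY state machine polls for link changes; link events then
	 * arrive via the et131x_adjust_link() callback registered in
	 * et131x_mii_probe().
	 */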
	result = mdiobus_register(adapter->mii_bus);
	if (result) {
		dev_err(&pdev->dev, "failed to register MII bus\n");
		goto err_mdio_free_irq;
	}

	result = et131x_mii_probe(netdev);
	if (result) {
		dev_err(&pdev->dev, "failed to probe MII bus\n");
		goto err_mdio_unregister;
	}

	/* Setup et1310 as per the documentation */
	et131x_adapter_setup(adapter);

	/* We can enable interrupts now
	 *
	 *  NOTE - Because registration of interrupt handler is done in the
	 *         device's open(), defer enabling device interrupts to that
	 *         point
	 */

	/* Register the net_device struct with the Linux network layer */
	result = register_netdev(netdev);
	if (result != 0) {
		dev_err(&pdev->dev, "register_netdev() failed\n");
		goto err_mdio_unregister;
	}

	/* Register the net_device struct with the PCI subsystem. Save a copy
	 * of the PCI config space for this device now that the device has
	 * been initialized, just in case it needs to be quickly restored.
	 */
	pci_set_drvdata(pdev, netdev);
	pci_save_state(adapter->pdev);

	return result;
err_mdio_unregister:
	mdiobus_unregister(adapter->mii_bus);
err_mdio_free_irq:
	kfree(adapter->mii_bus->irq);
err_mdio_free:
	mdiobus_free(adapter->mii_bus);
err_mem_free:
	et131x_adapter_memory_free(adapter);
err_iounmap:
	iounmap(adapter->regs);
err_free_dev:
	pci_dev_put(pdev);
	free_netdev(netdev);
err_release_res:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err_out:
	return result;
}
/**
 * et131x_pci_remove
 * @pdev: a pointer to the device's pci_dev structure
 *
 * Registered in the pci_driver structure, this function is called when the
 * PCI subsystem detects that a PCI device which matches the information
 * contained in the pci_device_id table has been removed.
 */
static void __devexit et131x_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct et131x_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);
	mdiobus_unregister(adapter->mii_bus);
	kfree(adapter->mii_bus->irq);
	mdiobus_free(adapter->mii_bus);

	et131x_adapter_memory_free(adapter);
	iounmap(adapter->regs);
	pci_dev_put(pdev);

	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
#ifdef CONFIG_PM_SLEEP
static int et131x_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netif_running(netdev)) {
		netif_device_detach(netdev);
		et131x_down(netdev);
		pci_save_state(pdev);
	}

	return 0;
}

static int et131x_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netif_running(netdev)) {
		pci_restore_state(pdev);
		et131x_up(netdev);
		netif_device_attach(netdev);
	}

	return 0;
}

static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
#define ET131X_PM_OPS (&et131x_pm_ops)
#else
#define ET131X_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
	{0,}
};
MODULE_DEVICE_TABLE(pci, et131x_pci_table);

static struct pci_driver et131x_driver = {
	.name		= DRIVER_NAME,
	.id_table	= et131x_pci_table,
	.probe		= et131x_pci_setup,
	.remove		= __devexit_p(et131x_pci_remove),
	.driver.pm	= ET131X_PM_OPS,
};
/**
 * et131x_init_module - The "main" entry point called on driver initialization
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 */
static int __init et131x_init_module(void)
{
	return pci_register_driver(&et131x_driver);
}

/**
 * et131x_cleanup_module - The entry point called on driver cleanup
 */
static void __exit et131x_cleanup_module(void)
{
	pci_unregister_driver(&et131x_driver);
}

module_init(et131x_init_module);
module_exit(et131x_cleanup_module);
/* ISR functions */

/**
 * et131x_enable_interrupts - enable interrupt
 * @adapter: et131x device
 *
 * Enable the appropriate interrupts on the ET131x according to our
 * configuration
 */
void et131x_enable_interrupts(struct et131x_adapter *adapter)
{
	u32 mask;

	/* Enable all global interrupts */
	if (adapter->flowcontrol == FLOW_TXONLY ||
	    adapter->flowcontrol == FLOW_BOTH)
		mask = INT_MASK_ENABLE;
	else
		mask = INT_MASK_ENABLE_NO_FLOW;
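	/* The two masks differ in the free-buffer-ring "low" bits: with Tx
	 * flow control on, those interrupts stay unmasked so the ISR bottom
	 * half can request a pause frame (see the FB_R0/FB_R1 handling in
	 * et131x_isr_handler() below).
	 */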
	writel(mask, &adapter->regs->global.int_mask);
}
/**
 * et131x_disable_interrupts - interrupt disable
 * @adapter: et131x device
 *
 * Block all interrupts from the et131x device at the device itself
 */
void et131x_disable_interrupts(struct et131x_adapter *adapter)
{
	/* Disable all global interrupts */
	writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
}
/**
 * et131x_isr - The Interrupt Service Routine for the driver.
 * @irq: the IRQ on which the interrupt was received.
 * @dev_id: device-specific info (here a pointer to a net_device struct)
 *
 * Returns a value indicating if the interrupt was handled.
 */
irqreturn_t et131x_isr(int irq, void *dev_id)
{
	bool handled = true;
	struct net_device *netdev = (struct net_device *)dev_id;
	struct et131x_adapter *adapter = NULL;
	u32 status;

	if (!netif_device_present(netdev)) {
		handled = false;
		goto out;
	}

	adapter = netdev_priv(netdev);

	/* If the adapter is in low power state, then it should not
	 * recognize any interrupt
	 */

	/* Disable Device Interrupts */
	et131x_disable_interrupts(adapter);

	/* Get a copy of the value in the interrupt status register
	 * so we can process the interrupting section
	 */
	status = readl(&adapter->regs->global.int_status);

	if (adapter->flowcontrol == FLOW_TXONLY ||
	    adapter->flowcontrol == FLOW_BOTH)
		status &= ~INT_MASK_ENABLE;
	else
		status &= ~INT_MASK_ENABLE_NO_FLOW;

	/* Make sure this is our interrupt */
	if (!status) {
		handled = false;
		et131x_enable_interrupts(adapter);
		goto out;
	}

	/* This is our interrupt, so process accordingly */

	if (status & ET_INTR_WATCHDOG) {
		struct tcb *tcb = adapter->tx_ring.send_head;

		if (tcb)
			if (++tcb->stale > 1)
				status |= ET_INTR_TXDMA_ISR;

		if (adapter->rx_ring.unfinished_receives)
			status |= ET_INTR_RXDMA_XFR_DONE;
		else if (tcb == NULL)
			writel(0, &adapter->regs->global.watchdog_timer);

		status &= ~ET_INTR_WATCHDOG;
	}
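	/* Our reading of the watchdog logic above: a TCB still at the head
	 * of the send queue after a full watchdog period (stale > 1) is
	 * treated as a pending Tx completion, unfinished receives are folded
	 * into the Rx-done bit, and with nothing outstanding the watchdog
	 * timer is simply switched off.
	 */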
	if (!status) {
		/* This interrupt has in some way been "handled" by
		 * the ISR. Either it was a spurious Rx interrupt, or
		 * it was a Tx interrupt that has been filtered by
		 * the ISR.
		 */
		et131x_enable_interrupts(adapter);
		goto out;
	}

	/* We need to save the interrupt status value for use in our
	 * DPC. We will clear the software copy of that in that
	 * routine.
	 */
	adapter->stats.interrupt_status = status;

	/* Schedule the ISR handler as a bottom-half task in the
	 * kernel's tq_immediate queue, and mark the queue for
	 * execution
	 */
	schedule_work(&adapter->task);
out:
	return IRQ_RETVAL(handled);
}
/**
 * et131x_isr_handler - The ISR handler
 * @work: a pointer to the work_struct embedded in our adapter structure
 *
 * Scheduled to run in a deferred context by the ISR. This is where the ISR's
 * work actually gets done.
 */
void et131x_isr_handler(struct work_struct *work)
{
	struct et131x_adapter *adapter =
		container_of(work, struct et131x_adapter, task);
	u32 status = adapter->stats.interrupt_status;
	struct address_map __iomem *iomem = adapter->regs;

	/*
	 * These first two are by far the most common.  Once handled, we clear
	 * their two bits in the status word.  If the word is now zero, we
	 * don't need to look at the remaining handlers.
	 */
	/* Handle all the completed Transmit interrupts */
	if (status & ET_INTR_TXDMA_ISR)
		et131x_handle_send_interrupt(adapter);

	/* Handle all the completed Receives interrupts */
	if (status & ET_INTR_RXDMA_XFR_DONE)
		et131x_handle_recv_interrupt(adapter);

	status &= 0xffffffd7;
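	/* The magic 0xffffffd7 clears the two bits handled above; assuming
	 * the usual et131x definitions (ET_INTR_TXDMA_ISR = 0x08,
	 * ET_INTR_RXDMA_XFR_DONE = 0x20) it is exactly
	 * ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE).
	 */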
	if (status) {
		/* Handle the TXDMA Error interrupt */
		if (status & ET_INTR_TXDMA_ERR) {
			u32 txdma_err;

			/* Following read also clears the register (COR) */
			txdma_err = readl(&iomem->txdma.tx_dma_error);

			dev_warn(&adapter->pdev->dev,
				 "TXDMA_ERR interrupt, error = %d\n",
				 txdma_err);
		}

		/* Handle Free Buffer Ring 0 and 1 Low interrupt */
		if (status &
		    (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
			/*
			 * This indicates the number of unused buffers in
			 * RXDMA free buffer ring 0 is <= the limit you
			 * programmed. Free buffer resources need to be
			 * returned. Free buffers are consumed as packets
			 * are passed from the network to the host. The host
			 * becomes aware of the packets from the contents of
			 * the packet status ring. This ring is queried when
			 * the packet done interrupt occurs. Packets are then
			 * passed to the OS. When the OS is done with the
			 * packets the resources can be returned to the
			 * ET1310 for re-use. This interrupt is one method of
			 * returning resources.
			 */

			/* If the user has flow control on, then we will
			 * send a pause packet, otherwise just exit
			 */
			if (adapter->flowcontrol == FLOW_TXONLY ||
			    adapter->flowcontrol == FLOW_BOTH) {
				u32 pm_csr;

				/* Tell the device to send a pause packet via
				 * the back pressure register (bp req and
				 * bp xon/xoff)
				 */
				pm_csr = readl(&iomem->global.pm_csr);
				if (!et1310_in_phy_coma(adapter))
					writel(3, &iomem->txmac.bp_ctrl);
			}
		}

		/* Handle Packet Status Ring Low Interrupt */
		if (status & ET_INTR_RXDMA_STAT_LOW) {
			/*
			 * Same idea as with the two Free Buffer Rings.
			 * Packets going from the network to the host each
			 * consume a free buffer resource and a packet status
			 * resource. These resources are passed to the OS.
			 * When the OS is done with the resources, they need
			 * to be returned to the ET1310. This is one method
			 * of returning the resources.
			 */
		}
		/* Handle RXDMA Error Interrupt */
		if (status & ET_INTR_RXDMA_ERR) {
			/*
			 * The rxdma_error interrupt is sent when a time-out
			 * on a request issued by the JAGCore has occurred or
			 * a completion is returned with an un-successful
			 * status. In both cases the request is considered
			 * complete. The JAGCore will automatically re-try the
			 * request in question. Normally information on events
			 * like these are sent to the host using the "Advanced
			 * Error Reporting" capability. This interrupt is
			 * another way of getting similar information. The
			 * only thing required is to clear the interrupt by
			 * reading the ISR in the global resources. The
			 * JAGCore will do a re-try on the request. Normally
			 * you should never see this interrupt. If you start
			 * to see this interrupt occurring frequently then
			 * something bad has occurred. A reset might be the
			 * thing to do.
			 */

			dev_warn(&adapter->pdev->dev,
				 "RxDMA_ERR interrupt, error %x\n",
				 readl(&iomem->txmac.tx_test));
		}

		/* Handle the Wake on LAN Event */
		if (status & ET_INTR_WOL) {
			/*
			 * This is a secondary interrupt for wake on LAN.
			 * The driver should never see this, if it does,
			 * something serious is wrong. We will TRAP the
			 * message when we are in DBG mode, otherwise we
			 * will ignore it.
			 */
			dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
		}
		/* Let's move on to the TxMac */
		if (status & ET_INTR_TXMAC) {
			u32 err = readl(&iomem->txmac.err);

			/*
			 * When any of the errors occur and TXMAC generates
			 * an interrupt to report these errors, it usually
			 * means that TXMAC has detected an error in the data
			 * stream retrieved from the on-chip Tx Q. All of
			 * these errors are catastrophic and TXMAC won't be
			 * able to recover data when these errors occur. In
			 * a nutshell, the whole Tx path will have to be reset
			 * and re-configured afterwards.
			 */
			dev_warn(&adapter->pdev->dev,
				 "TXMAC interrupt, error 0x%08x\n",
				 err);

			/* If we are debugging, we want to see this error,
			 * otherwise we just want the device to be reset and
			 * continue
			 */
		}

		/* Handle RXMAC Interrupt */
		if (status & ET_INTR_RXMAC) {
			/*
			 * These interrupts are catastrophic to the device,
			 * what we need to do is disable the interrupts and
			 * set the flag to cause us to reset so we can solve
			 * this issue.
			 */
			/* MP_SET_FLAG( adapter,
					fMP_ADAPTER_HARDWARE_ERROR); */

			dev_warn(&adapter->pdev->dev,
				 "RXMAC interrupt, error 0x%08x. Requesting reset\n",
				 readl(&iomem->rxmac.err_reg));

			dev_warn(&adapter->pdev->dev,
				 "Enable 0x%08x, Diag 0x%08x\n",
				 readl(&iomem->rxmac.ctrl),
				 readl(&iomem->rxmac.rxq_diag));

			/*
			 * If we are debugging, we want to see this error,
			 * otherwise we just want the device to be reset and
			 * continue
			 */
		}

		/* Handle MAC_STAT Interrupt */
		if (status & ET_INTR_MAC_STAT) {
			/*
			 * This means at least one of the un-masked counters
			 * in the MAC_STAT block has rolled over. Use this
			 * to maintain the top, software managed bits of the
			 * counter(s).
			 */
			et1310_handle_macstat_interrupt(adapter);
		}

		/* Handle SLV Timeout Interrupt */
		if (status & ET_INTR_SLV_TIMEOUT) {
			/*
			 * This means a timeout has occurred on a read or
			 * write request to one of the JAGCore registers. The
			 * Global Resources block has terminated the request
			 * and on a read request, returned a "fake" value.
			 * The most likely reasons are: Bad Address or the
			 * addressed module is in a power-down state and
			 * can't respond.
			 */
		}
	}
	et131x_enable_interrupts(adapter);
}
/* NETDEV functions */

/**
 * et131x_stats - Return the current device statistics.
 * @netdev: device whose stats are being queried
 *
 * Returns a pointer to the net_device_stats structure for the device
 */
static struct net_device_stats *et131x_stats(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct net_device_stats *stats = &adapter->net_stats;
	struct ce_stats *devstat = &adapter->stats;

	stats->rx_errors = devstat->rx_length_errs +
			   devstat->rx_align_errs +
			   devstat->rx_crc_errs +
			   devstat->rx_code_violations +
			   devstat->rx_other_errs;
	stats->tx_errors = devstat->tx_max_pkt_errs;
	stats->multicast = devstat->multicast_pkts_rcvd;
	stats->collisions = devstat->tx_collisions;

	stats->rx_length_errors = devstat->rx_length_errs;
	stats->rx_over_errors = devstat->rx_overflows;
	stats->rx_crc_errors = devstat->rx_crc_errs;

	/* NOTE: These stats don't have corresponding values in CE_STATS,
	 * so we're going to have to update these directly from within the
	 * TX/RX code
	 */
	/* stats->rx_bytes            = devstat->; */
	/* stats->tx_bytes            = devstat->; */
	/* stats->rx_dropped          = devstat->; */
	/* stats->tx_dropped          = devstat->; */

	/* NOTE: Not used, can't find analogous statistics */
	/* stats->rx_frame_errors     = devstat->; */
	/* stats->rx_fifo_errors      = devstat->; */
	/* stats->rx_missed_errors    = devstat->; */

	/* stats->tx_aborted_errors   = devstat->; */
	/* stats->tx_carrier_errors   = devstat->; */
	/* stats->tx_fifo_errors      = devstat->; */
	/* stats->tx_heartbeat_errors = devstat->; */
	/* stats->tx_window_errors    = devstat->; */

	return stats;
}
/**
 * et131x_enable_txrx - Enable tx/rx queues
 * @netdev: device to be enabled
 */
void et131x_enable_txrx(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* Enable the Tx and Rx DMA engines (if not already enabled) */
	et131x_rx_dma_enable(adapter);
	et131x_tx_dma_enable(adapter);

	/* Enable device interrupts */
	if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)
		et131x_enable_interrupts(adapter);

	/* We're ready to move some data, so start the queue */
	netif_start_queue(netdev);
}
/**
 * et131x_disable_txrx - Disable tx/rx queues
 * @netdev: device to be disabled
 */
void et131x_disable_txrx(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* First thing is to stop the queue */
	netif_stop_queue(netdev);

	/* Stop the Tx and Rx DMA engines */
	et131x_rx_dma_disable(adapter);
	et131x_tx_dma_disable(adapter);

	/* Disable device interrupts */
	et131x_disable_interrupts(adapter);
}
/**
 * et131x_up - Bring up a device for use.
 * @netdev: device to be opened
 */
void et131x_up(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_enable_txrx(netdev);
	phy_start(adapter->phydev);
}
/**
 * et131x_open - Open the device for use.
 * @netdev: device to be opened
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 */
int et131x_open(struct net_device *netdev)
{
	int result = 0;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* Start the timer to track NIC errors */
	init_timer(&adapter->error_timer);
	adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
	adapter->error_timer.function = et131x_error_timer_handler;
	adapter->error_timer.data = (unsigned long)adapter;
	add_timer(&adapter->error_timer);

	/* Register our IRQ */
	result = request_irq(netdev->irq, et131x_isr, IRQF_SHARED,
			     netdev->name, netdev);
	if (result) {
		dev_err(&adapter->pdev->dev, "could not register IRQ %d\n",
			netdev->irq);
		return result;
	}

	adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE;

	et131x_up(netdev);

	return result;
}
/**
 * et131x_down - Bring down the device
 * @netdev: device to be brought down
 */
void et131x_down(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* Save the timestamp for the TX watchdog, prevent a timeout */
	netdev->trans_start = jiffies;

	phy_stop(adapter->phydev);
	et131x_disable_txrx(netdev);
}

/**
 * et131x_close - Close the device
 * @netdev: device to be closed
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 */
int et131x_close(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_down(netdev);

	adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE;
	free_irq(netdev->irq, netdev);

	/* Stop the error timer */
	return del_timer_sync(&adapter->error_timer);
}
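/* Note: del_timer_sync() returns whether the timer was still pending (0 or
 * 1), not an errno; the network core ignores ndo_stop()'s return value, so
 * this is harmless, if slightly misleading given the "errno on failure"
 * comment convention used in this file.
 */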
/**
 * et131x_ioctl - The I/O Control handler for the driver
 * @netdev: device on which the control request is being made
 * @reqbuf: a pointer to the IOCTL request buffer
 * @cmd: the IOCTL command code
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 */
static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
			int cmd)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	if (!adapter->phydev)
		return -EINVAL;

	return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
}
/**
 * et131x_set_packet_filter - Configures the Rx Packet filtering on the device
 * @adapter: pointer to our private adapter structure
 *
 * FIXME: lot of dups with MAC code
 *
 * Returns 0 on success, errno on failure
 */
static int et131x_set_packet_filter(struct et131x_adapter *adapter)
{
	int status = 0;
	uint32_t filter = adapter->packet_filter;
	u32 ctrl;
	u32 pf_ctrl;

	ctrl = readl(&adapter->regs->rxmac.ctrl);
	pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);

	/* Default to disabled packet filtering. Enable it in the individual
	 * case statements that require the device to filter something
	 */
	ctrl |= 0x04;

	/* Set us to be in promiscuous mode so we receive everything, this
	 * is also true when we get a packet filter of 0
	 */
	if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
		pf_ctrl &= ~7;	/* Clear filter bits */
	else {
		/*
		 * Set us up with Multicast packet filtering. Three cases are
		 * possible - (1) we have a multi-cast list, (2) we receive ALL
		 * multicast entries or (3) we receive none.
		 */
		if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
			pf_ctrl &= ~2;	/* Multicast filter bit */
		else {
			et1310_setup_device_for_multicast(adapter);
			pf_ctrl |= 2;
			ctrl &= ~0x04;
		}

		/* Set us up with Unicast packet filtering */
		if (filter & ET131X_PACKET_TYPE_DIRECTED) {
			et1310_setup_device_for_unicast(adapter);
			pf_ctrl |= 4;
			ctrl &= ~0x04;
		}

		/* Set us up with Broadcast packet filtering */
		if (filter & ET131X_PACKET_TYPE_BROADCAST) {
			pf_ctrl |= 1;	/* Broadcast filter bit */
			ctrl &= ~0x04;
		} else
			pf_ctrl &= ~1;

		/* Setup the receive mac configuration registers - Packet
		 * Filter control + the enable / disable for packet filter
		 * in the control reg.
		 */
		writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
		writel(ctrl, &adapter->regs->rxmac.ctrl);
	}
	return status;
}
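/* Summary of the rxmac filter bits used above (inferred from this function,
 * not from a datasheet): pf_ctrl bit 0 enables broadcast filtering, bit 1
 * multicast, bit 2 unicast, while ctrl bit 2 (0x04) is the global "packet
 * filter disable" that the promiscuous path leaves set.
 */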
/**
 * et131x_multicast - The handler to configure multicasting on the interface
 * @netdev: a pointer to a net_device struct representing the device
 */
static void et131x_multicast(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	uint32_t packet_filter = 0;
	unsigned long flags;
	struct netdev_hw_addr *ha;
	int i;

	spin_lock_irqsave(&adapter->lock, flags);

	/* Before we modify the platform-independent filter flags, store them
	 * locally. This allows us to determine if anything's changed and if
	 * we even need to bother the hardware
	 */
	packet_filter = adapter->packet_filter;

	/* Clear the 'multicast' flag locally; because we only have a single
	 * flag to check multicast, and multiple multicast addresses can be
	 * set, this is the easiest way to determine if more than one
	 * multicast address is being set.
	 */
	packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;

	/* Check the net_device flags and set the device independent flags
	 * accordingly
	 */
	if (netdev->flags & IFF_PROMISC)
		adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
	else
		adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;

	if (netdev->flags & IFF_ALLMULTI)
		adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;

	if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST)
		adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;

	if (netdev_mc_count(netdev) < 1) {
		adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
		adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
	} else
		adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;

	/* Set values in the private adapter struct */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == NIC_MAX_MCAST_LIST)
			break;
		memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN);
	}
	adapter->multicast_addr_count = i;

	/* Are the new flags different from the previous ones? If not, then no
	 * action is required
	 *
	 * NOTE - This block will always update the multicast_list with the
	 * hardware, even if the addresses aren't the same.
	 */
	if (packet_filter != adapter->packet_filter) {
		/* Call the device's filter function */
		et131x_set_packet_filter(adapter);
	}
	spin_unlock_irqrestore(&adapter->lock, flags);
}
/**
 * et131x_tx - The handler to tx a packet on the device
 * @skb: data to be Tx'd
 * @netdev: device on which data is to be Tx'd
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 */
static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
{
	int status = 0;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* stop the queue if it's getting full */
	if (adapter->tx_ring.used >= NUM_TCB - 1 &&
	    !netif_queue_stopped(netdev))
		netif_stop_queue(netdev);

	/* Save the timestamp for the TX timeout watchdog */
	netdev->trans_start = jiffies;

	/* Call the device-specific data Tx routine */
	status = et131x_send_packets(skb, netdev);

	/* Check status and manage the netif queue if necessary.  -ENOMEM
	 * means no TCB was available, so ask the stack to requeue; any
	 * other failure is reported as TX_OK so the packet is not retried.
	 */
	if (status != 0) {
		if (status == -ENOMEM)
			status = NETDEV_TX_BUSY;
		else
			status = NETDEV_TX_OK;
	}
	return status;
}
/**
 * et131x_tx_timeout - Timeout handler
 * @netdev: a pointer to a net_device struct representing the device
 *
 * The handler called when a Tx request times out. The timeout period is
 * specified by the 'watchdog_timeo' element in the net_device structure
 * (see et131x_device_alloc() to see how this value is set).
 */
static void et131x_tx_timeout(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct tcb *tcb;
	unsigned long flags;

	/* If the device is closed, ignore the timeout */
	if (!(adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE))
		return;

	/* Any nonrecoverable hardware error?
	 * Checks adapter->flags for any failure in phy reading
	 */
	if (adapter->flags & fMP_ADAPTER_NON_RECOVER_ERROR)
		return;

	/* Hardware failure? */
	if (adapter->flags & fMP_ADAPTER_HARDWARE_ERROR) {
		dev_err(&adapter->pdev->dev, "hardware error - reset\n");
		return;
	}

	/* Is send stuck? */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = adapter->tx_ring.send_head;

	if (tcb != NULL) {
		tcb->count++;

		if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
			spin_unlock_irqrestore(&adapter->tcb_send_qlock,
					       flags);

			dev_warn(&adapter->pdev->dev,
				 "Send stuck - reset. tcb->WrIndex %x, flags 0x%08x\n",
				 tcb->index,
				 tcb->flags);

			adapter->net_stats.tx_errors++;

			/* perform reset of tx/rx */
			et131x_disable_txrx(netdev);
			et131x_enable_txrx(netdev);
			return;
		}
	}

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}
/**
 * et131x_change_mtu - The handler called to change the MTU for the device
 * @netdev: device whose MTU is to be changed
 * @new_mtu: the desired MTU
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 */
static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
{
	int result = 0;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* Make sure the requested MTU is valid */
	if (new_mtu < 64 || new_mtu > 9216)
		return -EINVAL;

	et131x_disable_txrx(netdev);
	et131x_handle_send_interrupt(adapter);
	et131x_handle_recv_interrupt(adapter);

	/* Set the new MTU */
	netdev->mtu = new_mtu;

	/* Free Rx DMA memory */
	et131x_adapter_memory_free(adapter);

	/* Set the config parameter for Jumbo Packet support */
	adapter->registry_jumbo_packet = new_mtu + 14;
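	/* The +14 is the Ethernet header (ETH_HLEN): 6 bytes destination +
	 * 6 bytes source + 2 bytes EtherType; VLAN tags and FCS are not
	 * included in this figure.
	 */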
	et131x_soft_reset(adapter);

	/* Alloc and init Rx DMA memory */
	result = et131x_adapter_memory_alloc(adapter);
	if (result != 0) {
		dev_warn(&adapter->pdev->dev,
			 "Change MTU failed; couldn't re-alloc DMA memory\n");
		return result;
	}

	et131x_init_send(adapter);

	et131x_hwaddr_init(adapter);
	memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);

	/* Init the device with the new settings */
	et131x_adapter_setup(adapter);

	et131x_enable_txrx(netdev);

	return result;
}
/**
 * et131x_set_mac_addr - handler to change the MAC address for the device
 * @netdev: device whose MAC is to be changed
 * @new_mac: the desired MAC address
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 *
 * IMPLEMENTED BY : blux http://berndlux.de 22.01.2007 21:14
 */
static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
{
	int result = 0;
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *address = new_mac;

	if (adapter == NULL)
		return -ENODEV;

	/* Make sure the requested MAC is valid */
	if (!is_valid_ether_addr(address->sa_data))
		return -EINVAL;

	et131x_disable_txrx(netdev);
	et131x_handle_send_interrupt(adapter);
	et131x_handle_recv_interrupt(adapter);

	/* Set the new MAC */
	/* netdev->set_mac_address = &new_mac; */

	memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len);

	printk(KERN_INFO "%s: Setting MAC address to %pM\n",
	       netdev->name, netdev->dev_addr);

	/* Free Rx DMA memory */
	et131x_adapter_memory_free(adapter);

	et131x_soft_reset(adapter);

	/* Alloc and init Rx DMA memory */
	result = et131x_adapter_memory_alloc(adapter);
	if (result != 0) {
		dev_err(&adapter->pdev->dev,
			"Change MAC failed; couldn't re-alloc DMA memory\n");
		return result;
	}

	et131x_init_send(adapter);

	et131x_hwaddr_init(adapter);

	/* Init the device with the new settings */
	et131x_adapter_setup(adapter);

	et131x_enable_txrx(netdev);

	return result;
}
static const struct net_device_ops et131x_netdev_ops = {
	.ndo_open		= et131x_open,
	.ndo_stop		= et131x_close,
	.ndo_start_xmit		= et131x_tx,
	.ndo_set_multicast_list	= et131x_multicast,
	.ndo_tx_timeout		= et131x_tx_timeout,
	.ndo_change_mtu		= et131x_change_mtu,
	.ndo_set_mac_address	= et131x_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_get_stats		= et131x_stats,
	.ndo_do_ioctl		= et131x_ioctl,
};
/**
 * et131x_device_alloc
 *
 * Returns pointer to the allocated and initialized net_device struct for
 * this device.
 *
 * Create instances of net_device and et131x_adapter for the new adapter
 * and register the device's entry points in the net_device structure.
 */
struct net_device *et131x_device_alloc(void)
{
	struct net_device *netdev;

	/* Alloc net_device and adapter structs */
	netdev = alloc_etherdev(sizeof(struct et131x_adapter));

	if (!netdev) {
		printk(KERN_ERR "et131x: Alloc of net_device struct failed\n");
		return NULL;
	}

	/*
	 * Setup the function registration table (and other data) for a
	 * net_device
	 */
	netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
	netdev->netdev_ops = &et131x_netdev_ops;

	/* Poll? */
	/* netdev->poll = &et131x_poll; */
	/* netdev->poll_controller = &et131x_poll_controller; */

	return netdev;
}