2 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
4 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
5 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
6 * Copyright (c) a lot of people too. Please respect their work.
8 * See MAINTAINERS file for support contact information.
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/delay.h>
17 #include <linux/ethtool.h>
18 #include <linux/mii.h>
19 #include <linux/if_vlan.h>
20 #include <linux/crc32.h>
23 #include <linux/tcp.h>
24 #include <linux/init.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/firmware.h>
29 #include <linux/pci-aspm.h>
30 #include <linux/prefetch.h>
35 #define RTL8169_VERSION "2.3LK-NAPI"
36 #define MODULENAME "r8169"
37 #define PFX MODULENAME ": "
39 #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
40 #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
41 #define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
42 #define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
43 #define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
44 #define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
45 #define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
46 #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
47 #define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
48 #define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
49 #define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
50 #define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw"
53 #define assert(expr) \
55 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
56 #expr,__FILE__,__func__,__LINE__); \
58 #define dprintk(fmt, args...) \
59 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
61 #define assert(expr) do {} while (0)
62 #define dprintk(fmt, args...) do {} while (0)
63 #endif /* RTL8169_DEBUG */
65 #define R8169_MSG_DEFAULT \
66 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
68 #define TX_SLOTS_AVAIL(tp) \
69 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
71 /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
72 #define TX_FRAGS_READY_FOR(tp,nr_frags) \
73 (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
75 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
76 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
77 static const int multicast_filter_limit = 32;
79 #define MAX_READ_REQUEST_SHIFT 12
80 #define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
81 #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
83 #define R8169_REGS_SIZE 256
84 #define R8169_NAPI_WEIGHT 64
85 #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
86 #define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
87 #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
88 #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
90 #define RTL8169_TX_TIMEOUT (6*HZ)
91 #define RTL8169_PHY_TIMEOUT (10*HZ)
93 /* write/read MMIO register */
94 #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
95 #define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
96 #define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
97 #define RTL_R8(reg) readb (ioaddr + (reg))
98 #define RTL_R16(reg) readw (ioaddr + (reg))
99 #define RTL_R32(reg) readl (ioaddr + (reg))
102 RTL_GIGA_MAC_VER_01 = 0,
143 RTL_GIGA_MAC_NONE = 0xff,
146 enum rtl_tx_desc_version {
151 #define JUMBO_1K ETH_DATA_LEN
152 #define JUMBO_4K (4*1024 - ETH_HLEN - 2)
153 #define JUMBO_6K (6*1024 - ETH_HLEN - 2)
154 #define JUMBO_7K (7*1024 - ETH_HLEN - 2)
155 #define JUMBO_9K (9*1024 - ETH_HLEN - 2)
157 #define _R(NAME,TD,FW,SZ,B) { \
165 static const struct {
167 enum rtl_tx_desc_version txd_version;
171 } rtl_chip_infos[] = {
173 [RTL_GIGA_MAC_VER_01] =
174 _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true),
175 [RTL_GIGA_MAC_VER_02] =
176 _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true),
177 [RTL_GIGA_MAC_VER_03] =
178 _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true),
179 [RTL_GIGA_MAC_VER_04] =
180 _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true),
181 [RTL_GIGA_MAC_VER_05] =
182 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
183 [RTL_GIGA_MAC_VER_06] =
184 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
186 [RTL_GIGA_MAC_VER_07] =
187 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
188 [RTL_GIGA_MAC_VER_08] =
189 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
190 [RTL_GIGA_MAC_VER_09] =
191 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
192 [RTL_GIGA_MAC_VER_10] =
193 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
194 [RTL_GIGA_MAC_VER_11] =
195 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
196 [RTL_GIGA_MAC_VER_12] =
197 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
198 [RTL_GIGA_MAC_VER_13] =
199 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
200 [RTL_GIGA_MAC_VER_14] =
201 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
202 [RTL_GIGA_MAC_VER_15] =
203 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
204 [RTL_GIGA_MAC_VER_16] =
205 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
206 [RTL_GIGA_MAC_VER_17] =
207 _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
208 [RTL_GIGA_MAC_VER_18] =
209 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
210 [RTL_GIGA_MAC_VER_19] =
211 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
212 [RTL_GIGA_MAC_VER_20] =
213 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
214 [RTL_GIGA_MAC_VER_21] =
215 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
216 [RTL_GIGA_MAC_VER_22] =
217 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
218 [RTL_GIGA_MAC_VER_23] =
219 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
220 [RTL_GIGA_MAC_VER_24] =
221 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
222 [RTL_GIGA_MAC_VER_25] =
223 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1,
225 [RTL_GIGA_MAC_VER_26] =
226 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2,
228 [RTL_GIGA_MAC_VER_27] =
229 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
230 [RTL_GIGA_MAC_VER_28] =
231 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
232 [RTL_GIGA_MAC_VER_29] =
233 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
235 [RTL_GIGA_MAC_VER_30] =
236 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
238 [RTL_GIGA_MAC_VER_31] =
239 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
240 [RTL_GIGA_MAC_VER_32] =
241 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1,
243 [RTL_GIGA_MAC_VER_33] =
244 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2,
246 [RTL_GIGA_MAC_VER_34] =
247 _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
249 [RTL_GIGA_MAC_VER_35] =
250 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1,
252 [RTL_GIGA_MAC_VER_36] =
253 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
255 [RTL_GIGA_MAC_VER_37] =
256 _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1,
258 [RTL_GIGA_MAC_VER_38] =
259 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
261 [RTL_GIGA_MAC_VER_39] =
262 _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
264 [RTL_GIGA_MAC_VER_40] =
265 _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_1,
267 [RTL_GIGA_MAC_VER_41] =
268 _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
278 static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
279 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
280 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
281 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
282 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
283 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
284 { PCI_VENDOR_ID_DLINK, 0x4300,
285 PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
286 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
287 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
288 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
289 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
290 { PCI_VENDOR_ID_LINKSYS, 0x1032,
291 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
293 PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
297 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
299 static int rx_buf_sz = 16383;
306 MAC0 = 0, /* Ethernet hardware address. */
308 MAR0 = 8, /* Multicast filter. */
309 CounterAddrLow = 0x10,
310 CounterAddrHigh = 0x14,
311 TxDescStartAddrLow = 0x20,
312 TxDescStartAddrHigh = 0x24,
313 TxHDescStartAddrLow = 0x28,
314 TxHDescStartAddrHigh = 0x2c,
323 #define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
324 #define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
327 #define RX128_INT_EN (1 << 15) /* 8111c and later */
328 #define RX_MULTI_EN (1 << 14) /* 8111c only */
329 #define RXCFG_FIFO_SHIFT 13
330 /* No threshold before first PCI xfer */
331 #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
332 #define RXCFG_DMA_SHIFT 8
333 /* Unlimited maximum PCI burst. */
334 #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
341 #define PME_SIGNAL (1 << 5) /* 8168c and later */
352 RxDescAddrLow = 0xe4,
353 RxDescAddrHigh = 0xe8,
354 EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */
356 #define NoEarlyTx 0x3f /* Max value : no early transmit. */
358 MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
360 #define TxPacketMax (8064 >> 7)
361 #define EarlySize 0x27
364 FuncEventMask = 0xf4,
365 FuncPresetState = 0xf8,
366 FuncForceEvent = 0xfc,
369 enum rtl8110_registers {
375 enum rtl8168_8101_registers {
378 #define CSIAR_FLAG 0x80000000
379 #define CSIAR_WRITE_CMD 0x80000000
380 #define CSIAR_BYTE_ENABLE 0x0f
381 #define CSIAR_BYTE_ENABLE_SHIFT 12
382 #define CSIAR_ADDR_MASK 0x0fff
383 #define CSIAR_FUNC_CARD 0x00000000
384 #define CSIAR_FUNC_SDIO 0x00010000
385 #define CSIAR_FUNC_NIC 0x00020000
388 #define EPHYAR_FLAG 0x80000000
389 #define EPHYAR_WRITE_CMD 0x80000000
390 #define EPHYAR_REG_MASK 0x1f
391 #define EPHYAR_REG_SHIFT 16
392 #define EPHYAR_DATA_MASK 0xffff
394 #define PFM_EN (1 << 6)
396 #define FIX_NAK_1 (1 << 4)
397 #define FIX_NAK_2 (1 << 3)
400 #define NOW_IS_OOB (1 << 7)
401 #define TX_EMPTY (1 << 5)
402 #define RX_EMPTY (1 << 4)
403 #define RXTX_EMPTY (TX_EMPTY | RX_EMPTY)
404 #define EN_NDP (1 << 3)
405 #define EN_OOB_RESET (1 << 2)
406 #define LINK_LIST_RDY (1 << 1)
408 #define EFUSEAR_FLAG 0x80000000
409 #define EFUSEAR_WRITE_CMD 0x80000000
410 #define EFUSEAR_READ_CMD 0x00000000
411 #define EFUSEAR_REG_MASK 0x03ff
412 #define EFUSEAR_REG_SHIFT 8
413 #define EFUSEAR_DATA_MASK 0xff
416 enum rtl8168_registers {
421 #define ERIAR_FLAG 0x80000000
422 #define ERIAR_WRITE_CMD 0x80000000
423 #define ERIAR_READ_CMD 0x00000000
424 #define ERIAR_ADDR_BYTE_ALIGN 4
425 #define ERIAR_TYPE_SHIFT 16
426 #define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
427 #define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
428 #define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
429 #define ERIAR_MASK_SHIFT 12
430 #define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
431 #define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
432 #define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
433 #define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
434 EPHY_RXER_NUM = 0x7c,
435 OCPDR = 0xb0, /* OCP GPHY access */
436 #define OCPDR_WRITE_CMD 0x80000000
437 #define OCPDR_READ_CMD 0x00000000
438 #define OCPDR_REG_MASK 0x7f
439 #define OCPDR_GPHY_REG_SHIFT 16
440 #define OCPDR_DATA_MASK 0xffff
442 #define OCPAR_FLAG 0x80000000
443 #define OCPAR_GPHY_WRITE_CMD 0x8000f060
444 #define OCPAR_GPHY_READ_CMD 0x0000f060
446 RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
447 MISC = 0xf0, /* 8168e only. */
448 #define TXPLA_RST (1 << 29)
449 #define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */
450 #define PWM_EN (1 << 22)
451 #define RXDV_GATED_EN (1 << 19)
452 #define EARLY_TALLY_EN (1 << 16)
453 #define FORCE_CLK (1 << 15) /* force clock request */
456 enum rtl_register_content {
457 /* InterruptStatusBits */
461 TxDescUnavail = 0x0080,
485 /* TXPoll register p.5 */
486 HPQ = 0x80, /* Poll cmd on the high prio queue */
487 NPQ = 0x40, /* Poll cmd on the low prio queue */
488 FSWInt = 0x01, /* Forced software interrupt */
492 Cfg9346_Unlock = 0xc0,
497 AcceptBroadcast = 0x08,
498 AcceptMulticast = 0x04,
500 AcceptAllPhys = 0x01,
501 #define RX_CONFIG_ACCEPT_MASK 0x3f
504 TxInterFrameGapShift = 24,
505 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
507 /* Config1 register p.24 */
510 Speed_down = (1 << 4),
514 PMEnable = (1 << 0), /* Power Management Enable */
516 /* Config2 register p. 25 */
517 ClkReqEn = (1 << 7), /* Clock Request Enable */
518 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
519 PCI_Clock_66MHz = 0x01,
520 PCI_Clock_33MHz = 0x00,
522 /* Config3 register p.25 */
523 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
524 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
525 Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
526 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
528 /* Config4 register */
529 Jumbo_En1 = (1 << 1), /* 8168 only. Reserved in the 8168b */
531 /* Config5 register p.27 */
532 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
533 MWF = (1 << 5), /* Accept Multicast wakeup frame */
534 UWF = (1 << 4), /* Accept Unicast wakeup frame */
536 LanWake = (1 << 1), /* LanWake enable/disable */
537 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
538 ASPM_en = (1 << 0), /* ASPM enable */
541 TBIReset = 0x80000000,
542 TBILoopback = 0x40000000,
543 TBINwEnable = 0x20000000,
544 TBINwRestart = 0x10000000,
545 TBILinkOk = 0x02000000,
546 TBINwComplete = 0x01000000,
549 EnableBist = (1 << 15), // 8168 8101
550 Mac_dbgo_oe = (1 << 14), // 8168 8101
551 Normal_mode = (1 << 13), // unused
552 Force_half_dup = (1 << 12), // 8168 8101
553 Force_rxflow_en = (1 << 11), // 8168 8101
554 Force_txflow_en = (1 << 10), // 8168 8101
555 Cxpl_dbg_sel = (1 << 9), // 8168 8101
556 ASF = (1 << 8), // 8168 8101
557 PktCntrDisable = (1 << 7), // 8168 8101
558 Mac_dbgo_sel = 0x001c, // 8168
563 INTT_0 = 0x0000, // 8168
564 INTT_1 = 0x0001, // 8168
565 INTT_2 = 0x0002, // 8168
566 INTT_3 = 0x0003, // 8168
568 /* rtl8169_PHYstatus */
579 TBILinkOK = 0x02000000,
581 /* DumpCounterCommand */
586 /* First doubleword. */
587 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
588 RingEnd = (1 << 30), /* End of descriptor ring */
589 FirstFrag = (1 << 29), /* First segment of a packet */
590 LastFrag = (1 << 28), /* Final segment of a packet */
594 enum rtl_tx_desc_bit {
595 /* First doubleword. */
596 TD_LSO = (1 << 27), /* Large Send Offload */
597 #define TD_MSS_MAX 0x07ffu /* MSS value */
599 /* Second doubleword. */
600 TxVlanTag = (1 << 17), /* Add VLAN tag */
603 /* 8169, 8168b and 810x except 8102e. */
604 enum rtl_tx_desc_bit_0 {
605 /* First doubleword. */
606 #define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
607 TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */
608 TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */
609 TD0_IP_CS = (1 << 18), /* Calculate IP checksum */
612 /* 8102e, 8168c and beyond. */
613 enum rtl_tx_desc_bit_1 {
614 /* Second doubleword. */
615 #define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
616 TD1_IP_CS = (1 << 29), /* Calculate IP checksum */
617 TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */
618 TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */
621 static const struct rtl_tx_desc_info {
628 } tx_desc_info [] = {
631 .udp = TD0_IP_CS | TD0_UDP_CS,
632 .tcp = TD0_IP_CS | TD0_TCP_CS
634 .mss_shift = TD0_MSS_SHIFT,
639 .udp = TD1_IP_CS | TD1_UDP_CS,
640 .tcp = TD1_IP_CS | TD1_TCP_CS
642 .mss_shift = TD1_MSS_SHIFT,
647 enum rtl_rx_desc_bit {
649 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
650 PID0 = (1 << 17), /* Protocol ID bit 2/2 */
652 #define RxProtoUDP (PID1)
653 #define RxProtoTCP (PID0)
654 #define RxProtoIP (PID1 | PID0)
655 #define RxProtoMask RxProtoIP
657 IPFail = (1 << 16), /* IP checksum failed */
658 UDPFail = (1 << 15), /* UDP/IP checksum failed */
659 TCPFail = (1 << 14), /* TCP/IP checksum failed */
660 RxVlanTag = (1 << 16), /* VLAN tag available */
663 #define RsvdMask 0x3fffc000
680 u8 __pad[sizeof(void *) - sizeof(u32)];
684 RTL_FEATURE_WOL = (1 << 0),
685 RTL_FEATURE_MSI = (1 << 1),
686 RTL_FEATURE_GMII = (1 << 2),
687 RTL_FEATURE_FW_LOADED = (1 << 3),
690 struct rtl8169_counters {
697 __le32 tx_one_collision;
698 __le32 tx_multi_collision;
707 RTL_FLAG_TASK_ENABLED,
708 RTL_FLAG_TASK_SLOW_PENDING,
709 RTL_FLAG_TASK_RESET_PENDING,
710 RTL_FLAG_TASK_PHY_PENDING,
714 struct rtl8169_stats {
717 struct u64_stats_sync syncp;
720 struct rtl8169_private {
721 void __iomem *mmio_addr; /* memory map physical address */
722 struct pci_dev *pci_dev;
723 struct net_device *dev;
724 struct napi_struct napi;
728 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
729 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
732 struct rtl8169_stats rx_stats;
733 struct rtl8169_stats tx_stats;
734 struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
735 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
736 dma_addr_t TxPhyAddr;
737 dma_addr_t RxPhyAddr;
738 void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
739 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
740 struct timer_list timer;
746 void (*write)(struct rtl8169_private *, int, int);
747 int (*read)(struct rtl8169_private *, int);
750 struct pll_power_ops {
751 void (*down)(struct rtl8169_private *);
752 void (*up)(struct rtl8169_private *);
756 void (*enable)(struct rtl8169_private *);
757 void (*disable)(struct rtl8169_private *);
761 void (*write)(struct rtl8169_private *, int, int);
762 u32 (*read)(struct rtl8169_private *, int);
765 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
766 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
767 void (*phy_reset_enable)(struct rtl8169_private *tp);
768 void (*hw_start)(struct net_device *);
769 unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
770 unsigned int (*link_ok)(void __iomem *);
771 int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
774 DECLARE_BITMAP(flags, RTL_FLAG_MAX);
776 struct work_struct work;
781 struct mii_if_info mii;
782 struct rtl8169_counters counters;
787 const struct firmware *fw;
789 #define RTL_VER_SIZE 32
791 char version[RTL_VER_SIZE];
793 struct rtl_fw_phy_action {
798 #define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
803 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
804 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
805 module_param(use_dac, int, 0);
806 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
807 module_param_named(debug, debug.msg_enable, int, 0);
808 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
809 MODULE_LICENSE("GPL");
810 MODULE_VERSION(RTL8169_VERSION);
811 MODULE_FIRMWARE(FIRMWARE_8168D_1);
812 MODULE_FIRMWARE(FIRMWARE_8168D_2);
813 MODULE_FIRMWARE(FIRMWARE_8168E_1);
814 MODULE_FIRMWARE(FIRMWARE_8168E_2);
815 MODULE_FIRMWARE(FIRMWARE_8168E_3);
816 MODULE_FIRMWARE(FIRMWARE_8105E_1);
817 MODULE_FIRMWARE(FIRMWARE_8168F_1);
818 MODULE_FIRMWARE(FIRMWARE_8168F_2);
819 MODULE_FIRMWARE(FIRMWARE_8402_1);
820 MODULE_FIRMWARE(FIRMWARE_8411_1);
821 MODULE_FIRMWARE(FIRMWARE_8106E_1);
822 MODULE_FIRMWARE(FIRMWARE_8168G_1);
824 static void rtl_lock_work(struct rtl8169_private *tp)
826 mutex_lock(&tp->wk.mutex);
829 static void rtl_unlock_work(struct rtl8169_private *tp)
831 mutex_unlock(&tp->wk.mutex);
834 static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
836 pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
837 PCI_EXP_DEVCTL_READRQ, force);
841 bool (*check)(struct rtl8169_private *);
845 static void rtl_udelay(unsigned int d)
/*
 * Bounded polling helper: evaluate condition 'c' up to 'n' times, pausing
 * via the supplied delay callback between attempts, until the condition
 * reads back as 'high'; logs the condition name and wanted level on timeout.
 * NOTE(review): interior lines (the delay(d) call, the success return, the
 * final failure return and braces) were lost in extraction — restore from
 * the original source before building.
 */
850 static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
851 void (*delay)(unsigned int), unsigned int d, int n,
856 for (i = 0; i < n; i++) {
858 if (c->check(tp) == high)
861 netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
862 c->msg, !high, n, d);
866 static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
867 const struct rtl_cond *c,
868 unsigned int d, int n)
870 return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
873 static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
874 const struct rtl_cond *c,
875 unsigned int d, int n)
877 return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
880 static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
881 const struct rtl_cond *c,
882 unsigned int d, int n)
884 return rtl_loop_wait(tp, c, msleep, d, n, true);
887 static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
888 const struct rtl_cond *c,
889 unsigned int d, int n)
891 return rtl_loop_wait(tp, c, msleep, d, n, false);
/*
 * Declare a named polling condition for rtl_loop_wait(): forward-declares
 * name##_check(), defines a 'struct rtl_cond' wired to it, then opens the
 * checker's definition (the user of the macro supplies the body).
 * NOTE(review): at least one continuation line (presumably the .msg
 * initializer and the closing brace of the struct) was lost in extraction.
 */
894 #define DECLARE_RTL_COND(name) \
895 static bool name ## _check(struct rtl8169_private *); \
897 static const struct rtl_cond name = { \
898 .check = name ## _check, \
902 static bool name ## _check(struct rtl8169_private *tp)
904 DECLARE_RTL_COND(rtl_ocpar_cond)
906 void __iomem *ioaddr = tp->mmio_addr;
908 return RTL_R32(OCPAR) & OCPAR_FLAG;
/*
 * Read a dword from OCP register 'reg'; the low nibble of 'mask' selects the
 * byte-enable lanes.  Busy-waits (bounded) on OCPAR_FLAG before returning.
 * NOTE(review): the success branch of the ?: return (presumably
 * RTL_R32(OCPDR)) and the failure value were lost in extraction — restore
 * from the original source.
 */
911 static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
913 void __iomem *ioaddr = tp->mmio_addr;
915 RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
917 return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
921 static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
923 void __iomem *ioaddr = tp->mmio_addr;
925 RTL_W32(OCPDR, data);
926 RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
928 rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
931 DECLARE_RTL_COND(rtl_eriar_cond)
933 void __iomem *ioaddr = tp->mmio_addr;
935 return RTL_R32(ERIAR) & ERIAR_FLAG;
/*
 * Post an out-of-band command to the DASH/management firmware: write the
 * command byte through the ERI interface at 0x10e8, then ring the firmware
 * doorbell via OCP register 0x30.
 * NOTE(review): interior lines (presumably the ERIDR command-byte write and
 * an early return on timeout) were lost in extraction — restore from the
 * original source.
 */
938 static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
940 void __iomem *ioaddr = tp->mmio_addr;
943 RTL_W32(ERIAR, 0x800010e8);
946 if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5))
949 ocp_write(tp, 0x1, 0x30, 0x00000001);
952 #define OOB_CMD_RESET 0x00
953 #define OOB_CMD_DRIVER_START 0x05
954 #define OOB_CMD_DRIVER_STOP 0x06
956 static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
958 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
961 DECLARE_RTL_COND(rtl_ocp_read_cond)
965 reg = rtl8168_get_ocp_reg(tp);
967 return ocp_read(tp, 0x0f, reg) & 0x00000800;
970 static void rtl8168_driver_start(struct rtl8169_private *tp)
972 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
974 rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
977 static void rtl8168_driver_stop(struct rtl8169_private *tp)
979 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
981 rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
/* Return 1 when the DASH management capability bit (0x8000) is set in the
 * OCP status register, 0 otherwise. */
static int r8168dp_check_dash(struct rtl8169_private *tp)
{
        return !!(ocp_read(tp, 0x0f, rtl8168_get_ocp_reg(tp)) & 0x00008000);
}
/*
 * Validate an OCP register offset: it must fit in 16 bits and be even
 * (bit 0 clear).  Logs and reports failure for anything else.
 * NOTE(review): the body's return statements and closing braces were lost
 * in extraction (presumably 'return true;' inside the if, 'return false;'
 * after) — restore from the original source.
 */
991 static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
993 if (reg & 0xffff0001) {
994 netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
1000 DECLARE_RTL_COND(rtl_ocp_gphy_cond)
1002 void __iomem *ioaddr = tp->mmio_addr;
1004 return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
1007 static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1009 void __iomem *ioaddr = tp->mmio_addr;
1011 if (rtl_ocp_reg_failure(tp, reg))
1014 RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
1016 rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
/*
 * Read a GPHY OCP register; returns the low 16 bits of GPHY_OCP on success,
 * ~0 on timeout.
 * NOTE(review): the early-return value for an invalid register offset was
 * lost in extraction — confirm (likely 'return 0;') against the original.
 */
1019 static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
1021 void __iomem *ioaddr = tp->mmio_addr;
1023 if (rtl_ocp_reg_failure(tp, reg))
1026 RTL_W32(GPHY_OCP, reg << 15);
1028 return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
1029 (RTL_R32(GPHY_OCP) & 0xffff) : ~0;
/* Read-modify-write a PHY OCP register: set bits 'p' then clear bits 'm'
 * (clear wins when the two overlap). */
static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
{
        int val = r8168_phy_ocp_read(tp, reg);

        r8168_phy_ocp_write(tp, reg, (val | p) & ~m);
}
1040 static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1042 void __iomem *ioaddr = tp->mmio_addr;
1044 if (rtl_ocp_reg_failure(tp, reg))
1047 RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
/*
 * Read a MAC OCP register: latch the address into OCPDR, then read the data
 * back from the same register (truncated to u16 by the return type).
 * NOTE(review): the early-return value for an invalid register offset was
 * lost in extraction — confirm against the original source.
 */
1050 static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
1052 void __iomem *ioaddr = tp->mmio_addr;
1054 if (rtl_ocp_reg_failure(tp, reg))
1057 RTL_W32(OCPDR, reg << 15);
1059 return RTL_R32(OCPDR);
1062 #define OCP_STD_PHY_BASE 0xa400
/*
 * 8168g-style MDIO write, implemented on top of the PHY OCP interface.
 * A write to the page-select register updates tp->ocp_base (page << 4,
 * falling back to the standard PHY base for page 0); other registers are
 * translated to ocp_base + reg * 2.
 * NOTE(review): the guard around the ocp_base update (presumably
 * 'if (reg == 0x1f) { ... return; }') and the register-offset adjustment
 * under the second if were lost in extraction — restore before building.
 */
1064 static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
1067 tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
1071 if (tp->ocp_base != OCP_STD_PHY_BASE)
1074 r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
/*
 * 8168g-style MDIO read via the PHY OCP interface: translates 'reg' to
 * tp->ocp_base + reg * 2.
 * NOTE(review): the body of the non-standard-page branch (presumably a
 * register-offset adjustment) was lost in extraction — restore before build.
 */
1077 static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
1079 if (tp->ocp_base != OCP_STD_PHY_BASE)
1082 return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
1085 DECLARE_RTL_COND(rtl_phyar_cond)
1087 void __iomem *ioaddr = tp->mmio_addr;
1089 return RTL_R32(PHYAR) & 0x80000000;
/*
 * Classic 8169 MDIO write via the PHYAR register: bit 31 triggers a write,
 * bits 20:16 carry the register number, bits 15:0 the value.  Waits
 * (bounded) for the busy flag to clear.
 * NOTE(review): the post-completion delay the trailing comment refers to
 * (presumably udelay(20)) was lost in extraction — restore before building.
 */
1092 static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
1094 void __iomem *ioaddr = tp->mmio_addr;
1096 RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
1098 rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
1100 * According to hardware specs a 20us delay is required after write
1101 * complete indication, but before sending next command.
/*
 * Classic 8169 MDIO read via PHYAR: write the register number with bit 31
 * clear (read command), poll the busy flag, then return the low 16 bits of
 * PHYAR — or ~0 on timeout.
 * NOTE(review): the post-completion delay referred to by the trailing
 * comment (presumably udelay(20)) and the final 'return value;' were lost
 * in extraction — restore before building.
 */
1106 static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
1108 void __iomem *ioaddr = tp->mmio_addr;
1111 RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);
1113 value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
1114 RTL_R32(PHYAR) & 0xffff : ~0;
1117 * According to hardware specs a 20us delay is required after read
1118 * complete indication, but before sending next command.
1125 static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
1127 void __iomem *ioaddr = tp->mmio_addr;
1129 RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
1130 RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
1131 RTL_W32(EPHY_RXER_NUM, 0);
1133 rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
1136 static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
1138 r8168dp_1_mdio_access(tp, reg,
1139 OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
/*
 * MDIO read on the 8168dp-1: issue the read command, re-trigger via OCPAR
 * with the GPHY read command, then poll for completion and return the data
 * bits of OCPDR — or ~0 on timeout.
 * NOTE(review): one or two interior lines between the access and the OCPAR
 * write (presumably a settle delay) were lost in extraction — restore from
 * the original source.
 */
1142 static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
1144 void __iomem *ioaddr = tp->mmio_addr;
1146 r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
1149 RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
1150 RTL_W32(EPHY_RXER_NUM, 0);
1152 return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
1153 RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
1156 #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
1158 static void r8168dp_2_mdio_start(void __iomem *ioaddr)
1160 RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
1163 static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
1165 RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
1168 static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
1170 void __iomem *ioaddr = tp->mmio_addr;
1172 r8168dp_2_mdio_start(ioaddr);
1174 r8169_mdio_write(tp, reg, value);
1176 r8168dp_2_mdio_stop(ioaddr);
1179 static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
1181 void __iomem *ioaddr = tp->mmio_addr;
1184 r8168dp_2_mdio_start(ioaddr);
1186 value = r8169_mdio_read(tp, reg);
1188 r8168dp_2_mdio_stop(ioaddr);
1193 static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
1195 tp->mdio_ops.write(tp, location, val);
1198 static int rtl_readphy(struct rtl8169_private *tp, int location)
1200 return tp->mdio_ops.read(tp, location);
/* OR 'value' into a PHY register (read-modify-write, set-only). */
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
{
        int cur = rtl_readphy(tp, reg_addr);

        rtl_writephy(tp, reg_addr, cur | value);
}
/* Read-modify-write a PHY register: set bits 'p' then clear bits 'm'
 * (clear wins when the two overlap). */
static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
{
        int val = rtl_readphy(tp, reg_addr);

        rtl_writephy(tp, reg_addr, (val | p) & ~m);
}
/*
 * mii_if_info write hook: phy_id is ignored (single internal PHY); forwards
 * to the per-chip rtl_writephy().
 * NOTE(review): the continuation line of the parameter list (presumably
 * 'int val)') was lost in extraction — restore before building.
 */
1216 static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
1219 struct rtl8169_private *tp = netdev_priv(dev);
1221 rtl_writephy(tp, location, val);
/* mii_if_info read hook: phy_id is ignored (single internal PHY); forwards
 * to the per-chip rtl_readphy(). */
static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
{
        struct rtl8169_private *tp = netdev_priv(dev);

        return rtl_readphy(tp, location);
}
1231 DECLARE_RTL_COND(rtl_ephyar_cond)
1233 void __iomem *ioaddr = tp->mmio_addr;
1235 return RTL_R32(EPHYAR) & EPHYAR_FLAG;
/*
 * Write a PCIe PHY (EPHY) register via EPHYAR and wait (bounded) for the
 * busy flag to clear.
 * NOTE(review): a trailing statement after the completion poll (presumably
 * a short settle delay) was lost in extraction — restore from the original.
 */
1238 static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
1240 void __iomem *ioaddr = tp->mmio_addr;
1242 RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
1243 (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1245 rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
1250 static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
1252 void __iomem *ioaddr = tp->mmio_addr;
1254 RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1256 return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
1257 RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
/*
 * Write an ERI (extended internal) register: value into ERIDR, then the
 * write command + type + byte-enable mask + dword-aligned address into
 * ERIAR; waits (bounded) for completion.  BUG_ON enforces alignment and a
 * non-empty mask.
 * NOTE(review): the continuation line of the parameter list (presumably
 * 'u32 val, int type)') was lost in extraction — restore before building.
 */
1260 static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
1263 void __iomem *ioaddr = tp->mmio_addr;
1265 BUG_ON((addr & 3) || (mask == 0));
1266 RTL_W32(ERIDR, val);
1267 RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
1269 rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
1272 static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
1274 void __iomem *ioaddr = tp->mmio_addr;
1276 RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
1278 return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
1279 RTL_R32(ERIDR) : ~0;
/*
 * Read-modify-write an ERI register: clear bits 'm' then set bits 'p'
 * (set wins when the two overlap — note this is the opposite precedence of
 * rtl_w1w0_phy()).
 * NOTE(review): the continuation of the parameter list (presumably
 * 'u32 m, int type)') and the 'u32 val;' declaration were lost in
 * extraction — restore before building.
 */
1282 static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
1287 val = rtl_eri_read(tp, addr, type);
1288 rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
/*
 * Apply a table of 'len' EXGMAC ERI writes in order.
 * NOTE(review): the loop structure around the rtl_eri_write() call
 * (presumably 'while (len-- > 0) { ...; r++; }') was lost in extraction —
 * restore before building.
 */
1297 static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
1298 const struct exgmac_reg *r, int len)
1301 rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
1306 DECLARE_RTL_COND(rtl_efusear_cond)
1308 void __iomem *ioaddr = tp->mmio_addr;
1310 return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
1313 static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
1315 void __iomem *ioaddr = tp->mmio_addr;
1317 RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
1319 return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
1320 RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
1323 static u16 rtl_get_events(struct rtl8169_private *tp)
1325 void __iomem *ioaddr = tp->mmio_addr;
1327 return RTL_R16(IntrStatus);
1330 static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
1332 void __iomem *ioaddr = tp->mmio_addr;
1334 RTL_W16(IntrStatus, bits);
/*
 * Mask all chip interrupt sources by zeroing IntrMask.
 * NOTE(review): a trailing line after the write (presumably an mmiowb()
 * ordering barrier) was lost in extraction — restore before building.
 */
1338 static void rtl_irq_disable(struct rtl8169_private *tp)
1340 void __iomem *ioaddr = tp->mmio_addr;
1342 RTL_W16(IntrMask, 0);
1346 static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
1348 void __iomem *ioaddr = tp->mmio_addr;
1350 RTL_W16(IntrMask, bits);
1353 #define RTL_EVENT_NAPI_RX (RxOK | RxErr)
1354 #define RTL_EVENT_NAPI_TX (TxOK | TxErr)
1355 #define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
1357 static void rtl_irq_enable_all(struct rtl8169_private *tp)
1359 rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
/*
 * Mask every interrupt source, then acknowledge anything already pending
 * (NAPI events plus the slow-event set).
 * NOTE(review): a trailing line (presumably a posted-write flush such as a
 * dummy register read) was lost in extraction — restore before building.
 */
1362 static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
1364 void __iomem *ioaddr = tp->mmio_addr;
1366 rtl_irq_disable(tp);
1367 rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
1371 static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
1373 void __iomem *ioaddr = tp->mmio_addr;
1375 return RTL_R32(TBICSR) & TBIReset;
1378 static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
1380 return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
1383 static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
1385 return RTL_R32(TBICSR) & TBILinkOk;
1388 static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
1390 return RTL_R8(PHYstatus) & LinkStatus;
1393 static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
1395 void __iomem *ioaddr = tp->mmio_addr;
1397 RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
1400 static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
1404 val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
1405 rtl_writephy(tp, MII_BMCR, val & 0xffff);
/*
 * Apply per-chip ERI tweaks when the link state / negotiated speed
 * changes.  Only acts on the MAC versions listed below; reads the
 * current speed from PHYstatus to pick the register values.
 * NOTE(review): the rtl_eri_write()/rtl_w1w0_eri() calls are split
 * across lines; their trailing ERIAR type argument sits on dropped
 * continuation lines — confirm against the full file.
 */
1408 static void rtl_link_chg_patch(struct rtl8169_private *tp)
1410 void __iomem *ioaddr = tp->mmio_addr;
1411 struct net_device *dev = tp->dev;
/* Nothing to patch while the interface is down. */
1413 if (!netif_running(dev))
/* 8168E-VL / 8411: speed dependent ERI setup. */
1416 if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
1417 tp->mac_version == RTL_GIGA_MAC_VER_38) {
1418 if (RTL_R8(PHYstatus) & _1000bpsF) {
1419 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1421 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1423 } else if (RTL_R8(PHYstatus) & _100bps) {
1424 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1426 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1429 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1431 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
1434 /* Reset packet filter */
1435 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
1437 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
/* 8168F: same 1000/non-1000 split, no packet filter reset. */
1439 } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
1440 tp->mac_version == RTL_GIGA_MAC_VER_36) {
1441 if (RTL_R8(PHYstatus) & _1000bpsF) {
1442 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1444 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1447 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1449 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
/* 8402: dedicated 10 Mbps handling. */
1452 } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
1453 if (RTL_R8(PHYstatus) & _10bps) {
1454 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
1456 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
1459 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
/*
 * Re-evaluate the link: patch the chip, update the carrier state and,
 * when 'pm' is set, nudge runtime PM (resume on link-up, schedule a
 * delayed suspend on link-down).
 * NOTE(review): the 'if (pm)' guards around the two pm_* calls appear
 * to sit on dropped lines — confirm before relying on control flow.
 */
1465 static void __rtl8169_check_link_status(struct net_device *dev,
1466 struct rtl8169_private *tp,
1467 void __iomem *ioaddr, bool pm)
1469 if (tp->link_ok(ioaddr)) {
1470 rtl_link_chg_patch(tp);
1471 /* This is to cancel a scheduled suspend if there's one. */
1473 pm_request_resume(&tp->pci_dev->dev);
1474 netif_carrier_on(dev);
1475 if (net_ratelimit())
1476 netif_info(tp, ifup, dev, "link up\n");
1478 netif_carrier_off(dev);
1479 netif_info(tp, ifdown, dev, "link down\n");
/* 5000 ms grace period before runtime-suspending a link-less NIC. */
1481 pm_schedule_suspend(&tp->pci_dev->dev, 5000);
1485 static void rtl8169_check_link_status(struct net_device *dev,
1486 struct rtl8169_private *tp,
1487 void __iomem *ioaddr)
1489 __rtl8169_check_link_status(dev, tp, ioaddr, false);
1492 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
/*
 * Translate the chip's Config1/3/5 wake bits into a WAKE_* bitmap.
 * Returns 0 when PME is not enabled at all (PMEnable clear).
 * NOTE(review): the 'if (options & ...)' guards before the UWF/BWF/MWF
 * assignments appear to sit on dropped lines — confirm.
 */
1494 static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
1496 void __iomem *ioaddr = tp->mmio_addr;
1500 options = RTL_R8(Config1);
1501 if (!(options & PMEnable))
1504 options = RTL_R8(Config3);
1505 if (options & LinkUp)
1506 wolopts |= WAKE_PHY;
1507 if (options & MagicPacket)
1508 wolopts |= WAKE_MAGIC;
1510 options = RTL_R8(Config5)
;
1512 wolopts |= WAKE_UCAST;
1514 wolopts |= WAKE_BCAST;
1516 wolopts |= WAKE_MCAST;
/* ethtool get_wol: everything is supported; report the active set. */
1521 static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1523 struct rtl8169_private *tp = netdev_priv(dev);
1527 wol->supported = WAKE_ANY;
1528 wol->wolopts = __rtl8169_get_wol(tp)
;
1530 rtl_unlock_work(tp);
/*
 * Program the chip's wake bits from a WAKE_* bitmap via a small
 * option -> (config register, mask) table, then fix up the PME enable
 * bit whose location depends on the MAC generation.
 */
1533 static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1535 void __iomem *ioaddr = tp->mmio_addr;
1537 static const struct {
1542 { WAKE_PHY, Config3, LinkUp },
1543 { WAKE_MAGIC, Config3, MagicPacket },
1544 { WAKE_UCAST, Config5, UWF },
1545 { WAKE_BCAST, Config5, BWF },
1546 { WAKE_MCAST, Config5, MWF },
1547 { WAKE_ANY, Config5, LanWake }
/* Config registers are write-protected; unlock around the update. */
1551 RTL_W8(Cfg9346, Cfg9346_Unlock);
1553 for (i = 0; i < ARRAY_SIZE(cfg); i++) {
1554 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
1555 if (wolopts & cfg[i].opt)
1556 options |= cfg[i].mask;
1557 RTL_W8(cfg[i].reg, options);
/* PME enable lives in Config1 on old chips, Config2 on newer ones. */
1560 switch (tp->mac_version) {
1561 case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
1562 options = RTL_R8(Config1) & ~PMEnable;
1564 options |= PMEnable;
1565 RTL_W8(Config1, options);
1568 options = RTL_R8(Config2) & ~PME_SIGNAL;
1570 options |= PME_SIGNAL;
1571 RTL_W8(Config2, options);
1575 RTL_W8(Cfg9346, Cfg9346_Lock);
/*
 * ethtool set_wol: record RTL_FEATURE_WOL, program the chip and tell
 * the PM core whether this device may wake the system.
 */
1578 static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1580 struct rtl8169_private *tp = netdev_priv(dev);
1585 tp->features |= RTL_FEATURE_WOL;
1587 tp->features &= ~RTL_FEATURE_WOL;
1588 __rtl8169_set_wol(tp, wol->wolopts);
1590 rtl_unlock_work(tp);
1592 device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
1597 static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
1599 return rtl_chip_infos[tp->mac_version].fw_name;
1602 static void rtl8169_get_drvinfo(struct net_device *dev,
1603 struct ethtool_drvinfo *info)
1605 struct rtl8169_private *tp = netdev_priv(dev);
1606 struct rtl_fw *rtl_fw = tp->rtl_fw;
1608 strlcpy(info->driver, MODULENAME, sizeof(info->driver));
1609 strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
1610 strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
1611 BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
1612 if (!IS_ERR_OR_NULL(rtl_fw))
1613 strlcpy(info->fw_version, rtl_fw->version,
1614 sizeof(info->fw_version));
1617 static int rtl8169_get_regs_len(struct net_device *dev)
1619 return R8169_REGS_SIZE;
/*
 * Speed setting for TBI (fibre) chips: only forced 1000/full or full
 * autonegotiation are meaningful; anything else is refused with a
 * warning.  The error-return plumbing sits on dropped lines.
 */
1622 static int rtl8169_set_speed_tbi(struct net_device *dev,
1623 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
1625 struct rtl8169_private *tp = netdev_priv(dev);
1626 void __iomem *ioaddr = tp->mmio_addr;
1630 reg = RTL_R32(TBICSR);
1631 if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
1632 (duplex == DUPLEX_FULL)) {
1633 RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
1634 } else if (autoneg == AUTONEG_ENABLE)
1635 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
1637 netif_warn(tp, link, dev,
1638 "incorrect speed setting refused in TBI mode\n");
/*
 * Speed setting for MII/GMII chips: translate the ethtool ADVERTISED_*
 * bitmap into MII_ADVERTISE/MII_CTRL1000 for autoneg, or force
 * speed/duplex through MII_BMCR otherwise.
 */
1645 static int rtl8169_set_speed_xmii(struct net_device *dev,
1646 u8 autoneg, u16 speed, u8 duplex, u32 adv)
1648 struct rtl8169_private *tp = netdev_priv(dev);
1649 int giga_ctrl, bmcr;
/* Select PHY page 0 before touching the standard MII registers. */
1652 rtl_writephy(tp, 0x1f, 0x0000);
1654 if (autoneg == AUTONEG_ENABLE) {
1657 auto_nego = rtl_readphy(tp, MII_ADVERTISE);
1658 auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
1659 ADVERTISE_100HALF | ADVERTISE_100FULL);
1661 if (adv & ADVERTISED_10baseT_Half)
1662 auto_nego |= ADVERTISE_10HALF;
1663 if (adv & ADVERTISED_10baseT_Full)
1664 auto_nego |= ADVERTISE_10FULL;
1665 if (adv & ADVERTISED_100baseT_Half)
1666 auto_nego |= ADVERTISE_100HALF;
1667 if (adv & ADVERTISED_100baseT_Full)
1668 auto_nego |= ADVERTISE_100FULL;
/* Always advertise both pause capabilities. */
1670 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1672 giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
1673 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
1675 /* The 8100e/8101e/8102e do Fast Ethernet only. */
1676 if (tp->mii.supports_gmii) {
1677 if (adv & ADVERTISED_1000baseT_Half)
1678 giga_ctrl |= ADVERTISE_1000HALF;
1679 if (adv & ADVERTISED_1000baseT_Full)
1680 giga_ctrl |= ADVERTISE_1000FULL;
1681 } else if (adv & (ADVERTISED_1000baseT_Half |
1682 ADVERTISED_1000baseT_Full)) {
1683 netif_info(tp, link, dev,
1684 "PHY does not support 1000Mbps\n");
1688 bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
1690 rtl_writephy(tp, MII_ADVERTISE, auto_nego);
1691 rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
/* Forced mode: 10 or 100 Mbps only (no forced gigabit). */
1695 if (speed == SPEED_10)
1697 else if (speed == SPEED_100)
1698 bmcr = BMCR_SPEED100;
1702 if (duplex == DUPLEX_FULL)
1703 bmcr |= BMCR_FULLDPLX;
1706 rtl_writephy(tp, MII_BMCR, bmcr);
/* Vendor-specific quirk for the earliest gigabit MACs. */
1708 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
1709 tp->mac_version == RTL_GIGA_MAC_VER_03) {
1710 if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
1711 rtl_writephy(tp, 0x17, 0x2138);
1712 rtl_writephy(tp, 0x0e, 0x0260);
1714 rtl_writephy(tp, 0x17, 0x2108);
1715 rtl_writephy(tp, 0x0e, 0x0000);
/*
 * Common speed-setting entry point: dispatch to the TBI or XMII
 * handler, then (re)arm the PHY timer when gigabit autoneg is in
 * progress on a running interface.
 */
1724 static int rtl8169_set_speed(struct net_device *dev,
1725 u8 autoneg, u16 speed, u8 duplex, u32 advertising)
1727 struct rtl8169_private *tp = netdev_priv(dev);
1730 ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
1734 if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
1735 (advertising & ADVERTISED_1000baseT_Full)) {
1736 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
/*
 * ethtool set_settings: stop the PHY timer first so it cannot race
 * with the reconfiguration, then apply under the work lock.
 */
1742 static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1744 struct rtl8169_private *tp = netdev_priv(dev);
1747 del_timer_sync(&tp->timer);
1750 ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
1751 cmd->duplex, cmd->advertising);
1752 rtl_unlock_work(tp);
1757 static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1758 netdev_features_t features)
1760 struct rtl8169_private *tp = netdev_priv(dev);
1762 if (dev->mtu > TD_MSS_MAX)
1763 features &= ~NETIF_F_ALL_TSO;
1765 if (dev->mtu > JUMBO_1K &&
1766 !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
1767 features &= ~NETIF_F_IP_CSUM;
/*
 * Apply RX-side feature changes to the hardware: RX checksum and VLAN
 * stripping via CPlusCmd, and error/runt acceptance via RxConfig.
 * Only touches the chip for bits that actually changed.
 */
1772 static void __rtl8169_set_features(struct net_device *dev,
1773 netdev_features_t features)
1775 struct rtl8169_private *tp = netdev_priv(dev);
1776 netdev_features_t changed = features ^ dev->features;
1777 void __iomem *ioaddr = tp->mmio_addr;
/* Fast path out when none of the RX-related bits changed. */
1779 if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
1782 if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
1783 if (features & NETIF_F_RXCSUM)
1784 tp->cp_cmd |= RxChkSum;
1786 tp->cp_cmd &= ~RxChkSum;
/* NOTE(review): reads dev->features, not 'features' — confirm intent. */
1788 if (dev->features & NETIF_F_HW_VLAN_RX)
1789 tp->cp_cmd |= RxVlan;
1791 tp->cp_cmd &= ~RxVlan;
1793 RTL_W16(CPlusCmd, tp->cp_cmd);
1796 if (changed & NETIF_F_RXALL) {
1797 int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
1798 if (features & NETIF_F_RXALL)
1799 tmp |= (AcceptErr | AcceptRunt);
1800 RTL_W32(RxConfig, tmp);
/* netdev set_features hook: apply under the work lock. */
1804 static int rtl8169_set_features(struct net_device *dev,
1805 netdev_features_t features)
1807 struct rtl8169_private *tp = netdev_priv(dev);
1810 __rtl8169_set_features(dev, features);
1811 rtl_unlock_work(tp);
1817 static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
1819 return (vlan_tx_tag_present(skb)) ?
1820 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1823 static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
1825 u32 opts2 = le32_to_cpu(desc->opts2);
1827 if (opts2 & RxVlanTag)
1828 __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
/*
 * ethtool get_settings for TBI (fibre) chips: the link is always
 * reported as 1000/full over fibre; only the autoneg flag varies,
 * mirrored from TBICSR's TBINwEnable bit.
 */
1833 static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
1835 struct rtl8169_private *tp = netdev_priv(dev);
1836 void __iomem *ioaddr = tp->mmio_addr;
1840 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
1841 cmd->port = PORT_FIBRE;
1842 cmd->transceiver = XCVR_INTERNAL;
1844 status = RTL_R32(TBICSR);
1845 cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
1846 cmd->autoneg = !!(status & TBINwEnable);
1848 ethtool_cmd_speed_set(cmd, SPEED_1000);
1849 cmd->duplex = DUPLEX_FULL; /* Always set */
1854 static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
1856 struct rtl8169_private *tp = netdev_priv(dev);
1858 return mii_ethtool_gset(&tp->mii, cmd);
/*
 * ethtool get_settings: dispatch to the TBI or XMII handler through
 * the per-chip tp->get_settings pointer, under the work lock.
 */
1861 static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1863 struct rtl8169_private *tp = netdev_priv(dev);
1867 rc = tp->get_settings(dev, cmd);
1868 rtl_unlock_work(tp);
/*
 * ethtool get_regs: snapshot up to R8169_REGS_SIZE bytes of MMIO
 * register space into the caller's buffer.
 */
1873 static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1876 struct rtl8169_private *tp = netdev_priv(dev);
1878 if (regs->len > R8169_REGS_SIZE)
1879 regs->len = R8169_REGS_SIZE;
1882 memcpy_fromio(p, tp->mmio_addr, regs->len);
1883 rtl_unlock_work(tp);
1886 static u32 rtl8169_get_msglevel(struct net_device *dev)
1888 struct rtl8169_private *tp = netdev_priv(dev);
1890 return tp->msg_enable;
1893 static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
1895 struct rtl8169_private *tp = netdev_priv(dev);
1897 tp->msg_enable = value;
/* ethtool statistics names; order must match get_ethtool_stats(). */
1900 static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
1907 "tx_single_collisions",
1908 "tx_multi_collisions",
/* Number of statistics exposed for ETH_SS_STATS. */
1916 static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1920 return ARRAY_SIZE(rtl8169_gstrings);
/* Poll condition: CounterDump clears when the tally DMA completes. */
1926 DECLARE_RTL_COND(rtl_counters_cond)
1928 void __iomem *ioaddr = tp->mmio_addr;
1930 return RTL_R32(CounterAddrLow) & CounterDump;
/*
 * Ask the chip to DMA its hardware tally counters into a coherent
 * buffer and cache the result in tp->counters.  Best effort: silently
 * keeps the old snapshot when the receiver is off or the dump times
 * out.
 */
1933 static void rtl8169_update_counters(struct net_device *dev)
1935 struct rtl8169_private *tp = netdev_priv(dev);
1936 void __iomem *ioaddr = tp->mmio_addr;
1937 struct device *d = &tp->pci_dev->dev;
1938 struct rtl8169_counters *counters;
1943 * Some chips are unable to dump tally counters when the receiver
1946 if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
1949 counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
/* Program the 64-bit DMA address, then set CounterDump to start. */
1953 RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
1954 cmd = (u64)paddr & DMA_BIT_MASK(32);
1955 RTL_W32(CounterAddrLow, cmd);
1956 RTL_W32(CounterAddrLow, cmd | CounterDump);
1958 if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
1959 memcpy(&tp->counters, counters, sizeof(*counters));
1961 RTL_W32(CounterAddrLow, 0);
1962 RTL_W32(CounterAddrHigh, 0);
1964 dma_free_coherent(d, sizeof(*counters), counters, paddr);
/*
 * ethtool get_ethtool_stats: refresh the tally snapshot and copy it
 * out in rtl8169_gstrings order, converting from little-endian.
 */
1967 static void rtl8169_get_ethtool_stats(struct net_device *dev,
1968 struct ethtool_stats *stats, u64 *data)
1970 struct rtl8169_private *tp = netdev_priv(dev);
1974 rtl8169_update_counters(dev);
1976 data[0] = le64_to_cpu(tp->counters.tx_packets);
1977 data[1] = le64_to_cpu(tp->counters.rx_packets);
1978 data[2] = le64_to_cpu(tp->counters.tx_errors);
1979 data[3] = le32_to_cpu(tp->counters.rx_errors);
1980 data[4] = le16_to_cpu(tp->counters.rx_missed);
1981 data[5] = le16_to_cpu(tp->counters.align_errors);
1982 data[6] = le32_to_cpu(tp->counters.tx_one_collision);
1983 data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
1984 data[8] = le64_to_cpu(tp->counters.rx_unicast);
1985 data[9] = le64_to_cpu(tp->counters.rx_broadcast);
1986 data[10] = le32_to_cpu(tp->counters.rx_multicast);
1987 data[11] = le16_to_cpu(tp->counters.tx_aborted);
1988 data[12] = le16_to_cpu(tp->counters.tx_underun);
/* ethtool get_strings: hand back the whole statistics name table. */
1991 static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1995 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
/* ethtool operations table registered for every supported chip. */
2000 static const struct ethtool_ops rtl8169_ethtool_ops = {
2001 .get_drvinfo = rtl8169_get_drvinfo,
2002 .get_regs_len = rtl8169_get_regs_len,
2003 .get_link = ethtool_op_get_link,
2004 .get_settings = rtl8169_get_settings,
2005 .set_settings = rtl8169_set_settings,
2006 .get_msglevel = rtl8169_get_msglevel,
2007 .set_msglevel = rtl8169_set_msglevel,
2008 .get_regs = rtl8169_get_regs,
2009 .get_wol = rtl8169_get_wol,
2010 .set_wol = rtl8169_set_wol,
2011 .get_strings = rtl8169_get_strings,
2012 .get_sset_count = rtl8169_get_sset_count,
2013 .get_ethtool_stats = rtl8169_get_ethtool_stats,
2014 .get_ts_info = ethtool_op_get_ts_info,
/*
 * Identify the MAC version by masking TxConfig against an ordered
 * match table (first hit wins; more specific masks come first within
 * each family).  Falls back to 'default_version' with a notice when
 * nothing matches.
 */
2017 static void rtl8169_get_mac_version(struct rtl8169_private *tp,
2018 struct net_device *dev, u8 default_version)
2020 void __iomem *ioaddr = tp->mmio_addr;
2022 * The driver currently handles the 8168Bf and the 8168Be identically
2023 * but they can be identified more specifically through the test below
2026 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
2028 * Same thing for the 8101Eb and the 8101Ec:
2030 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
2032 static const struct rtl_mac_info {
/* 8168G family. */
2038 { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 },
2039 { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 },
/* 8168F / 8411 family. */
2042 { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 },
2043 { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
2044 { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 },
/* 8168E family. */
2047 { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 },
2048 { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 },
2049 { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 },
2050 { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 },
/* 8168D family. */
2053 { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 },
2054 { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
2055 { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
2057 /* 8168DP family. */
2058 { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 },
2059 { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 },
2060 { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 },
/* 8168C family. */
2063 { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 },
2064 { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 },
2065 { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 },
2066 { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 },
2067 { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 },
2068 { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 },
2069 { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 },
2070 { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22 },
2071 { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 },
/* 8168B family. */
2074 { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 },
2075 { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 },
2076 { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 },
2077 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
/* 8101 family. */
2080 { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39 },
2081 { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 },
2082 { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 },
2083 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
2084 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
2085 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
2086 { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
2087 { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 },
2088 { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 },
2089 { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
2090 { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 },
2091 { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 },
2092 { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 },
2093 { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
2094 { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 },
2095 { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
2096 { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 },
2097 { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 },
2098 { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
2099 /* FIXME: where did these entries come from ? -- FR */
2100 { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
2101 { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 },
/* 8110 family. */
2104 { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 },
2105 { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 },
2106 { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 },
2107 { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 },
2108 { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 },
2109 { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 },
/* Catch-all terminator: matches everything. */
2112 { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
2114 const struct rtl_mac_info *p = mac_info;
2117 reg = RTL_R32(TxConfig);
2118 while ((reg & p->mask) != p->val)
2120 tp->mac_version = p->mac_version;
2122 if (tp->mac_version == RTL_GIGA_MAC_NONE) {
2123 netif_notice(tp, probe, dev,
2124 "unknown MAC, using family default\n");
2125 tp->mac_version = default_version;
/* Debug-only dump of the detected MAC version. */
2129 static void rtl8169_print_mac_version(struct rtl8169_private *tp)
2131 dprintk("mac_version = 0x%02x\n", tp->mac_version);
/* Write an array of 'len' {page/reg, val} pairs to the PHY in order. */
2139 static void rtl_writephy_batch(struct rtl8169_private *tp,
2140 const struct phy_reg *regs, int len)
2143 rtl_writephy(tp, regs->reg, regs->val);
/*
 * PHY firmware opcodes: each 32-bit word of a firmware image encodes
 * an opcode in the top nibble, a register number in bits 27..16 and
 * immediate data in the low 16 bits (see rtl_phy_write_fw()).
 */
2148 #define PHY_READ 0x00000000
2149 #define PHY_DATA_OR 0x10000000
2150 #define PHY_DATA_AND 0x20000000
2151 #define PHY_BJMPN 0x30000000
2152 #define PHY_READ_EFUSE 0x40000000
2153 #define PHY_READ_MAC_BYTE 0x50000000
2154 #define PHY_WRITE_MAC_BYTE 0x60000000
2155 #define PHY_CLEAR_READCOUNT 0x70000000
2156 #define PHY_WRITE 0x80000000
2157 #define PHY_READCOUNT_EQ_SKIP 0x90000000
2158 #define PHY_COMP_EQ_SKIPN 0xa0000000
2159 #define PHY_COMP_NEQ_SKIPN 0xb0000000
2160 #define PHY_WRITE_PREVIOUS 0xc0000000
2161 #define PHY_SKIPN 0xd0000000
2162 #define PHY_DELAY_MS 0xe0000000
2163 #define PHY_WRITE_ERI_WORD 0xf0000000
/* NOTE(review): fragment of struct rtl_fw — its header is on dropped lines. */
2167 char version[RTL_VER_SIZE];
/* Size of one firmware opcode word (element type of phy_action.code). */
2173 #define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
/*
 * Validate the container format of a loaded firmware image and fill
 * rtl_fw->version / phy_action.  Two layouts are accepted: the newer
 * header format (leading fw_info with checksum, version and code
 * offset/length) and the legacy headerless format (raw opcode words,
 * version taken from the firmware file name).
 */
2175 static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2177 const struct firmware *fw = rtl_fw->fw;
2178 struct fw_info *fw_info = (struct fw_info *)fw->data;
2179 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2180 char *version = rtl_fw->version;
2183 if (fw->size < FW_OPCODE_SIZE)
/* New format: magic field is zero; verify checksum and bounds. */
2186 if (!fw_info->magic) {
2187 size_t i, size, start;
2190 if (fw->size < sizeof(*fw_info))
2193 for (i = 0; i < fw->size; i++)
2194 checksum += fw->data[i];
2198 start = le32_to_cpu(fw_info->fw_start);
2199 if (start > fw->size)
2202 size = le32_to_cpu(fw_info->fw_len);
2203 if (size > (fw->size - start) / FW_OPCODE_SIZE)
2206 memcpy(version, fw_info->version, RTL_VER_SIZE);
2208 pa->code = (__le32 *)(fw->data + start);
/* Legacy format: whole file must be a multiple of the opcode size. */
2211 if (fw->size % FW_OPCODE_SIZE)
2214 strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);
2216 pa->code = (__le32 *)fw->data;
2217 pa->size = fw->size / FW_OPCODE_SIZE;
2219 version[RTL_VER_SIZE - 1] = 0;
/*
 * Sanity-check every opcode in the firmware so the interpreter in
 * rtl_phy_write_fw() cannot jump or skip outside the program, and
 * reject opcodes the driver does not implement.
 */
2226 static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
2227 struct rtl_fw_phy_action *pa)
2232 for (index = 0; index < pa->size; index++) {
2233 u32 action = le32_to_cpu(pa->code[index]);
2234 u32 regno = (action & 0x0fff0000) >> 16;
2236 switch(action & 0xf0000000) {
2240 case PHY_READ_EFUSE:
2241 case PHY_CLEAR_READCOUNT:
2243 case PHY_WRITE_PREVIOUS:
/* Backward jump must stay inside the program. */
2248 if (regno > index) {
2249 netif_err(tp, ifup, tp->dev,
2250 "Out of range of firmware\n");
2254 case PHY_READCOUNT_EQ_SKIP:
2255 if (index + 2 >= pa->size) {
2256 netif_err(tp, ifup, tp->dev,
2257 "Out of range of firmware\n");
2261 case PHY_COMP_EQ_SKIPN:
2262 case PHY_COMP_NEQ_SKIPN:
2264 if (index + 1 + regno >= pa->size) {
2265 netif_err(tp, ifup, tp->dev,
2266 "Out of range of firmware\n");
/* Opcodes known but not implemented by this driver. */
2271 case PHY_READ_MAC_BYTE:
2272 case PHY_WRITE_MAC_BYTE:
2273 case PHY_WRITE_ERI_WORD:
2275 netif_err(tp, ifup, tp->dev,
2276 "Invalid action 0x%08x\n", action);
/* Combined format + content validation of a loaded firmware image. */
2285 static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2287 struct net_device *dev = tp->dev;
2290 if (!rtl_fw_format_ok(tp, rtl_fw)) {
2291 netif_err(tp, ifup, dev, "invalid firwmare\n");
2295 if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
/*
 * Interpreter for the validated PHY firmware program: executes each
 * opcode word, tracking 'predata' (last value read) and 'count' (read
 * counter) for the conditional-skip opcodes.  Firmware must already
 * have passed rtl_check_firmware().
 */
2301 static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2303 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2307 predata = count = 0;
2309 for (index = 0; index < pa->size; ) {
2310 u32 action = le32_to_cpu(pa->code[index]);
2311 u32 data = action & 0x0000ffff;
2312 u32 regno = (action & 0x0fff0000) >> 16;
2317 switch(action & 0xf0000000) {
2319 predata = rtl_readphy(tp, regno);
2334 case PHY_READ_EFUSE:
2335 predata = rtl8168d_efuse_read(tp, regno);
2338 case PHY_CLEAR_READCOUNT:
2343 rtl_writephy(tp, regno, data);
/* Skip two opcodes when the read counter matches 'data'. */
2346 case PHY_READCOUNT_EQ_SKIP:
2347 index += (count == data) ? 2 : 1;
2349 case PHY_COMP_EQ_SKIPN:
2350 if (predata == data)
2354 case PHY_COMP_NEQ_SKIPN:
2355 if (predata != data)
2359 case PHY_WRITE_PREVIOUS:
2360 rtl_writephy(tp, regno, predata);
/* Rejected earlier by rtl_fw_data_ok(); never reached here. */
2371 case PHY_READ_MAC_BYTE:
2372 case PHY_WRITE_MAC_BYTE:
2373 case PHY_WRITE_ERI_WORD:
/* Drop the loaded firmware and reset tp->rtl_fw to the sentinel. */
2380 static void rtl_release_firmware(struct rtl8169_private *tp)
2382 if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
2383 release_firmware(tp->rtl_fw->fw);
2386 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
2389 static void rtl_apply_firmware(struct rtl8169_private *tp)
2391 struct rtl_fw *rtl_fw = tp->rtl_fw;
2393 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
2394 if (!IS_ERR_OR_NULL(rtl_fw)) {
2395 rtl_phy_write_fw(tp, rtl_fw);
2396 tp->features |= RTL_FEATURE_FW_LOADED;
/*
 * Apply the firmware only when PHY register 'reg' reads back 'val'.
 * NOTE(review): the 'else' coupling the warning to the apply call
 * appears to sit on a dropped line — confirm the control flow.
 */
2400 static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2402 if (rtl_readphy(tp, reg) != val)
2403 netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2405 rtl_apply_firmware(tp);
/* Disable ALDPS (link-down power saving) on 810x chips. */
2408 static void r810x_aldps_disable(struct rtl8169_private *tp)
2410 rtl_writephy(tp, 0x1f, 0x0000);
2411 rtl_writephy(tp, 0x18, 0x0310);
/*
 * Enable ALDPS on 810x chips — only once firmware is loaded, since
 * the feature misbehaves on an unpatched PHY.
 */
2415 static void r810x_aldps_enable(struct rtl8169_private *tp)
2417 if (!(tp->features & RTL_FEATURE_FW_LOADED))
2420 rtl_writephy(tp, 0x1f, 0x0000);
2421 rtl_writephy(tp, 0x18, 0x8310);
/* 8168 variant of the firmware-gated ALDPS enable. */
2424 static void r8168_aldps_enable_1(struct rtl8169_private *tp)
2426 if (!(tp->features & RTL_FEATURE_FW_LOADED))
2429 rtl_writephy(tp, 0x1f, 0x0000);
2430 rtl_w1w0_phy(tp, 0x15, 0x1000, 0x0000);
/*
 * Per-chip PHY setup routines.  Each one plays a vendor-supplied
 * register/value script into the PHY via rtl_writephy_batch(); the
 * phy_reg tables themselves sit on dropped lines in this extract.
 */
/* 8169S PHY setup. */
2433 static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
2435 static const struct phy_reg phy_reg_init[] = {
2497 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* 8169SB PHY setup. */
2500 static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
2502 static const struct phy_reg phy_reg_init[] = {
2508 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* Gigabyte 0xe000 boards need one extra PHY write on the 8169SCd. */
2511 static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2513 struct pci_dev *pdev = tp->pci_dev;
2515 if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2516 (pdev->subsystem_device != 0xe000))
2519 rtl_writephy(tp, 0x1f, 0x0001);
2520 rtl_writephy(tp, 0x10, 0xf01b);
2521 rtl_writephy(tp, 0x1f, 0x0000);
/* 8169SCd PHY setup, plus the Gigabyte board quirk above. */
2524 static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
2526 static const struct phy_reg phy_reg_init[] = {
2566 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2568 rtl8169scd_hw_phy_config_quirk(tp);
/* 8169SCe PHY setup. */
2571 static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
2573 static const struct phy_reg phy_reg_init[] = {
2621 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* 8168Bb PHY setup: patch bit 0 of reg 0x16 on page 1 first. */
2624 static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
2626 static const struct phy_reg phy_reg_init[] = {
2631 rtl_writephy(tp, 0x1f, 0x0001);
2632 rtl_patchphy(tp, 0x16, 1 << 0);
2634 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* 8168Bef PHY setup. */
2637 static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
2639 static const struct phy_reg phy_reg_init[] = {
2645 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* 8168CP (rev.1) PHY setup. */
2648 static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
2650 static const struct phy_reg phy_reg_init[] = {
2658 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* 8168CP (rev.2) PHY setup: set bit 5 in regs 0x14 and 0x0d first. */
2661 static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
2663 static const struct phy_reg phy_reg_init[] = {
2669 rtl_writephy(tp, 0x1f, 0x0000);
2670 rtl_patchphy(tp, 0x14, 1 << 5);
2671 rtl_patchphy(tp, 0x0d, 1 << 5);
2673 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* 8168C rev.1 PHY setup. */
2676 static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
2678 static const struct phy_reg phy_reg_init[] = {
2698 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2700 rtl_patchphy(tp, 0x14, 1 << 5);
2701 rtl_patchphy(tp, 0x0d, 1 << 5);
2702 rtl_writephy(tp, 0x1f, 0x0000);
/* 8168C rev.2 PHY setup. */
2705 static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
2707 static const struct phy_reg phy_reg_init[] = {
2725 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2727 rtl_patchphy(tp, 0x16, 1 << 0);
2728 rtl_patchphy(tp, 0x14, 1 << 5);
2729 rtl_patchphy(tp, 0x0d, 1 << 5);
2730 rtl_writephy(tp, 0x1f, 0x0000);
/* 8168C rev.3 PHY setup. */
2733 static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
2735 static const struct phy_reg phy_reg_init[] = {
2747 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2749 rtl_patchphy(tp, 0x16, 1 << 0);
2750 rtl_patchphy(tp, 0x14, 1 << 5);
2751 rtl_patchphy(tp, 0x0d, 1 << 5);
2752 rtl_writephy(tp, 0x1f, 0x0000);
/* The 8168C rev.4 uses the same PHY setup as rev.3. */
static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
{
	rtl8168c_3_hw_phy_config(tp);
}
/*
 * 8168D rev.1 PHY setup.  Plays the base script, then tunes the
 * switching regulator, picks one of two extra scripts based on an
 * EFUSE revision byte (0xb1 vs. other), improves RSET coupling and
 * PLL behaviour, and conditionally applies the firmware patch.
 */
2760 static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
2762 static const struct phy_reg phy_reg_init_0[] = {
2763 /* Channel Estimation */
2784 * Enhance line driver power
2793 * Can not link to 1Gbps with bad cable
2794 * Decrease SNR threshold form 21.07dB to 19.04dB
2803 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2807 * Fine Tune Switching regulator parameter
2809 rtl_writephy(tp, 0x1f, 0x0002);
2810 rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
2811 rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
/* EFUSE byte 0x01 distinguishes two 8168D PHY revisions. */
2813 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2814 static const struct phy_reg phy_reg_init[] = {
2824 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2826 val = rtl_readphy(tp, 0x0d);
/* Walk reg 0x0d through 0x65..0x6c unless it already ends at 0x6c. */
2828 if ((val & 0x00ff) != 0x006c) {
2829 static const u32 set[] = {
2830 0x0065, 0x0066, 0x0067, 0x0068,
2831 0x0069, 0x006a, 0x006b, 0x006c
2835 rtl_writephy(tp, 0x1f, 0x0002);
2838 for (i = 0; i < ARRAY_SIZE(set); i++)
2839 rtl_writephy(tp, 0x0d, val | set[i]);
2842 static const struct phy_reg phy_reg_init[] = {
2850 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2853 /* RSET couple improve */
2854 rtl_writephy(tp, 0x1f, 0x0002);
2855 rtl_patchphy(tp, 0x0d, 0x0300);
2856 rtl_patchphy(tp, 0x0f, 0x0010);
2858 /* Fine tune PLL performance */
2859 rtl_writephy(tp, 0x1f, 0x0002);
2860 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2861 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
2863 rtl_writephy(tp, 0x1f, 0x0005);
2864 rtl_writephy(tp, 0x05, 0x001b);
/* Firmware only applied when MII_EXPANSION reads back 0xbf00. */
2866 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);
2868 rtl_writephy(tp, 0x1f, 0x0000);
/*
 * 8168D rev.2 PHY setup: same structure as rev.1 minus the regulator
 * fine-tuning and RSET steps, with its own firmware gate value.
 */
2871 static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
2873 static const struct phy_reg phy_reg_init_0[] = {
2874 /* Channel Estimation */
2895 * Enhance line driver power
2904 * Can not link to 1Gbps with bad cable
2905 * Decrease SNR threshold form 21.07dB to 19.04dB
2914 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2916 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2917 static const struct phy_reg phy_reg_init[] = {
2928 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2930 val = rtl_readphy(tp, 0x0d);
2931 if ((val & 0x00ff) != 0x006c) {
2932 static const u32 set[] = {
2933 0x0065, 0x0066, 0x0067, 0x0068,
2934 0x0069, 0x006a, 0x006b, 0x006c
2938 rtl_writephy(tp, 0x1f, 0x0002);
2941 for (i = 0; i < ARRAY_SIZE(set); i++)
2942 rtl_writephy(tp, 0x0d, val | set[i]);
2945 static const struct phy_reg phy_reg_init[] = {
2953 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2956 /* Fine tune PLL performance */
2957 rtl_writephy(tp, 0x1f, 0x0002);
2958 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2959 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
2961 /* Switching regulator Slew rate */
2962 rtl_writephy(tp, 0x1f, 0x0002);
2963 rtl_patchphy(tp, 0x0f, 0x0017);
2965 rtl_writephy(tp, 0x1f, 0x0005);
2966 rtl_writephy(tp, 0x05, 0x001b);
2968 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);
2970 rtl_writephy(tp, 0x1f, 0x0000);
/* 8168D rev.3 PHY setup: plain register script. */
2973 static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
2975 static const struct phy_reg phy_reg_init[] = {
3031 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* 8168D rev.4 PHY setup: short script plus one bit patch. */
3034 static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
3036 static const struct phy_reg phy_reg_init[] = {
3046 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3047 rtl_patchphy(tp, 0x0d, 1 << 5);
/* PHY init for RTL8168E rev. 1: load firmware patch, replay a static table,
 * then apply per-feature tweaks. Writes to register 0x1f select a PHY page
 * before the subsequent register accesses. */
3050 static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
3052 static const struct phy_reg phy_reg_init[] = {
3053 /* Enable Delay cap */
3059 /* Channel estimation fine tune */
3068 /* Update PFM & 10M TX idle timer */
3080 rtl_apply_firmware(tp);
3082 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3084 /* DCO enable for 10M IDLE Power */
3085 rtl_writephy(tp, 0x1f, 0x0007);
3086 rtl_writephy(tp, 0x1e, 0x0023);
3087 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
3088 rtl_writephy(tp, 0x1f, 0x0000);
3090 /* For impedance matching */
3091 rtl_writephy(tp, 0x1f, 0x0002);
3092 rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
3093 rtl_writephy(tp, 0x1f, 0x0000);
3095 /* PHY auto speed down */
3096 rtl_writephy(tp, 0x1f, 0x0007);
3097 rtl_writephy(tp, 0x1e, 0x002d);
3098 rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
3099 rtl_writephy(tp, 0x1f, 0x0000);
3100 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
/* Set bit 0 of extended register 0x8b86 (selected via page 5 / reg 0x05). */
3102 rtl_writephy(tp, 0x1f, 0x0005);
3103 rtl_writephy(tp, 0x05, 0x8b86);
3104 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3105 rtl_writephy(tp, 0x1f, 0x0000);
3107 rtl_writephy(tp, 0x1f, 0x0005);
3108 rtl_writephy(tp, 0x05, 0x8b85);
3109 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3110 rtl_writephy(tp, 0x1f, 0x0007);
3111 rtl_writephy(tp, 0x1e, 0x0020);
3112 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
3113 rtl_writephy(tp, 0x1f, 0x0006);
3114 rtl_writephy(tp, 0x00, 0x5a00);
3115 rtl_writephy(tp, 0x1f, 0x0000);
/* NOTE(review): the 0x0d/0x0e write sequence looks like an MMD indirect
 * access (device 7, register 0x3c written to 0, i.e. EEE related) — confirm. */
3116 rtl_writephy(tp, 0x0d, 0x0007);
3117 rtl_writephy(tp, 0x0e, 0x003c);
3118 rtl_writephy(tp, 0x0d, 0x4007);
3119 rtl_writephy(tp, 0x0e, 0x0000);
3120 rtl_writephy(tp, 0x0d, 0x0000);
/* Mirror the 6-byte MAC address into the EXGMAC registers 0xe0/0xe4 and
 * 0xf0/0xf4. The address is first packed little-endian into three 16-bit
 * words, then combined into the 32-bit register values below. */
3123 static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
3126 addr[0] | (addr[1] << 8),
3127 addr[2] | (addr[3] << 8),
3128 addr[4] | (addr[5] << 8)
3130 const struct exgmac_reg e[] = {
3131 { .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) },
3132 { .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] },
3133 { .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 },
3134 { .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) }
3137 rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
/* PHY init for RTL8168E rev. 2: firmware patch, static table, then tweaks.
 * Ends by re-enabling ALDPS and re-writing the MAC address into the GigaMAC
 * registers (broken-BIOS workaround, see comment at the bottom). */
3140 static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
3142 static const struct phy_reg phy_reg_init[] = {
3143 /* Enable Delay cap */
3152 /* Channel estimation fine tune */
3169 rtl_apply_firmware(tp);
3171 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3173 /* For 4-corner performance improve */
3174 rtl_writephy(tp, 0x1f, 0x0005);
3175 rtl_writephy(tp, 0x05, 0x8b80);
3176 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
3177 rtl_writephy(tp, 0x1f, 0x0000);
3179 /* PHY auto speed down */
3180 rtl_writephy(tp, 0x1f, 0x0004);
3181 rtl_writephy(tp, 0x1f, 0x0007);
3182 rtl_writephy(tp, 0x1e, 0x002d);
3183 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3184 rtl_writephy(tp, 0x1f, 0x0002);
3185 rtl_writephy(tp, 0x1f, 0x0000);
3186 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3188 /* improve 10M EEE waveform */
3189 rtl_writephy(tp, 0x1f, 0x0005);
3190 rtl_writephy(tp, 0x05, 0x8b86);
3191 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3192 rtl_writephy(tp, 0x1f, 0x0000);
3194 /* Improve 2-pair detection performance */
3195 rtl_writephy(tp, 0x1f, 0x0005);
3196 rtl_writephy(tp, 0x05, 0x8b85);
3197 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3198 rtl_writephy(tp, 0x1f, 0x0000);
/* Clear the low ERI bits, then disable EEE advertisement in the PHY.
 * NOTE(review): the 0x0d/0x0e sequence looks like MMD device 7 reg 0x3c
 * being cleared (EEE) — confirm against the full file's comments. */
3201 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
3202 rtl_writephy(tp, 0x1f, 0x0005);
3203 rtl_writephy(tp, 0x05, 0x8b85);
3204 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3205 rtl_writephy(tp, 0x1f, 0x0004);
3206 rtl_writephy(tp, 0x1f, 0x0007);
3207 rtl_writephy(tp, 0x1e, 0x0020);
3208 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
3209 rtl_writephy(tp, 0x1f, 0x0002);
3210 rtl_writephy(tp, 0x1f, 0x0000);
3211 rtl_writephy(tp, 0x0d, 0x0007);
3212 rtl_writephy(tp, 0x0e, 0x003c);
3213 rtl_writephy(tp, 0x0d, 0x4007);
3214 rtl_writephy(tp, 0x0e, 0x0000);
3215 rtl_writephy(tp, 0x0d, 0x0000);
/* Clear two green-feature bits on page 3. */
3218 rtl_writephy(tp, 0x1f, 0x0003);
3219 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3220 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3221 rtl_writephy(tp, 0x1f, 0x0000);
3223 r8168_aldps_enable_1(tp);
3225 /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
3226 rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
/* Common PHY tweaks shared by all RTL8168F variants (called by the
 * rtl8168f_*_hw_phy_config and rtl8411 setup paths). */
3229 static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
3231 /* For 4-corner performance improve */
3232 rtl_writephy(tp, 0x1f, 0x0005);
3233 rtl_writephy(tp, 0x05, 0x8b80);
3234 rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
3235 rtl_writephy(tp, 0x1f, 0x0000);
3237 /* PHY auto speed down */
3238 rtl_writephy(tp, 0x1f, 0x0007);
3239 rtl_writephy(tp, 0x1e, 0x002d);
3240 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3241 rtl_writephy(tp, 0x1f, 0x0000);
3242 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3244 /* Improve 10M EEE waveform */
3245 rtl_writephy(tp, 0x1f, 0x0005);
3246 rtl_writephy(tp, 0x05, 0x8b86);
3247 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3248 rtl_writephy(tp, 0x1f, 0x0000);
/* PHY init for RTL8168F rev. 1: firmware + static table + shared 8168F
 * tweaks, one extra 2-pair-detection fix, then ALDPS re-enable. */
3251 static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
3253 static const struct phy_reg phy_reg_init[] = {
3254 /* Channel estimation fine tune */
3259 /* Modify green table for giga & fnet */
3276 /* Modify green table for 10M */
3282 /* Disable hiimpedance detection (RTCT) */
3288 rtl_apply_firmware(tp);
3290 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3292 rtl8168f_hw_phy_config(tp);
3294 /* Improve 2-pair detection performance */
3295 rtl_writephy(tp, 0x1f, 0x0005);
3296 rtl_writephy(tp, 0x05, 0x8b85);
3297 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3298 rtl_writephy(tp, 0x1f, 0x0000);
3300 r8168_aldps_enable_1(tp);
/* PHY init for RTL8168F rev. 2: firmware plus the shared 8168F tweaks only
 * (no extra static table needed for this revision), then ALDPS re-enable. */
3303 static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
3305 rtl_apply_firmware(tp);
3307 rtl8168f_hw_phy_config(tp);
3309 r8168_aldps_enable_1(tp);
/* PHY init for RTL8411: firmware, shared 8168F tweaks, static table, then a
 * long sequence of green-table / EEE-related register fix-ups, finishing
 * with ALDPS re-enable. */
3312 static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3314 static const struct phy_reg phy_reg_init[] = {
3315 /* Channel estimation fine tune */
3320 /* Modify green table for giga & fnet */
3337 /* Modify green table for 10M */
3343 /* Disable hiimpedance detection (RTCT) */
3350 rtl_apply_firmware(tp);
3352 rtl8168f_hw_phy_config(tp);
3354 /* Improve 2-pair detection performance */
3355 rtl_writephy(tp, 0x1f, 0x0005);
3356 rtl_writephy(tp, 0x05, 0x8b85);
3357 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3358 rtl_writephy(tp, 0x1f, 0x0000);
3360 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3362 /* Modify green table for giga */
3363 rtl_writephy(tp, 0x1f, 0x0005);
3364 rtl_writephy(tp, 0x05, 0x8b54);
3365 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3366 rtl_writephy(tp, 0x05, 0x8b5d);
3367 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3368 rtl_writephy(tp, 0x05, 0x8a7c);
3369 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3370 rtl_writephy(tp, 0x05, 0x8a7f);
3371 rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000);
3372 rtl_writephy(tp, 0x05, 0x8a82);
3373 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3374 rtl_writephy(tp, 0x05, 0x8a85);
3375 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3376 rtl_writephy(tp, 0x05, 0x8a88);
3377 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3378 rtl_writephy(tp, 0x1f, 0x0000);
3380 /* uc same-seed solution */
3381 rtl_writephy(tp, 0x1f, 0x0005);
3382 rtl_writephy(tp, 0x05, 0x8b85);
3383 rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000);
3384 rtl_writephy(tp, 0x1f, 0x0000);
/* Clear ERI bits, then the 0x0d/0x0e sequence below.
 * NOTE(review): looks like an MMD (device 7, reg 0x3c) clear, i.e. EEE
 * advertisement disable — confirm. */
3387 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
3388 rtl_writephy(tp, 0x1f, 0x0005);
3389 rtl_writephy(tp, 0x05, 0x8b85);
3390 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3391 rtl_writephy(tp, 0x1f, 0x0004);
3392 rtl_writephy(tp, 0x1f, 0x0007);
3393 rtl_writephy(tp, 0x1e, 0x0020);
3394 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
3395 rtl_writephy(tp, 0x1f, 0x0000);
3396 rtl_writephy(tp, 0x0d, 0x0007);
3397 rtl_writephy(tp, 0x0e, 0x003c);
3398 rtl_writephy(tp, 0x0d, 0x4007);
3399 rtl_writephy(tp, 0x0e, 0x0000);
3400 rtl_writephy(tp, 0x0d, 0x0000);
/* Clear two green-feature bits on page 3. */
3403 rtl_writephy(tp, 0x1f, 0x0003);
3404 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3405 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3406 rtl_writephy(tp, 0x1f, 0x0000);
3408 r8168_aldps_enable_1(tp);
/* PHY init for RTL8168G rev. 1: upload a small MAC-OCP patch (one 16-bit word
 * per OCP register starting at 0xf800), enable it via 0xfc26/0xfc28, apply
 * PHY firmware, then apply conditional OCP register tweaks. */
3411 static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
3413 static const u16 mac_ocp_patch[] = {
3414 0xe008, 0xe01b, 0xe01d, 0xe01f,
3415 0xe021, 0xe023, 0xe025, 0xe027,
3416 0x49d2, 0xf10d, 0x766c, 0x49e2,
3417 0xf00a, 0x1ec0, 0x8ee1, 0xc60a,
3419 0x77c0, 0x4870, 0x9fc0, 0x1ea0,
3420 0xc707, 0x8ee1, 0x9d6c, 0xc603,
3421 0xbe00, 0xb416, 0x0076, 0xe86c,
3422 0xc602, 0xbe00, 0x0000, 0xc602,
3424 0xbe00, 0x0000, 0xc602, 0xbe00,
3425 0x0000, 0xc602, 0xbe00, 0x0000,
3426 0xc602, 0xbe00, 0x0000, 0xc602,
3427 0xbe00, 0x0000, 0xc602, 0xbe00,
3429 0x0000, 0x0000, 0x0000, 0x0000
3433 /* Patch code for GPHY reset */
3434 for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++)
3435 r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]);
3436 r8168_mac_ocp_write(tp, 0xfc26, 0x8000);
3437 r8168_mac_ocp_write(tp, 0xfc28, 0x0075);
3439 rtl_apply_firmware(tp);
/* Toggle 0xbcc4 bit 15 depending on bit 8 of OCP register 0xa460. */
3441 if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100)
3442 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000);
3444 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000);
3446 if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100)
3447 rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000);
3449 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);
3451 rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000);
3452 rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000);
3454 r8168_phy_ocp_write(tp, 0xa436, 0x8012);
3455 rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000);
3457 rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000);
/* PHY init for RTL8102E: set three individual bits via read-modify-write,
 * then replay a static register table (entries elided in this view). */
3460 static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
3462 static const struct phy_reg phy_reg_init[] = {
3469 rtl_writephy(tp, 0x1f, 0x0000);
3470 rtl_patchphy(tp, 0x11, 1 << 12);
3471 rtl_patchphy(tp, 0x19, 1 << 13);
3472 rtl_patchphy(tp, 0x10, 1 << 15);
3474 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* PHY init for RTL8105E: ALDPS must be off while the firmware (ram code) is
 * loaded; it is re-enabled once the static table has been applied. */
3477 static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
3479 static const struct phy_reg phy_reg_init[] = {
3493 /* Disable ALDPS before ram code */
3494 r810x_aldps_disable(tp);
3496 rtl_apply_firmware(tp);
3498 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3500 r810x_aldps_enable(tp);
/* PHY init for RTL8402: disable ALDPS around the firmware load, clear ERI
 * 0x1b0, write a few fixed PHY registers on page 4, re-enable ALDPS. */
3503 static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3505 /* Disable ALDPS before setting firmware */
3506 r810x_aldps_disable(tp);
3508 rtl_apply_firmware(tp);
3511 rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3512 rtl_writephy(tp, 0x1f, 0x0004);
3513 rtl_writephy(tp, 0x10, 0x401f);
3514 rtl_writephy(tp, 0x19, 0x7030);
3515 rtl_writephy(tp, 0x1f, 0x0000);
3517 r810x_aldps_enable(tp);
/* PHY init for RTL8106E: disable ALDPS around the firmware load, clear ERI
 * 0x1b0 and 0x1d0, replay a static table, re-enable ALDPS. */
3520 static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
3522 static const struct phy_reg phy_reg_init[] = {
3529 /* Disable ALDPS before ram code */
3530 r810x_aldps_disable(tp);
3532 rtl_apply_firmware(tp);
3534 rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3535 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3537 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3539 r810x_aldps_enable(tp);
/* Dispatch to the chip-specific PHY configuration routine based on the
 * detected MAC version. NOTE(review): the per-case break statements are
 * elided in this view — confirm against the full file that each case breaks. */
3542 static void rtl_hw_phy_config(struct net_device *dev)
3544 struct rtl8169_private *tp = netdev_priv(dev);
3546 rtl8169_print_mac_version(tp);
3548 switch (tp->mac_version) {
3549 case RTL_GIGA_MAC_VER_01:
3551 case RTL_GIGA_MAC_VER_02:
3552 case RTL_GIGA_MAC_VER_03:
3553 rtl8169s_hw_phy_config(tp);
3555 case RTL_GIGA_MAC_VER_04:
3556 rtl8169sb_hw_phy_config(tp);
3558 case RTL_GIGA_MAC_VER_05:
3559 rtl8169scd_hw_phy_config(tp);
3561 case RTL_GIGA_MAC_VER_06:
3562 rtl8169sce_hw_phy_config(tp);
3564 case RTL_GIGA_MAC_VER_07:
3565 case RTL_GIGA_MAC_VER_08:
3566 case RTL_GIGA_MAC_VER_09:
3567 rtl8102e_hw_phy_config(tp);
3569 case RTL_GIGA_MAC_VER_11:
3570 rtl8168bb_hw_phy_config(tp);
3572 case RTL_GIGA_MAC_VER_12:
3573 rtl8168bef_hw_phy_config(tp);
3575 case RTL_GIGA_MAC_VER_17:
3576 rtl8168bef_hw_phy_config(tp);
3578 case RTL_GIGA_MAC_VER_18:
3579 rtl8168cp_1_hw_phy_config(tp);
3581 case RTL_GIGA_MAC_VER_19:
3582 rtl8168c_1_hw_phy_config(tp);
3584 case RTL_GIGA_MAC_VER_20:
3585 rtl8168c_2_hw_phy_config(tp);
3587 case RTL_GIGA_MAC_VER_21:
3588 rtl8168c_3_hw_phy_config(tp);
3590 case RTL_GIGA_MAC_VER_22:
3591 rtl8168c_4_hw_phy_config(tp);
3593 case RTL_GIGA_MAC_VER_23:
3594 case RTL_GIGA_MAC_VER_24:
3595 rtl8168cp_2_hw_phy_config(tp);
3597 case RTL_GIGA_MAC_VER_25:
3598 rtl8168d_1_hw_phy_config(tp);
3600 case RTL_GIGA_MAC_VER_26:
3601 rtl8168d_2_hw_phy_config(tp);
3603 case RTL_GIGA_MAC_VER_27:
3604 rtl8168d_3_hw_phy_config(tp);
3606 case RTL_GIGA_MAC_VER_28:
3607 rtl8168d_4_hw_phy_config(tp);
3609 case RTL_GIGA_MAC_VER_29:
3610 case RTL_GIGA_MAC_VER_30:
3611 rtl8105e_hw_phy_config(tp);
/* VER_31 (8168DP): no PHY config routine is called. */
3613 case RTL_GIGA_MAC_VER_31:
3616 case RTL_GIGA_MAC_VER_32:
3617 case RTL_GIGA_MAC_VER_33:
3618 rtl8168e_1_hw_phy_config(tp);
3620 case RTL_GIGA_MAC_VER_34:
3621 rtl8168e_2_hw_phy_config(tp);
3623 case RTL_GIGA_MAC_VER_35:
3624 rtl8168f_1_hw_phy_config(tp);
3626 case RTL_GIGA_MAC_VER_36:
3627 rtl8168f_2_hw_phy_config(tp);
3630 case RTL_GIGA_MAC_VER_37:
3631 rtl8402_hw_phy_config(tp);
3634 case RTL_GIGA_MAC_VER_38:
3635 rtl8411_hw_phy_config(tp);
3638 case RTL_GIGA_MAC_VER_39:
3639 rtl8106e_hw_phy_config(tp);
3642 case RTL_GIGA_MAC_VER_40:
3643 rtl8168g_1_hw_phy_config(tp);
/* VER_41 and the default case fall through the elided tail of the switch. */
3646 case RTL_GIGA_MAC_VER_41:
/* Periodic PHY watchdog: while a PHY reset is pending or the link is down,
 * re-arm the timer (and retrigger a PHY reset) until link comes up.
 * NOTE(review): the early-return / timeout-adjust branches are elided here. */
3652 static void rtl_phy_work(struct rtl8169_private *tp)
3654 struct timer_list *timer = &tp->timer;
3655 void __iomem *ioaddr = tp->mmio_addr;
3656 unsigned long timeout = RTL8169_PHY_TIMEOUT;
3658 assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
3660 if (tp->phy_reset_pending(tp)) {
3662 * A busy loop could burn quite a few cycles on nowadays CPU.
3663 * Let's delay the execution of the timer for a few ticks.
3669 if (tp->link_ok(ioaddr))
3672 netif_warn(tp, link, tp->dev, "PHY reset until link up\n");
3674 tp->phy_reset_enable(tp);
3677 mod_timer(timer, jiffies + timeout);
/* Queue the driver workqueue item for the given task flag; the
 * test_and_set_bit() guard prevents double-scheduling the same task. */
3680 static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
3682 if (!test_and_set_bit(flag, tp->wk.flags))
3683 schedule_work(&tp->wk.work);
/* Timer callback: defer the actual PHY work to process context by flagging
 * the PHY-pending task. */
3686 static void rtl8169_phy_timer(unsigned long __opaque)
3688 struct net_device *dev = (struct net_device *)__opaque;
3689 struct rtl8169_private *tp = netdev_priv(dev);
3691 rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
/* Tear down the PCI resources acquired at probe time (regions, MWI, device
 * enable). NOTE(review): the iounmap/free_netdev lines appear elided here. */
3694 static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
3695 void __iomem *ioaddr)
3698 pci_release_regions(pdev);
3699 pci_clear_mwi(pdev);
3700 pci_disable_device(pdev);
/* Poll condition: true while a PHY reset is still pending. */
3704 DECLARE_RTL_COND(rtl_phy_reset_cond)
3706 return tp->phy_reset_pending(tp);
/* Trigger a PHY reset and wait (1 ms steps, up to 100 iterations) for the
 * reset-pending condition to clear. */
3709 static void rtl8169_phy_reset(struct net_device *dev,
3710 struct rtl8169_private *tp)
3712 tp->phy_reset_enable(tp);
3713 rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
/* True when the chip is the original 8169 (VER_01) and its PHY status
 * register reports TBI (ten-bit interface) mode enabled. */
3716 static bool rtl_tbi_enabled(struct rtl8169_private *tp)
3718 void __iomem *ioaddr = tp->mmio_addr;
3720 return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
3721 (RTL_R8(PHYstatus) & TBI_Enable);
/* Full PHY bring-up: chip-specific config, a few PCI config-space quirks for
 * old chips, a PHY reset, then autonegotiation advertising all rates the MII
 * supports (gigabit only when the PHY reports GMII support). */
3724 static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
3726 void __iomem *ioaddr = tp->mmio_addr;
3728 rtl_hw_phy_config(dev);
3730 if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
3731 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
3735 pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
3737 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
3738 pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
3740 if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
3741 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
3743 dprintk("Set PHY Reg 0x0bh = 0x00h\n");
3744 rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
3747 rtl8169_phy_reset(dev, tp);
3749 rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
3750 ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
3751 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
3752 (tp->mii.supports_gmii ?
3753 ADVERTISED_1000baseT_Half |
3754 ADVERTISED_1000baseT_Full : 0));
3756 if (rtl_tbi_enabled(tp))
3757 netif_info(tp, link, dev, "TBI auto-negotiating\n");
/* Program the unicast MAC address into the MAC0/MAC4 registers (the config
 * registers must be unlocked via Cfg9346 while writing). VER_34 additionally
 * mirrors the address into the EXGMAC registers. */
3760 static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
3762 void __iomem *ioaddr = tp->mmio_addr;
3766 RTL_W8(Cfg9346, Cfg9346_Unlock);
3768 RTL_W32(MAC4, addr[4] | addr[5] << 8);
3771 RTL_W32(MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
3774 if (tp->mac_version == RTL_GIGA_MAC_VER_34)
3775 rtl_rar_exgmac_set(tp, addr);
3777 RTL_W8(Cfg9346, Cfg9346_Lock);
3779 rtl_unlock_work(tp);
/* ndo_set_mac_address hook: validate the new address, store it in the netdev,
 * and push it to the hardware registers. */
3782 static int rtl_set_mac_address(struct net_device *dev, void *p)
3784 struct rtl8169_private *tp = netdev_priv(dev);
3785 struct sockaddr *addr = p;
3787 if (!is_valid_ether_addr(addr->sa_data))
3788 return -EADDRNOTAVAIL;
3790 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3792 rtl_rar_set(tp, dev->dev_addr);
/* ndo_do_ioctl hook: forward MII ioctls to the chip-specific handler; only
 * valid while the interface is running. */
3797 static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3799 struct rtl8169_private *tp = netdev_priv(dev);
3800 struct mii_ioctl_data *data = if_mii(ifr);
3802 return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
/* MII ioctl handler for xMII chips: report the fixed internal PHY address,
 * and service register read/write requests (register numbers masked to 5
 * bits). NOTE(review): the switch/case framing is elided in this view. */
3805 static int rtl_xmii_ioctl(struct rtl8169_private *tp,
3806 struct mii_ioctl_data *data, int cmd)
3810 data->phy_id = 32; /* Internal PHY */
3814 data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
3818 rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
/* MII ioctl handler for TBI chips; body elided in this view — presumably a
 * not-supported stub, TODO confirm against the full file. */
3824 static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
/* Disable MSI on the device if this driver instance enabled it, and clear
 * the corresponding feature flag. */
3829 static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
3831 if (tp->features & RTL_FEATURE_MSI) {
3832 pci_disable_msi(pdev);
3833 tp->features &= ~RTL_FEATURE_MSI;
/* Select the MDIO read/write implementation for the detected chip family;
 * plain r8169 accessors are the default. NOTE(review): per-case breaks are
 * elided in this view. */
3837 static void rtl_init_mdio_ops(struct rtl8169_private *tp)
3839 struct mdio_ops *ops = &tp->mdio_ops;
3841 switch (tp->mac_version) {
3842 case RTL_GIGA_MAC_VER_27:
3843 ops->write = r8168dp_1_mdio_write;
3844 ops->read = r8168dp_1_mdio_read;
3846 case RTL_GIGA_MAC_VER_28:
3847 case RTL_GIGA_MAC_VER_31:
3848 ops->write = r8168dp_2_mdio_write;
3849 ops->read = r8168dp_2_mdio_read;
3851 case RTL_GIGA_MAC_VER_40:
3852 case RTL_GIGA_MAC_VER_41:
3853 ops->write = r8168g_mdio_write;
3854 ops->read = r8168g_mdio_read;
3857 ops->write = r8169_mdio_write;
3858 ops->read = r8169_mdio_read;
/* Suspend-time Wake-on-LAN quirk: on the listed chips, keep the receiver
 * accepting broadcast/multicast/unicast so WoL packets are seen. */
3863 static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
3865 void __iomem *ioaddr = tp->mmio_addr;
3867 switch (tp->mac_version) {
3868 case RTL_GIGA_MAC_VER_25:
3869 case RTL_GIGA_MAC_VER_26:
3870 case RTL_GIGA_MAC_VER_29:
3871 case RTL_GIGA_MAC_VER_30:
3872 case RTL_GIGA_MAC_VER_32:
3873 case RTL_GIGA_MAC_VER_33:
3874 case RTL_GIGA_MAC_VER_34:
3875 case RTL_GIGA_MAC_VER_37:
3876 case RTL_GIGA_MAC_VER_38:
3877 case RTL_GIGA_MAC_VER_39:
3878 case RTL_GIGA_MAC_VER_40:
3879 case RTL_GIGA_MAC_VER_41:
3880 RTL_W32(RxConfig, RTL_R32(RxConfig) |
3881 AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
/* If any WoL source is armed, put the PHY into its lowest autoneg state and
 * apply the suspend RX quirk. Returns false when WoL is not configured (the
 * return-true path after the quirk is elided in this view). */
3888 static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
3890 if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
3893 rtl_writephy(tp, 0x1f, 0x0000);
3894 rtl_writephy(tp, MII_BMCR, 0x0000);
3896 rtl_wol_suspend_quirk(tp);
/* Power down the 810x PHY by setting BMCR power-down on page 0. */
3901 static void r810x_phy_power_down(struct rtl8169_private *tp)
3903 rtl_writephy(tp, 0x1f, 0x0000);
3904 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
/* Power up the 810x PHY: clear power-down and re-enable autonegotiation. */
3907 static void r810x_phy_power_up(struct rtl8169_private *tp)
3909 rtl_writephy(tp, 0x1f, 0x0000);
3910 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
/* PLL/PHY power-down for 810x chips. If WoL keeps the PHY alive, skip the
 * full power-down; otherwise power the PHY down and, for chips not in the
 * listed exception set, also drop the PMCH power bit (0x80). */
3913 static void r810x_pll_power_down(struct rtl8169_private *tp)
3915 void __iomem *ioaddr = tp->mmio_addr;
3917 if (rtl_wol_pll_power_down(tp))
3920 r810x_phy_power_down(tp);
3922 switch (tp->mac_version) {
3923 case RTL_GIGA_MAC_VER_07:
3924 case RTL_GIGA_MAC_VER_08:
3925 case RTL_GIGA_MAC_VER_09:
3926 case RTL_GIGA_MAC_VER_10:
3927 case RTL_GIGA_MAC_VER_13:
3928 case RTL_GIGA_MAC_VER_16:
3931 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
/* PLL/PHY power-up for 810x chips: bring the PHY up, then (except for the
 * listed chips) set the PMCH power bit (0x80). */
3936 static void r810x_pll_power_up(struct rtl8169_private *tp)
3938 void __iomem *ioaddr = tp->mmio_addr;
3940 r810x_phy_power_up(tp);
3942 switch (tp->mac_version) {
3943 case RTL_GIGA_MAC_VER_07:
3944 case RTL_GIGA_MAC_VER_08:
3945 case RTL_GIGA_MAC_VER_09:
3946 case RTL_GIGA_MAC_VER_10:
3947 case RTL_GIGA_MAC_VER_13:
3948 case RTL_GIGA_MAC_VER_16:
3951 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
/* Power up the 8168 PHY. The 8168B..DP families additionally need register
 * 0x0e cleared; all chips then get autonegotiation re-enabled via BMCR. */
3956 static void r8168_phy_power_up(struct rtl8169_private *tp)
3958 rtl_writephy(tp, 0x1f, 0x0000);
3959 switch (tp->mac_version) {
3960 case RTL_GIGA_MAC_VER_11:
3961 case RTL_GIGA_MAC_VER_12:
3962 case RTL_GIGA_MAC_VER_17:
3963 case RTL_GIGA_MAC_VER_18:
3964 case RTL_GIGA_MAC_VER_19:
3965 case RTL_GIGA_MAC_VER_20:
3966 case RTL_GIGA_MAC_VER_21:
3967 case RTL_GIGA_MAC_VER_22:
3968 case RTL_GIGA_MAC_VER_23:
3969 case RTL_GIGA_MAC_VER_24:
3970 case RTL_GIGA_MAC_VER_25:
3971 case RTL_GIGA_MAC_VER_26:
3972 case RTL_GIGA_MAC_VER_27:
3973 case RTL_GIGA_MAC_VER_28:
3974 case RTL_GIGA_MAC_VER_31:
3975 rtl_writephy(tp, 0x0e, 0x0000);
3980 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
/* Power down the 8168 PHY. VER_32/33 keep autoneg enabled while powering
 * down; the 8168B..DP families first write 0x0200 to register 0x0e; all
 * paths end with BMCR power-down. NOTE(review): break/early-return framing
 * between the cases is elided in this view. */
3983 static void r8168_phy_power_down(struct rtl8169_private *tp)
3985 rtl_writephy(tp, 0x1f, 0x0000);
3986 switch (tp->mac_version) {
3987 case RTL_GIGA_MAC_VER_32:
3988 case RTL_GIGA_MAC_VER_33:
3989 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
3992 case RTL_GIGA_MAC_VER_11:
3993 case RTL_GIGA_MAC_VER_12:
3994 case RTL_GIGA_MAC_VER_17:
3995 case RTL_GIGA_MAC_VER_18:
3996 case RTL_GIGA_MAC_VER_19:
3997 case RTL_GIGA_MAC_VER_20:
3998 case RTL_GIGA_MAC_VER_21:
3999 case RTL_GIGA_MAC_VER_22:
4000 case RTL_GIGA_MAC_VER_23:
4001 case RTL_GIGA_MAC_VER_24:
4002 case RTL_GIGA_MAC_VER_25:
4003 case RTL_GIGA_MAC_VER_26:
4004 case RTL_GIGA_MAC_VER_27:
4005 case RTL_GIGA_MAC_VER_28:
4006 case RTL_GIGA_MAC_VER_31:
4007 rtl_writephy(tp, 0x0e, 0x0200);
4009 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
/* PLL/PHY power-down for 8168 chips. Skips power-down entirely when a DASH
 * management agent (8168DP) or ASF is active, or when WoL needs the PHY.
 * VER_32/33 also get an EPHY write before the decision. */
4014 static void r8168_pll_power_down(struct rtl8169_private *tp)
4016 void __iomem *ioaddr = tp->mmio_addr;
4018 if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
4019 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
4020 tp->mac_version == RTL_GIGA_MAC_VER_31) &&
4021 r8168dp_check_dash(tp)) {
4025 if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
4026 tp->mac_version == RTL_GIGA_MAC_VER_24) &&
4027 (RTL_R16(CPlusCmd) & ASF)) {
4031 if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
4032 tp->mac_version == RTL_GIGA_MAC_VER_33)
4033 rtl_ephy_write(tp, 0x19, 0xff64);
4035 if (rtl_wol_pll_power_down(tp))
4038 r8168_phy_power_down(tp);
4040 switch (tp->mac_version) {
4041 case RTL_GIGA_MAC_VER_25:
4042 case RTL_GIGA_MAC_VER_26:
4043 case RTL_GIGA_MAC_VER_27:
4044 case RTL_GIGA_MAC_VER_28:
4045 case RTL_GIGA_MAC_VER_31:
4046 case RTL_GIGA_MAC_VER_32:
4047 case RTL_GIGA_MAC_VER_33:
4048 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
/* PLL/PHY power-up for 8168 chips: restore the PMCH power bit on the chips
 * that had it cleared on the power-down path, then power the PHY up. */
4053 static void r8168_pll_power_up(struct rtl8169_private *tp)
4055 void __iomem *ioaddr = tp->mmio_addr;
4057 switch (tp->mac_version) {
4058 case RTL_GIGA_MAC_VER_25:
4059 case RTL_GIGA_MAC_VER_26:
4060 case RTL_GIGA_MAC_VER_27:
4061 case RTL_GIGA_MAC_VER_28:
4062 case RTL_GIGA_MAC_VER_31:
4063 case RTL_GIGA_MAC_VER_32:
4064 case RTL_GIGA_MAC_VER_33:
4065 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
4069 r8168_phy_power_up(tp);
/* Invoke a chip-specific callback on tp; body elided in this view —
 * presumably a NULL-check around op(tp), TODO confirm against full file. */
4072 static void rtl_generic_op(struct rtl8169_private *tp,
4073 void (*op)(struct rtl8169_private *))
/* Dispatch PLL power-down through the chip-selected ops table. */
4079 static void rtl_pll_power_down(struct rtl8169_private *tp)
4081 rtl_generic_op(tp, tp->pll_power_ops.down);
/* Dispatch PLL power-up through the chip-selected ops table. */
4084 static void rtl_pll_power_up(struct rtl8169_private *tp)
4086 rtl_generic_op(tp, tp->pll_power_ops.up);
/* Select the PLL power up/down callbacks: r810x_* for the 810x family,
 * r8168_* for the 8168 family; chips not listed keep NULL/default ops
 * (default case elided in this view). */
4089 static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
4091 struct pll_power_ops *ops = &tp->pll_power_ops;
4093 switch (tp->mac_version) {
4094 case RTL_GIGA_MAC_VER_07:
4095 case RTL_GIGA_MAC_VER_08:
4096 case RTL_GIGA_MAC_VER_09:
4097 case RTL_GIGA_MAC_VER_10:
4098 case RTL_GIGA_MAC_VER_16:
4099 case RTL_GIGA_MAC_VER_29:
4100 case RTL_GIGA_MAC_VER_30:
4101 case RTL_GIGA_MAC_VER_37:
4102 case RTL_GIGA_MAC_VER_39:
4103 ops->down = r810x_pll_power_down;
4104 ops->up = r810x_pll_power_up;
4107 case RTL_GIGA_MAC_VER_11:
4108 case RTL_GIGA_MAC_VER_12:
4109 case RTL_GIGA_MAC_VER_17:
4110 case RTL_GIGA_MAC_VER_18:
4111 case RTL_GIGA_MAC_VER_19:
4112 case RTL_GIGA_MAC_VER_20:
4113 case RTL_GIGA_MAC_VER_21:
4114 case RTL_GIGA_MAC_VER_22:
4115 case RTL_GIGA_MAC_VER_23:
4116 case RTL_GIGA_MAC_VER_24:
4117 case RTL_GIGA_MAC_VER_25:
4118 case RTL_GIGA_MAC_VER_26:
4119 case RTL_GIGA_MAC_VER_27:
4120 case RTL_GIGA_MAC_VER_28:
4121 case RTL_GIGA_MAC_VER_31:
4122 case RTL_GIGA_MAC_VER_32:
4123 case RTL_GIGA_MAC_VER_33:
4124 case RTL_GIGA_MAC_VER_34:
4125 case RTL_GIGA_MAC_VER_35:
4126 case RTL_GIGA_MAC_VER_36:
4127 case RTL_GIGA_MAC_VER_38:
4128 case RTL_GIGA_MAC_VER_40:
4129 case RTL_GIGA_MAC_VER_41:
4130 ops->down = r8168_pll_power_down;
4131 ops->up = r8168_pll_power_up;
/* Program the base RxConfig value per chip generation: legacy chips use a
 * FIFO threshold, 8168C..CP/VER_34 add multi-descriptor RX, everything else
 * (default case, elided here) uses the 128-int-enable variant. */
4141 static void rtl_init_rxcfg(struct rtl8169_private *tp)
4143 void __iomem *ioaddr = tp->mmio_addr;
4145 switch (tp->mac_version) {
4146 case RTL_GIGA_MAC_VER_01:
4147 case RTL_GIGA_MAC_VER_02:
4148 case RTL_GIGA_MAC_VER_03:
4149 case RTL_GIGA_MAC_VER_04:
4150 case RTL_GIGA_MAC_VER_05:
4151 case RTL_GIGA_MAC_VER_06:
4152 case RTL_GIGA_MAC_VER_10:
4153 case RTL_GIGA_MAC_VER_11:
4154 case RTL_GIGA_MAC_VER_12:
4155 case RTL_GIGA_MAC_VER_13:
4156 case RTL_GIGA_MAC_VER_14:
4157 case RTL_GIGA_MAC_VER_15:
4158 case RTL_GIGA_MAC_VER_16:
4159 case RTL_GIGA_MAC_VER_17:
4160 RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
4162 case RTL_GIGA_MAC_VER_18:
4163 case RTL_GIGA_MAC_VER_19:
4164 case RTL_GIGA_MAC_VER_20:
4165 case RTL_GIGA_MAC_VER_21:
4166 case RTL_GIGA_MAC_VER_22:
4167 case RTL_GIGA_MAC_VER_23:
4168 case RTL_GIGA_MAC_VER_24:
4169 case RTL_GIGA_MAC_VER_34:
4170 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
4173 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
/* Reset all TX/RX descriptor ring indices to the start of the rings. */
4178 static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
4180 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
/* Enable jumbo frames via the chip-specific callback, with the config
 * registers unlocked for the duration. */
4183 static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
4185 void __iomem *ioaddr = tp->mmio_addr;
4187 RTL_W8(Cfg9346, Cfg9346_Unlock);
4188 rtl_generic_op(tp, tp->jumbo_ops.enable);
4189 RTL_W8(Cfg9346, Cfg9346_Lock);
/* Disable jumbo frames via the chip-specific callback, with the config
 * registers unlocked for the duration. */
4192 static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
4194 void __iomem *ioaddr = tp->mmio_addr;
4196 RTL_W8(Cfg9346, Cfg9346_Unlock);
4197 rtl_generic_op(tp, tp->jumbo_ops.disable);
4198 RTL_W8(Cfg9346, Cfg9346_Lock);
/* 8168C jumbo enable: set both jumbo bits and lower the PCIe max read
 * request size (0x2 encoding) for jumbo operation. */
4201 static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
4203 void __iomem *ioaddr = tp->mmio_addr;
4205 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4206 RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
4207 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
/* 8168C jumbo disable: clear both jumbo bits and restore the larger PCIe
 * max read request size (0x5 encoding). */
4210 static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
4212 void __iomem *ioaddr = tp->mmio_addr;
4214 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
4215 RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
4216 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
/* 8168DP jumbo enable: only the Config3 jumbo bit is needed. */
4219 static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
4221 void __iomem *ioaddr = tp->mmio_addr;
4223 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
/* 8168DP jumbo disable: clear the Config3 jumbo bit. */
4226 static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
4228 void __iomem *ioaddr = tp->mmio_addr;
4230 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
/* 8168E jumbo enable: raise the max TX packet size, set the jumbo and
 * Config4 bit-0 flags, and lower the PCIe max read request size. */
4233 static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
4235 void __iomem *ioaddr = tp->mmio_addr;
4237 RTL_W8(MaxTxPacketSize, 0x3f);
4238 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4239 RTL_W8(Config4, RTL_R8(Config4) | 0x01);
4240 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
/* 8168E jumbo disable: restore the standard max TX packet size, clear the
 * jumbo flags, and restore the larger PCIe max read request size. */
4243 static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
4245 void __iomem *ioaddr = tp->mmio_addr;
4247 RTL_W8(MaxTxPacketSize, 0x0c);
4248 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
4249 RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
4250 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
/* 8168B rev. 0 jumbo enable: PCIe tweak only (small read request + no-snoop). */
4253 static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
4255 rtl_tx_performance_tweak(tp->pci_dev,
4256 (0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
/* 8168B rev. 0 jumbo disable: restore the larger read request size. */
4259 static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
4261 rtl_tx_performance_tweak(tp->pci_dev,
4262 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
/* 8168B rev. 1 jumbo enable: rev. 0 tweak plus Config4 bit 0. */
4265 static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
4267 void __iomem *ioaddr = tp->mmio_addr;
4269 r8168b_0_hw_jumbo_enable(tp);
4271 RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
/* 8168B rev. 1 jumbo disable: rev. 0 restore plus clearing Config4 bit 0. */
4274 static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
4276 void __iomem *ioaddr = tp->mmio_addr;
4278 r8168b_0_hw_jumbo_disable(tp);
4280 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
/* Select the jumbo-frame enable/disable callbacks per chip family; chips
 * with no jumbo support (8169, 810x, 8168G) get NULL ops. NOTE(review):
 * per-case breaks are elided in this view. */
4283 static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
4285 struct jumbo_ops *ops = &tp->jumbo_ops;
4287 switch (tp->mac_version) {
4288 case RTL_GIGA_MAC_VER_11:
4289 ops->disable = r8168b_0_hw_jumbo_disable;
4290 ops->enable = r8168b_0_hw_jumbo_enable;
4292 case RTL_GIGA_MAC_VER_12:
4293 case RTL_GIGA_MAC_VER_17:
4294 ops->disable = r8168b_1_hw_jumbo_disable;
4295 ops->enable = r8168b_1_hw_jumbo_enable;
4297 case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
4298 case RTL_GIGA_MAC_VER_19:
4299 case RTL_GIGA_MAC_VER_20:
4300 case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
4301 case RTL_GIGA_MAC_VER_22:
4302 case RTL_GIGA_MAC_VER_23:
4303 case RTL_GIGA_MAC_VER_24:
4304 case RTL_GIGA_MAC_VER_25:
4305 case RTL_GIGA_MAC_VER_26:
4306 ops->disable = r8168c_hw_jumbo_disable;
4307 ops->enable = r8168c_hw_jumbo_enable;
4309 case RTL_GIGA_MAC_VER_27:
4310 case RTL_GIGA_MAC_VER_28:
4311 ops->disable = r8168dp_hw_jumbo_disable;
4312 ops->enable = r8168dp_hw_jumbo_enable;
4314 case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
4315 case RTL_GIGA_MAC_VER_32:
4316 case RTL_GIGA_MAC_VER_33:
4317 case RTL_GIGA_MAC_VER_34:
4318 ops->disable = r8168e_hw_jumbo_disable;
4319 ops->enable = r8168e_hw_jumbo_enable;
4323 * No action needed for jumbo frames with 8169.
4324 * No jumbo for 810x at all.
4326 case RTL_GIGA_MAC_VER_40:
4327 case RTL_GIGA_MAC_VER_41:
4329 ops->disable = NULL;
/* Poll condition: true while the chip's software-reset bit is still set. */
4335 DECLARE_RTL_COND(rtl_chipcmd_cond)
4337 void __iomem *ioaddr = tp->mmio_addr;
4339 return RTL_R8(ChipCmd) & CmdReset;
/* Issue a chip software reset and wait (100 us steps, up to 100 iterations)
 * for the reset bit to self-clear. */
4342 static void rtl_hw_reset(struct rtl8169_private *tp)
4344 void __iomem *ioaddr = tp->mmio_addr;
4346 RTL_W8(ChipCmd, CmdReset);
4348 rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
/* Load and validate the chip's firmware patch file. On success the verified
 * blob is cached in tp->rtl_fw; on any failure the firmware is released and
 * a warning is logged (goto-cleanup labels partially elided in this view). */
4351 static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
4353 struct rtl_fw *rtl_fw;
4357 name = rtl_lookup_firmware_name(tp);
4359 goto out_no_firmware;
4361 rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
4365 rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
4369 rc = rtl_check_firmware(tp, rtl_fw);
4371 goto err_release_firmware;
4373 tp->rtl_fw = rtl_fw;
4377 err_release_firmware:
4378 release_firmware(rtl_fw->fw);
4382 netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
/* Fetch the firmware only if it has not been loaded yet (tp->rtl_fw holds an
 * ERR_PTR sentinel until the first successful load). */
4389 static void rtl_request_firmware(struct rtl8169_private *tp)
4391 if (IS_ERR(tp->rtl_fw))
4392 rtl_request_uncached_firmware(tp);
/* Stop packet reception by clearing all accept bits in RxConfig. */
4395 static void rtl_rx_close(struct rtl8169_private *tp)
4397 void __iomem *ioaddr = tp->mmio_addr;
4399 RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
/* Poll condition: true while the normal-priority TX queue is still polling. */
4402 DECLARE_RTL_COND(rtl_npq_cond)
4404 void __iomem *ioaddr = tp->mmio_addr;
4406 return RTL_R8(TxPoll) & NPQ;
/* Poll condition: true once the TX FIFO reports empty via TxConfig. */
4409 DECLARE_RTL_COND(rtl_txcfg_empty_cond)
4411 void __iomem *ioaddr = tp->mmio_addr;
4413 return RTL_R32(TxConfig) & TXCFG_EMPTY;
/* Quiesce the NIC before a reset: mask/ack interrupts, then wait for pending
 * TX to drain using the method appropriate to the chip family (NPQ poll for
 * 8168DP, StopReq + TX-empty poll for newer chips, plain StopReq otherwise).
 * NOTE(review): the final rtl_hw_reset() call appears elided in this view. */
4416 static void rtl8169_hw_reset(struct rtl8169_private *tp)
4418 void __iomem *ioaddr = tp->mmio_addr;
4420 /* Disable interrupts */
4421 rtl8169_irq_mask_and_ack(tp);
4425 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
4426 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
4427 tp->mac_version == RTL_GIGA_MAC_VER_31) {
4428 rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
4429 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
4430 tp->mac_version == RTL_GIGA_MAC_VER_35 ||
4431 tp->mac_version == RTL_GIGA_MAC_VER_36 ||
4432 tp->mac_version == RTL_GIGA_MAC_VER_37 ||
4433 tp->mac_version == RTL_GIGA_MAC_VER_40 ||
4434 tp->mac_version == RTL_GIGA_MAC_VER_41 ||
4435 tp->mac_version == RTL_GIGA_MAC_VER_38) {
4436 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
4437 rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
4439 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
/* Program TxConfig with the DMA burst size and inter-frame gap fields. */
4446 static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
4448 void __iomem *ioaddr = tp->mmio_addr;
4450 /* Set DMA burst size and Interframe Gap Time */
4451 RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
4452 (InterFrameGap << TxInterFrameGapShift));
/* Start the hardware (chip-specific start call elided in this view) and
 * unmask all interrupts. */
4455 static void rtl_hw_start(struct net_device *dev)
4457 struct rtl8169_private *tp = netdev_priv(dev);
4461 rtl_irq_enable_all(tp);
/* Write the 64-bit TX/RX descriptor ring base addresses, split into
 * high/low 32-bit register pairs; ordering matters (see comment below). */
4464 static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
4465 void __iomem *ioaddr)
4468 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
4469 * register to be written before TxDescAddrLow to work.
4470 * Switching from MMIO to I/O access fixes the issue as well.
4472 RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
4473 RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
4474 RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
4475 RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
/* Read CPlusCmd, write the same value back (read-modify-write flush), and
 * return the value read. */
4478 static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
4482 cmd = RTL_R16(CPlusCmd);
4483 RTL_W16(CPlusCmd, cmd);
/* Set the RX size filter just above the buffer size — effectively disabling
 * it, since a tight limit causes problems (see comment). */
4487 static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
4489 /* Low hurts. Let's disable the filtering. */
4490 RTL_W16(RxMaxSize, rx_buf_sz + 1);
/* For 8110SCd/SCe chips, write an undocumented ("magic") value to register
 * 0x7c chosen by MAC version and detected PCI clock speed (33 vs 66 MHz). */
4493 static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
4495 static const struct rtl_cfg2_info {
4500 { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
4501 { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
4502 { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
4503 { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
4505 const struct rtl_cfg2_info *p = cfg2_info;
4509 clk = RTL_R8(Config2) & PCI_Clock_66MHz;
4510 for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
4511 if ((p->mac_version == mac_version) && (p->clk == clk)) {
4512 RTL_W32(0x7c, p->val);
/* Configure RX filtering from dev->flags and the multicast list:
 * promiscuous accepts everything, too-many-multicast/allmulti accepts all
 * multicast, otherwise a 64-bit CRC hash filter (MAR0/MAR4) is built.
 */
4518 static void rtl_set_rx_mode(struct net_device *dev)
4520 	struct rtl8169_private *tp = netdev_priv(dev);
4521 	void __iomem *ioaddr = tp->mmio_addr;
4522 	u32 mc_filter[2];	/* Multicast hash filter */
4526 	if (dev->flags & IFF_PROMISC) {
4527 		/* Unconditionally log net taps. */
4528 		netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
4530 			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
4532 		mc_filter[1] = mc_filter[0] = 0xffffffff;
4533 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
4534 		   (dev->flags & IFF_ALLMULTI)) {
4535 		/* Too many to filter perfectly -- accept all multicasts. */
4536 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
4537 		mc_filter[1] = mc_filter[0] = 0xffffffff;
4539 		struct netdev_hw_addr *ha;
4541 		rx_mode = AcceptBroadcast | AcceptMyPhys;
4542 		mc_filter[1] = mc_filter[0] = 0;
4543 		netdev_for_each_mc_addr(ha, dev) {
			/* Top 6 bits of the Ethernet CRC select one of 64
			 * hash-filter bits. */
4544 			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
4545 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
4546 			rx_mode |= AcceptMulticast;
4550 	if (dev->features & NETIF_F_RXALL)
4551 		rx_mode |= (AcceptErr | AcceptRunt);
4553 	tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
4555 	if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
4556 		u32 data = mc_filter[0];
		/* Newer chips expect the hash words swapped and byte-reversed. */
4558 		mc_filter[0] = swab32(mc_filter[1]);
4559 		mc_filter[1] = swab32(data);
	/* NOTE(review): VER_35 unconditionally opens the multicast filter —
	 * presumably a hardware filter erratum on 8168evl; confirm. */
4562 	if (tp->mac_version == RTL_GIGA_MAC_VER_35)
4563 		mc_filter[1] = mc_filter[0] = 0xffffffff;
4565 	RTL_W32(MAR0 + 4, mc_filter[1]);
4566 	RTL_W32(MAR0 + 0, mc_filter[0]);
4568 	RTL_W32(RxConfig, tmp);
/* Bring up an 8169-family (pre-8168) chip: unlock config registers,
 * enable TX/RX, set thresholds, C+ command quirks, descriptor rings,
 * RX filtering, then relock.  Enable order differs between VER_01..04
 * (enable before TxConfig) and later versions (enable after).
 */
4571 static void rtl_hw_start_8169(struct net_device *dev)
4573 	struct rtl8169_private *tp = netdev_priv(dev);
4574 	void __iomem *ioaddr = tp->mmio_addr;
4575 	struct pci_dev *pdev = tp->pci_dev;
4577 	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
4578 		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
4579 		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4582 	RTL_W8(Cfg9346, Cfg9346_Unlock);
4583 	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
4584 	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4585 	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
4586 	    tp->mac_version == RTL_GIGA_MAC_VER_04)
4587 		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
4591 	RTL_W8(EarlyTxThres, NoEarlyTx);
4593 	rtl_set_rx_max_size(ioaddr, rx_buf_sz);
4595 	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
4596 	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4597 	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
4598 	    tp->mac_version == RTL_GIGA_MAC_VER_04)
4599 		rtl_set_rx_tx_config_registers(tp);
4601 	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
4603 	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4604 	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
4605 		dprintk("Set MAC Reg C+CR Offset 0xE0. "
4606 			"Bit-3 and bit-14 MUST be 1\n");
4607 		tp->cp_cmd |= (1 << 14);
4610 	RTL_W16(CPlusCmd, tp->cp_cmd);
4612 	rtl8169_set_magic_reg(ioaddr, tp->mac_version);
4615 	 * Undocumented corner. Supposedly:
4616 	 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
	/* Interrupt mitigation disabled (all fields zero). */
4618 	RTL_W16(IntrMitigate, 0x0000);
4620 	rtl_set_rx_tx_desc_registers(tp, ioaddr);
4622 	if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
4623 	    tp->mac_version != RTL_GIGA_MAC_VER_02 &&
4624 	    tp->mac_version != RTL_GIGA_MAC_VER_03 &&
4625 	    tp->mac_version != RTL_GIGA_MAC_VER_04) {
4626 		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
4627 		rtl_set_rx_tx_config_registers(tp);
4630 	RTL_W8(Cfg9346, Cfg9346_Lock);
4632 	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
4635 	RTL_W32(RxMissed, 0);
4637 	rtl_set_rx_mode(dev);
4639 	/* no early-rx interrupts */
4640 	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
/* CSI (PCIe config space indirect) write dispatcher; no-op when the chip
 * has no csi_ops.write hook installed.
 */
4643 static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4645 	if (tp->csi_ops.write)
4646 		tp->csi_ops.write(tp, addr, value);
/* CSI read dispatcher; returns all-ones when no read hook is installed,
 * mimicking a failed PCI config read.
 */
4649 static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4651 	return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
/* Read-modify-write CSI register 0x070c: keep the low 24 bits, replace
 * the top byte with the caller-supplied bits.
 */
4654 static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
4658 	csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
4659 	rtl_csi_write(tp, 0x070c, csi | bits);
/* Set CSI 0x070c top byte to 0x17 (vendor magic; exact meaning
 * undocumented). */
4662 static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
4664 	rtl_csi_access_enable(tp, 0x17000000);
/* Set CSI 0x070c top byte to 0x27 (vendor magic; exact meaning
 * undocumented). */
4667 static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
4669 	rtl_csi_access_enable(tp, 0x27000000);
/* Poll condition: CSIAR busy/done flag, used by the rtl_udelay_loop_wait
 * helpers below to wait for a CSI transaction to complete.
 */
4672 DECLARE_RTL_COND(rtl_csiar_cond)
4674 	void __iomem *ioaddr = tp->mmio_addr;
4676 	return RTL_R32(CSIAR) & CSIAR_FLAG;
/* Standard CSI write: load CSIDR with the value, kick CSIAR with the
 * write command + address, then poll until the flag drops (10 us step,
 * 100 tries).  Timeout is silently ignored.
 */
4679 static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
4681 	void __iomem *ioaddr = tp->mmio_addr;
4683 	RTL_W32(CSIDR, value);
4684 	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4685 		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4687 	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
/* Standard CSI read: kick CSIAR with the address, poll for the done flag
 * going high, then fetch CSIDR.  Returns all-ones on timeout.
 */
4690 static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
4692 	void __iomem *ioaddr = tp->mmio_addr;
4694 	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
4695 		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4697 	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
4698 		RTL_R32(CSIDR) : ~0;
/* 8402/8411 variant of the CSI write; same protocol as r8169_csi_write
 * plus an extra function-select field in CSIAR (partially elided here).
 */
4701 static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
4703 	void __iomem *ioaddr = tp->mmio_addr;
4705 	RTL_W32(CSIDR, value);
4706 	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4707 		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
4710 	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
/* 8402/8411 variant of the CSI read: adds CSIAR_FUNC_NIC to target the
 * NIC function.  Returns all-ones on timeout.
 */
4713 static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
4715 	void __iomem *ioaddr = tp->mmio_addr;
4717 	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
4718 		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4720 	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
4721 		RTL_R32(CSIDR) : ~0;
/* Select the CSI accessor pair for this chip: old PCI-era MACs
 * (VER_01..17) get none (hooks left unset; rtl_csi_read then yields ~0),
 * 8402/8411 (VER_37/38) get the FUNC_NIC variants, everything else the
 * generic r8169 accessors.
 */
4724 static void rtl_init_csi_ops(struct rtl8169_private *tp)
4726 	struct csi_ops *ops = &tp->csi_ops;
4728 	switch (tp->mac_version) {
4729 	case RTL_GIGA_MAC_VER_01:
4730 	case RTL_GIGA_MAC_VER_02:
4731 	case RTL_GIGA_MAC_VER_03:
4732 	case RTL_GIGA_MAC_VER_04:
4733 	case RTL_GIGA_MAC_VER_05:
4734 	case RTL_GIGA_MAC_VER_06:
4735 	case RTL_GIGA_MAC_VER_10:
4736 	case RTL_GIGA_MAC_VER_11:
4737 	case RTL_GIGA_MAC_VER_12:
4738 	case RTL_GIGA_MAC_VER_13:
4739 	case RTL_GIGA_MAC_VER_14:
4740 	case RTL_GIGA_MAC_VER_15:
4741 	case RTL_GIGA_MAC_VER_16:
4742 	case RTL_GIGA_MAC_VER_17:
4747 	case RTL_GIGA_MAC_VER_37:
4748 	case RTL_GIGA_MAC_VER_38:
4749 		ops->write = r8402_csi_write;
4750 		ops->read = r8402_csi_read;
4754 		ops->write = r8169_csi_write;
4755 		ops->read = r8169_csi_read;
4761 unsigned int offset;
/* Apply a table of EPHY fixups: for each entry, clear the bits in
 * e->mask and set the bits in e->bits at offset e->offset (loop header
 * elided in this dump).
 */
4766 static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
4772 		w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
4773 		rtl_ephy_write(tp, e->offset, w);
/* Clear the PCIe Link Control CLKREQ enable bit for this device. */
4778 static void rtl_disable_clock_request(struct pci_dev *pdev)
4780 	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
4781 				   PCI_EXP_LNKCTL_CLKREQ_EN);
/* Set the PCIe Link Control CLKREQ enable bit for this device. */
4784 static void rtl_enable_clock_request(struct pci_dev *pdev)
4786 	pcie_capability_set_word(pdev, PCI_EXP_LNKCTL,
4787 				 PCI_EXP_LNKCTL_CLKREQ_EN);
4790 #define R8168_CPCMD_QUIRK_MASK (\
/* 8168B/B-rev setup: disable beacon, mask out the C+ command quirk bits,
 * and tune the PCIe max read request size with no-snoop enabled.
 */
4801 static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
4803 	void __iomem *ioaddr = tp->mmio_addr;
4804 	struct pci_dev *pdev = tp->pci_dev;
4806 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4808 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4810 	rtl_tx_performance_tweak(pdev,
4811 		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
/* 8168B(e/f) setup: base 8168bb init, then raise the TX packet size and
 * clear bit 0 of Config4 (undocumented chip quirk).
 */
4814 static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
4816 	void __iomem *ioaddr = tp->mmio_addr;
4818 	rtl_hw_start_8168bb(tp);
4820 	RTL_W8(MaxTxPacketSize, TxPacketMax);
4822 	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
/* Common 8168C+ core init: enable link-speed power-down, disable beacon,
 * tune PCIe read request size, drop CLKREQ, and mask C+ quirk bits.
 */
4825 static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
4827 	void __iomem *ioaddr = tp->mmio_addr;
4828 	struct pci_dev *pdev = tp->pci_dev;
4830 	RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
4832 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4834 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4836 	rtl_disable_clock_request(pdev);
4838 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168CP rev 1: enable CSI access, apply the EPHY fixup table, then run
 * the common 8168cp core init.
 */
4841 static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
4843 	static const struct ephy_info e_info_8168cp[] = {
4844 		{ 0x01, 0,	0x0001 },
4845 		{ 0x02, 0x0800,	0x1000 },
4846 		{ 0x03, 0,	0x0042 },
4847 		{ 0x06, 0x0080,	0x0000 },
4851 	rtl_csi_access_enable_2(tp);
4853 	rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
4855 	__rtl_hw_start_8168cp(tp);
/* 8168CP rev 2: like the common core init but without Speed_down and
 * without disabling CLKREQ.
 */
4858 static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
4860 	void __iomem *ioaddr = tp->mmio_addr;
4861 	struct pci_dev *pdev = tp->pci_dev;
4863 	rtl_csi_access_enable_2(tp);
4865 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4867 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4869 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168CP rev 3: rev-2 style init plus a DBG_REG magic write (0x20) and
 * an explicit TX packet size limit.
 */
4872 static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
4874 	void __iomem *ioaddr = tp->mmio_addr;
4875 	struct pci_dev *pdev = tp->pci_dev;
4877 	rtl_csi_access_enable_2(tp);
4879 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4882 	RTL_W8(DBG_REG, 0x20);
4884 	RTL_W8(MaxTxPacketSize, TxPacketMax);
4886 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4888 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168C rev 1: NAK workaround bits in DBG_REG, EPHY fixups, then the
 * common 8168cp core init.
 */
4891 static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
4893 	void __iomem *ioaddr = tp->mmio_addr;
4894 	static const struct ephy_info e_info_8168c_1[] = {
4895 		{ 0x02, 0x0800,	0x1000 },
4896 		{ 0x03, 0,	0x0002 },
4897 		{ 0x06, 0x0080,	0x0000 }
4900 	rtl_csi_access_enable_2(tp);
4902 	RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
4904 	rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
4906 	__rtl_hw_start_8168cp(tp);
/* 8168C rev 2: EPHY fixups plus the common 8168cp core init. */
4909 static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
4911 	static const struct ephy_info e_info_8168c_2[] = {
4912 		{ 0x01, 0,	0x0001 },
4913 		{ 0x03, 0x0400,	0x0220 }
4916 	rtl_csi_access_enable_2(tp);
4918 	rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
4920 	__rtl_hw_start_8168cp(tp);
/* 8168C rev 3: identical init to rev 2. */
4923 static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
4925 	rtl_hw_start_8168c_2(tp);
/* 8168C rev 4: core init only, no EPHY fixup table needed. */
4928 static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
4930 	rtl_csi_access_enable_2(tp);
4932 	__rtl_hw_start_8168cp(tp);
/* 8168D setup: CSI access, CLKREQ off, TX packet size, PCIe read request
 * tuning, and C+ quirk mask clear.
 */
4935 static void rtl_hw_start_8168d(struct rtl8169_private *tp)
4937 	void __iomem *ioaddr = tp->mmio_addr;
4938 	struct pci_dev *pdev = tp->pci_dev;
4940 	rtl_csi_access_enable_2(tp);
4942 	rtl_disable_clock_request(pdev);
4944 	RTL_W8(MaxTxPacketSize, TxPacketMax);
4946 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4948 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168DP setup: uses CSI access variant 1 and leaves the C+ quirk mask
 * untouched, unlike plain 8168D.
 */
4951 static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
4953 	void __iomem *ioaddr = tp->mmio_addr;
4954 	struct pci_dev *pdev = tp->pci_dev;
4956 	rtl_csi_access_enable_1(tp);
4958 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4960 	RTL_W8(MaxTxPacketSize, TxPacketMax);
4962 	rtl_disable_clock_request(pdev);
/* 8168D rev 4 setup with an open-coded EPHY fixup loop (it does not use
 * rtl_ephy_init), then CLKREQ re-enabled.
 */
4965 static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
4967 	void __iomem *ioaddr = tp->mmio_addr;
4968 	struct pci_dev *pdev = tp->pci_dev;
4969 	static const struct ephy_info e_info_8168d_4[] = {
4971 		{ 0x19, 0x20, 0x50 },
4976 	rtl_csi_access_enable_1(tp);
4978 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4980 	RTL_W8(MaxTxPacketSize, TxPacketMax);
4982 	for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
4983 		const struct ephy_info *e = e_info_8168d_4 + i;
4986 		w = rtl_ephy_read(tp, e->offset);
		/* NOTE(review): writes go to fixed offset 0x03 instead of
		 * e->offset, and the mask is applied non-inverted
		 * ((w & e->mask), vs rtl_ephy_init's (w & ~e->mask)).
		 * This matches historical upstream code but looks like a
		 * long-standing quirk/bug — confirm against vendor init
		 * sequence before changing. */
4987 		rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits);
4990 	rtl_enable_clock_request(pdev);
/* 8168E rev 1 setup: large EPHY fixup table, PCIe tuning, CLKREQ off,
 * TX PLA FIFO pointer reset via MISC, and SPI disabled in Config5.
 */
4993 static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
4995 	void __iomem *ioaddr = tp->mmio_addr;
4996 	struct pci_dev *pdev = tp->pci_dev;
4997 	static const struct ephy_info e_info_8168e_1[] = {
4998 		{ 0x00, 0x0200,	0x0100 },
4999 		{ 0x00, 0x0000,	0x0004 },
5000 		{ 0x06, 0x0002,	0x0001 },
5001 		{ 0x06, 0x0000,	0x0030 },
5002 		{ 0x07, 0x0000,	0x2000 },
5003 		{ 0x00, 0x0000,	0x0020 },
5004 		{ 0x03, 0x5800,	0x2000 },
5005 		{ 0x03, 0x0000,	0x0001 },
5006 		{ 0x01, 0x0800,	0x1000 },
5007 		{ 0x07, 0x0000,	0x4000 },
5008 		{ 0x1e, 0x0000,	0x2000 },
5009 		{ 0x19, 0xffff,	0xfe6c },
5010 		{ 0x0a, 0x0000,	0x0040 }
5013 	rtl_csi_access_enable_2(tp);
5015 	rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
5017 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5019 	RTL_W8(MaxTxPacketSize, TxPacketMax);
5021 	rtl_disable_clock_request(pdev);
5023 	/* Reset tx FIFO pointer */
5024 	RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
5025 	RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);
5027 	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
/* 8168E rev 2 (8168evl) setup: EPHY fixups, ERI register programming for
 * FIFO/flow parameters, early-size TX threshold, auto FIFO, EEE LED
 * frequency, and power-management bits (PFM/PWM/ASPM/ClkReq).
 */
5030 static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
5032 	void __iomem *ioaddr = tp->mmio_addr;
5033 	struct pci_dev *pdev = tp->pci_dev;
5034 	static const struct ephy_info e_info_8168e_2[] = {
5035 		{ 0x09, 0x0000,	0x0080 },
5036 		{ 0x19, 0x0000,	0x0224 }
5039 	rtl_csi_access_enable_1(tp);
5041 	rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
5043 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
	/* ERI values are vendor magic; offsets 0xc8/0xe8 carry FIFO
	 * thresholds, 0xd0 flow-control timing (per vendor drivers). */
5045 	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5046 	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5047 	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
5048 	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5049 	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
5050 	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
5051 	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5052 	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
5054 	RTL_W8(MaxTxPacketSize, EarlySize);
5056 	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5057 	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5059 	/* Adjust EEE LED frequency */
5060 	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5062 	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5063 	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
5064 	RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
5065 	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
/* Shared 8168F core setup: ERI programming (note the 0xdc bit-0 toggle
 * off-then-on sequence), early TX size, auto FIFO, and PM bits.  Callers
 * (8168f_1, 8411) apply their own EPHY tables on top.
 */
5068 static void rtl_hw_start_8168f(struct rtl8169_private *tp)
5070 	void __iomem *ioaddr = tp->mmio_addr;
5071 	struct pci_dev *pdev = tp->pci_dev;
5073 	rtl_csi_access_enable_2(tp);
5075 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5077 	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5078 	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5079 	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
5080 	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	/* Pulse ERI 0xdc bit 0 low then high. */
5081 	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5082 	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5083 	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5084 	rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5085 	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
5086 	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
5088 	RTL_W8(MaxTxPacketSize, EarlySize);
5090 	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5091 	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5092 	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5093 	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN | FORCE_CLK);
5094 	RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
5095 	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
/* 8168F rev 1: common 8168f core, EPHY fixups, ERI 0x0d4 tweak, and EEE
 * LED frequency adjustment.
 */
5098 static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
5100 	void __iomem *ioaddr = tp->mmio_addr;
5101 	static const struct ephy_info e_info_8168f_1[] = {
5102 		{ 0x06, 0x00c0,	0x0020 },
5103 		{ 0x08, 0x0001,	0x0002 },
5104 		{ 0x09, 0x0000,	0x0080 },
5105 		{ 0x19, 0x0000,	0x0224 }
5108 	rtl_hw_start_8168f(tp);
5110 	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5112 	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
5114 	/* Adjust EEE LED frequency */
5115 	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
/* 8411: same common 8168f core as 8168F rev 1 but with its own EPHY
 * table and a different ERI 0x0d4 mask (clears nothing vs 0xff00).
 */
5118 static void rtl_hw_start_8411(struct rtl8169_private *tp)
5120 	static const struct ephy_info e_info_8168f_1[] = {
5121 		{ 0x06, 0x00c0,	0x0020 },
5122 		{ 0x0f, 0xffff,	0x5200 },
5123 		{ 0x1e, 0x0000,	0x4000 },
5124 		{ 0x19, 0x0000,	0x0224 }
5127 	rtl_hw_start_8168f(tp);
5129 	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5131 	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
/* 8168G rev 1 setup: ERI FIFO programming first, then CSI access, PCIe
 * tuning, the 0xdc bit toggle, TX/RX enable with RXDV gating cleared,
 * PM bits, and EEE LED frequency.
 */
5134 static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5136 	void __iomem *ioaddr = tp->mmio_addr;
5137 	struct pci_dev *pdev = tp->pci_dev;
5139 	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
5140 	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
5141 	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
5142 	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5144 	rtl_csi_access_enable_1(tp);
5146 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5148 	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5149 	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5151 	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5152 	RTL_W32(MISC, (RTL_R32(MISC) | FORCE_CLK) & ~RXDV_GATED_EN);
5153 	RTL_W8(MaxTxPacketSize, EarlySize);
5154 	RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5155 	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5157 	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5158 	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5160 	/* Adjust EEE LED frequency */
5161 	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5163 	rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
/* Top-level 8168 bring-up: common register setup (thresholds, C+ command,
 * interrupt mitigation, descriptor rings, RX mode, TxConfig), then
 * dispatch to the per-revision init routine, and finally enable TX/RX
 * and relock the config registers.
 */
5166 static void rtl_hw_start_8168(struct net_device *dev)
5168 	struct rtl8169_private *tp = netdev_priv(dev);
5169 	void __iomem *ioaddr = tp->mmio_addr;
5171 	RTL_W8(Cfg9346, Cfg9346_Unlock);
5173 	RTL_W8(MaxTxPacketSize, TxPacketMax);
5175 	rtl_set_rx_max_size(ioaddr, rx_buf_sz);
5177 	tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
5179 	RTL_W16(CPlusCmd, tp->cp_cmd);
	/* Non-zero IntrMitigate: moderate interrupt rate on 8168. */
5181 	RTL_W16(IntrMitigate, 0x5151);
5183 	/* Work around for RxFIFO overflow. */
5184 	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
5185 		tp->event_slow |= RxFIFOOver | PCSTimeout;
5186 		tp->event_slow &= ~RxOverflow;
5189 	rtl_set_rx_tx_desc_registers(tp, ioaddr);
5191 	rtl_set_rx_mode(dev);
5193 	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
5194 		(InterFrameGap << TxInterFrameGapShift));
5198 	switch (tp->mac_version) {
5199 	case RTL_GIGA_MAC_VER_11:
5200 		rtl_hw_start_8168bb(tp);
5203 	case RTL_GIGA_MAC_VER_12:
5204 	case RTL_GIGA_MAC_VER_17:
5205 		rtl_hw_start_8168bef(tp);
5208 	case RTL_GIGA_MAC_VER_18:
5209 		rtl_hw_start_8168cp_1(tp);
5212 	case RTL_GIGA_MAC_VER_19:
5213 		rtl_hw_start_8168c_1(tp);
5216 	case RTL_GIGA_MAC_VER_20:
5217 		rtl_hw_start_8168c_2(tp);
5220 	case RTL_GIGA_MAC_VER_21:
5221 		rtl_hw_start_8168c_3(tp);
5224 	case RTL_GIGA_MAC_VER_22:
5225 		rtl_hw_start_8168c_4(tp);
5228 	case RTL_GIGA_MAC_VER_23:
5229 		rtl_hw_start_8168cp_2(tp);
5232 	case RTL_GIGA_MAC_VER_24:
5233 		rtl_hw_start_8168cp_3(tp);
5236 	case RTL_GIGA_MAC_VER_25:
5237 	case RTL_GIGA_MAC_VER_26:
5238 	case RTL_GIGA_MAC_VER_27:
5239 		rtl_hw_start_8168d(tp);
5242 	case RTL_GIGA_MAC_VER_28:
5243 		rtl_hw_start_8168d_4(tp);
5246 	case RTL_GIGA_MAC_VER_31:
5247 		rtl_hw_start_8168dp(tp);
5250 	case RTL_GIGA_MAC_VER_32:
5251 	case RTL_GIGA_MAC_VER_33:
5252 		rtl_hw_start_8168e_1(tp);
5254 	case RTL_GIGA_MAC_VER_34:
5255 		rtl_hw_start_8168e_2(tp);
5258 	case RTL_GIGA_MAC_VER_35:
5259 	case RTL_GIGA_MAC_VER_36:
5260 		rtl_hw_start_8168f_1(tp);
5263 	case RTL_GIGA_MAC_VER_38:
5264 		rtl_hw_start_8411(tp);
5267 	case RTL_GIGA_MAC_VER_40:
5268 	case RTL_GIGA_MAC_VER_41:
5269 		rtl_hw_start_8168g_1(tp);
	/* default: unknown revision — log and fall through to generic
	 * enable below. */
5273 		printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
5274 		       dev->name, tp->mac_version);
5278 	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5280 	RTL_W8(Cfg9346, Cfg9346_Lock);
	/* no early-rx interrupts */
5282 	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
5285 #define R810X_CPCMD_QUIRK_MASK (\
/* 8102E rev 1 (8101 family): NAK fix in DBG_REG, PCIe tuning, config
 * register setup including a LED conflict workaround (LEDS0 cleared if
 * both LED bits are set), then the EPHY fixup table.
 */
5296 static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
5298 	void __iomem *ioaddr = tp->mmio_addr;
5299 	struct pci_dev *pdev = tp->pci_dev;
5300 	static const struct ephy_info e_info_8102e_1[] = {
5301 		{ 0x01,	0, 0x6e65 },
5302 		{ 0x02,	0, 0x091f },
5303 		{ 0x03,	0, 0xc2f9 },
5304 		{ 0x06,	0, 0xafb5 },
5305 		{ 0x07,	0, 0x0e00 },
5306 		{ 0x19,	0, 0xec80 },
5307 		{ 0x01,	0, 0x2e65 },
5312 	rtl_csi_access_enable_2(tp);
5314 	RTL_W8(DBG_REG, FIX_NAK_1);
5316 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5319 		LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
5320 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
5322 	cfg1 = RTL_R8(Config1);
5323 	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
5324 		RTL_W8(Config1, cfg1 & ~LEDS0);
5326 	rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
/* 8102E rev 2: minimal init — CSI access, PCIe tuning, Config1/Config3
 * base settings, no EPHY table.
 */
5329 static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
5331 	void __iomem *ioaddr = tp->mmio_addr;
5332 	struct pci_dev *pdev = tp->pci_dev;
5334 	rtl_csi_access_enable_2(tp);
5336 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5338 	RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
5339 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
/* 8102E rev 3: rev-2 init plus a single EPHY register 0x03 write. */
5342 static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
5344 	rtl_hw_start_8102e_2(tp);
5346 	rtl_ephy_write(tp, 0x03, 0xc2f9);
/* 8105E rev 1: ASPM-exit tweak in FuncEvent, early tally counter off,
 * OOB/NDP MCU bits, PM bits, then the EPHY fixup table.
 */
5349 static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
5351 	void __iomem *ioaddr = tp->mmio_addr;
5352 	static const struct ephy_info e_info_8105e_1[] = {
5353 		{ 0x07,	0, 0x4000 },
5354 		{ 0x19,	0, 0x0200 },
5355 		{ 0x19,	0, 0x0020 },
5356 		{ 0x1e,	0, 0x2000 },
5357 		{ 0x03,	0, 0x0001 },
5358 		{ 0x19,	0, 0x0100 },
5359 		{ 0x19,	0, 0x0004 },
5363 	/* Force LAN exit from ASPM if Rx/Tx are not idle */
5364 	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5366 	/* Disable Early Tally Counter */
5367 	RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
5369 	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5370 	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5371 	RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5372 	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5373 	RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
5375 	rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
/* 8105E rev 2: rev-1 init plus setting bit 15 of EPHY register 0x1e. */
5378 static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
5380 	rtl_hw_start_8105e_1(tp);
5381 	rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
/* 8402 setup: ASPM-exit tweak, auto FIFO, OOB clear, PM bits, the EPHY
 * fixup table, PCIe tuning, then the ERI FIFO/flow programming sequence
 * (incl. the 0xdc bit-0 toggle).
 */
5384 static void rtl_hw_start_8402(struct rtl8169_private *tp)
5386 	void __iomem *ioaddr = tp->mmio_addr;
5387 	static const struct ephy_info e_info_8402[] = {
5388 		{ 0x19,	0xffff, 0xff64 },
5392 	rtl_csi_access_enable_2(tp);
5394 	/* Force LAN exit from ASPM if Rx/Tx are not idle */
5395 	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5397 	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5398 	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5399 	RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5400 	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5401 	RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
5403 	rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
5405 	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
5407 	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
5408 	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
5409 	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5410 	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5411 	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5412 	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5413 	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
/* 8106 setup: ASPM-exit tweak, MISC flags (LAN disable + forced clock,
 * early tally off), PM bits, OOB/NDP MCU bits, and PFM disabled.
 */
5416 static void rtl_hw_start_8106(struct rtl8169_private *tp)
5418 	void __iomem *ioaddr = tp->mmio_addr;
5420 	/* Force LAN exit from ASPM if Rx/Tx are not idle */
5421 	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5424 		(RTL_R32(MISC) | DISABLE_LAN_EN | FORCE_CLK) & ~EARLY_TALLY_EN);
5425 	RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5426 	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5427 	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5428 	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
/* Top-level 8101 (fast-ethernet) bring-up: event mask and no-snoop
 * quirks first, then the per-revision init inside the Cfg9346 unlock
 * window, then the common threshold/C+/ring/RX-mode setup, and finally
 * TX/RX enable.
 */
5431 static void rtl_hw_start_8101(struct net_device *dev)
5433 	struct rtl8169_private *tp = netdev_priv(dev);
5434 	void __iomem *ioaddr = tp->mmio_addr;
5435 	struct pci_dev *pdev = tp->pci_dev;
	/* Newer 8101s can spuriously assert RxFIFOOver — ignore it. */
5437 	if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
5438 		tp->event_slow &= ~RxFIFOOver;
5440 	if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
5441 	    tp->mac_version == RTL_GIGA_MAC_VER_16)
5442 		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL,
5443 					 PCI_EXP_DEVCTL_NOSNOOP_EN);
5445 	RTL_W8(Cfg9346, Cfg9346_Unlock);
5447 	switch (tp->mac_version) {
5448 	case RTL_GIGA_MAC_VER_07:
5449 		rtl_hw_start_8102e_1(tp);
5452 	case RTL_GIGA_MAC_VER_08:
5453 		rtl_hw_start_8102e_3(tp);
5456 	case RTL_GIGA_MAC_VER_09:
5457 		rtl_hw_start_8102e_2(tp);
5460 	case RTL_GIGA_MAC_VER_29:
5461 		rtl_hw_start_8105e_1(tp);
5463 	case RTL_GIGA_MAC_VER_30:
5464 		rtl_hw_start_8105e_2(tp);
5467 	case RTL_GIGA_MAC_VER_37:
5468 		rtl_hw_start_8402(tp);
5471 	case RTL_GIGA_MAC_VER_39:
5472 		rtl_hw_start_8106(tp);
5476 	RTL_W8(Cfg9346, Cfg9346_Lock);
5478 	RTL_W8(MaxTxPacketSize, TxPacketMax);
5480 	rtl_set_rx_max_size(ioaddr, rx_buf_sz);
5482 	tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
5483 	RTL_W16(CPlusCmd, tp->cp_cmd);
	/* Interrupt mitigation disabled on 8101. */
5485 	RTL_W16(IntrMitigate, 0x0000);
5487 	rtl_set_rx_tx_desc_registers(tp, ioaddr);
5489 	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5490 	rtl_set_rx_tx_config_registers(tp);
5494 	rtl_set_rx_mode(dev);
	/* no early-rx interrupts */
5496 	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
/* ndo_change_mtu: reject MTUs outside [ETH_ZLEN, chip jumbo max], toggle
 * jumbo frame support around the ETH_DATA_LEN boundary, and refresh
 * netdev features.  (Error-return and dev->mtu assignment lines are
 * elided in this dump.)
 */
5499 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
5501 	struct rtl8169_private *tp = netdev_priv(dev);
5503 	if (new_mtu < ETH_ZLEN ||
5504 	    new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
5507 	if (new_mtu > ETH_DATA_LEN)
5508 		rtl_hw_jumbo_enable(tp);
5510 		rtl_hw_jumbo_disable(tp);
5513 	netdev_update_features(dev);
/* Poison an RX descriptor so the NIC will never DMA into it: bogus
 * address, DescOwn cleared (host-owned).
 */
5518 static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
5520 	desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
5521 	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
/* Unmap and release one RX data buffer, then poison its descriptor.
 * (The kfree/NULL-store lines are elided in this dump.)
 */
5524 static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
5525 				     void **data_buff, struct RxDesc *desc)
5527 	dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
5532 	rtl8169_make_unusable_by_asic(desc);
/* Hand an RX descriptor back to the NIC: set DescOwn and the buffer
 * size, preserving the RingEnd marker.
 */
5535 static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
5537 	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
5539 	desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
/* Install a DMA address in an RX descriptor and give it to the NIC. */
5542 static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
5545 	desc->addr = cpu_to_le64(mapping);
5547 	rtl8169_mark_to_asic(desc, rx_buf_sz);
/* Round a buffer pointer up to the next 16-byte boundary (RX DMA
 * alignment requirement).
 */
5550 static inline void *rtl8169_align(void *data)
5552 	return (void *)ALIGN((long)data, 16);
/* Allocate one RX data buffer on the device's NUMA node, retrying with
 * +15 bytes of slack if the first allocation is not 16-byte aligned,
 * map it for DMA and install it in the descriptor.  Error unwinding
 * (kfree on mapping failure, NULL returns) is elided in this dump.
 */
5555 static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
5556 					     struct RxDesc *desc)
5560 	struct device *d = &tp->pci_dev->dev;
5561 	struct net_device *dev = tp->dev;
5562 	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
5564 	data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
5568 	if (rtl8169_align(data) != data) {
		/* Over-allocate so the aligned pointer still has
		 * rx_buf_sz bytes available. */
5570 		data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
5575 	mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
5577 	if (unlikely(dma_mapping_error(d, mapping))) {
5578 		if (net_ratelimit())
5579 			netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
5583 	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
/* Free every allocated RX buffer in the ring (NULL slots are skipped). */
5591 static void rtl8169_rx_clear(struct rtl8169_private *tp)
5595 	for (i = 0; i < NUM_RX_DESC; i++) {
5596 		if (tp->Rx_databuff[i]) {
5597 			rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
5598 					    tp->RxDescArray + i);
/* Set RingEnd on a descriptor so the NIC wraps to the ring start. */
5603 static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
5605 	desc->opts1 |= cpu_to_le32(RingEnd);
/* Populate every empty RX ring slot with a fresh buffer; on allocation
 * failure the partially-filled ring is torn down via rtl8169_rx_clear
 * (error-path labels elided in this dump).  Marks the last descriptor
 * as ring end on success.
 */
5608 static int rtl8169_rx_fill(struct rtl8169_private *tp)
5612 	for (i = 0; i < NUM_RX_DESC; i++) {
		/* Slot already has a buffer — keep it. */
5615 		if (tp->Rx_databuff[i])
5618 		data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
5620 			rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
5623 		tp->Rx_databuff[i] = data;
5626 	rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
5630 	rtl8169_rx_clear(tp);
/* Reset ring indexes, zero the TX bookkeeping and RX buffer arrays, and
 * fill the RX ring.  Returns rtl8169_rx_fill's status.
 */
5634 static int rtl8169_init_ring(struct net_device *dev)
5636 	struct rtl8169_private *tp = netdev_priv(dev);
5638 	rtl8169_init_ring_indexes(tp);
5640 	memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
5641 	memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));
5643 	return rtl8169_rx_fill(tp);
/* Unmap one TX buffer and scrub its descriptor/bookkeeping (the
 * descriptor-clearing lines are elided in this dump).
 */
5646 static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
5647 				 struct TxDesc *desc)
5649 	unsigned int len = tx_skb->len;
5651 	dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
/* Release n TX ring entries starting at 'start' (indices wrap modulo
 * NUM_TX_DESC): unmap each mapped buffer and count a dropped packet for
 * each entry that still carried an skb.
 */
5659 static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
5664 	for (i = 0; i < n; i++) {
5665 		unsigned int entry = (start + i) % NUM_TX_DESC;
5666 		struct ring_info *tx_skb = tp->tx_skb + entry;
5667 		unsigned int len = tx_skb->len;
5670 			struct sk_buff *skb = tx_skb->skb;
5672 			rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
5673 					     tp->TxDescArray + entry);
5675 				tp->dev->stats.tx_dropped++;
/* Drop the entire TX ring and reset the cur/dirty indexes to zero. */
5683 static void rtl8169_tx_clear(struct rtl8169_private *tp)
5685 	rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
5686 	tp->cur_tx = tp->dirty_tx = 0;
/* Full device reset path (run from the driver workqueue): quiesce NAPI
 * and the TX queue, hard-reset the chip, give all RX descriptors back to
 * the NIC, drop any in-flight TX packets, re-init the rings, then bring
 * NAPI/queue back up and refresh link status.
 */
5689 static void rtl_reset_work(struct rtl8169_private *tp)
5691 	struct net_device *dev = tp->dev;
5694 	napi_disable(&tp->napi);
5695 	netif_stop_queue(dev);
	/* Wait for concurrent softirq users of the rings to finish. */
5696 	synchronize_sched();
5698 	rtl8169_hw_reset(tp);
5700 	for (i = 0; i < NUM_RX_DESC; i++)
5701 		rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
5703 	rtl8169_tx_clear(tp);
5704 	rtl8169_init_ring_indexes(tp);
5706 	napi_enable(&tp->napi);
5708 	netif_wake_queue(dev);
5709 	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
/* ndo_tx_timeout: defer a full chip reset to the driver workqueue. */
5712 static void rtl8169_tx_timeout(struct net_device *dev)
5714 	struct rtl8169_private *tp = netdev_priv(dev);
5716 	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
/* Map each skb fragment into consecutive TX descriptors following
 * tp->cur_tx.  DescOwn is deliberately NOT set here — the caller sets it
 * on the first descriptor last, after a write barrier.  On DMA mapping
 * failure the already-mapped fragments are unwound (error labels elided
 * in this dump).  LastFrag and the skb pointer go on the final fragment.
 */
5719 static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
5722 	struct skb_shared_info *info = skb_shinfo(skb);
5723 	unsigned int cur_frag, entry;
5724 	struct TxDesc * uninitialized_var(txd);
5725 	struct device *d = &tp->pci_dev->dev;
5728 	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
5729 		const skb_frag_t *frag = info->frags + cur_frag;
5734 		entry = (entry + 1) % NUM_TX_DESC;
5736 		txd = tp->TxDescArray + entry;
5737 		len = skb_frag_size(frag);
5738 		addr = skb_frag_address(frag);
5739 		mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
5740 		if (unlikely(dma_mapping_error(d, mapping))) {
5741 			if (net_ratelimit())
5742 				netif_err(tp, drv, tp->dev,
5743 					  "Failed to map TX fragments DMA!\n");
5747 		/* Anti gcc 2.95.3 bugware (sic) */
5748 		status = opts[0] | len |
5749 			(RingEnd * !((entry + 1) % NUM_TX_DESC));
5751 		txd->opts1 = cpu_to_le32(status);
5752 		txd->opts2 = cpu_to_le32(opts[1]);
5753 		txd->addr = cpu_to_le64(mapping);
5755 		tp->tx_skb[entry].len = len;
	/* Only the last fragment records the skb and carries LastFrag. */
5759 		tp->tx_skb[entry].skb = skb;
5760 		txd->opts1 |= cpu_to_le32(LastFrag);
5766 	rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
/* Fill the TX descriptor offload bits: large-send MSS when GSO is in
 * use, otherwise TCP/UDP checksum offload flags for CHECKSUM_PARTIAL
 * IPv4 packets.  Field placement depends on the descriptor version
 * (info->opts_offset / mss_shift).  The mss-nonzero guard line is
 * elided in this dump.
 */
5770 static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
5771 				    struct sk_buff *skb, u32 *opts)
5773 	const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
5774 	u32 mss = skb_shinfo(skb)->gso_size;
5775 	int offset = info->opts_offset;
5779 		opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
5780 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5781 		const struct iphdr *ip = ip_hdr(skb);
5783 		if (ip->protocol == IPPROTO_TCP)
5784 			opts[offset] |= info->checksum.tcp;
5785 		else if (ip->protocol == IPPROTO_UDP)
5786 			opts[offset] |= info->checksum.udp;
/* ndo_start_xmit: map the linear head, hand fragments to
 * rtl8169_xmit_frags, fill VLAN/offload opts, publish the descriptor by
 * setting DescOwn last (after barriers, elided here), kick the NIC via
 * TxPoll, and stop the queue when fewer than MAX_SKB_FRAGS slots remain.
 * Error-path labels (unmap/drop/busy) are elided in this dump.
 */
5792 static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
5793 				      struct net_device *dev)
5795 	struct rtl8169_private *tp = netdev_priv(dev);
5796 	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
5797 	struct TxDesc *txd = tp->TxDescArray + entry;
5798 	void __iomem *ioaddr = tp->mmio_addr;
5799 	struct device *d = &tp->pci_dev->dev;
	/* Should not happen: the queue is stopped before the ring fills. */
5805 	if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
5806 		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
	/* Descriptor still owned by the NIC — cannot reuse it yet. */
5810 	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
5813 	len = skb_headlen(skb);
5814 	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
5815 	if (unlikely(dma_mapping_error(d, mapping))) {
5816 		if (net_ratelimit())
5817 			netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
5821 	tp->tx_skb[entry].len = len;
5822 	txd->addr = cpu_to_le64(mapping);
5824 	opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
5827 	rtl8169_tso_csum(tp, skb, opts);
5829 	frags = rtl8169_xmit_frags(tp, skb, opts);
	/* Multi-fragment: head gets FirstFrag only; single-fragment packet
	 * gets both flags and owns the skb here. */
5833 		opts[0] |= FirstFrag;
5835 		opts[0] |= FirstFrag | LastFrag;
5836 		tp->tx_skb[entry].skb = skb;
5839 	txd->opts2 = cpu_to_le32(opts[1]);
5841 	skb_tx_timestamp(skb);
5845 	/* Anti gcc 2.95.3 bugware (sic) */
5846 	status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
5847 	txd->opts1 = cpu_to_le32(status);
5849 	tp->cur_tx += frags + 1;
	/* Doorbell: tell the NIC the normal priority queue has work. */
5853 	RTL_W8(TxPoll, NPQ);
5857 	if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
5858 		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
5859 		 * not miss a ring update when it notices a stopped queue.
5862 		netif_stop_queue(dev);
5863 		/* Sync with rtl_tx:
5864 		 * - publish queue status and cur_tx ring index (write barrier)
5865 		 * - refresh dirty_tx ring index (read barrier).
5866 		 * May the current thread have a pessimistic view of the ring
5867 		 * status and forget to wake up queue, a racing rtl_tx thread
5871 		if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
5872 			netif_wake_queue(dev);
5875 	return NETDEV_TX_OK;
5878 	rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
5881 	dev->stats.tx_dropped++;
5882 	return NETDEV_TX_OK;
5885 	netif_stop_queue(dev);
5886 	dev->stats.tx_dropped++;
5887 	return NETDEV_TX_BUSY;
/* Handle a PCI bus error (SYSErr): log the PCI command/status registers,
 * clear the latched error bits, optionally disable 64-bit DAC addressing
 * (only safe before any Rx traffic), then reset the chip and schedule a
 * full re-init from the work queue.
 */
5890 static void rtl8169_pcierr_interrupt(struct net_device *dev)
5892 struct rtl8169_private *tp = netdev_priv(dev);
5893 struct pci_dev *pdev = tp->pci_dev;
5894 u16 pci_status, pci_cmd;
5896 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
5897 pci_read_config_word(pdev, PCI_STATUS, &pci_status);
5899 netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
5900 pci_cmd, pci_status);
5903 * The recovery sequence below admits a very elaborated explanation:
5904 * - it seems to work;
5905 * - I did not see what else could be done;
5906 * - it makes iop3xx happy.
5908 * Feel free to adjust to your needs.
5910 if (pdev->broken_parity_status)
5911 pci_cmd &= ~PCI_COMMAND_PARITY;
5913 pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
5915 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
/* Write-one-to-clear the latched error status bits. */
5917 pci_write_config_word(pdev, PCI_STATUS,
5918 pci_status & (PCI_STATUS_DETECTED_PARITY |
5919 PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
5920 PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
5922 /* The infamous DAC f*ckup only happens at boot time */
5923 if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
5924 void __iomem *ioaddr = tp->mmio_addr;
5926 netif_info(tp, intr, dev, "disabling PCI DAC\n");
5927 tp->cp_cmd &= ~PCIDAC;
5928 RTL_W16(CPlusCmd, tp->cp_cmd);
5929 dev->features &= ~NETIF_F_HIGHDMA;
5932 rtl8169_hw_reset(tp);
5934 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
/* Reclaim completed Tx descriptors: walk from dirty_tx to cur_tx, stop at
 * the first descriptor still owned by the NIC (DescOwn), unmap buffers,
 * account stats and free the skb on its LastFrag descriptor.  Wakes the
 * queue if it was stopped and room is available again.
 * NOTE(review): the dirty_tx increment inside the loop and the memory
 * barriers referenced by the comments are not visible in this chunk.
 */
5937 static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
5939 unsigned int dirty_tx, tx_left;
5941 dirty_tx = tp->dirty_tx;
5943 tx_left = tp->cur_tx - dirty_tx;
5945 while (tx_left > 0) {
5946 unsigned int entry = dirty_tx % NUM_TX_DESC;
5947 struct ring_info *tx_skb = tp->tx_skb + entry;
5951 status = le32_to_cpu(tp->TxDescArray[entry].opts1);
5952 if (status & DescOwn)
5955 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
5956 tp->TxDescArray + entry);
5957 if (status & LastFrag) {
5958 u64_stats_update_begin(&tp->tx_stats.syncp)
5959 tp->tx_stats.packets++;
5960 tp->tx_stats.bytes += tx_skb->skb->len;
5961 u64_stats_update_end(&tp->tx_stats.syncp);
5962 dev_kfree_skb(tx_skb->skb);
5969 if (tp->dirty_tx != dirty_tx) {
5970 tp->dirty_tx = dirty_tx;
5971 /* Sync with rtl8169_start_xmit:
5972 * - publish dirty_tx ring index (write barrier)
5973 * - refresh cur_tx ring index and queue status (read barrier)
5974 * May the current thread miss the stopped queue condition,
5975 * a racing xmit thread can only have a right view of the
5979 if (netif_queue_stopped(dev) &&
5980 TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
5981 netif_wake_queue(dev);
5984 * 8168 hack: TxPoll requests are lost when the Tx packets are
5985 * too close. Let's kick an extra TxPoll request when a burst
5986 * of start_xmit activity is detected (if it is not detected,
5987 * it is slow enough). -- FR
5989 if (tp->cur_tx != dirty_tx) {
5990 void __iomem *ioaddr = tp->mmio_addr;
5992 RTL_W8(TxPoll, NPQ);
/* True if the Rx descriptor does not carry a complete frame (the driver
 * only accepts frames where both FirstFrag and LastFrag are set).
 */
5997 static inline int rtl8169_fragmented_frame(u32 status)
5999 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
/* Propagate the hardware Rx checksum verdict: mark CHECKSUM_UNNECESSARY
 * for TCP/UDP frames whose checksum the NIC validated, otherwise leave
 * the skb for software checksumming.
 */
6002 static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
6004 u32 status = opts1 & RxProtoMask;
6006 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
6007 ((status == RxProtoUDP) && !(opts1 & UDPFail)))
6008 skb->ip_summed = CHECKSUM_UNNECESSARY;
6010 skb_checksum_none_assert(skb);
/* Copy-break receive: allocate a fresh skb and memcpy the packet out of
 * the DMA buffer so the original buffer can be immediately re-armed.
 * The dma_sync_* pair brackets the CPU access to the still-mapped buffer.
 * NOTE(review): the allocation-failure path is not visible in this chunk.
 */
6013 static struct sk_buff *rtl8169_try_rx_copy(void *data,
6014 struct rtl8169_private *tp,
6018 struct sk_buff *skb;
6019 struct device *d = &tp->pci_dev->dev;
6021 data = rtl8169_align(data);
6022 dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
6024 skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
6026 memcpy(skb->data, data, pkt_size);
6027 dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
/* NAPI Rx processing: consume up to @budget descriptors from cur_rx.
 * Error frames update stats (and may be kept when NETIF_F_RXALL is on);
 * good frames are copied out, checksum/VLAN-tagged and handed to GRO.
 * Each consumed descriptor is re-armed via rtl8169_mark_to_asic().
 * Returns the number of packets processed.
 */
6032 static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
6034 unsigned int cur_rx, rx_left;
6037 cur_rx = tp->cur_rx;
6038 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
6039 rx_left = min(rx_left, budget);
6041 for (; rx_left > 0; rx_left--, cur_rx++) {
6042 unsigned int entry = cur_rx % NUM_RX_DESC;
6043 struct RxDesc *desc = tp->RxDescArray + entry;
6047 status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
/* Descriptor still owned by the NIC: nothing more to reap. */
6049 if (status & DescOwn)
6051 if (unlikely(status & RxRES)) {
6052 netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
6054 dev->stats.rx_errors++;
6055 if (status & (RxRWT | RxRUNT))
6056 dev->stats.rx_length_errors++;
6058 dev->stats.rx_crc_errors++;
/* Rx FIFO overflow requires a chip reset to recover. */
6059 if (status & RxFOVF) {
6060 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
6061 dev->stats.rx_fifo_errors++;
6063 if ((status & (RxRUNT | RxCRC)) &&
6064 !(status & (RxRWT | RxFOVF)) &&
6065 (dev->features & NETIF_F_RXALL))
6068 rtl8169_mark_to_asic(desc, rx_buf_sz);
6070 struct sk_buff *skb;
6075 addr = le64_to_cpu(desc->addr);
/* Strip the 4-byte FCS unless the user asked to keep it (RXFCS). */
6076 if (likely(!(dev->features & NETIF_F_RXFCS)))
6077 pkt_size = (status & 0x00003fff) - 4;
6079 pkt_size = status & 0x00003fff;
6082 * The driver does not support incoming fragmented
6083 * frames. They are seen as a symptom of over-mtu
6086 if (unlikely(rtl8169_fragmented_frame(status))) {
6087 dev->stats.rx_dropped++;
6088 dev->stats.rx_length_errors++;
6089 rtl8169_mark_to_asic(desc, rx_buf_sz);
6093 skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
6094 tp, pkt_size, addr);
6095 rtl8169_mark_to_asic(desc, rx_buf_sz);
6097 dev->stats.rx_dropped++;
6101 rtl8169_rx_csum(skb, status);
6102 skb_put(skb, pkt_size);
6103 skb->protocol = eth_type_trans(skb, dev);
6105 rtl8169_rx_vlan_tag(desc, skb);
6107 napi_gro_receive(&tp->napi, skb);
6109 u64_stats_update_begin(&tp->rx_stats.syncp);
6110 tp->rx_stats.packets++;
6111 tp->rx_stats.bytes += pkt_size;
6112 u64_stats_update_end(&tp->rx_stats.syncp);
6116 count = cur_rx - tp->cur_rx;
6117 tp->cur_rx = cur_rx;
6119 tp->dirty_rx += count;
/* Hard IRQ handler: if any event of interest is pending (0xffff means the
 * device is gone/unpowered), mask further interrupts and hand processing
 * to NAPI.  All real work happens in rtl8169_poll().
 */
6124 static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
6126 struct net_device *dev = dev_instance;
6127 struct rtl8169_private *tp = netdev_priv(dev);
6131 status = rtl_get_events(tp);
6132 if (status && status != 0xffff) {
6133 status &= RTL_EVENT_NAPI | tp->event_slow;
6137 rtl_irq_disable(tp);
6138 napi_schedule(&tp->napi);
6141 return IRQ_RETVAL(handled);
6145 * Workqueue context.
/* Handle the "slow" (rare) events deferred from the ISR: Rx FIFO overflow
 * (chip-specific workaround), PCI system error and link change.  Re-enables
 * all interrupt sources when done.
 */
6147 static void rtl_slow_event_work(struct rtl8169_private *tp)
6149 struct net_device *dev = tp->dev;
6152 status = rtl_get_events(tp) & tp->event_slow;
6153 rtl_ack_events(tp, status);
6155 if (unlikely(status & RxFIFOOver)) {
6156 switch (tp->mac_version) {
6157 /* Work around for rx fifo overflow */
6158 case RTL_GIGA_MAC_VER_11:
6159 netif_stop_queue(dev);
6160 /* XXX - Hack alert. See rtl_task(). */
6161 set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
6167 if (unlikely(status & SYSErr))
6168 rtl8169_pcierr_interrupt(dev);
6170 if (status & LinkChg)
6171 __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
6173 rtl_irq_enable_all(tp);
/* Single work item dispatcher: test-and-clear each pending flag in order
 * and run its handler.  Bails out early if the interface is down or the
 * task machinery has been disabled (close/suspend).
 * NOTE(review): the matching rtl_lock_work() call is not visible in this
 * chunk but is implied by the rtl_unlock_work() at the end.
 */
6176 static void rtl_task(struct work_struct *work)
6178 static const struct {
6180 void (*action)(struct rtl8169_private *);
6182 /* XXX - keep rtl_slow_event_work() as first element. */
6183 { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work },
6184 { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work },
6185 { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work }
6187 struct rtl8169_private *tp =
6188 container_of(work, struct rtl8169_private, wk.work);
6189 struct net_device *dev = tp->dev;
6194 if (!netif_running(dev) ||
6195 !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
6198 for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
6201 pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
6203 rtl_work[i].action(tp);
6207 rtl_unlock_work(tp);
/* NAPI poll: ack and process Rx/Tx events; slow events are left un-acked
 * and punted to the work queue, with their sources kept masked until
 * rtl_slow_event_work() re-enables them.  Completes NAPI and re-enables
 * interrupts when under budget.
 */
6210 static int rtl8169_poll(struct napi_struct *napi, int budget)
6212 struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
6213 struct net_device *dev = tp->dev;
6214 u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
6218 status = rtl_get_events(tp);
6219 rtl_ack_events(tp, status & ~tp->event_slow);
6221 if (status & RTL_EVENT_NAPI_RX)
6222 work_done = rtl_rx(dev, tp, (u32) budget);
6224 if (status & RTL_EVENT_NAPI_TX)
6227 if (status & tp->event_slow) {
6228 enable_mask &= ~tp->event_slow;
6230 rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
6233 if (work_done < budget) {
6234 napi_complete(napi);
6236 rtl_irq_enable(tp, enable_mask);
/* Accumulate and clear the hardware RxMissed counter.  Only the original
 * 8169 chips (MAC version <= 06) have this register; newer chips return
 * early.
 */
6243 static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
6245 struct rtl8169_private *tp = netdev_priv(dev);
6247 if (tp->mac_version > RTL_GIGA_MAC_VER_06)
6250 dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
6251 RTL_W32(RxMissed, 0);
/* Bring the interface down: stop the timer, NAPI and the Tx queue, reset
 * the chip, harvest the missed-Rx counter, then tear down both rings and
 * power the PLL down.  synchronize_sched() lets a racing start_xmit finish
 * before the Tx ring is cleared.
 */
6254 static void rtl8169_down(struct net_device *dev)
6256 struct rtl8169_private *tp = netdev_priv(dev);
6257 void __iomem *ioaddr = tp->mmio_addr;
6259 del_timer_sync(&tp->timer);
6261 napi_disable(&tp->napi);
6262 netif_stop_queue(dev);
6264 rtl8169_hw_reset(tp);
6266 * At this point device interrupts can not be enabled in any function,
6267 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
6268 * and napi is disabled (rtl8169_poll).
6270 rtl8169_rx_missed(dev, ioaddr);
6272 /* Give a racing hard_start_xmit a few cycles to complete. */
6273 synchronize_sched();
6275 rtl8169_tx_clear(tp);
6277 rtl8169_rx_clear(tp);
6279 rtl_pll_power_down(tp);
/* ndo_stop: snapshot hardware counters, disable the deferred-work machinery,
 * free the IRQ and release both DMA descriptor rings.  Bracketed by runtime
 * PM get/put so the device is powered during teardown.
 * NOTE(review): the rtl8169_down() call and lock calls are not visible in
 * this chunk.
 */
6282 static int rtl8169_close(struct net_device *dev)
6284 struct rtl8169_private *tp = netdev_priv(dev);
6285 struct pci_dev *pdev = tp->pci_dev;
6287 pm_runtime_get_sync(&pdev->dev);
6289 /* Update counters before going down */
6290 rtl8169_update_counters(dev);
6293 clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6296 rtl_unlock_work(tp);
6298 free_irq(pdev->irq, dev);
6300 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
6302 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
6304 tp->TxDescArray = NULL;
6305 tp->RxDescArray = NULL;
6307 pm_runtime_put_sync(&pdev->dev);
6312 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook (e.g. netconsole): invoke the IRQ handler synchronously. */
6313 static void rtl8169_netpoll(struct net_device *dev)
6315 struct rtl8169_private *tp = netdev_priv(dev);
6317 rtl8169_interrupt(tp->pci_dev->irq, dev);
/* ndo_open: allocate the 256-byte-aligned Tx/Rx descriptor rings, populate
 * the Rx ring, load firmware, request the IRQ (shared unless MSI), then
 * bring up PHY, features, PLL and the queue.  Error paths unwind in reverse
 * allocation order.
 * NOTE(review): several goto labels and the hw_start call are not visible
 * in this chunk.
 */
6321 static int rtl_open(struct net_device *dev)
6323 struct rtl8169_private *tp = netdev_priv(dev);
6324 void __iomem *ioaddr = tp->mmio_addr;
6325 struct pci_dev *pdev = tp->pci_dev;
6326 int retval = -ENOMEM;
6328 pm_runtime_get_sync(&pdev->dev);
6331 * Rx and Tx descriptors needs 256 bytes alignment.
6332 * dma_alloc_coherent provides more.
6334 tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
6335 &tp->TxPhyAddr, GFP_KERNEL);
6336 if (!tp->TxDescArray)
6337 goto err_pm_runtime_put;
6339 tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
6340 &tp->RxPhyAddr, GFP_KERNEL);
6341 if (!tp->RxDescArray)
6344 retval = rtl8169_init_ring(dev);
6348 INIT_WORK(&tp->wk.work, rtl_task);
6352 rtl_request_firmware(tp);
6354 retval = request_irq(pdev->irq, rtl8169_interrupt,
6355 (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
6358 goto err_release_fw_2;
6362 set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6364 napi_enable(&tp->napi);
6366 rtl8169_init_phy(dev, tp);
6368 __rtl8169_set_features(dev, dev->features);
6370 rtl_pll_power_up(tp);
6374 netif_start_queue(dev);
6376 rtl_unlock_work(tp);
6378 tp->saved_wolopts = 0;
6379 pm_runtime_put_noidle(&pdev->dev);
6381 rtl8169_check_link_status(dev, tp, ioaddr);
/* Error unwind: firmware, Rx buffers, then both descriptor rings. */
6386 rtl_release_firmware(tp);
6387 rtl8169_rx_clear(tp);
6389 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
6391 tp->RxDescArray = NULL;
6393 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
6395 tp->TxDescArray = NULL;
6397 pm_runtime_put_noidle(&pdev->dev);
/* ndo_get_stats64: fill @stats from the per-device 64-bit Rx/Tx counters,
 * reading each under its u64_stats seqcount so 32-bit hosts get a torn-free
 * snapshot, then copy the plain error counters from dev->stats.
 */
6401 static struct rtnl_link_stats64 *
6402 rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6404 struct rtl8169_private *tp = netdev_priv(dev);
6405 void __iomem *ioaddr = tp->mmio_addr;
6408 if (netif_running(dev))
6409 rtl8169_rx_missed(dev, ioaddr);
6412 start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
6413 stats->rx_packets = tp->rx_stats.packets;
6414 stats->rx_bytes = tp->rx_stats.bytes;
6415 } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
6419 start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
6420 stats->tx_packets = tp->tx_stats.packets;
6421 stats->tx_bytes = tp->tx_stats.bytes;
6422 } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
6424 stats->rx_dropped = dev->stats.rx_dropped;
6425 stats->tx_dropped = dev->stats.tx_dropped;
6426 stats->rx_length_errors = dev->stats.rx_length_errors;
6427 stats->rx_errors = dev->stats.rx_errors;
6428 stats->rx_crc_errors = dev->stats.rx_crc_errors;
6429 stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
6430 stats->rx_missed_errors = dev->stats.rx_missed_errors;
/* Common suspend path (system and runtime PM): detach the netdev, stop the
 * queue and NAPI, disable deferred work and power the PLL down.  No-op if
 * the interface is not running.
 */
6435 static void rtl8169_net_suspend(struct net_device *dev)
6437 struct rtl8169_private *tp = netdev_priv(dev);
6439 if (!netif_running(dev))
6442 netif_device_detach(dev);
6443 netif_stop_queue(dev);
6446 napi_disable(&tp->napi);
6447 clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6448 rtl_unlock_work(tp);
6450 rtl_pll_power_down(tp);
/* dev_pm_ops .suspend/.freeze/.poweroff callback: thin wrapper around the
 * common net-suspend path.
 */
6455 static int rtl8169_suspend(struct device *device)
6457 struct pci_dev *pdev = to_pci_dev(device);
6458 struct net_device *dev = pci_get_drvdata(pdev);
6460 rtl8169_net_suspend(dev);
/* Common resume path: re-attach the netdev, power the PLL up, re-enable
 * NAPI and the work machinery, then schedule a chip reset to restore the
 * hardware state.
 */
6465 static void __rtl8169_resume(struct net_device *dev)
6467 struct rtl8169_private *tp = netdev_priv(dev);
6469 netif_device_attach(dev);
6471 rtl_pll_power_up(tp);
6474 napi_enable(&tp->napi);
6475 set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6476 rtl_unlock_work(tp);
6478 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
/* dev_pm_ops .resume/.thaw/.restore callback: re-initialize the PHY and,
 * if the interface was running, complete the common resume path.
 */
6481 static int rtl8169_resume(struct device *device)
6483 struct pci_dev *pdev = to_pci_dev(device);
6484 struct net_device *dev = pci_get_drvdata(pdev);
6485 struct rtl8169_private *tp = netdev_priv(dev);
6487 rtl8169_init_phy(dev, tp);
6489 if (netif_running(dev))
6490 __rtl8169_resume(dev);
/* Runtime-PM suspend: only meaningful once the rings exist (TxDescArray
 * set by rtl_open).  Saves the current WoL options and arms WAKE_ANY so
 * any packet can wake the device, then runs the common suspend path.
 */
6495 static int rtl8169_runtime_suspend(struct device *device)
6497 struct pci_dev *pdev = to_pci_dev(device);
6498 struct net_device *dev = pci_get_drvdata(pdev);
6499 struct rtl8169_private *tp = netdev_priv(dev);
6501 if (!tp->TxDescArray)
6505 tp->saved_wolopts = __rtl8169_get_wol(tp);
6506 __rtl8169_set_wol(tp, WAKE_ANY);
6507 rtl_unlock_work(tp);
6509 rtl8169_net_suspend(dev);
/* Runtime-PM resume: restore the WoL options saved at runtime-suspend,
 * re-init the PHY and complete the common resume path.  No-op if the
 * rings were never allocated.
 */
6514 static int rtl8169_runtime_resume(struct device *device)
6516 struct pci_dev *pdev = to_pci_dev(device);
6517 struct net_device *dev = pci_get_drvdata(pdev);
6518 struct rtl8169_private *tp = netdev_priv(dev);
6520 if (!tp->TxDescArray)
6524 __rtl8169_set_wol(tp, tp->saved_wolopts);
6525 tp->saved_wolopts = 0;
6526 rtl_unlock_work(tp);
6528 rtl8169_init_phy(dev, tp);
6530 __rtl8169_resume(dev);
/* Runtime-PM idle check: veto runtime suspend (-EBUSY) while the interface
 * is up (rings allocated); allow it otherwise.
 */
6535 static int rtl8169_runtime_idle(struct device *device)
6537 struct pci_dev *pdev = to_pci_dev(device);
6538 struct net_device *dev = pci_get_drvdata(pdev);
6539 struct rtl8169_private *tp = netdev_priv(dev);
6541 return tp->TxDescArray ? -EBUSY : 0;
/* Power-management operations table; RTL8169_PM_OPS resolves to NULL when
 * CONFIG_PM is disabled so the pci_driver registration stays unconditional.
 */
6544 static const struct dev_pm_ops rtl8169_pm_ops = {
6545 .suspend = rtl8169_suspend,
6546 .resume = rtl8169_resume,
6547 .freeze = rtl8169_suspend,
6548 .thaw = rtl8169_resume,
6549 .poweroff = rtl8169_suspend,
6550 .restore = rtl8169_resume,
6551 .runtime_suspend = rtl8169_runtime_suspend,
6552 .runtime_resume = rtl8169_runtime_resume,
6553 .runtime_idle = rtl8169_runtime_idle,
6556 #define RTL8169_PM_OPS (&rtl8169_pm_ops)
6558 #else /* !CONFIG_PM */
6560 #define RTL8169_PM_OPS NULL
6562 #endif /* !CONFIG_PM */
/* Shutdown-time WoL quirk: on 8168b variants (MAC ver 11/12/17), Wake-on-LAN
 * only works if the receiver stays enabled — so stop bus mastering but keep
 * CmdRxEnb set.
 */
6564 static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
6566 void __iomem *ioaddr = tp->mmio_addr;
6568 /* WoL fails with 8168b when the receiver is disabled. */
6569 switch (tp->mac_version) {
6570 case RTL_GIGA_MAC_VER_11:
6571 case RTL_GIGA_MAC_VER_12:
6572 case RTL_GIGA_MAC_VER_17:
6573 pci_clear_master(tp->pci_dev);
6575 RTL_W8(ChipCmd, CmdRxEnb);
/* pci_driver .shutdown: quiesce the device, restore the permanent MAC
 * address (the chip may have a user-set one), reset, and — when powering
 * off with WoL armed — apply the WoL quirks and drop into D3hot with
 * wake enabled.
 */
6584 static void rtl_shutdown(struct pci_dev *pdev)
6586 struct net_device *dev = pci_get_drvdata(pdev);
6587 struct rtl8169_private *tp = netdev_priv(dev);
6588 struct device *d = &pdev->dev;
6590 pm_runtime_get_sync(d);
6592 rtl8169_net_suspend(dev);
6594 /* Restore original MAC address */
6595 rtl_rar_set(tp, dev->perm_addr);
6597 rtl8169_hw_reset(tp);
6599 if (system_state == SYSTEM_POWER_OFF) {
6600 if (__rtl8169_get_wol(tp) & WAKE_ANY) {
6601 rtl_wol_suspend_quirk(tp);
6602 rtl_wol_shutdown_quirk(tp);
6605 pci_wake_from_d3(pdev, true);
6606 pci_set_power_state(pdev, PCI_D3hot);
6609 pm_runtime_put_noidle(d);
/* pci_driver .remove: stop the DASH/OOB firmware agent on the chips that
 * have one (MAC ver 27/28/31), flush pending work, unregister the netdev,
 * release firmware, restore the permanent MAC address and free all board
 * resources.
 */
6612 static void rtl_remove_one(struct pci_dev *pdev)
6614 struct net_device *dev = pci_get_drvdata(pdev);
6615 struct rtl8169_private *tp = netdev_priv(dev);
6617 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6618 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6619 tp->mac_version == RTL_GIGA_MAC_VER_31) {
6620 rtl8168_driver_stop(tp);
6623 cancel_work_sync(&tp->wk.work);
6625 netif_napi_del(&tp->napi);
6627 unregister_netdev(dev);
6629 rtl_release_firmware(tp);
/* Balance the pm_runtime_put_noidle() done at probe for wake-capable devices. */
6631 if (pci_dev_run_wake(pdev))
6632 pm_runtime_get_noresume(&pdev->dev);
6634 /* restore original MAC address */
6635 rtl_rar_set(tp, dev->perm_addr);
6637 rtl_disable_msi(pdev, tp);
6638 rtl8169_release_board(pdev, dev, tp->mmio_addr);
6639 pci_set_drvdata(pdev, NULL);
/* net_device_ops: glue between the network stack and this driver's
 * open/close, xmit, stats, MTU/feature and ioctl handlers.
 */
6642 static const struct net_device_ops rtl_netdev_ops = {
6643 .ndo_open = rtl_open,
6644 .ndo_stop = rtl8169_close,
6645 .ndo_get_stats64 = rtl8169_get_stats64,
6646 .ndo_start_xmit = rtl8169_start_xmit,
6647 .ndo_tx_timeout = rtl8169_tx_timeout,
6648 .ndo_validate_addr = eth_validate_addr,
6649 .ndo_change_mtu = rtl8169_change_mtu,
6650 .ndo_fix_features = rtl8169_fix_features,
6651 .ndo_set_features = rtl8169_set_features,
6652 .ndo_set_mac_address = rtl_set_mac_address,
6653 .ndo_do_ioctl = rtl8169_ioctl,
6654 .ndo_set_rx_mode = rtl_set_rx_mode,
6655 #ifdef CONFIG_NET_POLL_CONTROLLER
6656 .ndo_poll_controller = rtl8169_netpoll,
/* Per-family configuration table (8169 / 8168 / 8101), indexed by the PCI
 * id table's driver_data: hw_start routine, BAR region, slow-event mask,
 * feature flags and a fallback MAC version for unrecognized chips.
 */
6661 static const struct rtl_cfg_info {
6662 void (*hw_start)(struct net_device *);
6663 unsigned int region;
6668 } rtl_cfg_infos [] = {
6670 .hw_start = rtl_hw_start_8169,
6673 .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
6674 .features = RTL_FEATURE_GMII,
6675 .default_ver = RTL_GIGA_MAC_VER_01,
6678 .hw_start = rtl_hw_start_8168,
6681 .event_slow = SYSErr | LinkChg | RxOverflow,
6682 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
6683 .default_ver = RTL_GIGA_MAC_VER_11,
6686 .hw_start = rtl_hw_start_8101,
6689 .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver |
6691 .features = RTL_FEATURE_MSI,
6692 .default_ver = RTL_GIGA_MAC_VER_13,
6696 /* Cfg9346_Unlock assumed. */
/* Try to enable MSI for families that support it; fall back to legacy INTx
 * on failure.  Returns the RTL_FEATURE_MSI flag on success, to be OR-ed
 * into tp->features.  Also clears MSIEnable in Config2 on old (<= VER_06)
 * chips.
 */
6697 static unsigned rtl_try_msi(struct rtl8169_private *tp,
6698 const struct rtl_cfg_info *cfg)
6700 void __iomem *ioaddr = tp->mmio_addr;
6704 cfg2 = RTL_R8(Config2) & ~MSIEnable;
6705 if (cfg->features & RTL_FEATURE_MSI) {
6706 if (pci_enable_msi(tp->pci_dev)) {
6707 netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
6710 msi = RTL_FEATURE_MSI;
6713 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
6714 RTL_W8(Config2, cfg2);
/* Poll condition: MCU reports the internal link list is ready. */
6718 DECLARE_RTL_COND(rtl_link_list_ready_cond)
6720 void __iomem *ioaddr = tp->mmio_addr;
6722 return RTL_R8(MCU) & LINK_LIST_RDY;
/* Poll condition: both Rx and Tx FIFOs are drained (RXTX_EMPTY bits set). */
6725 DECLARE_RTL_COND(rtl_rxtx_empty_cond)
6727 void __iomem *ioaddr = tp->mmio_addr;
6729 return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
/* One-time hardware init for 8168g: gate RXDV, wait for the Tx config and
 * Rx/Tx FIFOs to drain, take the MAC out of OOB mode, then toggle a bit in
 * MAC-OCP register 0xe8de around each link-list-ready wait.
 * NOTE(review): the timeout warning branches and the exact bit manipulated
 * in 0xe8de are not visible in this chunk.
 */
6732 static void rtl_hw_init_8168g(struct rtl8169_private *tp)
6734 void __iomem *ioaddr = tp->mmio_addr;
6737 tp->ocp_base = OCP_STD_PHY_BASE;
6739 RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);
6741 if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
6744 if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
6747 RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
6749 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
6751 data = r8168_mac_ocp_read(tp, 0xe8de);
6753 r8168_mac_ocp_write(tp, 0xe8de, data);
6755 if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
6758 data = r8168_mac_ocp_read(tp, 0xe8de);
6760 r8168_mac_ocp_write(tp, 0xe8de, data);
6762 if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
/* Dispatch chip-specific one-time hardware initialization; currently only
 * the 8168g family (MAC ver 40/41) needs it.
 */
6766 static void rtl_hw_initialize(struct rtl8169_private *tp)
6768 switch (tp->mac_version) {
6769 case RTL_GIGA_MAC_VER_40:
6770 case RTL_GIGA_MAC_VER_41:
6771 rtl_hw_init_8168g(tp);
/* pci_driver .probe: allocate and register the net_device.  In order:
 * allocate the etherdev, disable ASPM (known to hang some PCIe parts),
 * enable the PCI device and map BAR @cfg->region, pick a 64- or 32-bit DMA
 * mask, identify the MAC version, run one-time hw init, select MSI/INTx,
 * install TBI- or MII-specific accessors, read the MAC address, set up
 * features/NAPI/timer and finally register_netdev.  Error labels unwind in
 * reverse order.
 * NOTE(review): many interior lines (labels, some conditionals and
 * assignments) are not visible in this chunk.
 */
6780 rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6782 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
6783 const unsigned int region = cfg->region;
6784 struct rtl8169_private *tp;
6785 struct mii_if_info *mii;
6786 struct net_device *dev;
6787 void __iomem *ioaddr;
6791 if (netif_msg_drv(&debug)) {
6792 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
6793 MODULENAME, RTL8169_VERSION);
6796 dev = alloc_etherdev(sizeof (*tp));
6802 SET_NETDEV_DEV(dev, &pdev->dev);
6803 dev->netdev_ops = &rtl_netdev_ops;
6804 tp = netdev_priv(dev);
6807 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
/* Wire up the generic MII helper library to this driver's MDIO accessors. */
6811 mii->mdio_read = rtl_mdio_read;
6812 mii->mdio_write = rtl_mdio_write;
6813 mii->phy_id_mask = 0x1f;
6814 mii->reg_num_mask = 0x1f;
6815 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
6817 /* disable ASPM completely as that cause random device stop working
6818 * problems as well as full system hangs for some PCIe devices users */
6819 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
6820 PCIE_LINK_STATE_CLKPM);
6822 /* enable device (incl. PCI PM wakeup and hotplug setup) */
6823 rc = pci_enable_device(pdev);
6825 netif_err(tp, probe, dev, "enable failure\n");
6826 goto err_out_free_dev_1;
6829 if (pci_set_mwi(pdev) < 0)
6830 netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
6832 /* make sure PCI base addr 1 is MMIO */
6833 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
6834 netif_err(tp, probe, dev,
6835 "region #%d not an MMIO resource, aborting\n",
6841 /* check for weird/broken PCI region reporting */
6842 if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
6843 netif_err(tp, probe, dev,
6844 "Invalid PCI region size(s), aborting\n");
6849 rc = pci_request_regions(pdev, MODULENAME);
6851 netif_err(tp, probe, dev, "could not request regions\n");
6855 tp->cp_cmd = RxChkSum;
/* Prefer 64-bit DMA (DAC) when available and requested via use_dac. */
6857 if ((sizeof(dma_addr_t) > 4) &&
6858 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
6859 tp->cp_cmd |= PCIDAC;
6860 dev->features |= NETIF_F_HIGHDMA;
6862 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6864 netif_err(tp, probe, dev, "DMA configuration failed\n");
6865 goto err_out_free_res_3;
6869 /* ioremap MMIO region */
6870 ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
6872 netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
6874 goto err_out_free_res_3;
6876 tp->mmio_addr = ioaddr;
6878 if (!pci_is_pcie(pdev))
6879 netif_info(tp, probe, dev, "not PCI Express\n");
6881 /* Identify chip attached to board */
6882 rtl8169_get_mac_version(tp, dev, cfg->default_ver);
6886 rtl_irq_disable(tp);
6888 rtl_hw_initialize(tp);
6892 rtl_ack_events(tp, 0xffff);
6894 pci_set_master(pdev);
6897 * Pretend we are using VLANs; This bypasses a nasty bug where
6898 * Interrupts stop flowing on high load on 8110SCd controllers.
6900 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6901 tp->cp_cmd |= RxVlan;
6903 rtl_init_mdio_ops(tp);
6904 rtl_init_pll_power_ops(tp);
6905 rtl_init_jumbo_ops(tp);
6906 rtl_init_csi_ops(tp);
6908 rtl8169_print_mac_version(tp);
6910 chipset = tp->mac_version;
6911 tp->txd_version = rtl_chip_infos[chipset].txd_version;
/* Probe WoL capability from the config registers (EEPROM-backed). */
6913 RTL_W8(Cfg9346, Cfg9346_Unlock);
6914 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
6915 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
6916 if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
6917 tp->features |= RTL_FEATURE_WOL;
6918 if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
6919 tp->features |= RTL_FEATURE_WOL;
6920 tp->features |= rtl_try_msi(tp, cfg);
6921 RTL_W8(Cfg9346, Cfg9346_Lock);
/* Select TBI (fiber) or XMII (copper) accessor set per chip capability. */
6923 if (rtl_tbi_enabled(tp)) {
6924 tp->set_speed = rtl8169_set_speed_tbi;
6925 tp->get_settings = rtl8169_gset_tbi;
6926 tp->phy_reset_enable = rtl8169_tbi_reset_enable;
6927 tp->phy_reset_pending = rtl8169_tbi_reset_pending;
6928 tp->link_ok = rtl8169_tbi_link_ok;
6929 tp->do_ioctl = rtl_tbi_ioctl;
6931 tp->set_speed = rtl8169_set_speed_xmii;
6932 tp->get_settings = rtl8169_gset_xmii;
6933 tp->phy_reset_enable = rtl8169_xmii_reset_enable;
6934 tp->phy_reset_pending = rtl8169_xmii_reset_pending;
6935 tp->link_ok = rtl8169_xmii_link_ok;
6936 tp->do_ioctl = rtl_xmii_ioctl;
6939 mutex_init(&tp->wk.mutex);
6941 /* Get MAC address */
6942 for (i = 0; i < ETH_ALEN; i++)
6943 dev->dev_addr[i] = RTL_R8(MAC0 + i);
6944 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6946 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
6947 dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
6949 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
6951 /* don't enable SG, IP_CSUM and TSO by default - it might not work
6952 * properly for all devices */
6953 dev->features |= NETIF_F_RXCSUM |
6954 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6956 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6957 NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6958 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6961 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6962 /* 8110SCd requires hardware Rx VLAN - disallow toggling */
6963 dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
6965 dev->hw_features |= NETIF_F_RXALL;
6966 dev->hw_features |= NETIF_F_RXFCS;
6968 tp->hw_start = cfg->hw_start;
6969 tp->event_slow = cfg->event_slow;
/* VER_01 lacks the Rx overflow status bits; mask them out elsewhere. */
6971 tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
6972 ~(RxBOVF | RxFOVF) : ~0;
6974 init_timer(&tp->timer);
6975 tp->timer.data = (unsigned long) dev;
6976 tp->timer.function = rtl8169_phy_timer;
6978 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
6980 rc = register_netdev(dev);
6984 pci_set_drvdata(pdev, dev);
6986 netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
6987 rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
6988 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
6989 if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
6990 netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
6991 "tx checksumming: %s]\n",
6992 rtl_chip_infos[chipset].jumbo_max,
6993 rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
6996 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6997 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6998 tp->mac_version == RTL_GIGA_MAC_VER_31) {
6999 rtl8168_driver_start(tp);
7002 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
7004 if (pci_dev_run_wake(pdev))
7005 pm_runtime_put_noidle(&pdev->dev);
7007 netif_carrier_off(dev);
/* Probe error unwind: NAPI, MSI, regions, MWI, device, netdev. */
7013 netif_napi_del(&tp->napi);
7014 rtl_disable_msi(pdev, tp);
7017 pci_release_regions(pdev);
7019 pci_clear_mwi(pdev);
7020 pci_disable_device(pdev);
/* PCI driver registration; module_pci_driver() generates the module
 * init/exit boilerplate.
 */
7026 static struct pci_driver rtl8169_pci_driver = {
7028 .id_table = rtl8169_pci_tbl,
7029 .probe = rtl_init_one,
7030 .remove = rtl_remove_one,
7031 .shutdown = rtl_shutdown,
7032 .driver.pm = RTL8169_PM_OPS,
7035 module_pci_driver(rtl8169_pci_driver);