/* Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>

#define MVNETA_RXQ_CONFIG_REG(q)		(0x1400 + ((q) << 2))
#define      MVNETA_RXQ_HW_BUF_ALLOC		BIT(1)
#define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK	(0xf << 8)
#define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)	((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)		(0x14c0 + ((q) << 2))
#define      MVNETA_RXQ_NON_OCCUPIED(v)		((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)		(0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)			(0x14a0 + ((q) << 2))
#define      MVNETA_RXQ_BUF_SIZE_SHIFT		19
#define      MVNETA_RXQ_BUF_SIZE_MASK		(0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)		(0x14e0 + ((q) << 2))
#define      MVNETA_RXQ_OCCUPIED_ALL_MASK	0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)		(0x1500 + ((q) << 2))
#define      MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT	16
#define      MVNETA_RXQ_ADD_NON_OCCUPIED_MAX	255
#define MVNETA_PORT_RX_RESET			0x1cc0
#define      MVNETA_PORT_RX_DMA_RESET		BIT(0)
#define MVNETA_PHY_ADDR				0x2000
#define      MVNETA_PHY_ADDR_MASK		0x1f
#define MVNETA_MBUS_RETRY			0x2010
#define MVNETA_UNIT_INTR_CAUSE			0x2080
#define MVNETA_UNIT_CONTROL			0x20B0
#define      MVNETA_PHY_POLLING_ENABLE		BIT(1)
#define MVNETA_WIN_BASE(w)			(0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)			(0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)			(0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE			0x2290
#define MVNETA_PORT_CONFIG			0x2400
#define      MVNETA_UNI_PROMISC_MODE		BIT(0)
#define      MVNETA_DEF_RXQ(q)			((q) << 1)
#define      MVNETA_DEF_RXQ_ARP(q)		((q) << 4)
#define      MVNETA_TX_UNSET_ERR_SUM		BIT(12)
#define      MVNETA_DEF_RXQ_TCP(q)		((q) << 16)
#define      MVNETA_DEF_RXQ_UDP(q)		((q) << 19)
#define      MVNETA_DEF_RXQ_BPDU(q)		((q) << 22)
#define      MVNETA_RX_CSUM_WITH_PSEUDO_HDR	BIT(25)
#define      MVNETA_PORT_CONFIG_DEFL_VALUE(q)	(MVNETA_DEF_RXQ(q)       | \
						 MVNETA_DEF_RXQ_ARP(q)	 | \
						 MVNETA_DEF_RXQ_TCP(q)	 | \
						 MVNETA_DEF_RXQ_UDP(q)	 | \
						 MVNETA_DEF_RXQ_BPDU(q)	 | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND		0x2404
#define MVNETA_MAC_ADDR_LOW			0x2414
#define MVNETA_MAC_ADDR_HIGH			0x2418
#define MVNETA_SDMA_CONFIG			0x241c
#define      MVNETA_SDMA_BRST_SIZE_16		4
#define      MVNETA_RX_BRST_SZ_MASK(burst)	((burst) << 1)
#define      MVNETA_RX_NO_DATA_SWAP		BIT(4)
#define      MVNETA_TX_NO_DATA_SWAP		BIT(5)
#define      MVNETA_DESC_SWAP			BIT(6)
#define      MVNETA_TX_BRST_SZ_MASK(burst)	((burst) << 22)
#define MVNETA_PORT_STATUS			0x2444
#define      MVNETA_TX_IN_PRGRS		BIT(1)
#define      MVNETA_TX_FIFO_EMPTY		BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE		0x247c
#define MVNETA_SGMII_SERDES_CFG			0x24A0
#define      MVNETA_SGMII_SERDES_PROTO		0x0cc7
#define MVNETA_TYPE_PRIO			0x24bc
#define      MVNETA_FORCE_UNI			BIT(21)
#define MVNETA_TXQ_CMD_1			0x24e4
#define MVNETA_TXQ_CMD				0x2448
#define      MVNETA_TXQ_DISABLE_SHIFT		8
#define      MVNETA_TXQ_ENABLE_MASK		0x000000ff
#define MVNETA_ACC_MODE				0x2500
#define MVNETA_CPU_MAP(cpu)			(0x2540 + ((cpu) << 2))
#define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK	0x000000ff
#define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK	0x0000ff00
#define MVNETA_RXQ_TIME_COAL_REG(q)		(0x2580 + ((q) << 2))
#define MVNETA_INTR_NEW_CAUSE			0x25a0
#define      MVNETA_RX_INTR_MASK(nr_rxqs)	(((1 << nr_rxqs) - 1) << 8)
#define MVNETA_INTR_NEW_MASK			0x25a4
#define MVNETA_INTR_OLD_CAUSE			0x25a8
#define MVNETA_INTR_OLD_MASK			0x25ac
#define MVNETA_INTR_MISC_CAUSE			0x25b0
#define MVNETA_INTR_MISC_MASK			0x25b4
#define MVNETA_INTR_ENABLE			0x25b8
#define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK	0x0000ff00
#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK	0xff000000
#define MVNETA_RXQ_CMD				0x2680
#define      MVNETA_RXQ_DISABLE_SHIFT		8
#define      MVNETA_RXQ_ENABLE_MASK		0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)		(0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)		(0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0			0x2c00
#define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT	2
#define      MVNETA_GMAC_MAX_RX_SIZE_MASK	0x7ffc
#define      MVNETA_GMAC0_PORT_ENABLE		BIT(0)
#define MVNETA_GMAC_CTRL_2			0x2c08
#define      MVNETA_GMAC2_PSC_ENABLE		BIT(3)
#define      MVNETA_GMAC2_PORT_RGMII		BIT(4)
#define      MVNETA_GMAC2_PORT_RESET		BIT(6)
#define MVNETA_GMAC_STATUS			0x2c10
#define      MVNETA_GMAC_LINK_UP		BIT(0)
#define      MVNETA_GMAC_SPEED_1000		BIT(1)
#define      MVNETA_GMAC_SPEED_100		BIT(2)
#define      MVNETA_GMAC_FULL_DUPLEX		BIT(3)
#define      MVNETA_GMAC_RX_FLOW_CTRL_ENABLE	BIT(4)
#define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE	BIT(5)
#define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE	BIT(6)
#define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE	BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG		0x2c0c
#define      MVNETA_GMAC_FORCE_LINK_DOWN	BIT(0)
#define      MVNETA_GMAC_FORCE_LINK_PASS	BIT(1)
#define      MVNETA_GMAC_CONFIG_MII_SPEED	BIT(5)
#define      MVNETA_GMAC_CONFIG_GMII_SPEED	BIT(6)
#define      MVNETA_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define MVNETA_MIB_COUNTERS_BASE		0x3080
#define      MVNETA_MIB_LATE_COLLISION		0x7c
#define MVNETA_DA_FILT_SPEC_MCAST		0x3400
#define MVNETA_DA_FILT_OTH_MCAST		0x3500
#define MVNETA_DA_FILT_UCAST_BASE		0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)		(0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)			(0x3c20 + ((q) << 2))
#define      MVNETA_TXQ_SENT_THRESH_ALL_MASK	0x3fff0000
#define      MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)		(0x3c60 + ((q) << 2))
#define      MVNETA_TXQ_DEC_SENT_SHIFT		16
#define MVNETA_TXQ_STATUS_REG(q)		(0x3c40 + ((q) << 2))
#define      MVNETA_TXQ_SENT_DESC_SHIFT		16
#define      MVNETA_TXQ_SENT_DESC_MASK		0x3fff0000
#define MVNETA_PORT_TX_RESET			0x3cf0
#define      MVNETA_PORT_TX_DMA_RESET		BIT(0)
#define MVNETA_TX_MTU				0x3e0c
#define MVNETA_TX_TOKEN_SIZE			0x3e14
#define      MVNETA_TX_TOKEN_SIZE_MAX		0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)		(0x3e40 + ((q) << 2))
#define      MVNETA_TXQ_TOKEN_SIZE_MAX		0x7fffffff

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
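
/* Illustrative example: with a 128-descriptor ring, last_desc is 127,
 * so MVNETA_QUEUE_NEXT_DESC(q, 126) yields 127 and
 * MVNETA_QUEUE_NEXT_DESC(q, 127) wraps back to 0.
 */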

/* Various constants */

#define MVNETA_TXDONE_COAL_PKTS		16
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

#define MVNETA_TX_DONE_TIMER_PERIOD	10

/* Napi polling weight */
#define MVNETA_RX_POLL_WEIGHT		64

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically with zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4-byte
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
#define MVNETA_MH_SIZE			2
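
/* Alignment check (illustrative): MVNETA_MH_SIZE + ETH_HLEN = 2 + 14 = 16
 * bytes, a multiple of 4, so the IP header that follows the Ethernet
 * header starts on a 4-byte boundary.
 */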

#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_CPU_D_CACHE_LINE_SIZE	32
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT		1

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			532

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN,			     \
	      MVNETA_CPU_D_CACHE_LINE_SIZE)

#define MVNETA_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
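
/* Worked example (illustrative): for the default 1500-byte MTU,
 * MVNETA_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 32)
 *                          = ALIGN(1524, 32) = 1536 bytes,
 * and MVNETA_RX_BUF_SIZE() then adds NET_SKB_PAD headroom on top.
 */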

struct mvneta_stats {
	struct	u64_stats_sync syncp;
	u64	packets;
	u64	bytes;
};

struct mvneta_port {
	int pkt_size;
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct timer_list tx_done_timer;
	struct net_device *dev;

	u32 cause_rx_tx;
	struct napi_struct napi;

	/* Flags */
	unsigned long flags;
#define MVNETA_F_TX_DONE_TIMER_BIT  0

	/* Core clock */
	struct clk *clk;
	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvneta_stats tx_stats;
	struct mvneta_stats rx_stats;

	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	struct device_node *phy_node;
	unsigned int duplex;
	unsigned int speed;
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design.
 */

#define MVNETA_TX_L3_OFF_SHIFT	0
#define MVNETA_TX_IP_HLEN_SHIFT	8
#define MVNETA_TX_L4_UDP	BIT(16)
#define MVNETA_TX_L3_IP6	BIT(17)
#define MVNETA_TXD_IP_CSUM	BIT(18)
#define MVNETA_TXD_Z_PAD	BIT(19)
#define MVNETA_TXD_L_DESC	BIT(20)
#define MVNETA_TXD_F_DESC	BIT(21)
#define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
				 MVNETA_TXD_L_DESC | \
				 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL	BIT(30)
#define MVNETA_TX_L4_CSUM_NOT	BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)
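
/* Illustrative example: a status word of 0x0c000000 has both
 * MVNETA_RXD_FIRST_LAST_DESC bits set (BIT(26) | BIT(27)) and
 * MVNETA_RXD_ERR_SUMMARY clear, i.e. a well-formed frame that fits
 * in a single descriptor with no error reported.
 */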

#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
	u32  command;		/* Options used by HW for packet transmitting.*/
	u16  reserved1;		/* csum_l4 (for future use)		*/
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};

struct mvneta_rx_desc {
	u32  status;		/* Info about received packet		*/
	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
	u16  data_size;		/* Size of received packet in bytes	*/
	u32  buf_phys_addr;	/* Physical address of the buffer	*/
	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u16  reserved3;		/* prefetch_cmd, for future use		*/
	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
};
#else
struct mvneta_tx_desc {
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u16  reserved1;		/* csum_l4 (for future use)		*/
	u32  command;		/* Options used by HW for packet transmitting.*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};

struct mvneta_rx_desc {
	u16  data_size;		/* Size of received packet in bytes	*/
	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
	u32  status;		/* Info about received packet		*/
	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
	u32  buf_phys_addr;	/* Physical address of the buffer	*/
	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
	u16  reserved3;		/* prefetch_cmd, for future use		*/
	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
};
#endif
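
/* Note (illustrative): on big-endian kernels the 16-bit descriptor
 * fields above are laid out pre-swapped within each 32-bit word, and
 * mvneta_defaults_set() additionally sets MVNETA_DESC_SWAP in
 * MVNETA_SDMA_CONFIG so the DMA engine byte-swaps descriptors to match.
 */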

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;
	/* Number of TX DMA descriptors in the descriptor ring */
	int size;
	/* Number of currently used TX DMA descriptors in the
	 * descriptor ring
	 */
	int count;
	/* Array of transmitted skb */
	struct sk_buff **tx_skb;
	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;
	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
	u32 done_pkts_coal;
	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;
	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;
	/* Index of the last TX DMA descriptor */
	int last_desc;
	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;
	/* num of rx descriptors in the rx descriptor ring */
	int size;
	/* counter of times when mvneta_refill() failed */
	int missed;
	u32 pkts_coal;
	u32 time_coal;
	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;
	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;
	/* Index of the last RX DMA descriptor */
	int last_desc;
	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};

static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}

/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;
	u32 dummy;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
}

/* Get System Network Statistics */
struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
					     struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;

	memset(stats, 0, sizeof(struct rtnl_link_stats64));

	do {
		start = u64_stats_fetch_begin_bh(&pp->rx_stats.syncp);
		stats->rx_packets = pp->rx_stats.packets;
		stats->rx_bytes = pp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&pp->rx_stats.syncp, start));

	do {
		start = u64_stats_fetch_begin_bh(&pp->tx_stats.syncp);
		stats->tx_packets = pp->tx_stats.packets;
		stats->tx_bytes = pp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&pp->tx_stats.syncp, start));

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;

	stats->tx_dropped = dev->stats.tx_dropped;

	return stats;
}

/* Rx descriptors helper methods */

/* Checks whether the given RX descriptor is both the first and the
 * last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(struct mvneta_rx_desc *desc)
{
	return (desc->status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}
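
/* Worked example (illustrative): refilling 300 descriptors takes two
 * register writes: one adding the 255-descriptor maximum, then one
 * adding the remaining 45.
 */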

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	return rxq->descs + rx_desc;
}

/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}
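
/* Worked example (illustrative): for max_rx_size = 1536, the field
 * written is (1536 - 2) / 2 = 767, shifted left by
 * MVNETA_GMAC_MAX_RX_SIZE_SHIFT (2), i.e. the size is stored in
 * half-word units with the Marvell header excluded.
 */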

/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	/* Offset is in 8-byte units */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	/* Only 255 descriptors can be added at once; assume the caller
	 * processes TX descriptors in quanta less than 256
	 */
	val = pend_desc;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
	if (enable)
		val |= MVNETA_GMAC2_PORT_RGMII;
	else
		val &= ~MVNETA_GMAC2_PORT_RGMII;

	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
}

/* Config SGMII port */
static void mvneta_port_sgmii_config(struct mvneta_port *pp)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
	val |= MVNETA_GMAC2_PSC_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);

	mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
}

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	mvneta_mib_counters_clear(pp);
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs. */
	q_map = 0;
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		if (rxq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);
		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & 0xff);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);
		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);
	} while (val & 0xff);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);
		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1)
		val = 0;
	else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1)
		val = 0;
	else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 * This method can be called after mvneta_port_down() to return the port
 * settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;

	/* Clear all Cause registers */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map - all CPUs have access to all RX
	 * queues and to all TX queues
	 */
	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	val = MVNETA_ACC_MODE_EXT;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);

	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
	val |= MVNETA_DESC_SWAP;
#endif

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
}

/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	/* Set MTU */
	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* TX token size and all TXQs token size must be larger than MTU */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}
	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}
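
/* Worked example (illustrative): last_nibble = 0xb gives
 * tbl_offset = (11 / 4) * 4 = 8 (the third 32-bit table register) and
 * reg_offset = 11 % 4 = 3 (the most significant byte of that register).
 */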

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}
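
/* Packing example (illustrative): for the address 00:11:22:33:44:55,
 * mac_h = 0x00112233 and mac_l = 0x4455.
 */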

/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
	rxq->pkts_coal = value;
}

/* Set the time delay in usec before RX interrupt will be generated by
 * HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	u32 val;
	unsigned long clk_rate;

	clk_rate = clk_get_rate(pp->clk);
	val = (clk_rate / 1000000) * value;

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
	rxq->time_coal = value;
}
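
/* Worked example (illustrative): with a 250 MHz core clock and
 * value = 100 usec, val = (250000000 / 1000000) * 100 = 25000 clock
 * cycles written to the time-coalescing register.
 */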

/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
					 struct mvneta_tx_queue *txq, u32 value)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);

	txq->done_pkts_coal = value;
}

/* Trigger tx done timer in MVNETA_TX_DONE_TIMER_PERIOD msecs */
static void mvneta_add_tx_done_timer(struct mvneta_port *pp)
{
	if (test_and_set_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags) == 0) {
		pp->tx_done_timer.expires = jiffies +
			msecs_to_jiffies(MVNETA_TX_DONE_TIMER_PERIOD);
		add_timer(&pp->tx_done_timer);
	}
}

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, u32 cookie)
{
	rx_desc->buf_cookie = cookie;
	rx_desc->buf_phys_addr = phys_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
{
	int sent_desc;

	/* Get number of sent descriptors */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

	/* Decrement sent descriptors counter */
	if (sent_desc)
		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return sent_desc;
}

/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation
	 */
	command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

	if (l3_proto == swab16(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |= MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
}
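
/* Worked example (illustrative): an IPv4/TCP packet with the L3 header
 * at offset 14 and a 20-byte IP header (ip_hdr_len = 5, in 32-bit
 * words, as passed by mvneta_skb_tx_csum() below) yields
 * command = (14 << 0) | (5 << 8) | MVNETA_TXD_IP_CSUM |
 *           MVNETA_TX_L4_CSUM_FULL.
 */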

/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	if (!mvneta_rxq_desc_is_first_last(rx_desc)) {
		netdev_err(pp->dev,
			   "bad rx status %08x (buffer oversize), size=%d\n",
			   rx_desc->status, rx_desc->data_size);
		return;
	}

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}

/* Handle RX checksum offload */
static void mvneta_rx_csum(struct mvneta_port *pp,
			   struct mvneta_rx_desc *rx_desc,
			   struct sk_buff *skb)
{
	if ((rx_desc->status & MVNETA_RXD_L3_IP4) &&
	    (rx_desc->status & MVNETA_RXD_L4_CSUM_OK)) {
		skb->csum = 0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

	skb->ip_summed = CHECKSUM_NONE;
}

/* Return tx queue pointer (find last set bit) according to causeTxDone reg */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
						     u32 cause)
{
	int queue = fls(cause) - 1;

	return (queue < 0 || queue >= txq_number) ? NULL : &pp->txqs[queue];
}

/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvneta_tx_desc *tx_desc = txq->descs +
			txq->txq_get_index;
		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];

		mvneta_txq_inc_get(txq);

		if (!skb)
			continue;

		dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
				 tx_desc->data_size, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

/* Handle end of transmission */
static int mvneta_txq_done(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done;

	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
	if (tx_done == 0)
		return tx_done;
	mvneta_txq_bufs_free(pp, txq, tx_done);

	txq->count -= tx_done;

	if (netif_tx_queue_stopped(nq)) {
		if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
	}

	return tx_done;
}

/* Refill processing */
static int mvneta_rx_refill(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	dma_addr_t phys_addr;
	struct sk_buff *skb;

	skb = netdev_alloc_skb(pp->dev, pp->pkt_size);
	if (!skb)
		return -ENOMEM;

	phys_addr = dma_map_single(pp->dev->dev.parent, skb->head,
				   MVNETA_RX_BUF_SIZE(pp->pkt_size),
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);

	return 0;
}

/* Handle tx checksum */
static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else
			return MVNETA_TX_L4_CSUM_NOT;

		return mvneta_txq_desc_csum(skb_network_offset(skb),
				skb->protocol, ip_hdr_len, l4_proto);
	}

	return MVNETA_TX_L4_CSUM_NOT;
}

/* Returns rx queue pointer (find last set bit) according to causeRxTx
 * value
 */
static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
						u32 cause)
{
	int queue = fls(cause >> 8) - 1;

	return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
}

/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	int rx_done, i;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
	for (i = 0; i < rxq->size; i++) {
		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
		struct sk_buff *skb = (struct sk_buff *)rx_desc->buf_cookie;

		dev_kfree_skb_any(skb);
		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
				 rx_desc->data_size, DMA_FROM_DEVICE);
	}

	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
}

/* Main rx processing */
static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
		     struct mvneta_rx_queue *rxq)
{
	struct net_device *dev = pp->dev;
	int rx_done, rx_filled;

	/* Get number of received packets */
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_todo > rx_done)
		rx_todo = rx_done;

	rx_done = 0;
	rx_filled = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct sk_buff *skb;
		u32 rx_status;
		int rx_bytes, err;

		rx_done++;
		rx_filled++;
		rx_status = rx_desc->status;
		skb = (struct sk_buff *)rx_desc->buf_cookie;

		if (!mvneta_rxq_desc_is_first_last(rx_desc) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
			dev->stats.rx_errors++;
			mvneta_rx_error(pp, rx_desc);
			mvneta_rx_desc_fill(rx_desc, rx_desc->buf_phys_addr,
					    (u32)skb);
			continue;
		}

		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
				 rx_desc->data_size, DMA_FROM_DEVICE);

		rx_bytes = rx_desc->data_size -
			(ETH_FCS_LEN + MVNETA_MH_SIZE);
		u64_stats_update_begin(&pp->rx_stats.syncp);
		pp->rx_stats.packets++;
		pp->rx_stats.bytes += rx_bytes;
		u64_stats_update_end(&pp->rx_stats.syncp);

		/* Linux processing */
		skb_reserve(skb, MVNETA_MH_SIZE);
		skb_put(skb, rx_bytes);

		skb->protocol = eth_type_trans(skb, dev);

		mvneta_rx_csum(pp, rx_desc, skb);

		napi_gro_receive(&pp->napi, skb);

		/* Refill processing */
		err = mvneta_rx_refill(pp, rx_desc);
		if (err) {
			netdev_err(pp->dev, "Linux processing - Can't refill\n");
			rxq->missed++;
			rx_filled--;
		}
	}

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);

	return rx_done;
}

/* Handle tx fragmentation processing */
static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
				  struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvneta_txq_next_desc_get(txq);
		tx_desc->data_size = frag->size;

		tx_desc->buf_phys_addr =
			dma_map_single(pp->dev->dev.parent, addr,
				       tx_desc->data_size, DMA_TO_DEVICE);

		if (dma_mapping_error(pp->dev->dev.parent,
				      tx_desc->buf_phys_addr)) {
			mvneta_txq_desc_put(txq);
			goto error;
		}

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;

			txq->tx_skb[txq->txq_put_index] = skb;

			mvneta_txq_inc_put(txq);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;

			txq->tx_skb[txq->txq_put_index] = NULL;
			mvneta_txq_inc_put(txq);
		}
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		dma_unmap_single(pp->dev->dev.parent,
				 tx_desc->buf_phys_addr,
				 tx_desc->data_size,
				 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}

	return -ENOMEM;
}

/* Main tx processing */
static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u16 txq_id = skb_get_queue_mapping(skb);
	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
	struct mvneta_tx_desc *tx_desc;
	struct netdev_queue *nq;
	int frags = 0;
	u32 tx_cmd;

	if (!netif_running(dev))
		goto out;

	frags = skb_shinfo(skb)->nr_frags + 1;
	nq = netdev_get_tx_queue(dev, txq_id);

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvneta_txq_next_desc_get(txq);

	tx_cmd = mvneta_skb_tx_csum(pp, skb);

	tx_desc->data_size = skb_headlen(skb);

	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
						tx_desc->data_size,
						DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVNETA_TXD_FLZ_DESC;
		tx_desc->command = tx_cmd;
		txq->tx_skb[txq->txq_put_index] = skb;
		mvneta_txq_inc_put(txq);
	} else {
		/* First but not Last */
		tx_cmd |= MVNETA_TXD_F_DESC;
		txq->tx_skb[txq->txq_put_index] = NULL;
		mvneta_txq_inc_put(txq);
		tx_desc->command = tx_cmd;
		/* Continue with other skb fragments */
		if (mvneta_tx_frag_process(pp, skb, txq)) {
			dma_unmap_single(dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
			mvneta_txq_desc_put(txq);
			frags = 0;
			goto out;
		}
	}

	txq->count += frags;
	mvneta_txq_pend_desc_add(pp, txq, frags);

	if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
		netif_tx_stop_queue(nq);

out:
	if (frags > 0) {
		u64_stats_update_begin(&pp->tx_stats.syncp);
		pp->tx_stats.packets++;
		pp->tx_stats.bytes += skb->len;
		u64_stats_update_end(&pp->tx_stats.syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	if (txq->count >= MVNETA_TXDONE_COAL_PKTS)
		mvneta_txq_done(pp, txq);

	/* If after calling mvneta_txq_done, count equals
	 * frags, we need to set the timer
	 */
	if (txq->count == frags && frags > 0)
		mvneta_add_tx_done_timer(pp);

	return NETDEV_TX_OK;
}

/* Free tx resources, when resetting a port */
static void mvneta_txq_done_force(struct mvneta_port *pp,
				  struct mvneta_tx_queue *txq)
{
	int tx_done = txq->count;

	mvneta_txq_bufs_free(pp, txq, tx_done);

	/* reset txq */
	txq->count = 0;
	txq->txq_put_index = 0;
	txq->txq_get_index = 0;
}

/* handle tx done - called from tx done timer callback */
static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done,
			      int *tx_todo)
{
	struct mvneta_tx_queue *txq;
	u32 tx_done = 0;
	struct netdev_queue *nq;

	*tx_todo = 0;
	while (cause_tx_done != 0) {
		txq = mvneta_tx_done_policy(pp, cause_tx_done);
		if (!txq)
			break;

		nq = netdev_get_tx_queue(pp->dev, txq->id);
		__netif_tx_lock(nq, smp_processor_id());

		if (txq->count)
			tx_done += mvneta_txq_done(pp, txq);

		*tx_todo += txq->count;

		__netif_tx_unlock(nq);
		cause_tx_done &= ~((1 << txq->id));
	}

	return tx_done;
}

/* Compute crc8 of the specified address, using a unique algorithm per
 * the hw spec that differs from the generic crc8 algorithm
 */
static int mvneta_addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i, j;

	for (i = 0; i < ETH_ALEN; i++) {
		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc = crc ^ (0x107 << j);
		}
	}

	return crc;
}

/* This method controls the net device special MAC multicast support.
 * The Special Multicast Table for MAC addresses supports MAC of the form
 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 * Table entries in the DA-Filter table. This method sets the Special
 * Multicast Table appropriate entry.
 */
static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
					  unsigned char last_byte,
					  int queue)
{
	unsigned int smc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Register offset from SMC table base */
	tbl_offset = (last_byte / 4);
	/* Entry offset within the above reg */
	reg_offset = last_byte % 4;

	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
					+ tbl_offset * 4));

	if (queue == -1)
		smc_table_reg &= ~(0xff << (8 * reg_offset));
	else {
		smc_table_reg &= ~(0xff << (8 * reg_offset));
		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
		    smc_table_reg);
}
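
/* Worked example (illustrative): for 01:00:5e:00:00:2a, last_byte = 0x2a
 * (42) selects table register 42 / 4 = 10 (byte offset 40), entry byte
 * 42 % 4 = 2 within that register.
 */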

/* This method controls the network device Other MAC multicast support.
 * The Other Multicast Table is used for multicast of another type.
 * A CRC-8 is used as an index to the Other Multicast Table entries
 * in the DA-Filter table.
 * The method gets the CRC-8 value from the calling routine and
 * sets the appropriate Other Multicast Table entry according to the
 * specified CRC-8.
 */
static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
					unsigned char crc8,
					int queue)
{
	unsigned int omc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
	reg_offset = crc8 % 4;	     /* Entry offset within the above reg   */

	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);

	if (queue == -1) {
		/* Clear accepts frame bit at specified Other DA table entry */
		omc_table_reg &= ~(0xff << (8 * reg_offset));
	} else {
		omc_table_reg &= ~(0xff << (8 * reg_offset));
		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
}

/* The network device supports multicast using two tables:
 *    1) Special Multicast Table for MAC addresses of the form
 *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 *       Table entries in the DA-Filter table.
 *    2) Other Multicast Table for multicast of another type. A CRC-8 value
 *       is used as an index to the Other Multicast Table entries in the
 *       DA-Filter table.
 */
static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
				 int queue)
{
	unsigned char crc_result = 0;

	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
		return 0;
	}

	crc_result = mvneta_addr_crc(p_addr);
	if (queue == -1) {
		if (pp->mcast_count[crc_result] == 0) {
			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
				    crc_result);
			return -EINVAL;
		}

		pp->mcast_count[crc_result]--;
		if (pp->mcast_count[crc_result] != 0) {
			netdev_info(pp->dev,
				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
				    pp->mcast_count[crc_result], crc_result);
			return -EINVAL;
		}
	} else
		pp->mcast_count[crc_result]++;

	mvneta_set_other_mcast_addr(pp, crc_result, queue);

	return 0;
}

/* Configure Filtering mode of Ethernet port */
static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
					  int is_promisc)
{
	u32 port_cfg_reg, val;

	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);

	val = mvreg_read(pp, MVNETA_TYPE_PRIO);

	/* Set / Clear UPM bit in port configuration register */
	if (is_promisc) {
		/* Accept all Unicast addresses */
		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
		val |= MVNETA_FORCE_UNI;
		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
	} else {
		/* Reject all Unicast addresses */
		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
		val &= ~MVNETA_FORCE_UNI;
	}

	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
}

/* register unicast and multicast addresses */
static void mvneta_set_rx_mode(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {
		/* Accept all: Multicast + Unicast */
		mvneta_rx_unicast_promisc_set(pp, 1);
		mvneta_set_ucast_table(pp, rxq_def);
		mvneta_set_special_mcast_table(pp, rxq_def);
		mvneta_set_other_mcast_table(pp, rxq_def);
	} else {
		/* Accept single Unicast */
		mvneta_rx_unicast_promisc_set(pp, 0);
		mvneta_set_ucast_table(pp, -1);
		mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);

		if (dev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			mvneta_set_special_mcast_table(pp, rxq_def);
			mvneta_set_other_mcast_table(pp, rxq_def);
		} else {
			/* Accept only initialized multicast */
			mvneta_set_special_mcast_table(pp, -1);
			mvneta_set_other_mcast_table(pp, -1);

			if (!netdev_mc_empty(dev)) {
				netdev_for_each_mc_addr(ha, dev) {
					mvneta_mcast_addr_set(pp, ha->addr,
							      rxq_def);
				}
			}
		}
	}
}

/* Interrupt handling - the callback for request_irq() */
static irqreturn_t mvneta_isr(int irq, void *dev_id)
{
	struct mvneta_port *pp = (struct mvneta_port *)dev_id;

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);

	napi_schedule(&pp->napi);

	return IRQ_HANDLED;
}

/* NAPI handler
 * Bits 0 - 7 of the causeRxTx register indicate that packets were
 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
 * Bits 8 - 15 of the causeRxTx register indicate that packets were
 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
 * Each CPU has its own causeRxTx register
 */
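
/* Illustrative example: a causeRxTx value of 0x00000300 has bits 8 and
 * 9 set, i.e. packets are pending on RX queues 0 and 1, and
 * mvneta_rx_policy() (fls-based) returns RX queue 1 first.
 */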
static int mvneta_poll(struct napi_struct *napi, int budget)
{
	int rx_done = 0;
	u32 cause_rx_tx;
	unsigned long flags;
	struct mvneta_port *pp = netdev_priv(napi->dev);

	if (!netif_running(pp->dev)) {
		napi_complete(napi);
		return rx_done;
	}

	/* Read cause register */
	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
		MVNETA_RX_INTR_MASK(rxq_number);

	/* For the case where the last mvneta_poll did not process all
	 * RX packets
	 */
	cause_rx_tx |= pp->cause_rx_tx;
	if (rxq_number > 1) {
		while ((cause_rx_tx != 0) && (budget > 0)) {
			int count;
			struct mvneta_rx_queue *rxq;
			/* get rx queue number from cause_rx_tx */
			rxq = mvneta_rx_policy(pp, cause_rx_tx);
			if (!rxq)
				break;

			/* process the packet in that rx queue */
			count = mvneta_rx(pp, budget, rxq);
			rx_done += count;
			budget -= count;
			if (budget > 0) {
				/* clear the rx bit of the
				 * corresponding queue in the cause rx
				 * tx register, so that the next iteration
				 * will find the next rx queue where
				 * packets were received
				 */
				cause_rx_tx &= ~((1 << rxq->id) << 8);
			}
		}
	} else {
		rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
		budget -= rx_done;
	}

	if (budget > 0) {
		cause_rx_tx = 0;
		napi_complete(napi);
		local_irq_save(flags);
		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
			    MVNETA_RX_INTR_MASK(rxq_number));
		local_irq_restore(flags);
	}

	pp->cause_rx_tx = cause_rx_tx;
	return rx_done;
}

/* tx done timer callback */
static void mvneta_tx_done_timer_callback(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct mvneta_port *pp = netdev_priv(dev);
	int tx_done = 0, tx_todo = 0;

	if (!netif_running(dev))
		return;

	clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);

	tx_done = mvneta_tx_done_gbe(pp,
				     (((1 << txq_number) - 1) &
				      MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK),
				     &tx_todo);
	if (tx_done < tx_todo)
		mvneta_add_tx_done_timer(pp);
}

/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{
	struct net_device *dev = pp->dev;
	int i;

	for (i = 0; i < num; i++) {
		struct sk_buff *skb;
		struct mvneta_rx_desc *rx_desc;
		unsigned long phys_addr;

		skb = dev_alloc_skb(pp->pkt_size);
		if (!skb) {
			netdev_err(dev, "%s:rxq %d, %d of %d buffs filled\n",
				   __func__, rxq->id, i, num);
			break;
		}

		rx_desc = rxq->descs + i;
		memset(rx_desc, 0, sizeof(struct mvneta_rx_desc));
		phys_addr = dma_map_single(dev->dev.parent, skb->head,
					   MVNETA_RX_BUF_SIZE(pp->pkt_size),
					   DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev.parent, phys_addr))) {
			dev_kfree_skb(skb);
			break;
		}

		mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
	}

	/* Add this number of RX descriptors as non occupied (ready to
	 * get packets)
	 */
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);

	return i;
}

/* Free all packets pending transmit from all TXQs and reset TX port */
static void mvneta_tx_reset(struct mvneta_port *pp)
{
	int queue;

	/* free the skbs in the hal tx ring */
	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_done_force(pp, &pp->txqs[queue]);

	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
}

static void mvneta_rx_reset(struct mvneta_port *pp)
{
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
}

/* Rx/Tx queue initialization/cleanup methods */

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)
{
	rxq->size = pp->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
	if (rxq->descs == NULL)
		return -ENOMEM;

	BUG_ON(rxq->descs !=
	       PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));

	rxq->last_desc = rxq->size - 1;

	/* Set Rx descriptors queue starting address */
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

	/* Set Offset */
	mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);

	/* Fill RXQ with buffers from RX pool */
	mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
	mvneta_rxq_bm_disable(pp, rxq);
	mvneta_rxq_fill(pp, rxq, rxq->size);

	return 0;
}

/* Cleanup Rx queue */
static void mvneta_rxq_deinit(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	mvneta_rxq_drop_pkts(pp, rxq);

	if (rxq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
				  rxq->descs, rxq->descs_phys);

	rxq->descs             = NULL;
	rxq->last_desc         = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys        = 0;
}

/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	txq->size = pp->tx_ring_size;

	/* Allocate memory for TX descriptors */
	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					txq->size * MVNETA_DESC_ALIGNED_SIZE,
					&txq->descs_phys, GFP_KERNEL);
	if (txq->descs == NULL)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(txq->descs !=
	       PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));

	txq->last_desc = txq->size - 1;

	/* Set maximum bandwidth for enabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);

	/* Set Tx descriptors queue starting address */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);

	txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
	if (txq->tx_skb == NULL) {
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);
		return -ENOMEM;
	}
	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);

	return 0;
}

/* Free allocated resources when mvneta_txq_init() fails to allocate memory */
static void mvneta_txq_deinit(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	kfree(txq->tx_skb);

	if (txq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);

	txq->descs             = NULL;
	txq->last_desc         = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys        = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}

/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_deinit(pp, &pp->txqs[queue]);
}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}

/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
		if (err) {
			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
				   __func__, queue);
			mvneta_cleanup_rxqs(pp);
			return err;
		}
	}

	return 0;
}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
		if (err) {
			netdev_err(pp->dev, "%s: can't create txq=%d\n",
				   __func__, queue);
			mvneta_cleanup_txqs(pp);
			return err;
		}
	}

	return 0;
}

static void mvneta_start_dev(struct mvneta_port *pp)
{
	mvneta_max_rx_size_set(pp, pp->pkt_size);
	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);

	/* start the Rx/Tx activity */
	mvneta_port_enable(pp);

	/* Enable polling on the port */
	napi_enable(&pp->napi);

	/* Unmask interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
		    MVNETA_RX_INTR_MASK(rxq_number));

	phy_start(pp->phy_dev);
	netif_tx_start_all_queues(pp->dev);
}


static void mvneta_stop_dev(struct mvneta_port *pp)
{
	phy_stop(pp->phy_dev);

	napi_disable(&pp->napi);

	netif_carrier_off(pp->dev);

	mvneta_port_down(pp);
	netif_tx_stop_all_queues(pp->dev);

	/* Stop the port activity */
	mvneta_port_disable(pp);

	/* Clear all ethernet port interrupts */
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);

	/* Mask all ethernet port interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);

	mvneta_tx_reset(pp);
	mvneta_rx_reset(pp);
}
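
/*
 * Note the tear-down order above: quiesce the PHY and NAPI first, then
 * stop the MAC, and only then clear/mask interrupts and reset the DMA
 * engines. mvneta_start_dev() brings the port up in broadly the
 * reverse order.
 */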

/* tx timeout callback - display a message and stop/start the network device */
static void mvneta_tx_timeout(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);

	netdev_info(dev, "tx timeout\n");
	mvneta_stop_dev(pp);
	mvneta_start_dev(pp);
}

/* Return positive if MTU is valid */
static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
{
	if (mtu < 68) {
		netdev_err(dev, "cannot change mtu to less than 68\n");
		return -EINVAL;
	}

	/* 9676 == 9700 - 20 and rounding to 8 */
	if (mtu > 9676) {
		netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
		mtu = 9676;
	}

	if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
			    mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
	}

	return mtu;
}
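
/*
 * Illustration of the alignment check above, assuming
 * MVNETA_RX_PKT_SIZE() adds the header/FCS overhead the port must
 * receive on top of the MTU: if that total came to, say, 1534 bytes,
 * it would be rounded up to 1536, the next multiple of 8, since the
 * controller programs RX buffer sizes in 8-byte granularity.
 */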

/* Change the device mtu */
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	mtu = mvneta_check_mtu_valid(dev, mtu);
	if (mtu < 0)
		return -EINVAL;

	dev->mtu = mtu;

	if (!netif_running(dev))
		return 0;

	/* The interface is running, so we have to force a
	 * reallocation of the RXQs
	 */
	mvneta_stop_dev(pp);

	mvneta_cleanup_txqs(pp);
	mvneta_cleanup_rxqs(pp);

	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);

	ret = mvneta_setup_rxqs(pp);
	if (ret) {
		netdev_err(pp->dev, "unable to setup rxqs after MTU change\n");
		return ret;
	}

	ret = mvneta_setup_txqs(pp);
	if (ret) {
		netdev_err(pp->dev, "unable to setup txqs after MTU change\n");
		return ret;
	}

	mvneta_start_dev(pp);

	return 0;
}

/* Get mac address */
static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_h;

	mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
	mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = (mac_addr_l >> 8) & 0xFF;
	addr[5] = mac_addr_l & 0xFF;
}
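
/*
 * Example of the register packing read back above: for the address
 * 00:50:43:02:03:04, MVNETA_MAC_ADDR_HIGH holds 0x00504302 and the
 * low 16 bits of MVNETA_MAC_ADDR_LOW hold 0x0304.
 */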

/* Handle setting mac address */
static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u8 *mac = addr + 2;	/* skip the 2-byte sa_family field of struct sockaddr */
	int i;

	if (netif_running(dev))
		return -EBUSY;

	/* Remove previous address table entry */
	mvneta_mac_addr_set(pp, dev->dev_addr, -1);

	/* Set new addr in hw */
	mvneta_mac_addr_set(pp, mac, rxq_def);

	/* Set addr in the device */
	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[i] = mac[i];
	return 0;
}
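
/*
 * PHY library callback, invoked on every PHY state change: mirror the
 * PHY's negotiated speed/duplex into the MAC configuration and force
 * the MAC link state to follow the PHY link state.
 */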
static void mvneta_adjust_link(struct net_device *ndev)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	struct phy_device *phydev = pp->phy_dev;
	int status_change = 0;

	if (phydev->link) {
		if ((pp->speed != phydev->speed) ||
		    (pp->duplex != phydev->duplex)) {
			u32 val;

			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
				 MVNETA_GMAC_CONFIG_GMII_SPEED |
				 MVNETA_GMAC_CONFIG_FULL_DUPLEX);

			if (phydev->duplex)
				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
			else
				val |= MVNETA_GMAC_CONFIG_MII_SPEED;

			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

			pp->duplex = phydev->duplex;
			pp->speed  = phydev->speed;
		}
	}

	if (phydev->link != pp->link) {
		if (!phydev->link) {
			pp->duplex = -1;
			pp->speed = 0;
		}

		pp->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val |= (MVNETA_GMAC_FORCE_LINK_PASS |
				MVNETA_GMAC_FORCE_LINK_DOWN);
			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
			mvneta_port_up(pp);
			netdev_info(pp->dev, "link up\n");
		} else {
			mvneta_port_down(pp);
			netdev_info(pp->dev, "link down\n");
		}
	}
}

static int mvneta_mdio_probe(struct mvneta_port *pp)
{
	struct phy_device *phy_dev;

	phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
				 pp->phy_interface);
	if (!phy_dev) {
		netdev_err(pp->dev, "could not find the PHY\n");
		return -ENODEV;
	}

	phy_dev->supported &= PHY_GBIT_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	pp->phy_dev = phy_dev;
	pp->link    = 0;
	pp->duplex  = 0;
	pp->speed   = 0;

	return 0;
}

static void mvneta_mdio_remove(struct mvneta_port *pp)
{
	phy_disconnect(pp->phy_dev);
	pp->phy_dev = NULL;
}

static int mvneta_open(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);

	ret = mvneta_setup_rxqs(pp);
	if (ret)
		return ret;

	ret = mvneta_setup_txqs(pp);
	if (ret)
		goto err_cleanup_rxqs;

	/* Connect to port interrupt line */
	ret = request_irq(pp->dev->irq, mvneta_isr, 0,
			  MVNETA_DRIVER_NAME, pp);
	if (ret) {
		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
		goto err_cleanup_txqs;
	}

	/* The link is down by default */
	netif_carrier_off(pp->dev);

	ret = mvneta_mdio_probe(pp);
	if (ret < 0) {
		netdev_err(dev, "cannot probe MDIO bus\n");
		goto err_free_irq;
	}
	mvneta_start_dev(pp);

	return 0;

err_free_irq:
	free_irq(pp->dev->irq, pp);
err_cleanup_txqs:
	mvneta_cleanup_txqs(pp);
err_cleanup_rxqs:
	mvneta_cleanup_rxqs(pp);
	return ret;
}

/* Stop the port, free port interrupt line */
static int mvneta_stop(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);

	mvneta_stop_dev(pp);
	mvneta_mdio_remove(pp);
	free_irq(dev->irq, pp);
	mvneta_cleanup_rxqs(pp);
	mvneta_cleanup_txqs(pp);
	del_timer(&pp->tx_done_timer);
	clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);

	return 0;
}

/* Ethtool methods */

/* Get link settings (PHY address, speed) for ethtool */
int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->phy_dev)
		return -ENODEV;

	return phy_ethtool_gset(pp->phy_dev, cmd);
}

/* Set link settings (PHY address, speed) for ethtool */
int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->phy_dev)
		return -ENODEV;

	return phy_ethtool_sset(pp->phy_dev, cmd);
}

/* Set interrupt coalescing for ethtool */
static int mvneta_ethtool_set_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->done_pkts_coal = c->tx_max_coalesced_frames;
		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
	}

	return 0;
}

/* Get interrupt coalescing for ethtool */
static int mvneta_ethtool_get_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);

	c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
	c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;

	c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
	return 0;
}
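
/*
 * Both hooks back "ethtool -c/-C". For example (illustrative):
 *	ethtool -C eth0 rx-usecs 100 rx-frames 32 tx-frames 16
 * programs every RXQ and TXQ with the same thresholds, while
 * "ethtool -c" reports queue 0's values on behalf of the whole port.
 */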

static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
				       struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}

static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
					 struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(netdev);

	ring->rx_max_pending = MVNETA_MAX_RXD;
	ring->tx_max_pending = MVNETA_MAX_TXD;
	ring->rx_pending = pp->rx_ring_size;
	ring->tx_pending = pp->tx_ring_size;
}

static int mvneta_ethtool_set_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
		return -EINVAL;
	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
		ring->rx_pending : MVNETA_MAX_RXD;
	pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ?
		ring->tx_pending : MVNETA_MAX_TXD;

	if (netif_running(dev)) {
		mvneta_stop(dev);
		if (mvneta_open(dev)) {
			netdev_err(dev,
				   "error on opening device after ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static const struct net_device_ops mvneta_netdev_ops = {
	.ndo_open            = mvneta_open,
	.ndo_stop            = mvneta_stop,
	.ndo_start_xmit      = mvneta_tx,
	.ndo_set_rx_mode     = mvneta_set_rx_mode,
	.ndo_set_mac_address = mvneta_set_mac_addr,
	.ndo_change_mtu      = mvneta_change_mtu,
	.ndo_tx_timeout      = mvneta_tx_timeout,
	.ndo_get_stats64     = mvneta_get_stats64,
};

const struct ethtool_ops mvneta_eth_tool_ops = {
	.get_link       = ethtool_op_get_link,
	.get_settings   = mvneta_ethtool_get_settings,
	.set_settings   = mvneta_ethtool_set_settings,
	.set_coalesce   = mvneta_ethtool_set_coalesce,
	.get_coalesce   = mvneta_ethtool_get_coalesce,
	.get_drvinfo    = mvneta_ethtool_get_drvinfo,
	.get_ringparam  = mvneta_ethtool_get_ringparam,
	.set_ringparam  = mvneta_ethtool_set_ringparam,
};

/* Initialize hw */
static int mvneta_init(struct mvneta_port *pp, int phy_addr)
{
	int queue;

	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
			   GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->id = queue;
		txq->size = pp->tx_ring_size;
		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
	}

	pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
			   GFP_KERNEL);
	if (!pp->rxqs) {
		kfree(pp->txqs);
		return -ENOMEM;
	}

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
		rxq->time_coal = MVNETA_RX_COAL_USEC;
	}

	return 0;
}

/* Cleanup Ethernet port */
static void mvneta_deinit(struct mvneta_port *pp)
{
	kfree(pp->txqs);
	kfree(pp->rxqs);
}

/* platform glue : initialize decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
				     const struct mbus_dram_target_info *dram)
{
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;
		mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
			    (cs->mbus_attr << 8) | dram->mbus_dram_target_id);

		mvreg_write(pp, MVNETA_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
}
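
/*
 * Example of the size encoding above: a 256 MB chip select gives
 * (0x10000000 - 1) & 0xffff0000 = 0x0fff0000, i.e. the window size is
 * kept in the upper 16 bits of the register in 64 KB granularity.
 */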

/* Power up the port */
static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	u32 val;

	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	if (phy_mode == PHY_INTERFACE_MODE_SGMII)
		mvneta_port_sgmii_config(pp);

	mvneta_gmac_rgmii_set(pp, 1);

	/* Cancel Port Reset */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
	val &= ~MVNETA_GMAC2_PORT_RESET;
	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);

	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
		MVNETA_GMAC2_PORT_RESET) != 0)
		continue;
}

/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram_target_info;
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *phy_node;
	u32 phy_addr;
	struct mvneta_port *pp;
	struct net_device *dev;
	const char *dt_mac_addr;
	char hw_mac_addr[ETH_ALEN];
	const char *mac_from;
	int phy_mode;
	int err;

	/* Our multiqueue support is not complete, so for now, only
	 * allow the usage of the first RX queue
	 */
	if (rxq_def != 0) {
		dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
		return -EINVAL;
	}

	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
	if (!dev)
		return -ENOMEM;

	dev->irq = irq_of_parse_and_map(dn, 0);
	if (dev->irq == 0) {
		err = -EINVAL;
		goto err_free_netdev;
	}

	phy_node = of_parse_phandle(dn, "phy", 0);
	if (!phy_node) {
		dev_err(&pdev->dev, "no associated PHY\n");
		err = -ENODEV;
		goto err_free_irq;
	}

	phy_mode = of_get_phy_mode(dn);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy-mode\n");
		err = -EINVAL;
		goto err_free_irq;
	}

	dev->tx_queue_len = MVNETA_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvneta_netdev_ops;
	SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);

	pp = netdev_priv(dev);
	pp->weight = MVNETA_RX_POLL_WEIGHT;
	pp->phy_node = phy_node;
	pp->phy_interface = phy_mode;

	pp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pp->clk)) {
		err = PTR_ERR(pp->clk);
		goto err_free_irq;
	}
	clk_prepare_enable(pp->clk);

	pp->base = of_iomap(dn, 0);
	if (pp->base == NULL) {
		err = -ENOMEM;
		goto err_clk;
	}

	dt_mac_addr = of_get_mac_address(dn);
	if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
		mac_from = "device tree";
		memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
	} else {
		mvneta_get_mac_addr(pp, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	pp->tx_done_timer.data = (unsigned long)dev;
	pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
	init_timer(&pp->tx_done_timer);
	clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	pp->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvneta_init(pp, phy_addr);
	if (err < 0) {
		dev_err(&pdev->dev, "can't init eth hal\n");
		goto err_unmap;
	}
	mvneta_port_power_up(pp, phy_mode);

	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvneta_conf_mbus_windows(pp, dram_target_info);

	netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->priv_flags |= IFF_UNICAST_FLT;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		goto err_deinit;
	}

	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
		    dev->dev_addr);

	platform_set_drvdata(pdev, pp->dev);

	return 0;

err_deinit:
	mvneta_deinit(pp);
err_unmap:
	iounmap(pp->base);
err_clk:
	clk_disable_unprepare(pp->clk);
err_free_irq:
	irq_dispose_mapping(dev->irq);
err_free_netdev:
	free_netdev(dev);
	return err;
}

/* Device removal routine */
static int mvneta_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mvneta_port *pp = netdev_priv(dev);

	unregister_netdev(dev);
	mvneta_deinit(pp);
	clk_disable_unprepare(pp->clk);
	iounmap(pp->base);
	irq_dispose_mapping(dev->irq);
	free_netdev(dev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static const struct of_device_id mvneta_match[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);
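
/*
 * Minimal sketch of a matching device-tree node, inferred from what
 * mvneta_probe() parses ("reg"/"interrupts" via of_iomap() and
 * irq_of_parse_and_map(), a clock, a "phy" phandle, "phy-mode" and an
 * optional MAC address). The unit address, phandles and values below
 * are made-up examples, not a binding reference:
 *
 *	ethernet@70000 {
 *		compatible = "marvell,armada-370-neta";
 *		reg = <0x70000 0x2500>;
 *		interrupts = <8>;
 *		clocks = <&gateclk 4>;
 *		phy = <&phy0>;
 *		phy-mode = "rgmii-id";
 *	};
 */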

static struct platform_driver mvneta_driver = {
	.probe = mvneta_probe,
	.remove = mvneta_remove,
	.driver = {
		.name = MVNETA_DRIVER_NAME,
		.of_match_table = mvneta_match,
	},
};

module_platform_driver(mvneta_driver);

MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");

module_param(rxq_number, int, S_IRUGO);
module_param(txq_number, int, S_IRUGO);

module_param(rxq_def, int, S_IRUGO);
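
/* All three parameters are read-only at runtime (S_IRUGO) but can be
 * set at load time, e.g. (illustrative):
 *	modprobe mvneta rxq_number=1 txq_number=1
 * Note that mvneta_probe() currently rejects any rxq_def other than 0.
 */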