/*******************************************************************************
  This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
  DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
  developing this code.

  Copyright (C) 2007-2009  STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/netdevice.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>

#include "stmmac.h"
#include "gmac.h"
#include "dwmac_dma.h"

#undef GMAC_DEBUG
/*#define GMAC_DEBUG*/
#undef FRAME_FILTER_DEBUG
/*#define FRAME_FILTER_DEBUG*/

#ifdef GMAC_DEBUG
#define DBG(fmt, args...)  printk(fmt, ## args)
#else
#define DBG(fmt, args...)  do { } while (0)
#endif

static void gmac_dump_regs(unsigned long ioaddr)
{
	int i;

	pr_info("\t----------------------------------------------\n"
		"\t  GMAC registers (base addr = 0x%8x)\n"
		"\t----------------------------------------------\n",
		(unsigned int)ioaddr);

	for (i = 0; i < 55; i++) {
		int offset = i * 4;

		pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
			offset, readl(ioaddr + offset));
	}
}

static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
{
	u32 value = readl(ioaddr + DMA_BUS_MODE);

	/* DMA SW reset */
	value |= DMA_BUS_MODE_SFT_RESET;
	writel(value, ioaddr + DMA_BUS_MODE);
	do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
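	/* The loop above busy-waits, with no timeout, until the core
	 * clears DMA_BUS_MODE_SFT_RESET to signal that the soft reset
	 * has completed. */
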
	value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL |
	    ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
	     (pbl << DMA_BUS_MODE_RPBL_SHIFT));
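	/* Note: with DMA_BUS_MODE_4PBL set, the core multiplies the
	 * programmed burst length by four, so e.g. pbl = 8 allows bursts
	 * of up to 32 beats on both the TX (PBL) and RX (RPBL) channels. */
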
#ifdef CONFIG_STMMAC_DA
	value |= DMA_BUS_MODE_DA;	/* Rx has priority over tx */
#endif
	writel(value, ioaddr + DMA_BUS_MODE);

	/* Mask interrupts by writing to CSR7 */
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);

	/* The base address of the RX/TX descriptor lists must be written into
	 * DMA CSR3 and CSR4, respectively. */
	writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
	writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);

	return 0;
}

/* Transmit FIFO flush operation */
static void gmac_flush_tx_fifo(unsigned long ioaddr)
{
	u32 csr6 = readl(ioaddr + DMA_CONTROL);

	writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
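	/* The FTF bit is self-clearing: poll until the core indicates
	 * that the transmit FIFO flush has completed. */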
	do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
}

static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
				    int rxmode)
{
	u32 csr6 = readl(ioaddr + DMA_CONTROL);

	if (txmode == SF_DMA_MODE) {
		DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n");
		/* Transmit COE type 2 cannot be done in cut-through mode. */
		csr6 |= DMA_CONTROL_TSF;
		/* Operating on second frame increases the performance,
		 * especially when transmit store-and-forward is used. */
		csr6 |= DMA_CONTROL_OSF;
	} else {
		DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
			       " (threshold = %d)\n", txmode);
		csr6 &= ~DMA_CONTROL_TSF;
		csr6 &= DMA_CONTROL_TC_TX_MASK;
		/* Set the transmit threshold */
		if (txmode <= 32)
			csr6 |= DMA_CONTROL_TTC_32;
		else if (txmode <= 64)
			csr6 |= DMA_CONTROL_TTC_64;
		else if (txmode <= 128)
			csr6 |= DMA_CONTROL_TTC_128;
		else if (txmode <= 192)
			csr6 |= DMA_CONTROL_TTC_192;
		else
			csr6 |= DMA_CONTROL_TTC_256;
	}
	if (rxmode == SF_DMA_MODE) {
		DBG(KERN_DEBUG "GMAC: enabling RX store and forward mode\n");
		csr6 |= DMA_CONTROL_RSF;
	} else {
		DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
			       " (threshold = %d)\n", rxmode);
		csr6 &= ~DMA_CONTROL_RSF;
		csr6 &= DMA_CONTROL_TC_RX_MASK;
		if (rxmode <= 32)
			csr6 |= DMA_CONTROL_RTC_32;
		else if (rxmode <= 64)
			csr6 |= DMA_CONTROL_RTC_64;
		else if (rxmode <= 96)
			csr6 |= DMA_CONTROL_RTC_96;
		else
			csr6 |= DMA_CONTROL_RTC_128;
	}

	writel(csr6, ioaddr + DMA_CONTROL);
}

/* Not yet implemented --- no RMON module */
static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
				   unsigned long ioaddr)
{
	return;
}

static void gmac_dump_dma_regs(unsigned long ioaddr)
{
	int i;

	pr_info(" DMA registers\n");
	for (i = 0; i < 22; i++) {
		if ((i < 9) || (i > 17)) {
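			/* Registers 9-17 are reserved on this core and
			 * are therefore skipped in the dump. */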
			int offset = i * 4;

			pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i,
			       (DMA_BUS_MODE + offset),
			       readl(ioaddr + DMA_BUS_MODE + offset));
		}
	}
}

static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
				    struct dma_desc *p, unsigned long ioaddr)
{
	int ret = 0;
	struct net_device_stats *stats = (struct net_device_stats *)data;

	if (unlikely(p->des01.etx.error_summary)) {
		DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
		if (unlikely(p->des01.etx.jabber_timeout)) {
			DBG(KERN_ERR "\tjabber_timeout error\n");
			x->tx_jabber++;
		}

		if (unlikely(p->des01.etx.frame_flushed)) {
			DBG(KERN_ERR "\tframe_flushed error\n");
			x->tx_frame_flushed++;
			gmac_flush_tx_fifo(ioaddr);
		}

		if (unlikely(p->des01.etx.loss_carrier)) {
			DBG(KERN_ERR "\tloss_carrier error\n");
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(p->des01.etx.no_carrier)) {
			DBG(KERN_ERR "\tno_carrier error\n");
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(p->des01.etx.late_collision)) {
			DBG(KERN_ERR "\tlate_collision error\n");
			stats->collisions += p->des01.etx.collision_count;
		}
		if (unlikely(p->des01.etx.excessive_collisions)) {
			DBG(KERN_ERR "\texcessive_collisions\n");
			stats->collisions += p->des01.etx.collision_count;
		}
		if (unlikely(p->des01.etx.excessive_deferral)) {
			DBG(KERN_INFO "\texcessive tx_deferral\n");
			x->tx_deferred++;
		}

		if (unlikely(p->des01.etx.underflow_error)) {
			DBG(KERN_ERR "\tunderflow error\n");
			gmac_flush_tx_fifo(ioaddr);
			x->tx_underflow++;
		}

		if (unlikely(p->des01.etx.ip_header_error)) {
			DBG(KERN_ERR "\tTX IP header csum error\n");
			x->tx_ip_header_error++;
		}

		if (unlikely(p->des01.etx.payload_error)) {
			DBG(KERN_ERR "\tAddr/Payload csum error\n");
			x->tx_payload_error++;
			gmac_flush_tx_fifo(ioaddr);
		}

		ret = -1;
	}

	if (unlikely(p->des01.etx.deferred)) {
		DBG(KERN_INFO "GMAC TX status: tx deferred\n");
		x->tx_deferred++;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (p->des01.etx.vlan_frame) {
		DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
		x->tx_vlan++;
	}
#endif

	return ret;
}

static int gmac_get_tx_len(struct dma_desc *p)
{
	return p->des01.etx.buffer1_size;
}

static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
{
	int ret = good_frame;
	u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
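	/* Example: type = 1, ipc_err = 0 and payload_err = 1 combine to
	 * status = 0x5, i.e. an IPv4/6 frame with a payload checksum
	 * error (see the table below). */
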
	/* bits 5 7 0 | Frame status
	 * ----------------------------------------------------------
	 *      0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
	 *      1 0 0 | IPv4/6 No CSUM errorS.
	 *      1 0 1 | IPv4/6 CSUM PAYLOAD error
	 *      1 1 0 | IPv4/6 CSUM IP HR error
	 *      1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS
	 *      0 0 1 | IPv4/6 unsupported IP PAYLOAD
	 *      0 1 1 | COE bypassed.. no IPv4/6 frame
	 *      0 1 0 | Reserved.
	 */
	if (status == 0x0) {
		DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
		ret = good_frame;
	} else if (status == 0x4) {
		DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
		ret = good_frame;
	} else if (status == 0x5) {
		DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
		ret = csum_none;
	} else if (status == 0x6) {
		DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
		ret = csum_none;
	} else if (status == 0x7) {
		DBG(KERN_ERR
		    "RX Des0 status: IPv4/6 Header and Payload Error.\n");
		ret = csum_none;
	} else if (status == 0x1) {
		DBG(KERN_ERR
		    "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
		ret = discard_frame;
	} else if (status == 0x3) {
		DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
		ret = discard_frame;
	}
	return ret;
}

static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
				    struct dma_desc *p)
{
	int ret = good_frame;
	struct net_device_stats *stats = (struct net_device_stats *)data;

	if (unlikely(p->des01.erx.error_summary)) {
		DBG(KERN_ERR "GMAC RX Error Summary... 0x%08x\n", p->des01.erx);
		if (unlikely(p->des01.erx.descriptor_error)) {
			DBG(KERN_ERR "\tdescriptor error\n");
			x->rx_desc++;
			stats->rx_length_errors++;
		}
		if (unlikely(p->des01.erx.overflow_error)) {
			DBG(KERN_ERR "\toverflow error\n");
			x->rx_gmac_overflow++;
		}

		if (unlikely(p->des01.erx.ipc_csum_error))
			DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");

		if (unlikely(p->des01.erx.late_collision)) {
			DBG(KERN_ERR "\tlate_collision error\n");
			stats->collisions++;
		}
		if (unlikely(p->des01.erx.receive_watchdog)) {
			DBG(KERN_ERR "\treceive_watchdog error\n");
			x->rx_watchdog++;
		}
		if (unlikely(p->des01.erx.error_gmii)) {
			DBG(KERN_ERR "\tReceive Error\n");
			x->rx_mii++;
		}
		if (unlikely(p->des01.erx.crc_error)) {
			DBG(KERN_ERR "\tCRC error\n");
			x->rx_crc++;
			stats->rx_crc_errors++;
		}
		ret = discard_frame;
	}

	/* After a payload csum error, the ES bit is set.
	 * It doesn't match with the information reported into the databook.
	 * At any rate, we need to understand if the CSUM hw computation is ok
	 * and report this info to the upper layers. */
	ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error,
		p->des01.erx.frame_type, p->des01.erx.payload_csum_error);

	if (unlikely(p->des01.erx.dribbling)) {
		DBG(KERN_ERR "GMAC RX: dribbling error\n");
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.sa_filter_fail)) {
		DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
		x->sa_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.da_filter_fail)) {
		DBG(KERN_ERR "GMAC RX : Destination Address filter fail\n");
		x->da_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.length_error)) {
		DBG(KERN_ERR "GMAC RX: length_error error\n");
		ret = discard_frame;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (p->des01.erx.vlan_tag) {
		DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
		x->rx_vlan++;
	}
#endif
	return ret;
}

static void gmac_irq_status(unsigned long ioaddr)
{
	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);

	/* Not used events (e.g. MMC interrupts) are not handled. */
	if (unlikely(intr_status & mmc_tx_irq))
		DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
		    readl(ioaddr + GMAC_MMC_TX_INTR));
	if (unlikely(intr_status & mmc_rx_irq))
		DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
		    readl(ioaddr + GMAC_MMC_RX_INTR));
	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
		DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
		    readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
	if (unlikely(intr_status & pmt_irq)) {
		DBG(KERN_DEBUG "GMAC: received Magic frame\n");
		/* clear the PMT bits 5 and 6 by reading the PMT
		 * status register. */
		readl(ioaddr + GMAC_PMT);
	}
}

static void gmac_core_init(unsigned long ioaddr)
{
	u32 value = readl(ioaddr + GMAC_CONTROL);
	value |= GMAC_CORE_INIT;
	writel(value, ioaddr + GMAC_CONTROL);

	/* Freeze MMC counters */
	writel(0x8, ioaddr + GMAC_MMC_CTRL);
	/* Mask GMAC interrupts */
	writel(0x207, ioaddr + GMAC_INT_MASK);

#ifdef STMMAC_VLAN_TAG_USED
	/* Tag detection without filtering */
	writel(0x0, ioaddr + GMAC_VLAN_TAG);
#endif
}

static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
			       unsigned int reg_n)
{
	stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
			    GMAC_ADDR_LOW(reg_n));
}

static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
			       unsigned int reg_n)
{
	stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
			    GMAC_ADDR_LOW(reg_n));
}

static void gmac_set_filter(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	unsigned int value = 0;

	DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
	    __func__, dev->mc_count, dev->uc.count);

	if (dev->flags & IFF_PROMISC)
		value = GMAC_FRAME_FILTER_PR;
	else if ((dev->mc_count > HASH_TABLE_SIZE)
		 || (dev->flags & IFF_ALLMULTI)) {
		value = GMAC_FRAME_FILTER_PM;	/* pass all multi */
		writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
		writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
	} else if (dev->mc_count > 0) {
		int i;
		u32 mc_filter[2];
		struct dev_mc_list *mclist;

		/* Hash filter for multicast */
		value = GMAC_FRAME_FILTER_HMC;

		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list;
		     mclist && i < dev->mc_count; i++, mclist = mclist->next) {
			/* The upper 6 bits of the calculated CRC are used to
			 * index the contents of the hash table. */
			int bit_nr =
			    bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
			/* The most significant bit determines the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register. */
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
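			/* Worked example (hypothetical hash value): if the
			 * reversed CRC gives bit_nr = 47, then 47 >> 5 = 1
			 * selects mc_filter[1] (GMAC_HASH_HIGH) and
			 * 1 << (47 & 31) sets bit 15 of that register. */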
		}
		writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
		writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
	}

	/* Handle multiple unicast addresses (perfect filtering) */
	if (dev->uc.count > GMAC_MAX_UNICAST_ADDRESSES)
		/* Switch to promiscuous mode if more than 16 addrs
		 * are selected */
		value |= GMAC_FRAME_FILTER_PR;
	else {
		int reg = 1;
		struct netdev_hw_addr *ha;

		list_for_each_entry(ha, &dev->uc.list, list) {
			gmac_set_umac_addr(ioaddr, ha->addr, reg);
			reg++;
		}
	}

#ifdef FRAME_FILTER_DEBUG
	/* Enable Receive all mode (to debug filtering_fail errors) */
	value |= GMAC_FRAME_FILTER_RA;
#endif
	writel(value, ioaddr + GMAC_FRAME_FILTER);

	DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
	    "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
	    readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
}

static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
			   unsigned int fc, unsigned int pause_time)
{
	unsigned int flow = 0;

	DBG(KERN_DEBUG "GMAC Flow-Control:\n");
	if (fc & FLOW_RX) {
		DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
		flow |= GMAC_FLOW_CTRL_RFE;
	}
	if (fc & FLOW_TX) {
		DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
		flow |= GMAC_FLOW_CTRL_TFE;
	}

	if (duplex) {
		DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
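		/* The pause time is programmed into the PT field
		 * (upper 16 bits) of the flow control register. */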
		flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
	}

	writel(flow, ioaddr + GMAC_FLOW_CTRL);
}

static void gmac_pmt(unsigned long ioaddr, unsigned long mode)
{
	unsigned int pmt = 0;

	if (mode == WAKE_MAGIC) {
		DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
		pmt |= power_down | magic_pkt_en;
	} else if (mode == WAKE_UCAST) {
		DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
		pmt |= global_unicast;
	}

	writel(pmt, ioaddr + GMAC_PMT);
}

static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
			      int disable_rx_ic)
{
	int i;

	for (i = 0; i < ring_size; i++) {
		p->des01.erx.own = 1;
		p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
		/* To support jumbo frames */
		p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
		if (i == ring_size - 1)
			p->des01.erx.end_ring = 1;
		if (disable_rx_ic)
			p->des01.erx.disable_ic = 1;
		p++;
	}
}

static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
{
	int i;

	for (i = 0; i < ring_size; i++) {
		p->des01.etx.own = 0;
		if (i == ring_size - 1)
			p->des01.etx.end_ring = 1;
		p++;
	}
}

static int gmac_get_tx_owner(struct dma_desc *p)
{
	return p->des01.etx.own;
}

static int gmac_get_rx_owner(struct dma_desc *p)
{
	return p->des01.erx.own;
}

static void gmac_set_tx_owner(struct dma_desc *p)
{
	p->des01.etx.own = 1;
}

static void gmac_set_rx_owner(struct dma_desc *p)
{
	p->des01.erx.own = 1;
}

static int gmac_get_tx_ls(struct dma_desc *p)
{
	return p->des01.etx.last_segment;
}

static void gmac_release_tx_desc(struct dma_desc *p)
{
	int ter = p->des01.etx.end_ring;

	memset(p, 0, sizeof(struct dma_desc));
	/* Preserve the ring-end (wrap) bit across descriptor reuse. */
	p->des01.etx.end_ring = ter;
}

static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				 int csum_flag)
{
	p->des01.etx.first_segment = is_fs;
	if (unlikely(len > BUF_SIZE_4KiB)) {
		p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
		p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
	} else {
		p->des01.etx.buffer1_size = len;
	}
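	/* cic_full asks the core to insert both the IP header checksum
	 * and the payload/pseudo-header checksum (full TX COE). */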
	if (likely(csum_flag))
		p->des01.etx.checksum_insertion = cic_full;
}

static void gmac_clear_tx_ic(struct dma_desc *p)
{
	p->des01.etx.interrupt = 0;
}

static void gmac_close_tx_desc(struct dma_desc *p)
{
	p->des01.etx.last_segment = 1;
	p->des01.etx.interrupt = 1;
}

static int gmac_get_rx_frame_len(struct dma_desc *p)
{
	return p->des01.erx.frame_length;
}

struct stmmac_ops gmac_ops = {
	.core_init = gmac_core_init,
	.dump_regs = gmac_dump_regs,
	.host_irq_status = gmac_irq_status,
	.set_filter = gmac_set_filter,
	.flow_ctrl = gmac_flow_ctrl,
	.pmt = gmac_pmt,
	.set_umac_addr = gmac_set_umac_addr,
	.get_umac_addr = gmac_get_umac_addr,
};

struct stmmac_dma_ops gmac_dma_ops = {
	.init = gmac_dma_init,
	.dump_regs = gmac_dump_dma_regs,
	.dma_mode = gmac_dma_operation_mode,
	.dma_diagnostic_fr = gmac_dma_diagnostic_fr,
	.enable_dma_transmission = dwmac_enable_dma_transmission,
	.enable_dma_irq = dwmac_enable_dma_irq,
	.disable_dma_irq = dwmac_disable_dma_irq,
	.start_tx = dwmac_dma_start_tx,
	.stop_tx = dwmac_dma_stop_tx,
	.start_rx = dwmac_dma_start_rx,
	.stop_rx = dwmac_dma_stop_rx,
	.dma_interrupt = dwmac_dma_interrupt,
};

struct stmmac_desc_ops gmac_desc_ops = {
	.tx_status = gmac_get_tx_frame_status,
	.rx_status = gmac_get_rx_frame_status,
	.get_tx_len = gmac_get_tx_len,
	.init_rx_desc = gmac_init_rx_desc,
	.init_tx_desc = gmac_init_tx_desc,
	.get_tx_owner = gmac_get_tx_owner,
	.get_rx_owner = gmac_get_rx_owner,
	.release_tx_desc = gmac_release_tx_desc,
	.prepare_tx_desc = gmac_prepare_tx_desc,
	.clear_tx_ic = gmac_clear_tx_ic,
	.close_tx_desc = gmac_close_tx_desc,
	.get_tx_ls = gmac_get_tx_ls,
	.set_tx_owner = gmac_set_tx_owner,
	.set_rx_owner = gmac_set_rx_owner,
	.get_rx_frame_len = gmac_get_rx_frame_len,
};

struct mac_device_info *gmac_setup(unsigned long ioaddr)
{
	struct mac_device_info *mac;
	u32 uid = readl(ioaddr + GMAC_VERSION);

	pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n",
		((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
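	/* The version register packs a user-configured ID in bits 15:8
	 * and the Synopsys core ID in bits 7:0 (presumably 0x41 for the
	 * 3.41a core this driver was developed against). */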
	mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);

	mac->mac = &gmac_ops;
	mac->desc = &gmac_desc_ops;
	mac->dma = &gmac_dma_ops;

	mac->pmt = PMT_SUPPORTED;
	mac->link.port = GMAC_CONTROL_PS;
	mac->link.duplex = GMAC_CONTROL_DM;
	mac->link.speed = GMAC_CONTROL_FES;
	mac->mii.addr = GMAC_MII_ADDR;
	mac->mii.data = GMAC_MII_DATA;

	return mac;
}