/*
 *   This program is free software; you can redistribute it and/or modify it
 *   under the terms of the GNU General Public License version 2 as published
 *   by the Free Software Foundation.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 *   Copyright (C) 2011 John Crispin <blogic@openwrt.org>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/platform_device.h>
#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>

#include <asm/checksum.h>

#include <lantiq_soc.h>
#include <xway_dma.h>
#include <lantiq_platform.h>
#define LTQ_ETOP_MDIO		0x11804
#define MDIO_REQUEST		0x80000000
#define MDIO_READ		0x40000000
#define MDIO_ADDR_MASK		0x1f
#define MDIO_ADDR_OFFSET	0x15
#define MDIO_REG_MASK		0x1f
#define MDIO_REG_OFFSET		0x10
#define MDIO_VAL_MASK		0xffff

#define PPE32_CGEN		0x800
#define LQ_PPE32_ENET_MAC_CFG	0x1840

#define LTQ_ETOP_ENETS0		0x11850
#define LTQ_ETOP_MAC_DA0	0x1186C
#define LTQ_ETOP_MAC_DA1	0x11870
#define LTQ_ETOP_CFG		0x16020
#define LTQ_ETOP_IGPLEN		0x16080

#define MAX_DMA_CHAN		0x8
#define MAX_DMA_CRC_LEN		0x4
#define MAX_DMA_DATA_LEN	0x600

#define ETOP_FTCU		BIT(28)
#define ETOP_MII_MASK		0xf
#define ETOP_MII_NORMAL		0xd
#define ETOP_MII_REVERSE	0xe
#define ETOP_PLEN_UNDER		0x40
#define ETOP_CGEN		0x800

/* use 2 static channels for TX/RX */
#define LTQ_ETOP_TX_CHANNEL	1
#define LTQ_ETOP_RX_CHANNEL	6
#define IS_TX(x)		((x) == LTQ_ETOP_TX_CHANNEL)
#define IS_RX(x)		((x) == LTQ_ETOP_RX_CHANNEL)
#define ltq_etop_r32(x)		ltq_r32(ltq_etop_membase + (x))
#define ltq_etop_w32(x, y)	ltq_w32(x, ltq_etop_membase + (y))
#define ltq_etop_w32_mask(x, y, z)	\
		ltq_w32_mask(x, y, ltq_etop_membase + (z))

#define DRV_VERSION	"1.0"

static void __iomem *ltq_etop_membase;
struct ltq_etop_chan {
	int idx;
	int tx_free;
	struct net_device *netdev;
	struct napi_struct napi;
	struct ltq_dma_channel dma;
	struct sk_buff *skb[LTQ_DESC_NUM];
};
struct ltq_etop_priv {
	struct net_device *netdev;
	struct ltq_eth_data *pldata;
	struct resource *res;

	struct mii_bus *mii_bus;
	struct phy_device *phydev;

	struct ltq_etop_chan ch[MAX_DMA_CHAN];
	int tx_free[MAX_DMA_CHAN >> 1];

	spinlock_t lock;
};
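
/*
 * Attach a fresh skb to the current RX descriptor and hand the descriptor
 * back to the DMA engine. Returns -ENOMEM if no skb could be allocated.
 */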
static int
ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
{
	ch->skb[ch->dma.desc] = dev_alloc_skb(MAX_DMA_DATA_LEN);
	if (!ch->skb[ch->dma.desc])
		return -ENOMEM;
	ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
		ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN,
		DMA_FROM_DEVICE);
	ch->dma.desc_base[ch->dma.desc].addr =
		CPHYSADDR(ch->skb[ch->dma.desc]->data);
	ch->dma.desc_base[ch->dma.desc].ctl =
		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
		MAX_DMA_DATA_LEN;
	skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN);
	return 0;
}
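
/*
 * Pass one received frame up the stack: refill the descriptor with a new
 * skb, advance the descriptor ring and hand the old skb to the network core.
 */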
static void
ltq_etop_hw_receive(struct ltq_etop_chan *ch)
{
	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	struct sk_buff *skb = ch->skb[ch->dma.desc];
	int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - MAX_DMA_CRC_LEN;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	if (ltq_etop_alloc_skb(ch)) {
		netdev_err(ch->netdev,
			"failed to allocate new rx buffer, stopping DMA\n");
		ltq_dma_close(&ch->dma);
	}
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;
	spin_unlock_irqrestore(&priv->lock, flags);

	skb_put(skb, len);
	skb->dev = ch->netdev;
	skb->protocol = eth_type_trans(skb, ch->netdev);
	netif_receive_skb(skb);
}
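
/* NAPI RX poll: receive completed descriptors until the budget is used up */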
static int
ltq_etop_poll_rx(struct napi_struct *napi, int budget)
{
	struct ltq_etop_chan *ch = container_of(napi,
				struct ltq_etop_chan, napi);
	int rx = 0;
	int complete = 0;

	while ((rx < budget) && !complete) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			ltq_etop_hw_receive(ch);
			rx++;
		} else {
			complete = 1;
		}
	}
	if (complete || !rx) {
		napi_complete(&ch->napi);
		ltq_dma_ack_irq(&ch->dma);
	}
	return rx;
}
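
/* NAPI TX poll: reclaim completed TX descriptors and wake the stopped queue */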
static int
ltq_etop_poll_tx(struct napi_struct *napi, int budget)
{
	struct ltq_etop_chan *ch =
		container_of(napi, struct ltq_etop_chan, napi);
	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
	struct netdev_queue *txq =
		netdev_get_tx_queue(ch->netdev, ch->idx >> 1);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	while ((ch->dma.desc_base[ch->tx_free].ctl &
			(LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
		dev_kfree_skb_any(ch->skb[ch->tx_free]);
		ch->skb[ch->tx_free] = NULL;
		memset(&ch->dma.desc_base[ch->tx_free], 0,
			sizeof(struct ltq_dma_desc));
		ch->tx_free++;
		ch->tx_free %= LTQ_DESC_NUM;
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (netif_tx_queue_stopped(txq))
		netif_tx_start_queue(txq);
	napi_complete(&ch->napi);
	ltq_dma_ack_irq(&ch->dma);
	return 1;
}
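
/* one DMA interrupt per channel; simply schedule the matching NAPI context */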
static irqreturn_t
ltq_etop_dma_irq(int irq, void *_priv)
{
	struct ltq_etop_priv *priv = _priv;
	int ch = irq - LTQ_DMA_CH0_INT;

	napi_schedule(&priv->ch[ch].napi);
	return IRQ_HANDLED;
}
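
/* tear down one DMA channel and release its IRQ and any pending RX skbs */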
static void
ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);

	ltq_dma_free(&ch->dma);
	if (ch->dma.irq)
		free_irq(ch->dma.irq, priv);
	if (IS_RX(ch->idx)) {
		int desc;

		/* free every RX buffer that is still attached to the ring */
		for (desc = 0; desc < LTQ_DESC_NUM; desc++)
			dev_kfree_skb_any(ch->skb[desc]);
	}
}
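
/* disable the PPE and free both statically assigned DMA channels */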
static void
ltq_etop_hw_exit(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int i;

	ltq_pmu_disable(PMU_PPE);
	for (i = 0; i < MAX_DMA_CHAN; i++)
		if (IS_TX(i) || IS_RX(i))
			ltq_etop_free_channel(dev, &priv->ch[i]);
}
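
/*
 * Bring up the ETOP core: enable the PPE, select the MII/RMII pin mode,
 * enable hardware CRC generation and set up the TX and RX DMA channels.
 */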
static int
ltq_etop_hw_init(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int i;

	ltq_pmu_enable(PMU_PPE);

	switch (priv->pldata->mii_mode) {
	case PHY_INTERFACE_MODE_RMII:
		ltq_etop_w32_mask(ETOP_MII_MASK,
			ETOP_MII_REVERSE, LTQ_ETOP_CFG);
		break;

	case PHY_INTERFACE_MODE_MII:
		ltq_etop_w32_mask(ETOP_MII_MASK,
			ETOP_MII_NORMAL, LTQ_ETOP_CFG);
		break;

	default:
		netdev_err(dev, "unknown mii mode %d\n",
			priv->pldata->mii_mode);
		return -ENOTSUPP;
	}

	/* enable crc generation */
	ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG);

	ltq_dma_init_port(DMA_PORT_ETOP);

	for (i = 0; i < MAX_DMA_CHAN; i++) {
		int irq = LTQ_DMA_CH0_INT + i;
		struct ltq_etop_chan *ch = &priv->ch[i];

		ch->idx = ch->dma.nr = i;

		if (IS_TX(i)) {
			ltq_dma_alloc_tx(&ch->dma);
			request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
				"etop_tx", priv);
		} else if (IS_RX(i)) {
			ltq_dma_alloc_rx(&ch->dma);
			for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
					ch->dma.desc++)
				if (ltq_etop_alloc_skb(ch))
					return -ENOMEM;
			ch->dma.desc = 0;
			request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
				"etop_rx", priv);
		}
		ch->dma.irq = irq;
	}
	return 0;
}
static void
ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strcpy(info->driver, "Lantiq ETOP");
	strcpy(info->bus_info, "internal");
	strcpy(info->version, DRV_VERSION);
}
static int
ltq_etop_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int
ltq_etop_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);

	return phy_ethtool_sset(priv->phydev, cmd);
}

static int
ltq_etop_nway_reset(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);

	return phy_start_aneg(priv->phydev);
}
static const struct ethtool_ops ltq_etop_ethtool_ops = {
	.get_drvinfo = ltq_etop_get_drvinfo,
	.get_settings = ltq_etop_get_settings,
	.set_settings = ltq_etop_set_settings,
	.nway_reset = ltq_etop_nway_reset,
};
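
/*
 * MDIO accessors: a request is issued by writing the address/register
 * fields to LTQ_ETOP_MDIO and busy-waiting until the hardware clears the
 * MDIO_REQUEST bit again.
 */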
static int
ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr, int phy_reg, u16 phy_data)
{
	u32 val = MDIO_REQUEST |
		((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
		((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET) |
		phy_data;

	while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
		;
	ltq_etop_w32(val, LTQ_ETOP_MDIO);
	return 0;
}
static int
ltq_etop_mdio_rd(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	u32 val = MDIO_REQUEST | MDIO_READ |
		((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
		((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET);

	while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
		;
	ltq_etop_w32(val, LTQ_ETOP_MDIO);
	while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
		;
	val = ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_VAL_MASK;
	return val;
}
static void
ltq_etop_mdio_link(struct net_device *dev)
{
	/* nothing to do */
}
static int
ltq_etop_mdio_probe(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = NULL;
	int phy_addr;

	/* use the first PHY found on the bus */
	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
		if (priv->mii_bus->phy_map[phy_addr]) {
			phydev = priv->mii_bus->phy_map[phy_addr];
			break;
		}
	}

	if (!phydev) {
		netdev_err(dev, "no PHY found\n");
		return -ENODEV;
	}

	phydev = phy_connect(dev, dev_name(&phydev->dev), &ltq_etop_mdio_link,
			0, priv->pldata->mii_mode);

	if (IS_ERR(phydev)) {
		netdev_err(dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	phydev->supported &= (SUPPORTED_10baseT_Half
			      | SUPPORTED_10baseT_Full
			      | SUPPORTED_100baseT_Half
			      | SUPPORTED_100baseT_Full
			      | SUPPORTED_Autoneg
			      | SUPPORTED_MII
			      | SUPPORTED_TP);

	phydev->advertising = phydev->supported;
	priv->phydev = phydev;
	pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n",
	       dev->name, phydev->drv->name,
	       dev_name(&phydev->dev), phydev->irq);

	return 0;
}
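
/* register the MDIO bus, wire up the read/write ops and probe for a PHY */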
static int
ltq_etop_mdio_init(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int i;
	int err;

	priv->mii_bus = mdiobus_alloc();
	if (!priv->mii_bus) {
		netdev_err(dev, "failed to allocate mii bus\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->mii_bus->priv = dev;
	priv->mii_bus->read = ltq_etop_mdio_rd;
	priv->mii_bus->write = ltq_etop_mdio_wr;
	priv->mii_bus->name = "ltq_mii";
	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
	priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!priv->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out_free_mdiobus;
	}

	for (i = 0; i < PHY_MAX_ADDR; ++i)
		priv->mii_bus->irq[i] = PHY_POLL;

	if (mdiobus_register(priv->mii_bus)) {
		err = -ENXIO;
		goto err_out_free_mdio_irq;
	}

	if (ltq_etop_mdio_probe(dev)) {
		err = -ENXIO;
		goto err_out_unregister_bus;
	}
	return 0;

err_out_unregister_bus:
	mdiobus_unregister(priv->mii_bus);
err_out_free_mdio_irq:
	kfree(priv->mii_bus->irq);
err_out_free_mdiobus:
	mdiobus_free(priv->mii_bus);
err_out:
	return err;
}
static void
ltq_etop_mdio_cleanup(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);

	phy_disconnect(priv->phydev);
	mdiobus_unregister(priv->mii_bus);
	kfree(priv->mii_bus->irq);
	mdiobus_free(priv->mii_bus);
}
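
/* ndo_open: enable NAPI and DMA on both channels, then start PHY and queues */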
static int
ltq_etop_open(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < MAX_DMA_CHAN; i++) {
		struct ltq_etop_chan *ch = &priv->ch[i];

		if (!IS_TX(i) && (!IS_RX(i)))
			continue;
		ltq_dma_open(&ch->dma);
		napi_enable(&ch->napi);
	}
	phy_start(priv->phydev);
	netif_tx_start_all_queues(dev);
	return 0;
}
static int
ltq_etop_stop(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int i;

	netif_tx_stop_all_queues(dev);
	phy_stop(priv->phydev);
	for (i = 0; i < MAX_DMA_CHAN; i++) {
		struct ltq_etop_chan *ch = &priv->ch[i];

		if (!IS_RX(i) && !IS_TX(i))
			continue;
		napi_disable(&ch->napi);
		ltq_dma_close(&ch->dma);
	}
	return 0;
}
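
/*
 * ndo_start_xmit: place the skb in the next free TX descriptor. The DMA
 * engine requires the buffer address to start on a 16 byte boundary, so the
 * misalignment is passed to the hardware as a byte offset in the descriptor.
 */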
static int
ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
{
	int queue = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
	struct ltq_etop_priv *priv = netdev_priv(dev);
	struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1];
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	int len;
	unsigned long flags;
	u32 byte_offset;

	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;

	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
		dev_kfree_skb_any(skb);
		netdev_err(dev, "tx ring full\n");
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	/* dma needs to start on a 16 byte aligned address */
	byte_offset = CPHYSADDR(skb->data) % 16;
	ch->skb[ch->dma.desc] = skb;

	dev->trans_start = jiffies;

	spin_lock_irqsave(&priv->lock, flags);
	desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
						DMA_TO_DEVICE)) - byte_offset;
	/* make sure the address is written before ownership is handed over */
	wmb();
	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
		netif_tx_stop_queue(txq);

	return NETDEV_TX_OK;
}
static int
ltq_etop_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret = eth_change_mtu(dev, new_mtu);

	if (!ret) {
		struct ltq_etop_priv *priv = netdev_priv(dev);
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);
		ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu,
			LTQ_ETOP_IGPLEN);
		spin_unlock_irqrestore(&priv->lock, flags);
	}
	return ret;
}
static int
ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);

	/* TODO: mii-tool reports "No MII transceiver present!." ?! */
	return phy_mii_ioctl(priv->phydev, rq, cmd);
}
static int
ltq_etop_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);

	if (!ret) {
		struct ltq_etop_priv *priv = netdev_priv(dev);
		unsigned long flags;

		/* store the mac for the unicast filter */
		spin_lock_irqsave(&priv->lock, flags);
		ltq_etop_w32(*((u32 *)dev->dev_addr), LTQ_ETOP_MAC_DA0);
		ltq_etop_w32(*((u16 *)&dev->dev_addr[4]) << 16,
			LTQ_ETOP_MAC_DA1);
		spin_unlock_irqrestore(&priv->lock, flags);
	}
	return ret;
}
static void
ltq_etop_set_multicast_list(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	unsigned long flags;

	/* ensure that the unicast filter is not enabled in promiscuous mode */
	spin_lock_irqsave(&priv->lock, flags);
	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI))
		ltq_etop_w32_mask(ETOP_FTCU, 0, LTQ_ETOP_ENETS0);
	else
		ltq_etop_w32_mask(0, ETOP_FTCU, LTQ_ETOP_ENETS0);
	spin_unlock_irqrestore(&priv->lock, flags);
}
static u16
ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	/* we are currently only using the first queue */
	return 0;
}
static int
ltq_etop_init(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	struct sockaddr mac;
	int err;

	ether_setup(dev);
	dev->watchdog_timeo = 10 * HZ;
	err = ltq_etop_hw_init(dev);
	if (err)
		goto err_hw;
	ltq_etop_change_mtu(dev, 1500);

	memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
	if (!is_valid_ether_addr(mac.sa_data)) {
		pr_warn("etop: invalid MAC, using random\n");
		random_ether_addr(mac.sa_data);
	}

	err = ltq_etop_set_mac_address(dev, &mac);
	if (err)
		goto err_netdev;
	ltq_etop_set_multicast_list(dev);
	err = ltq_etop_mdio_init(dev);
	if (err)
		goto err_netdev;
	return 0;

err_netdev:
	unregister_netdev(dev);
	free_netdev(dev);
err_hw:
	ltq_etop_hw_exit(dev);
	return err;
}
static void
ltq_etop_tx_timeout(struct net_device *dev)
{
	int err;

	ltq_etop_hw_exit(dev);
	err = ltq_etop_hw_init(dev);
	if (err)
		goto err_hw;
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
	return;

err_hw:
	ltq_etop_hw_exit(dev);
	netdev_err(dev, "failed to restart etop after TX timeout\n");
}
static const struct net_device_ops ltq_eth_netdev_ops = {
	.ndo_open = ltq_etop_open,
	.ndo_stop = ltq_etop_stop,
	.ndo_start_xmit = ltq_etop_tx,
	.ndo_change_mtu = ltq_etop_change_mtu,
	.ndo_do_ioctl = ltq_etop_ioctl,
	.ndo_set_mac_address = ltq_etop_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = ltq_etop_set_multicast_list,
	.ndo_select_queue = ltq_etop_select_queue,
	.ndo_init = ltq_etop_init,
	.ndo_tx_timeout = ltq_etop_tx_timeout,
};
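
/*
 * platform probe: map the ETOP register window, allocate the multi-queue
 * net_device, attach one NAPI context per used DMA channel and register
 * the interface.
 */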
static int __init
ltq_etop_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct ltq_etop_priv *priv;
	struct resource *res;
	int err;
	int i;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "failed to get etop resource\n");
		err = -ENOENT;
		goto err_out;
	}

	res = devm_request_mem_region(&pdev->dev, res->start,
		resource_size(res), dev_name(&pdev->dev));
	if (!res) {
		dev_err(&pdev->dev, "failed to request etop resource\n");
		err = -EBUSY;
		goto err_out;
	}

	ltq_etop_membase = devm_ioremap_nocache(&pdev->dev,
		res->start, resource_size(res));
	if (!ltq_etop_membase) {
		dev_err(&pdev->dev, "failed to remap etop engine %d\n",
			pdev->id);
		err = -ENOMEM;
		goto err_out;
	}

	dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4);
	if (!dev) {
		err = -ENOMEM;
		goto err_out;
	}
	strcpy(dev->name, "eth%d");
	dev->netdev_ops = &ltq_eth_netdev_ops;
	dev->ethtool_ops = &ltq_etop_ethtool_ops;
	priv = netdev_priv(dev);
	priv->res = res;
	priv->pldata = dev_get_platdata(&pdev->dev);
	priv->netdev = dev;
	spin_lock_init(&priv->lock);

	for (i = 0; i < MAX_DMA_CHAN; i++) {
		if (IS_TX(i))
			netif_napi_add(dev, &priv->ch[i].napi,
				ltq_etop_poll_tx, 8);
		else if (IS_RX(i))
			netif_napi_add(dev, &priv->ch[i].napi,
				ltq_etop_poll_rx, 32);
		priv->ch[i].netdev = dev;
	}

	err = register_netdev(dev);
	if (err)
		goto err_free;

	platform_set_drvdata(pdev, dev);
	return 0;

err_free:
	free_netdev(dev);
err_out:
	return err;
}
static int __devexit
ltq_etop_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	if (dev) {
		netif_tx_stop_all_queues(dev);
		ltq_etop_hw_exit(dev);
		ltq_etop_mdio_cleanup(dev);
		unregister_netdev(dev);
	}
	return 0;
}
static struct platform_driver ltq_mii_driver = {
	.remove = __devexit_p(ltq_etop_remove),
	.driver = {
		.name = "ltq_etop",
		.owner = THIS_MODULE,
	},
};

int __init
init_ltq_etop(void)
{
	int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe);

	if (ret)
		pr_err("ltq_etop: Error registering platform driver!\n");
	return ret;
}

static void __exit
exit_ltq_etop(void)
{
	platform_driver_unregister(&ltq_mii_driver);
}
module_init(init_ltq_etop);
module_exit(exit_ltq_etop);

MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Lantiq SoC ETOP");
MODULE_LICENSE("GPL");