This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
ST Ethernet IPs are built around a Synopsys IP Core.
- Copyright (C) 2007-2009 STMicroelectronics Ltd
+ Copyright (C) 2007-2011 STMicroelectronics Ltd
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
-#include <linux/phy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
-#include "stmmac.h"
#ifdef CONFIG_STMMAC_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif
+#include "stmmac.h"
#define STMMAC_RESOURCE_NAME "stmmaceth"
struct phy_device *phydev;
char phy_id[MII_BUS_ID_SIZE + 3];
char bus_id[MII_BUS_ID_SIZE];
-
+ int interface = priv->plat->interface;
priv->oldlink = 0;
priv->speed = 0;
priv->oldduplex = -1;
priv->plat->phy_addr);
pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
- phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0,
- priv->plat->interface);
+ phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0, interface);
if (IS_ERR(phydev)) {
pr_err("%s: Could not attach to PHY\n", dev->name);
return PTR_ERR(phydev);
}
+ /* Stop advertising 1000BASE capability if interface is not GMII */
+ if ((interface == PHY_INTERFACE_MODE_MII) ||
+ (interface == PHY_INTERFACE_MODE_RMII)) {
+ phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ phydev->advertising = phydev->supported;
+ }
+
/*
* Broken HW is sometimes missing the pull-up resistor on the
* MDIO line, which results in reads to non-existent devices returning
}
}
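+/**
+ * stmmac_set_bfsize - select the DMA buffer size for a given MTU
+ * @mtu: current MTU
+ * @bufsize: default buffer size, used as the initial value
+ * Description: returns the next supported buffer size above @mtu
+ * (2KiB, 4KiB or 8KiB), so that a full frame always fits, and falls
+ * back to DMA_BUFFER_SIZE for small MTUs.
+ */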
+static int stmmac_set_bfsize(int mtu, int bufsize)
+{
+ int ret = bufsize;
+
+ if (mtu >= BUF_SIZE_4KiB)
+ ret = BUF_SIZE_8KiB;
+ else if (mtu >= BUF_SIZE_2KiB)
+ ret = BUF_SIZE_4KiB;
+ else if (mtu >= DMA_BUFFER_SIZE)
+ ret = BUF_SIZE_2KiB;
+ else
+ ret = DMA_BUFFER_SIZE;
+
+ return ret;
+}
+
/**
* init_dma_desc_rings - init the RX/TX descriptor rings
* @dev: net device structure
* Description: this function initializes the DMA RX/TX descriptors
- * and allocates the socket buffers.
+ * and allocates the socket buffers. It supports the chained and ring
+ * modes.
*/
static void init_dma_desc_rings(struct net_device *dev)
{
struct sk_buff *skb;
unsigned int txsize = priv->dma_tx_size;
unsigned int rxsize = priv->dma_rx_size;
- unsigned int bfsize = priv->dma_buf_sz;
- int buff2_needed = 0, dis_ic = 0;
+ unsigned int bfsize;
+ int dis_ic = 0;
+ int des3_as_data_buf = 0;
- /* Set the Buffer size according to the MTU;
- * indeed, in case of jumbo we need to bump-up the buffer sizes.
- */
- if (unlikely(dev->mtu >= BUF_SIZE_8KiB))
- bfsize = BUF_SIZE_16KiB;
- else if (unlikely(dev->mtu >= BUF_SIZE_4KiB))
- bfsize = BUF_SIZE_8KiB;
- else if (unlikely(dev->mtu >= BUF_SIZE_2KiB))
- bfsize = BUF_SIZE_4KiB;
- else if (unlikely(dev->mtu >= DMA_BUFFER_SIZE))
- bfsize = BUF_SIZE_2KiB;
+ /* Set the max buffer size according to the DESC mode
+ * and the MTU. Note that RING mode allows a 16KiB buffer size. */
+ bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
+
+ if (bfsize == BUF_SIZE_16KiB)
+ des3_as_data_buf = 1;
else
- bfsize = DMA_BUFFER_SIZE;
+ bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
#ifdef CONFIG_STMMAC_TIMER
/* Disable interrupts on completion for the reception if timer is on */
if (likely(priv->tm->enable))
dis_ic = 1;
#endif
- /* If the MTU exceeds 8k so use the second buffer in the chain */
- if (bfsize >= BUF_SIZE_8KiB)
- buff2_needed = 1;
DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
txsize, rxsize, bfsize);
return;
}
- DBG(probe, INFO, "stmmac (%s) DMA desc rings: virt addr (Rx %p, "
+ DBG(probe, INFO, "stmmac (%s) DMA desc: virt addr (Rx %p, "
"Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
dev->name, priv->dma_rx, priv->dma_tx,
(unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
for (i = 0; i < rxsize; i++) {
struct dma_desc *p = priv->dma_rx + i;
- skb = netdev_alloc_skb_ip_align(dev, bfsize);
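+ /* the rings are initialized in process context, so a plain
+ * GFP_KERNEL allocation is fine here */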
+ skb = __netdev_alloc_skb(dev, bfsize + NET_IP_ALIGN,
+ GFP_KERNEL);
if (unlikely(skb == NULL)) {
pr_err("%s: Rx init fails; skb is NULL\n", __func__);
break;
}
+ skb_reserve(skb, NET_IP_ALIGN);
priv->rx_skbuff[i] = skb;
priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
bfsize, DMA_FROM_DEVICE);
p->des2 = priv->rx_skbuff_dma[i];
- if (unlikely(buff2_needed))
- p->des3 = p->des2 + BUF_SIZE_8KiB;
+
+ priv->hw->ring->init_desc3(des3_as_data_buf, p);
+
DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
}
priv->tx_skbuff[i] = NULL;
priv->dma_tx[i].des2 = 0;
}
+
+ /* In case of chained mode this sets des3 to point to the next
+ * element in the chain */
+ priv->hw->ring->init_dma_chain(priv->dma_rx, priv->dma_rx_phy, rxsize);
+ priv->hw->ring->init_dma_chain(priv->dma_tx, priv->dma_tx_phy, txsize);
+
priv->dirty_tx = 0;
priv->cur_tx = 0;
{
unsigned int txsize = priv->dma_tx_size;
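+ /* the TX completion path can run concurrently with
+ * stmmac_xmit, so walk the ring under the tx_lock */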
+ spin_lock(&priv->tx_lock);
+
while (priv->dirty_tx != priv->cur_tx) {
int last;
unsigned int entry = priv->dirty_tx % txsize;
dma_unmap_single(priv->device, p->des2,
priv->hw->desc->get_tx_len(p),
DMA_TO_DEVICE);
- if (unlikely(p->des3))
- p->des3 = 0;
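+ /* des3 handling is mode-specific, so delegate its cleanup
+ * to the ring/chain helper */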
+ priv->hw->ring->clean_desc3(p);
if (likely(skb != NULL)) {
/*
}
netif_tx_unlock(priv->dev);
}
+ spin_unlock(&priv->tx_lock);
}
static inline void stmmac_enable_irq(struct stmmac_priv *priv)
*/
static void stmmac_tx_err(struct stmmac_priv *priv)
{
-
netif_stop_queue(priv->dev);
priv->hw->dma->stop_tx(priv->ioaddr);
*/
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
- u32 hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);
-
- if (likely(hw_cap)) {
- priv->dma_cap.mbps_10_100 = (hw_cap & 0x1);
- priv->dma_cap.mbps_1000 = (hw_cap & 0x2) >> 1;
- priv->dma_cap.half_duplex = (hw_cap & 0x4) >> 2;
- priv->dma_cap.hash_filter = (hw_cap & 0x10) >> 4;
- priv->dma_cap.multi_addr = (hw_cap & 0x20) >> 5;
- priv->dma_cap.pcs = (hw_cap & 0x40) >> 6;
- priv->dma_cap.sma_mdio = (hw_cap & 0x100) >> 8;
- priv->dma_cap.pmt_remote_wake_up = (hw_cap & 0x200) >> 9;
- priv->dma_cap.pmt_magic_frame = (hw_cap & 0x400) >> 10;
- priv->dma_cap.rmon = (hw_cap & 0x800) >> 11; /* MMC */
+ u32 hw_cap = 0;
+
+ if (priv->hw->dma->get_hw_feature) {
+ hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);
+
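+ /* decode the HW feature register: mask each field and
+ * shift it down to a 0/1 capability flag */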
+ priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
+ priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
+ priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
+ priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
+ priv->dma_cap.multi_addr =
+ (hw_cap & DMA_HW_FEAT_ADDMACADRSEL) >> 5;
+ priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
+ priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
+ priv->dma_cap.pmt_remote_wake_up =
+ (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
+ priv->dma_cap.pmt_magic_frame =
+ (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
+ /* MMC */
+ priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
/* IEEE 1588-2002*/
- priv->dma_cap.time_stamp = (hw_cap & 0x1000) >> 12;
+ priv->dma_cap.time_stamp =
+ (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
/* IEEE 1588-2008*/
- priv->dma_cap.atime_stamp = (hw_cap & 0x2000) >> 13;
+ priv->dma_cap.atime_stamp =
+ (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
/* 802.3az - Energy-Efficient Ethernet (EEE) */
- priv->dma_cap.eee = (hw_cap & 0x4000) >> 14;
- priv->dma_cap.av = (hw_cap & 0x8000) >> 15;
+ priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
+ priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
/* TX and RX csum */
- priv->dma_cap.tx_coe = (hw_cap & 0x10000) >> 16;
- priv->dma_cap.rx_coe_type1 = (hw_cap & 0x20000) >> 17;
- priv->dma_cap.rx_coe_type2 = (hw_cap & 0x40000) >> 18;
- priv->dma_cap.rxfifo_over_2048 = (hw_cap & 0x80000) >> 19;
+ priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
+ priv->dma_cap.rx_coe_type1 =
+ (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
+ priv->dma_cap.rx_coe_type2 =
+ (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
+ priv->dma_cap.rxfifo_over_2048 =
+ (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
/* TX and RX number of channels */
- priv->dma_cap.number_rx_channel = (hw_cap & 0x300000) >> 20;
- priv->dma_cap.number_tx_channel = (hw_cap & 0xc00000) >> 22;
+ priv->dma_cap.number_rx_channel =
+ (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
+ priv->dma_cap.number_tx_channel =
+ (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
/* Alternate (enhanced) DESC mode*/
- priv->dma_cap.enh_desc = (hw_cap & 0x1000000) >> 24;
+ priv->dma_cap.enh_desc =
+ (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
} else
pr_debug("\tNo HW DMA feature register supported");
memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
priv->xstats.threshold = tc;
- stmmac_mmc_setup(priv);
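+ /* only program the MMC counters if the feature register
+ * reports that the RMON/MMC block is present */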
+ if (priv->dma_cap.rmon)
+ stmmac_mmc_setup(priv);
/* Start the ball rolling... */
DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
return 0;
}
-static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
- struct net_device *dev,
- int csum_insertion)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
- unsigned int nopaged_len = skb_headlen(skb);
- unsigned int txsize = priv->dma_tx_size;
- unsigned int entry = priv->cur_tx % txsize;
- struct dma_desc *desc = priv->dma_tx + entry;
-
- if (nopaged_len > BUF_SIZE_8KiB) {
-
- int buf2_size = nopaged_len - BUF_SIZE_8KiB;
-
- desc->des2 = dma_map_single(priv->device, skb->data,
- BUF_SIZE_8KiB, DMA_TO_DEVICE);
- desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->hw->desc->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
- csum_insertion);
-
- entry = (++priv->cur_tx) % txsize;
- desc = priv->dma_tx + entry;
-
- desc->des2 = dma_map_single(priv->device,
- skb->data + BUF_SIZE_8KiB,
- buf2_size, DMA_TO_DEVICE);
- desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->hw->desc->prepare_tx_desc(desc, 0, buf2_size,
- csum_insertion);
- priv->hw->desc->set_tx_owner(desc);
- priv->tx_skbuff[entry] = NULL;
- } else {
- desc->des2 = dma_map_single(priv->device, skb->data,
- nopaged_len, DMA_TO_DEVICE);
- desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
- csum_insertion);
- }
- return entry;
-}
-
/**
* stmmac_xmit:
* @skb : the socket buffer
int i, csum_insertion = 0;
int nfrags = skb_shinfo(skb)->nr_frags;
struct dma_desc *desc, *first;
+ unsigned int nopaged_len = skb_headlen(skb);
if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
if (!netif_queue_stopped(dev)) {
return NETDEV_TX_BUSY;
}
+ spin_lock(&priv->tx_lock);
+
entry = priv->cur_tx % txsize;
#ifdef STMMAC_XMIT_DEBUG
pr_info("stmmac xmit:\n"
"\tskb addr %p - len: %d - nopaged_len: %d\n"
"\tn_frags: %d - ip_summed: %d - %s gso\n",
- skb, skb->len, skb_headlen(skb), nfrags, skb->ip_summed,
+ skb, skb->len, nopaged_len, nfrags, skb->ip_summed,
!skb_is_gso(skb) ? "isn't" : "is");
#endif
if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n"
"\t\tn_frags: %d, ip_summed: %d\n",
- skb->len, skb_headlen(skb), nfrags, skb->ip_summed);
+ skb->len, nopaged_len, nfrags, skb->ip_summed);
#endif
priv->tx_skbuff[entry] = skb;
- if (unlikely(skb->len >= BUF_SIZE_4KiB)) {
- entry = stmmac_handle_jumbo_frames(skb, dev, csum_insertion);
+
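+ /* jumbo frames need mode-specific des2/des3 handling, so the
+ * ring/chain helpers decide and build the descriptors */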
+ if (priv->hw->ring->is_jumbo_frm(skb->len, priv->plat->enh_desc)) {
+ entry = priv->hw->ring->jumbo_frm(priv, skb, csum_insertion);
desc = priv->dma_tx + entry;
} else {
- unsigned int nopaged_len = skb_headlen(skb);
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
}
for (i = 0; i < nfrags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- int len = frag->size;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ int len = skb_frag_size(frag);
entry = (++priv->cur_tx) % txsize;
desc = priv->dma_tx + entry;
TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
- desc->des2 = dma_map_page(priv->device, frag->page,
- frag->page_offset,
- len, DMA_TO_DEVICE);
+ desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
+ DMA_TO_DEVICE);
priv->tx_skbuff[entry] = NULL;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
wmb();
priv->hw->dma->enable_dma_transmission(priv->ioaddr);
+ spin_unlock(&priv->tx_lock);
+
return NETDEV_TX_OK;
}
DMA_FROM_DEVICE);
(p + entry)->des2 = priv->rx_skbuff_dma[entry];
- if (unlikely(priv->plat->has_gmac)) {
- if (bfsize >= BUF_SIZE_8KiB)
- (p + entry)->des3 =
- (p + entry)->des2 + BUF_SIZE_8KiB;
- }
+
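+ /* likewise, re-arm des3 through the mode-specific helper
+ * when refilling a descriptor */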
+ if (unlikely(priv->plat->has_gmac))
+ priv->hw->ring->refill_desc3(bfsize, p + entry);
+
RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
}
wmb();
return -EBUSY;
}
- if (priv->plat->has_gmac)
+ if (priv->plat->enh_desc)
max_mtu = JUMBO_LEN;
else
- max_mtu = ETH_DATA_LEN;
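+ /* without enhanced descriptors the frame has to fit in the
+ * linear part of a single skb */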
+ max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
if ((new_mtu < 46) || (new_mtu > max_mtu)) {
pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
"please, use ifconfig or nwhwconfig!\n");
spin_lock_init(&priv->lock);
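+ /* tx_lock serializes stmmac_xmit against the TX completion path */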
+ spin_lock_init(&priv->tx_lock);
ret = register_netdev(dev);
if (ret) {
device->desc = &ndesc_ops;
priv->hw = device;
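+ /* only the ring mode helpers are wired up here; chain mode
+ * would install its own ops */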
+ priv->hw->ring = &ring_mode_ops;
if (device_can_wakeup(priv->device)) {
priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */