#include "skge.h"
#define DRV_NAME "skge"
-#define DRV_VERSION "1.10"
+#define DRV_VERSION "1.11"
#define PFX DRV_NAME " "
#define DEFAULT_TX_RING_SIZE 128
/* Wake-on-LAN is only supported on Yukon chips with rev 1 or above */
static u32 wol_supported(const struct skge_hw *hw)
{
- if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev != 0)
- return WAKE_MAGIC | WAKE_PHY;
- else
+ if (hw->chip_id == CHIP_ID_GENESIS)
+ return 0;
+
+ if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
return 0;
+
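+ /* everything else (Yukon rev 1 and later variants) supports magic packet and PHY wakeup */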
+ return WAKE_MAGIC | WAKE_PHY;
}
static u32 pci_wake_enabled(struct pci_dev *dev)
.phys_id = skge_phys_id,
.get_stats_count = skge_get_stats_count,
.get_ethtool_stats = skge_get_ethtool_stats,
- .get_perm_addr = ethtool_op_get_perm_addr,
};
/*
static inline int skge_avail(const struct skge_ring *ring)
{
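+ /* re-read to_use/to_clean from memory; pairs with the smp_wmb() in the xmit path */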
+ smp_mb();
return ((ring->to_clean > ring->to_use) ? 0 : ring->count)
+ (ring->to_clean - ring->to_use) - 1;
}
dev->name, e - skge->tx_ring.start, skb->len);
skge->tx_ring.to_use = e->next;
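+ /* make the to_use update visible to skge_tx_done before possibly stopping the queue */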
+ smp_wmb();
+
if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
pr_debug("%s: transmit queue full\n", dev->name);
netif_stop_queue(dev);
{
struct pci_dev *pdev = skge->hw->pdev;
- BUG_ON(!e->skb);
-
/* skb header vs. fragment */
if (control & BMU_STF)
pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
dev_kfree_skb(e->skb);
}
- e->skb = NULL;
}
/* Free all buffers in transmit ring */
skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
- netif_tx_lock(dev);
for (e = ring->to_clean; e != ring->to_use; e = e->next) {
- struct skge_tx_desc *td = e->desc;
+ u32 control = ((const struct skge_tx_desc *) e->desc)->control;
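+ /* stop cleaning at the first descriptor still owned by the NIC */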
- if (td->control & BMU_OWN)
+ if (control & BMU_OWN)
break;
- skge_tx_free(skge, e, td->control);
+ skge_tx_free(skge, e, control);
}
skge->tx_ring.to_clean = e;
- if (skge_avail(&skge->tx_ring) > TX_LOW_WATER)
- netif_wake_queue(dev);
- netif_tx_unlock(dev);
+ /* Can run lockless until we need to synchronize to restart queue. */
+ smp_mb();
+
+ if (unlikely(netif_queue_stopped(dev) &&
+ skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
+ netif_tx_lock(dev);
+ if (unlikely(netif_queue_stopped(dev) &&
+ skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
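+ /* queue is still stopped and there is room again: wake it under the lock */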
+ netif_wake_queue(dev);
+ }
+ netif_tx_unlock(dev);
+ }
}
static int skge_poll(struct net_device *dev, int *budget)
skge->duplex = -1;
skge->speed = -1;
skge->advertising = skge_supported_modes(hw);
- skge->wol = pci_wake_enabled(hw->pdev) ? wol_supported(hw) : 0;
+
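+ /* default WOL to magic packet only, and only if wake was already enabled (e.g. by the BIOS) */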
+ if (pci_wake_enabled(hw->pdev))
+ skge->wol = wol_supported(hw) & WAKE_MAGIC;
hw->dev[port] = dev;
struct skge_hw *hw = pci_get_drvdata(pdev);
int i, err, wol = 0;
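+ /* bail out if probe never bound a device to this slot */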
+ if (!hw)
+ return 0;
+
err = pci_save_state(pdev);
if (err)
return err;
struct skge_hw *hw = pci_get_drvdata(pdev);
int i, err;
+ if (!hw)
+ return 0;
+
err = pci_set_power_state(pdev, PCI_D0);
if (err)
goto out;
struct skge_hw *hw = pci_get_drvdata(pdev);
int i, wol = 0;
+ if (!hw)
+ return;
+
for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
struct skge_port *skge = netdev_priv(dev);