{
struct mv643xx_private *mp = netdev_priv(dev);
- netif_device_detach(dev);
+ if (!netif_running(dev))
+ return;
+
+ netif_stop_queue(dev);
+
eth_port_reset(mp->port_num);
eth_port_start(dev);
- netif_device_attach(dev);
+
+ if (mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB)
+ netif_wake_queue(dev);
}
/**
#else
if (eth_int_cause & ETH_INT_CAUSE_RX)
mv643xx_eth_receive_queue(dev, INT_MAX);
+#endif
if (eth_int_cause_ext & ETH_INT_CAUSE_TX)
mv643xx_eth_free_completed_tx_descs(dev);
-#endif
/*
 * If no real interrupt occurred, exit.
BUG_ON(netif_queue_stopped(dev));
BUG_ON(skb == NULL);
- BUG_ON(mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB);
+
+ if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) {
+ printk(KERN_ERR "%s: transmit with queue full\n", dev->name);
+ netif_stop_queue(dev);
+ return 1;
+ }
if (has_tiny_unaligned_frags(skb)) {
if ((skb_linearize(skb, GFP_ATOMIC) != 0)) {