Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index b7cca71..3652afd 100644
 #include <linux/ethtool.h>
 #include <linux/etherdevice.h>
 #include <linux/mutex.h>
+#include <linux/if_vlan.h>
+
+#if IS_ENABLED(CONFIG_IPV6)
+#include <linux/icmpv6.h>
+#endif
+
+#include <net/icmp.h>
+#include <net/route.h>
 
 #include <asm/vio.h>
 #include <asm/ldc.h>
@@ -37,8 +45,11 @@ MODULE_VERSION(DRV_MODULE_VERSION);
  */
 #define        VNET_MAX_RETRIES        10
 
+static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
+
 /* Ordered from largest major to lowest */
 static struct vio_version vnet_versions[] = {
+       { .major = 1, .minor = 6 },
        { .major = 1, .minor = 0 },
 };
 
@@ -65,6 +76,7 @@ static int vnet_send_attr(struct vio_driver_state *vio)
        struct vnet_port *port = to_vnet_port(vio);
        struct net_device *dev = port->vp->dev;
        struct vio_net_attr_info pkt;
+       int framelen = ETH_FRAME_LEN;
        int i;
 
        memset(&pkt, 0, sizeof(pkt));
@@ -72,19 +84,41 @@ static int vnet_send_attr(struct vio_driver_state *vio)
        pkt.tag.stype = VIO_SUBTYPE_INFO;
        pkt.tag.stype_env = VIO_ATTR_INFO;
        pkt.tag.sid = vio_send_sid(vio);
-       pkt.xfer_mode = VIO_DRING_MODE;
+       if (vio_version_before(vio, 1, 2))
+               pkt.xfer_mode = VIO_DRING_MODE;
+       else
+               pkt.xfer_mode = VIO_NEW_DRING_MODE;
        pkt.addr_type = VNET_ADDR_ETHERMAC;
        pkt.ack_freq = 0;
        for (i = 0; i < 6; i++)
                pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
-       pkt.mtu = ETH_FRAME_LEN;
+       if (vio_version_after(vio, 1, 3)) {
+               if (port->rmtu)
+                       port->rmtu = min(VNET_MAXPACKET, port->rmtu);
+               else
+                       port->rmtu = VNET_MAXPACKET;
+               pkt.mtu = port->rmtu;
+               if (vio_version_after_eq(vio, 1, 6))
+                       pkt.options = VIO_TX_DRING;
+       } else if (vio_version_before(vio, 1, 3)) {
+               pkt.mtu = framelen;
+       } else { /* v1.3 */
+               pkt.mtu = framelen + VLAN_HLEN;
+       }
+
+       pkt.plnk_updt = PHYSLINK_UPDATE_NONE;
+       pkt.cflags = 0;
 
        viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
-              "ackfreq[%u] mtu[%llu]\n",
+              "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
+              "cflags[0x%04x] lso_max[%u]\n",
               pkt.xfer_mode, pkt.addr_type,
-              (unsigned long long) pkt.addr,
-              pkt.ack_freq,
-              (unsigned long long) pkt.mtu);
+              (unsigned long long)pkt.addr,
+              pkt.ack_freq, pkt.plnk_updt, pkt.options,
+              (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen);
 
        return vio_ldc_send(vio, &pkt, sizeof(pkt));
 }
@@ -92,18 +126,52 @@ static int vnet_send_attr(struct vio_driver_state *vio)
 static int handle_attr_info(struct vio_driver_state *vio,
                            struct vio_net_attr_info *pkt)
 {
-       viodbg(HS, "GOT NET ATTR INFO xmode[0x%x] atype[0x%x] addr[%llx] "
-              "ackfreq[%u] mtu[%llu]\n",
+       struct vnet_port *port = to_vnet_port(vio);
+       u64     localmtu;
+       u8      xfer_mode;
+
+       viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
+              "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
+              "(rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
               pkt->xfer_mode, pkt->addr_type,
-              (unsigned long long) pkt->addr,
-              pkt->ack_freq,
-              (unsigned long long) pkt->mtu);
+              (unsigned long long)pkt->addr,
+              pkt->ack_freq, pkt->plnk_updt, pkt->options,
+              (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
+              pkt->ipv4_lso_maxlen);
 
        pkt->tag.sid = vio_send_sid(vio);
 
-       if (pkt->xfer_mode != VIO_DRING_MODE ||
+       xfer_mode = pkt->xfer_mode;
+       /* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */
+       if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE)
+               xfer_mode = VIO_NEW_DRING_MODE;
+
+       /* MTU negotiation:
+        *      < v1.3 - ETH_FRAME_LEN exactly
+        *      > v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change
+        *                      pkt->mtu for ACK
+        *      = v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly
+        */
+       if (vio_version_before(vio, 1, 3)) {
+               localmtu = ETH_FRAME_LEN;
+       } else if (vio_version_after(vio, 1, 3)) {
+               localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET;
+               localmtu = min(pkt->mtu, localmtu);
+               pkt->mtu = localmtu;
+       } else { /* v1.3 */
+               localmtu = ETH_FRAME_LEN + VLAN_HLEN;
+       }
+       port->rmtu = localmtu;
+
+       /* for version >= 1.6, ACK with the xfer mode and options we support */
+       if (vio_version_after_eq(vio, 1, 6)) {
+               pkt->xfer_mode = VIO_NEW_DRING_MODE;
+               pkt->options = VIO_TX_DRING;
+       }
+
+       if (!(xfer_mode & VIO_NEW_DRING_MODE) ||
            pkt->addr_type != VNET_ADDR_ETHERMAC ||
-           pkt->mtu != ETH_FRAME_LEN) {
+           pkt->mtu != localmtu) {
                viodbg(HS, "SEND NET ATTR NACK\n");
 
                pkt->tag.stype = VIO_SUBTYPE_NACK;
@@ -112,7 +180,14 @@ static int handle_attr_info(struct vio_driver_state *vio,
 
                return -ECONNRESET;
        } else {
-               viodbg(HS, "SEND NET ATTR ACK\n");
+               viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] "
+                      "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] "
+                      "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
+                      pkt->xfer_mode, pkt->addr_type,
+                      (unsigned long long)pkt->addr,
+                      pkt->ack_freq, pkt->plnk_updt, pkt->options,
+                      (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
+                      pkt->ipv4_lso_maxlen);
 
                pkt->tag.stype = VIO_SUBTYPE_ACK;
 
@@ -208,7 +283,7 @@ static int vnet_rx_one(struct vnet_port *port, unsigned int len,
        int err;
 
        err = -EMSGSIZE;
-       if (unlikely(len < ETH_ZLEN || len > ETH_FRAME_LEN)) {
+       if (unlikely(len < ETH_ZLEN || len > port->rmtu)) {
                dev->stats.rx_length_errors++;
                goto out_dropped;
        }
@@ -283,10 +358,18 @@ static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
                                port->raddr[0], port->raddr[1],
                                port->raddr[2], port->raddr[3],
                                port->raddr[4], port->raddr[5]);
-                       err = -ECONNRESET;
+                       break;
                }
        } while (err == -EAGAIN);
 
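+       /* If a STOPPED ack could not be delivered, remember where it was
+        * needed so that __vnet_tx_trigger() can resend it before raising
+        * the next "start" trigger.
+        */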
+       if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) {
+               port->stop_rx_idx = end;
+               port->stop_rx = true;
+       } else {
+               port->stop_rx_idx = 0;
+               port->stop_rx = false;
+       }
+
        return err;
 }
 
@@ -350,14 +433,17 @@ static int vnet_walk_rx_one(struct vnet_port *port,
        if (IS_ERR(desc))
                return PTR_ERR(desc);
 
+       if (desc->hdr.state != VIO_DESC_READY)
+               return 1;
+
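+       /* Order the hdr.state check above before loading the rest of
+        * the descriptor contents.
+        */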
+       rmb();
+
        viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
               desc->hdr.state, desc->hdr.ack,
               desc->size, desc->ncookies,
               desc->cookies[0].cookie_addr,
               desc->cookies[0].cookie_size);
 
-       if (desc->hdr.state != VIO_DESC_READY)
-               return 1;
        err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
        if (err == -ECONNRESET)
                return err;
@@ -448,7 +534,7 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
        struct net_device *dev;
        struct vnet *vp;
        u32 end;
+       struct vio_net_desc *desc;
 
        if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
                return 0;
 
@@ -456,7 +542,24 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
        if (unlikely(!idx_is_pending(dr, end)))
                return 0;
 
+       /* sync for race conditions with vnet_start_xmit() and tell xmit it
+        * is time to send a trigger.
+        */
        dr->cons = next_idx(end, dr);
+       desc = vio_dring_entry(dr, dr->cons);
+       if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
+               /* vnet_start_xmit() just populated this dring but missed
+                * sending the "start" LDC message to the consumer.
+                * Send a "start" trigger on its behalf.
+                */
+               if (__vnet_tx_trigger(port, dr->cons) > 0)
+                       port->start_cons = false;
+               else
+                       port->start_cons = true;
+       } else {
+               port->start_cons = true;
+       }
 
        vp = port->vp;
        dev = vp->dev;
@@ -528,13 +631,15 @@ static void vnet_event(void *arg, int event)
                vio_link_state_change(vio, event);
                spin_unlock_irqrestore(&vio->lock, flags);
 
-               if (event == LDC_EVENT_RESET)
+               if (event == LDC_EVENT_RESET) {
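+                       /* Force a fresh MTU negotiation on the new
+                        * handshake.
+                        */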
+                       port->rmtu = 0;
                        vio_port_up(vio);
+               }
                return;
        }
 
        if (unlikely(event != LDC_EVENT_DATA_READY)) {
-               pr_warning("Unexpected LDC event %d\n", event);
+               pr_warn("Unexpected LDC event %d\n", event);
                spin_unlock_irqrestore(&vio->lock, flags);
                return;
        }
@@ -597,7 +702,7 @@ static void vnet_event(void *arg, int event)
        local_irq_restore(flags);
 }
 
-static int __vnet_tx_trigger(struct vnet_port *port)
+static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
 {
        struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
        struct vio_dring_data hdr = {
@@ -608,12 +713,21 @@ static int __vnet_tx_trigger(struct vnet_port *port)
                        .sid            = vio_send_sid(&port->vio),
                },
                .dring_ident            = dr->ident,
-               .start_idx              = dr->prod,
+               .start_idx              = start,
                .end_idx                = (u32) -1,
        };
        int err, delay;
        int retries = 0;
 
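+       /* Deliver any STOPPED ack that vnet_send_ack() failed to send
+        * before announcing new descriptors to the peer.
+        */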
+       if (port->stop_rx) {
+               err = vnet_send_ack(port,
+                                   &port->vio.drings[VIO_DRIVER_RX_RING],
+                                   port->stop_rx_idx, -1,
+                                   VIO_DRING_STOPPED);
+               if (err <= 0)
+                       return err;
+       }
+
        hdr.seq = dr->snd_nxt;
        delay = 1;
        do {
@@ -673,6 +787,117 @@ struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
        return ret;
 }
 
+static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
+                                         unsigned *pending)
+{
+       struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+       struct sk_buff *skb = NULL;
+       int i, txi;
+
+       *pending = 0;
+
+       txi = dr->prod - 1;
+       if (txi < 0)
+               txi = VNET_TX_RING_SIZE - 1;
+
+       for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
+               struct vio_net_desc *d;
+
+               d = vio_dring_entry(dr, txi);
+
+               if (d->hdr.state == VIO_DESC_DONE) {
+                       if (port->tx_bufs[txi].skb) {
+                               BUG_ON(port->tx_bufs[txi].skb->next);
+
+                               port->tx_bufs[txi].skb->next = skb;
+                               skb = port->tx_bufs[txi].skb;
+                               port->tx_bufs[txi].skb = NULL;
+
+                               ldc_unmap(port->vio.lp,
+                                         port->tx_bufs[txi].cookies,
+                                         port->tx_bufs[txi].ncookies);
+                       }
+                       d->hdr.state = VIO_DESC_FREE;
+               } else if (d->hdr.state == VIO_DESC_READY) {
+                       (*pending)++;
+               } else if (d->hdr.state == VIO_DESC_FREE) {
+                       break;
+               }
+               --txi;
+               if (txi < 0)
+                       txi = VNET_TX_RING_SIZE - 1;
+       }
+       return skb;
+}
+
+static inline void vnet_free_skbs(struct sk_buff *skb)
+{
+       struct sk_buff *next;
+
+       while (skb) {
+               next = skb->next;
+               skb->next = NULL;
+               dev_kfree_skb(skb);
+               skb = next;
+       }
+}
+
+static void vnet_clean_timer_expire(unsigned long port0)
+{
+       struct vnet_port *port = (struct vnet_port *)port0;
+       struct sk_buff *freeskbs;
+       unsigned pending;
+       unsigned long flags;
+
+       spin_lock_irqsave(&port->vio.lock, flags);
+       freeskbs = vnet_clean_tx_ring(port, &pending);
+       spin_unlock_irqrestore(&port->vio.lock, flags);
+
+       vnet_free_skbs(freeskbs);
+
+       if (pending)
+               (void)mod_timer(&port->clean_timer,
+                               jiffies + VNET_CLEAN_TIMEOUT);
+       else
+               del_timer(&port->clean_timer);
+}
+
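+/* LDC shared mappings want an 8-byte aligned region whose length is a
+ * multiple of 8, with the frame starting VNET_PACKET_SKIP bytes in.
+ * Reallocate and copy the skb if it does not already satisfy that
+ * layout; *pstart and *plen return the region to hand to
+ * ldc_map_single().
+ */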
+static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, void **pstart,
+                                            int *plen)
+{
+       struct sk_buff *nskb;
+       int len, pad;
+
+       len = skb->len;
+       pad = 0;
+       if (len < ETH_ZLEN) {
+               pad += ETH_ZLEN - skb->len;
+               len += pad;
+       }
+       len += VNET_PACKET_SKIP;
+       pad += 8 - (len & 7);
+       len += 8 - (len & 7);
+
+       if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
+           skb_tailroom(skb) < pad ||
+           skb_headroom(skb) < VNET_PACKET_SKIP) {
+               nskb = alloc_and_align_skb(skb->dev, skb->len);
+               if (unlikely(!nskb)) {
+                       dev_kfree_skb(skb);
+                       return NULL;
+               }
+               skb_reserve(nskb, VNET_PACKET_SKIP);
+               if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
+                       dev_kfree_skb(nskb);
+                       dev_kfree_skb(skb);
+                       return NULL;
+               }
+               (void)skb_put(nskb, skb->len);
+               dev_kfree_skb(skb);
+               skb = nskb;
+       }
+
+       *pstart = skb->data - VNET_PACKET_SKIP;
+       *plen = len;
+       return skb;
+}
+
 static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct vnet *vp = netdev_priv(dev);
@@ -681,12 +906,51 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct vio_net_desc *d;
        unsigned long flags;
        unsigned int len;
-       void *tx_buf;
-       int i, err;
+       struct sk_buff *freeskbs = NULL;
+       int i, err, txi;
+       void *start = NULL;
+       int nlen = 0;
+       unsigned pending = 0;
 
        if (unlikely(!port))
                goto out_dropped;
 
+       skb = vnet_skb_shape(skb, &start, &nlen);
+
+       if (unlikely(!skb))
+               goto out_dropped;
+
+       if (skb->len > port->rmtu) {
+               unsigned long localmtu = port->rmtu - ETH_HLEN;
+
+               if (vio_version_after_eq(&port->vio, 1, 3))
+                       localmtu -= VLAN_HLEN;
+
+               if (skb->protocol == htons(ETH_P_IP)) {
+                       struct flowi4 fl4;
+                       struct rtable *rt = NULL;
+
+                       memset(&fl4, 0, sizeof(fl4));
+                       fl4.flowi4_oif = dev->ifindex;
+                       fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
+                       fl4.daddr = ip_hdr(skb)->daddr;
+                       fl4.saddr = ip_hdr(skb)->saddr;
+
+                       rt = ip_route_output_key(dev_net(dev), &fl4);
+                       if (!IS_ERR(rt)) {
+                               skb_dst_set(skb, &rt->dst);
+                               icmp_send(skb, ICMP_DEST_UNREACH,
+                                         ICMP_FRAG_NEEDED,
+                                         htonl(localmtu));
+                       }
+               }
+#if IS_ENABLED(CONFIG_IPV6)
+               else if (skb->protocol == htons(ETH_P_IPV6))
+                       icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
+#endif
+               goto out_dropped;
+       }
+
        spin_lock_irqsave(&port->vio.lock, flags);
 
        dr = &port->vio.drings[VIO_DRIVER_TX_RING];
@@ -704,14 +968,27 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        d = vio_dring_cur(dr);
 
-       tx_buf = port->tx_bufs[dr->prod].buf;
-       skb_copy_from_linear_data(skb, tx_buf + VNET_PACKET_SKIP, skb->len);
+       txi = dr->prod;
+
+       freeskbs = vnet_clean_tx_ring(port, &pending);
+
+       BUG_ON(port->tx_bufs[txi].skb);
 
        len = skb->len;
-       if (len < ETH_ZLEN) {
+       if (len < ETH_ZLEN)
                len = ETH_ZLEN;
-               memset(tx_buf+VNET_PACKET_SKIP+skb->len, 0, len - skb->len);
+
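+       /* The ring slot takes ownership of the skb; clear the local
+        * pointer so the error paths below do not free it again.
+        */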
+       port->tx_bufs[txi].skb = skb;
+       skb = NULL;
+
+       err = ldc_map_single(port->vio.lp, start, nlen,
+                            port->tx_bufs[txi].cookies, VNET_MAXCOOKIES,
+                            (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
+       if (err < 0) {
+               netdev_info(dev, "tx buffer map error %d\n", err);
+               goto out_dropped_unlock;
        }
+       port->tx_bufs[txi].ncookies = err;
 
        /* We don't rely on the ACKs to free the skb in vnet_start_xmit(),
         * thus it is safe to not set VIO_ACK_ENABLE for each transmission:
@@ -723,9 +1000,9 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
         */
        d->hdr.ack = VIO_ACK_DISABLE;
        d->size = len;
-       d->ncookies = port->tx_bufs[dr->prod].ncookies;
+       d->ncookies = port->tx_bufs[txi].ncookies;
        for (i = 0; i < d->ncookies; i++)
-               d->cookies[i] = port->tx_bufs[dr->prod].cookies[i];
+               d->cookies[i] = port->tx_bufs[txi].cookies[i];
 
        /* This has to be a non-SMP write barrier because we are writing
         * to memory which is shared with the peer LDOM.
@@ -734,7 +1011,30 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        d->hdr.state = VIO_DESC_READY;
 
-       err = __vnet_tx_trigger(port);
+       /* Exactly one ldc "start" trigger (for dr->cons) needs to be sent
+        * to notify the consumer that some descriptors are READY.
+        * After that "start" trigger, no additional triggers are needed until
+        * a DRING_STOPPED is received from the consumer. The dr->cons field
+        * (set up by vnet_ack()) has the value of the next dring index
+        * that has not yet been ack-ed. We send a "start" trigger here
+        * if, and only if, start_cons is true (resetting it afterward).
+        * Conversely, vnet_ack() checks whether the descriptor at dr->cons
+        * is marked READY while start_cons is false; if so, it sends the
+        * missed "start" trigger on the producer's behalf.
+        *
+        * Note that the wmb() above makes sure the cookies et al. are
+        * not globally visible before the VIO_DESC_READY, and that the
+        * stores are ordered correctly by the compiler. The consumer will
+        * not proceed until the VIO_DESC_READY is visible, ensuring that
+        * it does not observe anything related to the descriptors out of
+        * order. The HV trap from the LDC start trigger is the
+        * producer-to-consumer announcement that work is available.
+        */
+       if (!port->start_cons)
+               goto ldc_start_done; /* previous trigger suffices */
+
+       err = __vnet_tx_trigger(port, dr->cons);
        if (unlikely(err < 0)) {
                netdev_info(dev, "TX trigger error %d\n", err);
                d->hdr.state = VIO_DESC_FREE;
@@ -742,8 +1042,11 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
                goto out_dropped_unlock;
        }
 
+ldc_start_done:
+       port->start_cons = false;
+
        dev->stats.tx_packets++;
-       dev->stats.tx_bytes += skb->len;
+       dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;
 
        dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
        if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
@@ -754,7 +1057,9 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        spin_unlock_irqrestore(&port->vio.lock, flags);
 
-       dev_kfree_skb(skb);
+       vnet_free_skbs(freeskbs);
+
+       (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
 
        return NETDEV_TX_OK;
 
@@ -762,7 +1067,14 @@ out_dropped_unlock:
        spin_unlock_irqrestore(&port->vio.lock, flags);
 
 out_dropped:
-       dev_kfree_skb(skb);
+       if (skb)
+               dev_kfree_skb(skb);
+       vnet_free_skbs(freeskbs);
+       if (pending)
+               (void)mod_timer(&port->clean_timer,
+                               jiffies + VNET_CLEAN_TIMEOUT);
+       else if (port)
+               del_timer(&port->clean_timer);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
 }
@@ -908,7 +1220,7 @@ static void vnet_set_rx_mode(struct net_device *dev)
 
 static int vnet_change_mtu(struct net_device *dev, int new_mtu)
 {
-       if (new_mtu != ETH_DATA_LEN)
+       if (new_mtu < 68 || new_mtu > 65535)
                return -EINVAL;
 
        dev->mtu = new_mtu;
@@ -964,17 +1276,22 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port)
        }
 
        for (i = 0; i < VNET_TX_RING_SIZE; i++) {
-               void *buf = port->tx_bufs[i].buf;
+               struct vio_net_desc *d;
+               struct sk_buff *skb = port->tx_bufs[i].skb;
 
-               if (!buf)
+               if (!skb)
                        continue;
 
+               d = vio_dring_entry(dr, i);
+               if (d->hdr.state == VIO_DESC_READY)
+                       pr_warn("active transmit buffers freed\n");
+
                ldc_unmap(port->vio.lp,
                          port->tx_bufs[i].cookies,
                          port->tx_bufs[i].ncookies);
-
-               kfree(buf);
-               port->tx_bufs[i].buf = NULL;
+               dev_kfree_skb(skb);
+               port->tx_bufs[i].skb = NULL;
+               d->hdr.state = VIO_DESC_FREE;
        }
 }
 
@@ -985,34 +1302,6 @@ static int vnet_port_alloc_tx_bufs(struct vnet_port *port)
        int i, err, ncookies;
        void *dring;
 
-       for (i = 0; i < VNET_TX_RING_SIZE; i++) {
-               void *buf = kzalloc(ETH_FRAME_LEN + 8, GFP_KERNEL);
-               int map_len = (ETH_FRAME_LEN + 7) & ~7;
-
-               err = -ENOMEM;
-               if (!buf)
-                       goto err_out;
-
-               err = -EFAULT;
-               if ((unsigned long)buf & (8UL - 1)) {
-                       pr_err("TX buffer misaligned\n");
-                       kfree(buf);
-                       goto err_out;
-               }
-
-               err = ldc_map_single(port->vio.lp, buf, map_len,
-                                    port->tx_bufs[i].cookies, 2,
-                                    (LDC_MAP_SHADOW |
-                                     LDC_MAP_DIRECT |
-                                     LDC_MAP_RW));
-               if (err < 0) {
-                       kfree(buf);
-                       goto err_out;
-               }
-               port->tx_bufs[i].buf = buf;
-               port->tx_bufs[i].ncookies = err;
-       }
-
        dr = &port->vio.drings[VIO_DRIVER_TX_RING];
 
        len = (VNET_TX_RING_SIZE *
@@ -1035,9 +1324,16 @@ static int vnet_port_alloc_tx_bufs(struct vnet_port *port)
                          (sizeof(struct ldc_trans_cookie) * 2));
        dr->num_entries = VNET_TX_RING_SIZE;
        dr->prod = dr->cons = 0;
+       port->start_cons = true; /* need an initial trigger */
        dr->pending = VNET_TX_RING_SIZE;
        dr->ncookies = ncookies;
 
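+       /* Start with every descriptor FREE so vnet_clean_tx_ring() stops
+        * at slots that have never been used.
+        */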
+       for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
+               struct vio_net_desc *d;
+
+               d = vio_dring_entry(dr, i);
+               d->hdr.state = VIO_DESC_FREE;
+       }
        return 0;
 
 err_out:
@@ -1069,6 +1365,8 @@ static struct vnet *vnet_new(const u64 *local_mac)
        dev = alloc_etherdev(sizeof(*vp));
        if (!dev)
                return ERR_PTR(-ENOMEM);
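+
+       /* Reserve room so vnet_skb_shape() rarely has to reallocate:
+        * VNET_PACKET_SKIP bytes of headroom plus alignment slack, and
+        * tail padding up to the next 8-byte boundary.
+        */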
+       dev->needed_headroom = VNET_PACKET_SKIP + 8;
+       dev->needed_tailroom = 8;
 
        for (i = 0; i < ETH_ALEN; i++)
                dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;
@@ -1263,6 +1561,9 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        pr_info("%s: PORT ( remote-mac %pM%s )\n",
                vp->dev->name, port->raddr, switch_port ? " switch-port" : "");
 
+       setup_timer(&port->clean_timer, vnet_clean_timer_expire,
+                   (unsigned long)port);
+
        vio_port_up(&port->vio);
 
        mdesc_release(hp);
@@ -1289,6 +1590,7 @@ static int vnet_port_remove(struct vio_dev *vdev)
                unsigned long flags;
 
                del_timer_sync(&port->vio.timer);
+               del_timer_sync(&port->clean_timer);
 
                spin_lock_irqsave(&vp->lock, flags);
                list_del(&port->list);