}
}
+static void virtnet_napi_enable(struct virtnet_info *vi)
+{
+ napi_enable(&vi->napi);
+
+ /* If all buffers were filled by the other side before we napi_enabled, we
+ * won't get another interrupt, so process any outstanding packets
+ * now. virtnet_poll wants to re-enable the queue, so we disable here.
+ * We synchronize against interrupts via NAPI_STATE_SCHED. */
+ if (napi_schedule_prep(&vi->napi)) {
+ virtqueue_disable_cb(vi->rvq);
+ __napi_schedule(&vi->napi);
+ }
+}
+
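Context, not part of the patch: the virtqueue_disable_cb() before __napi_schedule() matters because virtnet_poll() assumes callbacks are off whenever it holds NAPI_STATE_SCHED, and re-enables them itself once the ring is drained. A simplified sketch of that poll-side pattern, with the actual receive/refill work elided:

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	void *buf;
	unsigned int len;
	int received = 0;

again:
	while (received < budget &&
	       (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
		/* receive_buf(vi->dev, buf, len); -- elided */
		received++;
	}

	/* Out of packets? Stop polling and re-arm the callback. */
	if (received < budget) {
		napi_complete(napi);
		/* A buffer may have raced in between the last get_buf and
		 * enable_cb; if so, take NAPI_STATE_SCHED back, turn the
		 * callbacks off again and keep polling. */
		if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
		    napi_schedule_prep(napi)) {
			virtqueue_disable_cb(vi->rvq);
			__napi_schedule(napi);
			goto again;
		}
	}
	return received;
}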
static void refill_work(struct work_struct *work)
{
struct virtnet_info *vi;
bool still_empty;

vi = container_of(work, struct virtnet_info, refill.work);
napi_disable(&vi->napi);
still_empty = !try_fill_recv(vi, GFP_KERNEL);
- napi_enable(&vi->napi);
+ virtnet_napi_enable(vi);
/* In theory, this can happen: if we don't get any buffers in
 * we will *never* try to fill again. */
if (still_empty)
schedule_delayed_work(&vi->refill, HZ/2);
}

static int virtnet_open(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
- napi_enable(&vi->napi);
-
- /* If all buffers were filled by other side before we napi_enabled, we
- * won't get another interrupt, so process any outstanding packets
- * now. virtnet_poll wants re-enable the queue, so we disable here.
- * We synchronize against interrupts via NAPI_STATE_SCHED */
- if (napi_schedule_prep(&vi->napi)) {
- virtqueue_disable_cb(vi->rvq);
- __napi_schedule(&vi->napi);
- }
+ virtnet_napi_enable(vi);
return 0;
}
return 0;
}
-static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
-{
- struct virtnet_info *vi = netdev_priv(dev);
- struct virtio_device *vdev = vi->vdev;
-
- if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
- return -ENOSYS;
-
- return ethtool_op_set_tx_hw_csum(dev, data);
-}
-
static void virtnet_set_rx_mode(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
}
static const struct ethtool_ops virtnet_ethtool_ops = {
- .set_tx_csum = virtnet_set_tx_csum,
- .set_sg = ethtool_op_set_sg,
- .set_tso = ethtool_op_set_tso,
- .set_ufo = ethtool_op_set_ufo,
.get_link = ethtool_op_get_link,
};
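Context for the removals above: once the offload flags are published in dev->hw_features, the ethtool core implements the ethtool -K/-k paths generically, so the per-driver set_tx_csum/set_sg/set_tso/set_ufo hooks are dead code. virtnet_set_tx_csum's VIRTIO_NET_F_CSUM check is subsumed as well, since NETIF_F_HW_CSUM only lands in hw_features when the host offers the feature. A driver that did need to constrain or react to feature changes would use the ndo_fix_features/ndo_set_features netdev ops instead; a hypothetical sketch (virtio_net needs neither, and feature masks are a plain u32 in this kernel generation):

/* Hypothetical example -- virtio_net itself needs neither hook.
 * Shown only to illustrate where offload policy lives once
 * hw_features is in use. */
static u32 example_fix_features(struct net_device *dev, u32 features)
{
	/* e.g. refuse TSO when checksum offload is being turned off */
	if (!(features & NETIF_F_HW_CSUM))
		features &= ~NETIF_F_ALL_TSO;
	return features;
}

static int example_set_features(struct net_device *dev, u32 features)
{
	u32 changed = dev->features ^ features;

	if (changed & NETIF_F_HW_CSUM) {
		/* reprogram the device here if it needs to know */
	}
	return 0;
}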
SET_NETDEV_DEV(dev, &vdev->dev);
/* Do we support "hardware" checksums? */
- if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
/* This opens up the world of extra features. */
- dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
- if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
- dev->features |= NETIF_F_TSO | NETIF_F_UFO
+ dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+ if (csum)
+ dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
+ dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
| NETIF_F_TSO_ECN | NETIF_F_TSO6;
}
/* Individual feature bits: what can host handle? */
- if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
- dev->features |= NETIF_F_TSO;
- if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
- dev->features |= NETIF_F_TSO6;
- if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
- dev->features |= NETIF_F_TSO_ECN;
- if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
- dev->features |= NETIF_F_UFO;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
+ dev->hw_features |= NETIF_F_TSO;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
+ dev->hw_features |= NETIF_F_TSO6;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
+ dev->hw_features |= NETIF_F_TSO_ECN;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
+ dev->hw_features |= NETIF_F_UFO;
+
+ if (gso)
+ dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
+ /* (!csum && gso) case will be fixed by register_netdev() */
}
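The "(!csum && gso)" comment refers to the core's netdev_fix_features() pass, run from register_netdev() and on every subsequent features change: with csum=0 but gso=1, TSO/UFO land in dev->features without checksum or scatter-gather support, and the core strips them before the device comes up. Approximately (a sketch of the dependency chain, not the verbatim core code):

/* Approximate sketch of the dependency fixup in the core's
 * netdev_fix_features() -- not the verbatim kernel code. */
static u32 sketch_fix_features(u32 features)
{
	/* scatter-gather is useless without checksum offload */
	if ((features & NETIF_F_SG) && !(features & NETIF_F_ALL_CSUM))
		features &= ~NETIF_F_SG;

	/* TSO and UFO both require scatter-gather */
	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG))
		features &= ~NETIF_F_ALL_TSO;
	if ((features & NETIF_F_UFO) && !(features & NETIF_F_SG))
		features &= ~NETIF_F_UFO;

	return features;
}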
/* Configuration may specify what MAC to use. Otherwise random. */