F: drivers/usb/host/ohci-ep93xx.c
CIRRUS LOGIC CS4270 SOUND DRIVER
- M: Timur Tabi <timur@freescale.com>
+ M: Timur Tabi <timur@tabi.org>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
- S: Supported
+ S: Odd Fixes
F: sound/soc/codecs/cs4270*
CLEANCACHE API
F: drivers/net/ethernet/i825xx/eexpress.*
ETHERNET BRIDGE
- M: Stephen Hemminger <shemminger@vyatta.com>
+ M: Stephen Hemminger <stephen@networkplumber.org>
L: bridge@lists.linux-foundation.org
L: netdev@vger.kernel.org
W: http://www.linuxfoundation.org/en/Net:Bridge
F: include/linux/netfilter_bridge/
F: net/bridge/
-ETHERTEAM 16I DRIVER
-M: Mika Kuoppala <miku@iki.fi>
-S: Maintained
-F: drivers/net/ethernet/fujitsu/eth16i.c
-
EXT2 FILE SYSTEM
M: Jan Kara <jack@suse.cz>
L: linux-ext4@vger.kernel.org
F: include/uapi/linux/fb.h
FREESCALE DIU FRAMEBUFFER DRIVER
- M: Timur Tabi <timur@freescale.com>
+ M: Timur Tabi <timur@tabi.org>
L: linux-fbdev@vger.kernel.org
- S: Supported
+ S: Maintained
F: drivers/video/fsl-diu-fb.*
FREESCALE DMA DRIVER
F: include/linux/fs_enet_pd.h
FREESCALE QUICC ENGINE LIBRARY
- M: Timur Tabi <timur@freescale.com>
L: linuxppc-dev@lists.ozlabs.org
- S: Supported
+ S: Orphan
F: arch/powerpc/sysdev/qe_lib/
F: arch/powerpc/include/asm/*qe.h
F: drivers/net/ethernet/freescale/ucc_geth*
FREESCALE QUICC ENGINE UCC UART DRIVER
- M: Timur Tabi <timur@freescale.com>
+ M: Timur Tabi <timur@tabi.org>
L: linuxppc-dev@lists.ozlabs.org
- S: Supported
+ S: Maintained
F: drivers/tty/serial/ucc_uart.c
FREESCALE SOC SOUND DRIVERS
- M: Timur Tabi <timur@freescale.com>
+ M: Timur Tabi <timur@tabi.org>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
L: linuxppc-dev@lists.ozlabs.org
- S: Supported
+ S: Maintained
F: sound/soc/fsl/fsl*
F: sound/soc/fsl/mpc8610_hpcd.c
MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
M: Mirko Lindner <mlindner@marvell.com>
- M: Stephen Hemminger <shemminger@vyatta.com>
+ M: Stephen Hemminger <stephen@networkplumber.org>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/marvell/sk*
F: drivers/infiniband/hw/nes/
NETEM NETWORK EMULATOR
- M: Stephen Hemminger <shemminger@vyatta.com>
+ M: Stephen Hemminger <stephen@networkplumber.org>
L: netem@lists.linux-foundation.org
S: Maintained
F: net/sched/sch_netem.c
F: include/uapi/linux/nfs*
F: include/uapi/linux/sunrpc/
-NI5010 NETWORK DRIVER
-M: Jan-Pascal van Best <janpascal@vanbest.org>
-M: Andreas Mohr <andi@lisas.de>
-L: netdev@vger.kernel.org
-S: Maintained
-F: drivers/net/ethernet/racal/ni5010.*
-
NILFS2 FILESYSTEM
M: KONISHI Ryusuke <konishi.ryusuke@lab.ntt.co.jp>
L: linux-nilfs@vger.kernel.org
F: include/media/s3c_camif.h
SERIAL DRIVERS
- M: Alan Cox <alan@linux.intel.com>
+ M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
L: linux-serial@vger.kernel.org
S: Maintained
F: drivers/tty/serial
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/can/led.h>
#include "c_can.h"
stats->rx_packets++;
stats->rx_bytes += frame->can_dlc;
+ can_led_event(dev, CAN_LED_EVENT_RX);
+
return 0;
}
C_CAN_IFACE(MSGCTRL_REG, 0))
& IF_MCONT_DLC_MASK;
stats->tx_packets++;
+ can_led_event(dev, CAN_LED_EVENT_TX);
c_can_inval_msg_object(dev, 0, msg_obj_no);
} else {
break;
break;
case LEC_ACK_ERROR:
netdev_dbg(dev, "ack error\n");
- cf->data[2] |= (CAN_ERR_PROT_LOC_ACK |
+ cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
CAN_ERR_PROT_LOC_ACK_DEL);
break;
case LEC_BIT1_ERROR:
break;
case LEC_CRC_ERROR:
netdev_dbg(dev, "CRC error\n");
- cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
+ cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
CAN_ERR_PROT_LOC_CRC_DEL);
break;
default:
napi_enable(&priv->napi);
+ can_led_event(dev, CAN_LED_EVENT_OPEN);
+
/* start the c_can controller */
c_can_start(dev);
c_can_reset_ram(priv, false);
c_can_pm_runtime_put_sync(priv);
+ can_led_event(dev, CAN_LED_EVENT_STOP);
+
return 0;
}
err = register_candev(dev);
if (err)
c_can_pm_runtime_disable(priv);
+ else
+ devm_can_led_init(dev);
return err;
}
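/* Note (editorial, not part of the patch): devm_can_led_init() registers
 * device-managed LED triggers for the interface; the can_led_event()
 * calls added above drive them (RX/TX blink the triggers, OPEN/STOP turn
 * them on and off), and the devm_ prefix means the triggers are released
 * automatically when the device is unbound.
 */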
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/can/led.h>
#include <linux/can/platform/ti_hecc.h>
#define DRV_NAME "ti_hecc"
spin_unlock_irqrestore(&priv->mbx_lock, flags);
stats->rx_bytes += cf->can_dlc;
+ can_led_event(priv->ndev, CAN_LED_EVENT_RX);
netif_receive_skb(skb);
stats->rx_packets++;
}
if (err_status & HECC_CANES_CRCE) {
hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
- cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
CAN_ERR_PROT_LOC_CRC_DEL;
}
if (err_status & HECC_CANES_ACKE) {
hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
- cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
+ cf->data[3] |= CAN_ERR_PROT_LOC_ACK |
CAN_ERR_PROT_LOC_ACK_DEL;
}
}
stats->tx_bytes += hecc_read_mbx(priv, mbxno,
HECC_CANMCF) & 0xF;
stats->tx_packets++;
+ can_led_event(ndev, CAN_LED_EVENT_TX);
can_get_echo_skb(ndev, mbxno);
--priv->tx_tail;
}
return err;
}
+ can_led_event(ndev, CAN_LED_EVENT_OPEN);
+
ti_hecc_start(ndev);
napi_enable(&priv->napi);
netif_start_queue(ndev);
close_candev(ndev);
ti_hecc_transceiver_switch(priv, 0);
+ can_led_event(ndev, CAN_LED_EVENT_STOP);
+
return 0;
}
dev_err(&pdev->dev, "register_candev() failed\n");
goto probe_exit_clk;
}
+
+ devm_can_led_init(ndev);
+
dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
priv->base, (u32) ndev->irq);
return -1;
}
+ /* All frames should fit into a single buffer */
+ if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG))
+ return -1;
+
/* Check if packet has checksum already */
if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
!(ext_status & RXDESC_IP_PAYLOAD_MASK))
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
{
const struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
-
- return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
- c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
+ struct sge_rspq *q;
+ int i;
+ int r = 0;
+
+ for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
+ q = &adap->sge.ethrxq[i].rspq;
+ r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
+ c->rx_max_coalesced_frames);
+ if (r) {
+ dev_err(&dev->dev, "failed to set coalesce %d\n", r);
+ break;
+ }
+ }
+ return r;
}
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
VFRES_NEQ, VFRES_NETHCTRL,
VFRES_NIQFLINT, VFRES_NIQ,
VFRES_TC, VFRES_NVI,
- FW_PFVF_CMD_CMASK_GET(
- FW_PFVF_CMD_CMASK_MASK),
+ FW_PFVF_CMD_CMASK_MASK,
pfvfres_pmask(
adapter, pf, vf),
VFRES_NEXACTF,
/* Debugfs support is optional, just warn if this fails */
cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
if (!cxgb4_debugfs_root)
- pr_warning("could not create debugfs entry, continuing\n");
+ pr_warn("could not create debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4_driver);
if (ret < 0)
wmb();
inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
}
- tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
- (!!vlan_tx_tag_present(skb));
- tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
netif_tx_stop_queue(ring->tx_queue);
priv->port_stats.queue_stopped++;
- return NETDEV_TX_BUSY;
+ /* If the queue was emptied after the check above but before
* stop_queue, we must wake it here, or else it will remain
* stopped forever.
* A memory barrier is needed to make sure ring->cons was not
* updated before the queue was stopped.
*/
+ wmb();
+
+ if (unlikely(((int)(ring->prod - ring->cons)) <=
+ ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ netif_tx_wake_queue(ring->tx_queue);
+ priv->port_stats.wake_queue++;
+ } else {
+ return NETDEV_TX_BUSY;
+ }
}
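/* Note (editorial, not part of the patch): queue stop/wake is lockless,
 * so the completion path may empty the ring right after
 * netif_tx_stop_queue(). The wmb() orders the stop against the re-read
 * of ring->cons; if enough room reappeared, the queue is woken here
 * instead of returning NETDEV_TX_BUSY, otherwise it would stay stopped
 * forever.
 */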
/* Track current inflight packets for performance analysis */
ring->tx_csum++;
}
- /* Copy dst mac address to wqe */
- ethh = (struct ethhdr *)skb->data;
- tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
- tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
+ if (mlx4_is_mfunc(mdev->dev) || priv->validate_loopback) {
+ /* Copy dst mac address to wqe. This allows loopback in eSwitch,
+ * so that VFs and PF can communicate with each other
+ */
+ ethh = (struct ethhdr *)skb->data;
+ tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
+ tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
+ }
+
/* Handle LSO (TSO) packets */
if (lso_header_size) {
/* Mark opcode as LSO */
int i;
if (msi_x) {
- /* In multifunction mode each function gets 2 msi-X vectors
- * one for data path completions anf the other for asynch events
- * or command completions */
- if (mlx4_is_mfunc(dev)) {
- nreq = 2;
- } else {
- nreq = min_t(int, dev->caps.num_eqs -
- dev->caps.reserved_eqs, nreq);
- }
+ nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
+ nreq);
entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
if (!entries)
dev->num_slaves = MLX4_MAX_NUM_SLAVES;
else {
dev->num_slaves = 0;
- if (mlx4_multi_func_init(dev)) {
+ err = mlx4_multi_func_init(dev);
+ if (err) {
mlx4_err(dev, "Failed to init slave mfunc"
" interface, aborting.\n");
goto err_cmd;
/* In master functions, the communication channel must be initialized
* after obtaining its address from fw */
if (mlx4_is_master(dev)) {
- if (mlx4_multi_func_init(dev)) {
+ err = mlx4_multi_func_init(dev);
+ if (err) {
mlx4_err(dev, "Failed to init master mfunc"
"interface, aborting.\n");
goto err_close;
mlx4_enable_msi_x(dev);
if ((mlx4_is_mfunc(dev)) &&
!(dev->flags & MLX4_FLAG_MSI_X)) {
+ err = -ENOSYS;
mlx4_err(dev, "INTx is not supported in multi-function mode."
" aborting.\n");
goto err_free_eq;
for (i = 0; i < 6; i++)
netdev->dev_addr[i] = *(p + 5 - i);
- memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
/* set station address */
- if (!is_valid_ether_addr(netdev->perm_addr))
+ if (!is_valid_ether_addr(netdev->dev_addr))
dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
return 0;
while (--i >= 0) {
nf = &pbuf->frag_array[i+1];
pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ nf->dma = 0ULL;
}
nf = &pbuf->frag_array[0];
pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+ nf->dma = 0ULL;
out_err:
return -ENOMEM;
#define R8169_REGS_SIZE 256
#define R8169_NAPI_WEIGHT 64
#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
-#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
+#define NUM_RX_DESC 256U /* Number of Rx descriptor registers */
#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
u16 mac_version;
u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
- u32 dirty_rx;
u32 dirty_tx;
struct rtl8169_stats rx_stats;
struct rtl8169_stats tx_stats;
if (opts2 & RxVlanTag)
__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
-
- desc->opts2 = 0;
}
static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
{
- tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
+ tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
}
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
/* The infamous DAC f*ckup only happens at boot time */
- if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
+ if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) {
void __iomem *ioaddr = tp->mmio_addr;
netif_info(tp, intr, dev, "disabling PCI DAC\n");
unsigned int count;
cur_rx = tp->cur_rx;
- rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
- rx_left = min(rx_left, budget);
- for (; rx_left > 0; rx_left--, cur_rx++) {
+ for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
unsigned int entry = cur_rx % NUM_RX_DESC;
struct RxDesc *desc = tp->RxDescArray + entry;
u32 status;
!(status & (RxRWT | RxFOVF)) &&
(dev->features & NETIF_F_RXALL))
goto process_pkt;
-
- rtl8169_mark_to_asic(desc, rx_buf_sz);
} else {
struct sk_buff *skb;
dma_addr_t addr;
if (unlikely(rtl8169_fragmented_frame(status))) {
dev->stats.rx_dropped++;
dev->stats.rx_length_errors++;
- rtl8169_mark_to_asic(desc, rx_buf_sz);
- continue;
+ goto release_descriptor;
}
skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
tp, pkt_size, addr);
- rtl8169_mark_to_asic(desc, rx_buf_sz);
if (!skb) {
dev->stats.rx_dropped++;
- continue;
+ goto release_descriptor;
}
rtl8169_rx_csum(skb, status);
tp->rx_stats.bytes += pkt_size;
u64_stats_update_end(&tp->rx_stats.syncp);
}
-
- /* Work around for AMD plateform. */
- if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
- (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
- desc->opts2 = 0;
- cur_rx++;
- }
+ release_descriptor:
+ desc->opts2 = 0;
+ wmb();
+ rtl8169_mark_to_asic(desc, rx_buf_sz);
}
count = cur_rx - tp->cur_rx;
tp->cur_rx = cur_rx;
- tp->dirty_rx += count;
-
return count;
}
/* Get MAC address */
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = RTL_R8(MAC0 + i);
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
static void netvsc_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, KBUILD_MODNAME);
- strcpy(info->version, HV_DRV_VERSION);
- strcpy(info->fw_version, "N/A");
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, HV_DRV_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
struct net_device_context *ndevctx = netdev_priv(ndev);
struct hv_device *hdev = ndevctx->device_ctx;
struct sockaddr *addr = p;
- char save_adr[14];
+ char save_adr[ETH_ALEN];
unsigned char save_aatype;
int err;
if (!(dev->flags & IFF_UP)) {
/* Just copy in the new address */
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
} else {
/* Rehash and update the device filters */
static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- snprintf(drvinfo->driver, 32, "macvlan");
- snprintf(drvinfo->version, 32, "0.1");
+ strlcpy(drvinfo->driver, "macvlan", sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, "0.1", sizeof(drvinfo->version));
}
static int macvlan_ethtool_get_settings(struct net_device *dev,
memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN);
}
+ err = netdev_upper_dev_link(lowerdev, dev);
+ if (err)
+ goto destroy_port;
+
port->count += 1;
err = register_netdevice(dev);
if (err < 0)
- goto destroy_port;
+ goto upper_dev_unlink;
list_add_tail(&vlan->list, &port->vlans);
netif_stacked_transfer_operstate(lowerdev, dev);
return 0;
+upper_dev_unlink:
+ netdev_upper_dev_unlink(lowerdev, dev);
destroy_port:
port->count -= 1;
if (!port->count)
list_del(&vlan->list);
unregister_netdevice_queue(dev, head);
+ netdev_upper_dev_unlink(vlan->lowerdev, dev);
}
EXPORT_SYMBOL_GPL(macvlan_dellink);
static size_t macvlan_get_size(const struct net_device *dev)
{
- return nla_total_size(4);
+ return (0
+ + nla_total_size(4) /* IFLA_MACVLAN_MODE */
+ + nla_total_size(2) /* IFLA_MACVLAN_FLAGS */
+ );
}
static int macvlan_fill_info(struct sk_buff *skb,
unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
};
- /* 1024 is probably a high enough limit: modern hypervisors seem to support on
- * the order of 100-200 CPUs so this leaves us some breathing space if we want
- * to match a queue per guest CPU.
- */
- #define MAX_TAP_QUEUES 1024
+ /* DEFAULT_MAX_NUM_RSS_QUEUES was chosen so that the rx/tx queues allocated
+ * for the netdevice fit in one page, which guarantees the memory allocation
+ * succeeds. TODO: increase the limit. */
+ #define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
+ #define MAX_TAP_FLOWS 4096
#define TUN_FLOW_EXPIRE (3 * HZ)
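/* Note (editorial, not part of the patch): with IFF_MULTI_QUEUE the
 * device is now created with DEFAULT_MAX_NUM_RSS_QUEUES tx/rx queues
 * instead of 1024 (see the alloc_netdev_mqs() call below), and
 * MAX_TAP_FLOWS caps the number of flow-tracking entries accounted in
 * tun->flow_count.
 */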
unsigned int numdisabled;
struct list_head disabled;
void *security;
+ u32 flow_count;
};
static inline u32 tun_hashfn(u32 rxhash)
e->queue_index = queue_index;
e->tun = tun;
hlist_add_head_rcu(&e->hash_link, head);
+ ++tun->flow_count;
}
return e;
}
e->rxhash, e->queue_index);
hlist_del_rcu(&e->hash_link);
kfree_rcu(e, rcu);
+ --tun->flow_count;
}
static void tun_flow_flush(struct tun_struct *tun)
e->updated = jiffies;
} else {
spin_lock_bh(&tun->lock);
- if (!tun_flow_find(head, rxhash))
+ if (!tun_flow_find(head, rxhash) &&
+ tun->flow_count < MAX_TAP_FLOWS)
tun_flow_create(tun, head, rxhash, queue_index);
if (!timer_pending(&tun->flow_gc_timer))
skb->data_len += len;
skb->len += len;
skb->truesize += truesize;
+ skb_shinfo(skb)->gso_type |= SKB_GSO_SHARED_FRAG;
atomic_add(truesize, &skb->sk->sk_wmem_alloc);
while (len) {
int off = base & ~PAGE_MASK;
}
if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ unsigned short gso_type = 0;
+
pr_debug("GSO!\n");
switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ gso_type = SKB_GSO_TCPV4;
break;
case VIRTIO_NET_HDR_GSO_TCPV6:
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ gso_type = SKB_GSO_TCPV6;
break;
case VIRTIO_NET_HDR_GSO_UDP:
- skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+ gso_type = SKB_GSO_UDP;
break;
default:
tun->dev->stats.rx_frame_errors++;
}
if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+ gso_type |= SKB_GSO_TCP_ECN;
skb_shinfo(skb)->gso_size = gso.gso_size;
+ skb_shinfo(skb)->gso_type |= gso_type;
if (skb_shinfo(skb)->gso_size == 0) {
tun->dev->stats.rx_frame_errors++;
kfree_skb(skb);
else {
char *name;
unsigned long flags = 0;
+ int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
+ MAX_TAP_QUEUES : 1;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
name = ifr->ifr_name;
dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
- tun_setup,
- MAX_TAP_QUEUES, MAX_TAP_QUEUES);
+ tun_setup, queues, queues);
+
if (!dev)
return -ENOMEM;
{
struct usbnet *dev = netdev_priv(net);
- strncpy(info->driver, dev->driver_name, sizeof(info->driver));
- strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
- strncpy(info->fw_version, dev->driver_info->description,
+ strlcpy(info->driver, dev->driver_name, sizeof(info->driver));
+ strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, dev->driver_info->description,
sizeof(info->fw_version));
usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
len -= temp;
}
+ /* some buggy devices have an IAD but no CDC Union */
+ if (!ctx->union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
+ ctx->control = intf;
+ ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1);
+ dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n");
+ }
+
/* check if we got everything */
if ((ctx->control == NULL) || (ctx->data == NULL) ||
((!ctx->mbim_desc) && ((ctx->ether_desc == NULL) || (ctx->control != intf))))
error2:
usb_set_intfdata(ctx->control, NULL);
usb_set_intfdata(ctx->data, NULL);
- usb_driver_release_interface(driver, ctx->data);
+ if (ctx->data != ctx->control)
+ usb_driver_release_interface(driver, ctx->data);
error:
cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]);
dev->data[0] = 0;
.tx_fixup = cdc_ncm_tx_fixup,
};
+ /* Same as wwan_info, but with FLAG_NOARP */
+ static const struct driver_info wwan_noarp_info = {
+ .description = "Mobile Broadband Network Device (NO ARP)",
+ .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
+ | FLAG_WWAN | FLAG_NOARP,
+ .bind = cdc_ncm_bind,
+ .unbind = cdc_ncm_unbind,
+ .check_connect = cdc_ncm_check_connect,
+ .manage_power = usbnet_manage_power,
+ .status = cdc_ncm_status,
+ .rx_fixup = cdc_ncm_rx_fixup,
+ .tx_fixup = cdc_ncm_tx_fixup,
+ };
+
static const struct usb_device_id cdc_devs[] = {
/* Ericsson MBM devices like F5521gw */
{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
.driver_info = (unsigned long)&wwan_info,
},
+ /* Infineon (now Intel) HSPA Modem platform */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_noarp_info,
+ },
+
/* Generic CDC-NCM devices */
{ USB_INTERFACE_INFO(USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
#define DM_MCAST_ADDR 0x16 /* 8 bytes */
#define DM_GPR_CTRL 0x1e
#define DM_GPR_DATA 0x1f
+ #define DM_CHIP_ID 0x2c
+ #define DM_MODE_CTRL 0x91 /* only on dm9620 */
+
+ /* chip id values */
+ #define ID_DM9601 0
+ #define ID_DM9620 1
#define DM_MAX_MCAST 64
#define DM_MCAST_SIZE 8
#define DM_RX_OVERHEAD 7 /* 3 byte header + 4 byte crc tail */
#define DM_TIMEOUT 1000
-
static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data)
{
int err;
static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)
{
- return usbnet_write_cmd(dev, DM_WRITE_REGS,
+ return usbnet_write_cmd(dev, DM_WRITE_REG,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, reg, NULL, 0);
}
- static void dm_write_async_helper(struct usbnet *dev, u8 reg, u8 value,
- u16 length, void *data)
+ static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
{
usbnet_write_cmd_async(dev, DM_WRITE_REGS,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value, reg, data, length);
- }
-
- static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
- {
- netdev_dbg(dev->net, "dm_write_async() reg=0x%02x length=%d\n", reg, length);
-
- dm_write_async_helper(dev, reg, 0, length, data);
+ 0, reg, data, length);
}
static void dm_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
{
- netdev_dbg(dev->net, "dm_write_reg_async() reg=0x%02x value=0x%02x\n",
- reg, value);
-
- dm_write_async_helper(dev, reg, value, 0, NULL);
+ usbnet_write_cmd_async(dev, DM_WRITE_REG,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, reg, NULL, 0);
}
static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 *value)
dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0xc : 0x4);
for (i = 0; i < DM_TIMEOUT; i++) {
- u8 tmp;
+ u8 tmp = 0;
udelay(1);
ret = dm_read_reg(dev, DM_SHARED_CTRL, &tmp);
dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1a : 0x12);
for (i = 0; i < DM_TIMEOUT; i++) {
- u8 tmp;
+ u8 tmp = 0;
udelay(1);
ret = dm_read_reg(dev, DM_SHARED_CTRL, &tmp);
static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret;
- u8 mac[ETH_ALEN];
+ u8 mac[ETH_ALEN], id;
ret = usbnet_get_endpoints(dev, intf);
if (ret)
__dm9601_set_mac_address(dev);
}
+ if (dm_read_reg(dev, DM_CHIP_ID, &id) < 0) {
+ netdev_err(dev->net, "Error reading chip ID\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* put dm9620 devices in dm9601 mode */
+ if (id == ID_DM9620) {
+ u8 mode;
+
+ if (dm_read_reg(dev, DM_MODE_CTRL, &mode) < 0) {
+ netdev_err(dev->net, "Error reading MODE_CTRL\n");
+ ret = -ENODEV;
+ goto out;
+ }
+ dm_write_reg(dev, DM_MODE_CTRL, mode & 0x7f);
+ }
+
/* power up phy */
dm_write_reg(dev, DM_GPR_CTRL, 1);
dm_write_reg(dev, DM_GPR_DATA, 0);
USB_DEVICE(0x0a46, 0x9000), /* DM9000E */
.driver_info = (unsigned long)&dm9601_info,
},
+ {
+ USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */
+ .driver_info = (unsigned long)&dm9601_info,
+ },
{}, // END
};
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
+ #include <linux/cpu.h>
static int napi_weight = 128;
module_param(napi_weight, int, 0444);
/* Is the affinity hint set for the virtqueues? */
bool affinity_hint_set;
+
+ /* Per-cpu variable to show the mapping from CPU to virtqueue */
+ int __percpu *vq_index;
+
+ /* CPU hot plug notifier */
+ struct notifier_block nb;
};
struct skb_vnet_hdr {
skb->len += size;
skb->truesize += PAGE_SIZE;
skb_shinfo(skb)->nr_frags++;
+ skb_shinfo(skb)->gso_type |= SKB_GSO_SHARED_FRAG;
*len -= size;
}
ntohs(skb->protocol), skb->len, skb->pkt_type);
if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ unsigned short gso_type = 0;
+
pr_debug("GSO!\n");
switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ gso_type = SKB_GSO_TCPV4;
break;
case VIRTIO_NET_HDR_GSO_UDP:
- skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+ gso_type = SKB_GSO_UDP;
break;
case VIRTIO_NET_HDR_GSO_TCPV6:
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ gso_type = SKB_GSO_TCPV6;
break;
default:
net_warn_ratelimited("%s: bad gso type %u.\n",
}
if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+ gso_type |= SKB_GSO_TCP_ECN;
skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
if (skb_shinfo(skb)->gso_size == 0) {
goto frame_err;
}
+ skb_shinfo(skb)->gso_type |= gso_type;
/* Header must be checked, and gso_segs computed. */
skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
skb_shinfo(skb)->gso_segs = 0;
return NETDEV_TX_OK;
}
+/*
+ * Send command via the control virtqueue and check status. Commands
+ * supported by the hypervisor, as indicated by feature bits, should
+ * never fail unless improperly formatted.
+ */
+static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
+ struct scatterlist *data, int out, int in)
+{
+ struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
+ struct virtio_net_ctrl_hdr ctrl;
+ virtio_net_ctrl_ack status = ~0;
+ unsigned int tmp;
+ int i;
+
+ /* Caller should know better */
+ BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
+ (out + in > VIRTNET_SEND_COMMAND_SG_MAX));
+
+ out++; /* Add header */
+ in++; /* Add return status */
+
+ ctrl.class = class;
+ ctrl.cmd = cmd;
+
+ sg_init_table(sg, out + in);
+
+ sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
+ for_each_sg(data, s, out + in - 2, i)
+ sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
+ sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
+
+ BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);
+
+ virtqueue_kick(vi->cvq);
+
+ /* Spin for a response; the kick causes an ioport write, trapping
+ * into the hypervisor, so the request should be handled immediately.
+ */
+ while (!virtqueue_get_buf(vi->cvq, &tmp))
+ cpu_relax();
+
+ return status == VIRTIO_NET_OK;
+}
+
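/* Note (editorial, not part of the patch): layout of the scatterlist
 * built by virtnet_send_command() above:
 *
 *   sg[0]            ctrl header { class, cmd }      (device-readable)
 *   sg[1..out+in-2]  caller-supplied data buffers
 *   sg[out+in-1]     virtio_net_ctrl_ack status byte (device-writable)
 *
 * The host acknowledges the command by setting status to VIRTIO_NET_OK.
 */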
static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
struct virtnet_info *vi = netdev_priv(dev);
struct virtio_device *vdev = vi->vdev;
int ret;
+ struct sockaddr *addr = p;
+ struct scatterlist sg;
- ret = eth_mac_addr(dev, p);
+ ret = eth_prepare_mac_addr_change(dev, p);
if (ret)
return ret;
- if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
+ sg_init_one(&sg, addr->sa_data, dev->addr_len);
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
+ VIRTIO_NET_CTRL_MAC_ADDR_SET,
+ &sg, 1, 0)) {
+ dev_warn(&vdev->dev,
+ "Failed to set mac address by vq command.\n");
+ return -EINVAL;
+ }
+ } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
- dev->dev_addr, dev->addr_len);
+ addr->sa_data, dev->addr_len);
+ }
+
+ eth_commit_mac_addr_change(dev, p);
return 0;
}
}
#endif
-/*
- * Send command via the control virtqueue and check status. Commands
- * supported by the hypervisor, as indicated by feature bits, should
- * never fail unless improperly formated.
- */
-static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
- struct scatterlist *data, int out, int in)
-{
- struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
- struct virtio_net_ctrl_hdr ctrl;
- virtio_net_ctrl_ack status = ~0;
- unsigned int tmp;
- int i;
-
- /* Caller should know better */
- BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
- (out + in > VIRTNET_SEND_COMMAND_SG_MAX));
-
- out++; /* Add header */
- in++; /* Add return status */
-
- ctrl.class = class;
- ctrl.cmd = cmd;
-
- sg_init_table(sg, out + in);
-
- sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
- for_each_sg(data, s, out + in - 2, i)
- sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
- sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
-
- BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);
-
- virtqueue_kick(vi->cvq);
-
- /*
- * Spin for a response, the kick causes an ioport write, trapping
- * into the hypervisor, so the request should be handled immediately.
- */
- while (!virtqueue_get_buf(vi->cvq, &tmp))
- cpu_relax();
-
- return status == VIRTIO_NET_OK;
-}
-
static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
rtnl_lock();
return 0;
}
- static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
+ static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
{
int i;
+ int cpu;
+
+ if (vi->affinity_hint_set) {
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ virtqueue_set_affinity(vi->rq[i].vq, -1);
+ virtqueue_set_affinity(vi->sq[i].vq, -1);
+ }
+
+ vi->affinity_hint_set = false;
+ }
+
+ i = 0;
+ for_each_online_cpu(cpu) {
+ if (cpu == hcpu) {
+ *per_cpu_ptr(vi->vq_index, cpu) = -1;
+ } else {
+ *per_cpu_ptr(vi->vq_index, cpu) =
+ ++i % vi->curr_queue_pairs;
+ }
+ }
+ }
+
+ static void virtnet_set_affinity(struct virtnet_info *vi)
+ {
+ int i;
+ int cpu;
/* In multiqueue mode, when the number of cpus equals the number of
* queue pairs, we let the queue pairs be private to one cpu by
* setting the affinity hint to eliminate the contention.
*/
- if ((vi->curr_queue_pairs == 1 ||
- vi->max_queue_pairs != num_online_cpus()) && set) {
- if (vi->affinity_hint_set)
- set = false;
- else
- return;
+ if (vi->curr_queue_pairs == 1 ||
+ vi->max_queue_pairs != num_online_cpus()) {
+ virtnet_clean_affinity(vi, -1);
+ return;
}
- for (i = 0; i < vi->max_queue_pairs; i++) {
- int cpu = set ? i : -1;
+ i = 0;
+ for_each_online_cpu(cpu) {
virtqueue_set_affinity(vi->rq[i].vq, cpu);
virtqueue_set_affinity(vi->sq[i].vq, cpu);
+ *per_cpu_ptr(vi->vq_index, cpu) = i;
+ i++;
}
- if (set)
- vi->affinity_hint_set = true;
- else
- vi->affinity_hint_set = false;
+ vi->affinity_hint_set = true;
+ }
+
+ static int virtnet_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+ {
+ struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
+ case CPU_DEAD:
+ virtnet_set_affinity(vi);
+ break;
+ case CPU_DOWN_PREPARE:
+ virtnet_clean_affinity(vi, (long)hcpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
}
static void virtnet_get_ringparam(struct net_device *dev,
if (queue_pairs > vi->max_queue_pairs)
return -EINVAL;
+ get_online_cpus();
err = virtnet_set_queues(vi, queue_pairs);
if (!err) {
netif_set_real_num_tx_queues(dev, queue_pairs);
netif_set_real_num_rx_queues(dev, queue_pairs);
- virtnet_set_affinity(vi, true);
+ virtnet_set_affinity(vi);
}
+ put_online_cpus();
return err;
}
/* To avoid contending a lock hold by a vcpu who would exit to host, select the
* txq based on the processor id.
- * TODO: handle cpu hotplug.
*/
static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
{
- int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
- smp_processor_id();
+ int txq;
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ if (skb_rx_queue_recorded(skb)) {
+ txq = skb_get_rx_queue(skb);
+ } else {
+ txq = *__this_cpu_ptr(vi->vq_index);
+ if (txq == -1)
+ txq = 0;
+ }
while (unlikely(txq >= dev->real_num_tx_queues))
txq -= dev->real_num_tx_queues;
{
struct virtio_device *vdev = vi->vdev;
- virtnet_set_affinity(vi, false);
+ virtnet_clean_affinity(vi, -1);
vdev->config->del_vqs(vdev);
if (ret)
goto err_free;
- virtnet_set_affinity(vi, true);
+ get_online_cpus();
+ virtnet_set_affinity(vi);
+ put_online_cpus();
+
return 0;
err_free:
if (vi->stats == NULL)
goto free;
+ vi->vq_index = alloc_percpu(int);
+ if (vi->vq_index == NULL)
+ goto free_stats;
+
mutex_init(&vi->config_lock);
vi->config_enable = true;
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
err = init_vqs(vi);
if (err)
- goto free_stats;
+ goto free_index;
netif_set_real_num_tx_queues(dev, 1);
netif_set_real_num_rx_queues(dev, 1);
}
}
+ vi->nb.notifier_call = &virtnet_cpu_callback;
+ err = register_hotcpu_notifier(&vi->nb);
+ if (err) {
+ pr_debug("virtio_net: registering cpu notifier failed\n");
+ goto free_recv_bufs;
+ }
+
/* Assume link up if device can't report link status,
otherwise get link status from config. */
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
free_vqs:
cancel_delayed_work_sync(&vi->refill);
virtnet_del_vqs(vi);
+ free_index:
+ free_percpu(vi->vq_index);
free_stats:
free_percpu(vi->stats);
free:
{
struct virtnet_info *vi = vdev->priv;
+ unregister_hotcpu_notifier(&vi->nb);
+
/* Prevent config work handler from accessing the device. */
mutex_lock(&vi->config_lock);
vi->config_enable = false;
flush_work(&vi->config_work);
+ free_percpu(vi->vq_index);
free_percpu(vi->stats);
free_netdev(vi->dev);
}
VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
+ VIRTIO_NET_F_CTRL_MAC_ADDR,
};
static struct virtio_driver virtio_net_driver = {
-/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2013 B.A.T.M.A.N. contributors:
*
* Antonio Quartulli
*
struct arphdr *arphdr;
struct ethhdr *ethhdr;
__be32 ip_src, ip_dst;
+ uint8_t *hw_src, *hw_dst;
uint16_t type = 0;
/* pull the ethernet header */
ip_src = batadv_arp_ip_src(skb, hdr_size);
ip_dst = batadv_arp_ip_dst(skb, hdr_size);
if (ipv4_is_loopback(ip_src) || ipv4_is_multicast(ip_src) ||
- ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst))
+ ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst) ||
+ ipv4_is_zeronet(ip_src) || ipv4_is_lbcast(ip_src) ||
+ ipv4_is_zeronet(ip_dst) || ipv4_is_lbcast(ip_dst))
goto out;
+ hw_src = batadv_arp_hw_src(skb, hdr_size);
+ if (is_zero_ether_addr(hw_src) || is_multicast_ether_addr(hw_src))
+ goto out;
+
+ /* we don't care about the destination MAC address in ARP requests */
+ if (arphdr->ar_op != htons(ARPOP_REQUEST)) {
+ hw_dst = batadv_arp_hw_dst(skb, hdr_size);
+ if (is_zero_ether_addr(hw_dst) ||
+ is_multicast_ether_addr(hw_dst))
+ goto out;
+ }
+
type = ntohs(arphdr->ar_op);
out:
return type;
*/
ret = !batadv_is_my_client(bat_priv, hw_dst);
out:
+ if (ret)
+ kfree_skb(skb);
/* if ret == false -> packet has to be delivered to the interface */
return ret;
}
static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- const struct iphdr *old_iph = ip_hdr(skb);
+ const struct iphdr *old_iph;
const struct iphdr *tiph;
struct flowi4 fl4;
u8 tos;
skb_checksum_help(skb))
goto tx_error;
+ old_iph = ip_hdr(skb);
+
if (dev->type == ARPHRD_ETHER)
IPCB(skb)->flags = 0;
ttl = tiph->ttl;
tos = tiph->tos;
- if (tos == 1) {
- tos = 0;
+ if (tos & 0x1) {
+ tos &= ~0x1;
if (skb->protocol == htons(ETH_P_IP))
tos = old_iph->tos;
else if (skb->protocol == htons(ETH_P_IPV6))
ptr--;
}
if (tunnel->parms.o_flags&GRE_CSUM) {
+ int offset = skb_transport_offset(skb);
+
*ptr = 0;
- *(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr));
+ *(__sum16 *)ptr = csum_fold(skb_checksum(skb, offset,
+ skb->len - offset,
+ 0));
}
}
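/* Note (editorial, not part of the patch): ip_compute_csum() only summed
 * the linear part of the skb, whereas skb_checksum() also walks paged
 * fragments and the frag list, so the GRE checksum is now correct for
 * non-linear skbs; csum_fold() collapses the 32-bit running sum into the
 * final 16-bit checksum.
 */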
* We do take care of PMTU discovery (RFC1191) special case :
* we can receive locally generated ICMP messages while socket is held.
*/
- if (sock_owned_by_user(sk) &&
- type != ICMP_DEST_UNREACH &&
- code != ICMP_FRAG_NEEDED)
- NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
-
+ if (sock_owned_by_user(sk)) {
+ if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
+ NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+ }
if (sk->sk_state == TCP_CLOSE)
goto out;
* no RST generated if md5 hash doesn't match.
*/
sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
- &tcp_hashinfo, ip_hdr(skb)->daddr,
+ &tcp_hashinfo, ip_hdr(skb)->saddr,
+ th->source, ip_hdr(skb)->daddr,
ntohs(th->source), inet_iif(skb));
/* don't send rst if it can't find key */
if (!sk1)
goto drop_and_free;
if (!want_cookie || tmp_opt.tstamp_ok)
- TCP_ECN_create_request(req, skb);
+ TCP_ECN_create_request(req, skb, sock_net(sk));
if (want_cookie) {
isn = cookie_v4_init_sequence(sk, skb, &req->mss);
case TCP_TW_SYN: {
struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
&tcp_hashinfo,
+ iph->saddr, th->source,
iph->daddr, th->dest,
inet_iif(skb));
if (sk2) {
static int __net_init tcp_sk_init(struct net *net)
{
+ net->ipv4.sysctl_tcp_ecn = 2;
return 0;
}
{
struct sock *sk2;
struct hlist_nulls_node *node;
+ kuid_t uid = sock_i_uid(sk);
sk_nulls_for_each(sk2, node, &hslot->head)
if (net_eq(sock_net(sk2), net) &&
(!sk2->sk_reuse || !sk->sk_reuse) &&
(!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
+ (!sk2->sk_reuseport || !sk->sk_reuseport ||
+ !uid_eq(uid, sock_i_uid(sk2))) &&
(*saddr_comp)(sk, sk2)) {
if (bitmap)
__set_bit(udp_sk(sk2)->udp_port_hash >> log,
{
struct sock *sk2;
struct hlist_nulls_node *node;
+ kuid_t uid = sock_i_uid(sk);
int res = 0;
spin_lock(&hslot2->lock);
(!sk2->sk_reuse || !sk->sk_reuse) &&
(!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
+ (!sk2->sk_reuseport || !sk->sk_reuseport ||
+ !uid_eq(uid, sock_i_uid(sk2))) &&
(*saddr_comp)(sk, sk2)) {
res = 1;
break;
!ipv6_only_sock(sk)) {
struct inet_sock *inet = inet_sk(sk);
- score = (sk->sk_family == PF_INET ? 1 : 0);
+ score = (sk->sk_family == PF_INET ? 2 : 1);
if (inet->inet_rcv_saddr) {
if (inet->inet_rcv_saddr != daddr)
return -1;
- score += 2;
+ score += 4;
}
if (inet->inet_daddr) {
if (inet->inet_daddr != saddr)
return -1;
- score += 2;
+ score += 4;
}
if (inet->inet_dport) {
if (inet->inet_dport != sport)
return -1;
- score += 2;
+ score += 4;
}
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
return -1;
- score += 2;
+ score += 4;
}
}
return score;
/*
* In this second variant, we check (daddr, dport) matches (inet_rcv_sadd, inet_num)
*/
-#define SCORE2_MAX (1 + 2 + 2 + 2)
static inline int compute_score2(struct sock *sk, struct net *net,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned int hnum, int dif)
if (inet->inet_num != hnum)
return -1;
- score = (sk->sk_family == PF_INET ? 1 : 0);
+ score = (sk->sk_family == PF_INET ? 2 : 1);
if (inet->inet_daddr) {
if (inet->inet_daddr != saddr)
return -1;
- score += 2;
+ score += 4;
}
if (inet->inet_dport) {
if (inet->inet_dport != sport)
return -1;
- score += 2;
+ score += 4;
}
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
return -1;
- score += 2;
+ score += 4;
}
}
return score;
{
struct sock *sk, *result;
struct hlist_nulls_node *node;
- int score, badness;
+ int score, badness, matches = 0, reuseport = 0;
+ u32 hash = 0;
begin:
result = NULL;
- badness = -1;
+ badness = 0;
udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
score = compute_score2(sk, net, saddr, sport,
daddr, hnum, dif);
if (score > badness) {
result = sk;
badness = score;
- if (score == SCORE2_MAX)
- goto exact_match;
+ reuseport = sk->sk_reuseport;
+ if (reuseport) {
+ hash = inet_ehashfn(net, daddr, hnum,
+ saddr, htons(sport));
+ matches = 1;
+ }
+ } else if (score == badness && reuseport) {
+ matches++;
+ if (((u64)hash * matches) >> 32 == 0)
+ result = sk;
+ hash = next_pseudo_random32(hash);
}
}
/*
*/
if (get_nulls_value(node) != slot2)
goto begin;
-
if (result) {
-exact_match:
if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
result = NULL;
else if (unlikely(compute_score2(result, net, saddr, sport,
unsigned short hnum = ntohs(dport);
unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
- int score, badness;
+ int score, badness, matches = 0, reuseport = 0;
+ u32 hash = 0;
rcu_read_lock();
if (hslot->count > 10) {
}
begin:
result = NULL;
- badness = -1;
+ badness = 0;
sk_nulls_for_each_rcu(sk, node, &hslot->head) {
score = compute_score(sk, net, saddr, hnum, sport,
daddr, dport, dif);
if (score > badness) {
result = sk;
badness = score;
+ reuseport = sk->sk_reuseport;
+ if (reuseport) {
+ hash = inet_ehashfn(net, daddr, hnum,
+ saddr, htons(sport));
+ matches = 1;
+ }
+ } else if (score == badness && reuseport) {
+ matches++;
+ if (((u64)hash * matches) >> 32 == 0)
+ result = sk;
+ hash = next_pseudo_random32(hash);
}
}
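/* Note (editorial, not part of the patch): hash is 32-bit, so
 * ((u64)hash * matches) >> 32 == 0 holds iff hash < 2^32 / matches,
 * i.e. with probability ~1/matches for the k-th socket that ties the
 * best score. Replacing the current result with probability 1/k at each
 * tie is reservoir sampling, giving every top-scoring SO_REUSEPORT
 * socket an equal chance; next_pseudo_random32() re-draws the hash
 * between ties. A minimal userspace sketch (stand-in PRNGs, not kernel
 * code) that demonstrates the uniform pick:
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	enum { CANDIDATES = 5, TRIALS = 1000000 };
	int wins[CANDIDATES] = { 0 };

	for (int t = 0; t < TRIALS; t++) {
		/* stand-in for inet_ehashfn() */
		uint32_t hash = (uint32_t)rand() * 2654435761u;
		int matches = 0, result = -1;

		for (int i = 0; i < CANDIDATES; i++) {
			matches++;
			if (((uint64_t)hash * matches) >> 32 == 0)
				result = i; /* first pass always hits: x * 1 >> 32 == 0 */
			/* stand-in for next_pseudo_random32() */
			hash = hash * 1664525u + 1013904223u;
		}
		wins[result]++;
	}
	for (int i = 0; i < CANDIDATES; i++)
		printf("candidate %d picked %.3f\n", i, (double)wins[i] / TRIALS);
	return 0;
}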
/*
sizeof(struct udphdr), &ipc, &rt,
msg->msg_flags);
err = PTR_ERR(skb);
- if (skb && !IS_ERR(skb))
+ if (!IS_ERR_OR_NULL(skb))
err = udp_send_skb(skb, fl4);
goto out;
}
.recvmsg = udp_recvmsg,
.sendpage = udp_sendpage,
.backlog_rcv = __udp_queue_rcv_skb,
+ .release_cb = ip4_datagram_release_cb,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
.rehash = udp_v4_rehash,
#include <net/checksum.h>
#include <linux/mroute6.h>
-int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
-
int __ip6_local_out(struct sk_buff *skb)
{
int len;
struct dst_entry *dst = skb_dst(skb);
struct net_device *dev = dst->dev;
struct neighbour *neigh;
- struct rt6_info *rt;
+ struct in6_addr *nexthop;
+ int ret;
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
skb->len);
}
- rt = (struct rt6_info *) dst;
- neigh = rt->n;
- if (neigh)
- return dst_neigh_output(dst, neigh, skb);
+ rcu_read_lock_bh();
+ nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
+ neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
+ if (unlikely(!neigh))
+ neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
+ if (!IS_ERR(neigh)) {
+ ret = dst_neigh_output(dst, neigh, skb);
+ rcu_read_unlock_bh();
+ return ret;
+ }
+ rcu_read_unlock_bh();
IP6_INC_STATS_BH(dev_net(dst->dev),
ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
if (hlimit < 0)
hlimit = ip6_dst_hoplimit(dst);
- *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl6->flowlabel;
+ ip6_flow_hdr(hdr, tclass, fl6->flowlabel);
hdr->payload_len = htons(seg_len);
hdr->nexthdr = proto;
EXPORT_SYMBOL(ip6_xmit);
-/*
- * To avoid extra problems ND packets are send through this
- * routine. It's code duplication but I really want to avoid
- * extra checks since ipv6_build_header is used by TCP (which
- * is for us performance critical)
- */
-
-int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
- const struct in6_addr *saddr, const struct in6_addr *daddr,
- int proto, int len)
-{
- struct ipv6_pinfo *np = inet6_sk(sk);
- struct ipv6hdr *hdr;
-
- skb->protocol = htons(ETH_P_IPV6);
- skb->dev = dev;
-
- skb_reset_network_header(skb);
- skb_put(skb, sizeof(struct ipv6hdr));
- hdr = ipv6_hdr(skb);
-
- *(__be32*)hdr = htonl(0x60000000);
-
- hdr->payload_len = htons(len);
- hdr->nexthdr = proto;
- hdr->hop_limit = np->hop_limit;
-
- hdr->saddr = *saddr;
- hdr->daddr = *daddr;
-
- return 0;
-}
-
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
struct ip6_ra_chain *ra;
* dst entry of the nexthop router
*/
rt = (struct rt6_info *) *dst;
- n = rt->n;
- if (n && !(n->nud_state & NUD_VALID)) {
+ rcu_read_lock_bh();
+ n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt, &fl6->daddr));
+ err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
+ rcu_read_unlock_bh();
+
+ if (err) {
struct inet6_ifaddr *ifp;
struct flowi6 fl_gw6;
int redirect;
if (dst_allfrag(rt->dst.path))
cork->flags |= IPCORK_ALLFRAG;
cork->length = 0;
- exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len;
+ exthdrlen = (opt ? opt->opt_flen : 0);
length += exthdrlen;
transhdrlen += exthdrlen;
- dst_exthdrlen = rt->dst.header_len;
+ dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
} else {
rt = (struct rt6_info *)cork->dst;
fl6 = &inet->cork.fl.u.ip6;
skb_reset_network_header(skb);
hdr = ipv6_hdr(skb);
- *(__be32*)hdr = fl6->flowlabel |
- htonl(0x60000000 | ((int)np->cork.tclass << 20));
-
+ ip6_flow_hdr(hdr, np->cork.tclass, fl6->flowlabel);
hdr->hop_limit = np->cork.hop_limit;
hdr->nexthdr = proto;
hdr->saddr = fl6->saddr;
return NULL;
}
+/* Look for a (*,*,oif) entry */
+static struct mfc6_cache *ip6mr_cache_find_any_parent(struct mr6_table *mrt,
+ mifi_t mifi)
+{
+ int line = MFC6_HASH(&in6addr_any, &in6addr_any);
+ struct mfc6_cache *c;
+
+ list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
+ if (ipv6_addr_any(&c->mf6c_origin) &&
+ ipv6_addr_any(&c->mf6c_mcastgrp) &&
+ (c->mfc_un.res.ttls[mifi] < 255))
+ return c;
+
+ return NULL;
+}
+
+/* Look for a (*,G) entry */
+static struct mfc6_cache *ip6mr_cache_find_any(struct mr6_table *mrt,
+ struct in6_addr *mcastgrp,
+ mifi_t mifi)
+{
+ int line = MFC6_HASH(mcastgrp, &in6addr_any);
+ struct mfc6_cache *c, *proxy;
+
+ if (ipv6_addr_any(mcastgrp))
+ goto skip;
+
+ list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
+ if (ipv6_addr_any(&c->mf6c_origin) &&
+ ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) {
+ if (c->mfc_un.res.ttls[mifi] < 255)
+ return c;
+
+ /* It's ok if the mifi is part of the static tree */
+ proxy = ip6mr_cache_find_any_parent(mrt,
+ c->mf6c_parent);
+ if (proxy && proxy->mfc_un.res.ttls[mifi] < 255)
+ return c;
+ }
+
+skip:
+ return ip6mr_cache_find_any_parent(mrt, mifi);
+}
+
/*
* Allocate a multicast cache entry
*/
* MFC6 cache manipulation by user space
*/
-static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc)
+static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc,
+ int parent)
{
int line;
struct mfc6_cache *c, *next;
list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
- ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
+ ipv6_addr_equal(&c->mf6c_mcastgrp,
+ &mfc->mf6cc_mcastgrp.sin6_addr) &&
+ (parent == -1 || parent == c->mf6c_parent)) {
write_lock_bh(&mrt_lock);
list_del(&c->list);
write_unlock_bh(&mrt_lock);
}
static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
- struct mf6cctl *mfc, int mrtsock)
+ struct mf6cctl *mfc, int mrtsock, int parent)
{
bool found = false;
int line;
list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
- ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
+ ipv6_addr_equal(&c->mf6c_mcastgrp,
+ &mfc->mf6cc_mcastgrp.sin6_addr) &&
+ (parent == -1 || parent == mfc->mf6cc_parent)) {
found = true;
break;
}
return 0;
}
- if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
+ if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
+ !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
return -EINVAL;
c = ip6mr_cache_alloc();
int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
- int ret;
+ int ret, parent = 0;
struct mif6ctl vif;
struct mf6cctl mfc;
mifi_t mifi;
*/
case MRT6_ADD_MFC:
case MRT6_DEL_MFC:
+ parent = -1;
+ case MRT6_ADD_MFC_PROXY:
+ case MRT6_DEL_MFC_PROXY:
if (optlen < sizeof(mfc))
return -EINVAL;
if (copy_from_user(&mfc, optval, sizeof(mfc)))
return -EFAULT;
+ if (parent == 0)
+ parent = mfc.mf6cc_parent;
rtnl_lock();
- if (optname == MRT6_DEL_MFC)
- ret = ip6mr_mfc_delete(mrt, &mfc);
+ if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
+ ret = ip6mr_mfc_delete(mrt, &mfc, parent);
else
- ret = ip6mr_mfc_add(net, mrt, &mfc, sk == mrt->mroute6_sk);
+ ret = ip6mr_mfc_add(net, mrt, &mfc,
+ sk == mrt->mroute6_sk, parent);
rtnl_unlock();
return ret;
return -EINVAL;
if (get_user(v, (u32 __user *)optval))
return -EFAULT;
+ /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
+ if (v != RT_TABLE_DEFAULT && v >= 100000000)
+ return -EINVAL;
if (sk == mrt->mroute6_sk)
return -EBUSY;
{
int psend = -1;
int vif, ct;
+ int true_vifi = ip6mr_find_vif(mrt, skb->dev);
vif = cache->mf6c_parent;
cache->mfc_un.res.pkt++;
cache->mfc_un.res.bytes += skb->len;
+ if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) {
+ struct mfc6_cache *cache_proxy;
+
+ /* For an (*,G) entry, we only check that the incoming
+ * interface is part of the static tree.
+ */
+ cache_proxy = ip6mr_cache_find_any_parent(mrt, vif);
+ if (cache_proxy &&
+ cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
+ goto forward;
+ }
+
/*
* Wrong interface: drop packet and (maybe) send PIM assert.
*/
if (mrt->vif6_table[vif].dev != skb->dev) {
- int true_vifi;
-
cache->mfc_un.res.wrong_if++;
- true_vifi = ip6mr_find_vif(mrt, skb->dev);
if (true_vifi >= 0 && mrt->mroute_do_assert &&
/* pimsm uses asserts, when switching from RPT to SPT,
goto dont_forward;
}
+forward:
mrt->vif6_table[vif].pkt_in++;
mrt->vif6_table[vif].bytes_in += skb->len;
/*
* Forward the frame
*/
+ if (ipv6_addr_any(&cache->mf6c_origin) &&
+ ipv6_addr_any(&cache->mf6c_mcastgrp)) {
+ if (true_vifi >= 0 &&
+ true_vifi != cache->mf6c_parent &&
+ ipv6_hdr(skb)->hop_limit >
+ cache->mfc_un.res.ttls[cache->mf6c_parent]) {
+ /* It's an (*,*) entry and the packet is not coming from
+ * the upstream: forward the packet to the upstream
+ * only.
+ */
+ psend = cache->mf6c_parent;
+ goto last_forward;
+ }
+ goto dont_forward;
+ }
for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
- if (ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
+ /* For (*,G) entry, don't forward to the incoming interface */
+ if ((!ipv6_addr_any(&cache->mf6c_origin) || ct != true_vifi) &&
+ ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
if (psend != -1) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2)
psend = ct;
}
}
+last_forward:
if (psend != -1) {
ip6mr_forward2(net, mrt, skb, cache, psend);
return 0;
read_lock(&mrt_lock);
cache = ip6mr_cache_find(mrt,
&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
+ if (cache == NULL) {
+ int vif = ip6mr_find_vif(mrt, skb->dev);
+
+ if (vif >= 0)
+ cache = ip6mr_cache_find_any(mrt,
+ &ipv6_hdr(skb)->daddr,
+ vif);
+ }
/*
* No usable cache entry
read_lock(&mrt_lock);
cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
+ if (!cache && skb->dev) {
+ int vif = ip6mr_find_vif(mrt, skb->dev);
+
+ if (vif >= 0)
+ cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
+ vif);
+ }
if (!cache) {
struct sk_buff *skb2;