S: Maintained
ARM/TEXAS INSTRUMENT KEYSTONE ARCHITECTURE
- M: Santosh Shilimkar <santosh.shilimkar@ti.com>
+ M: Santosh Shilimkar <ssantosh@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-keystone/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
ARM/TEXAS INSTRUMENT KEYSTONE CLOCK FRAMEWORK
- M: Santosh Shilimkar <santosh.shilimkar@ti.com>
+ M: Santosh Shilimkar <ssantosh@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
F: drivers/clk/keystone/
ARM/TEXAS INSTRUMENT KEYSTONE CLOCKSOURCE
- M: Santosh Shilimkar <santosh.shilimkar@ti.com>
+ M: Santosh Shilimkar <ssantosh@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-kernel@vger.kernel.org
S: Maintained
F: drivers/clocksource/timer-keystone.c
ARM/TEXAS INSTRUMENT KEYSTONE RESET DRIVER
- M: Santosh Shilimkar <santosh.shilimkar@ti.com>
+ M: Santosh Shilimkar <ssantosh@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
F: drivers/power/reset/keystone-reset.c
ARM/TEXAS INSTRUMENT AEMIF/EMIF DRIVERS
- M: Santosh Shilimkar <santosh.shilimkar@ti.com>
+ M: Santosh Shilimkar <ssantosh@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
F: drivers/memory/*emif*
S: Supported
F: drivers/spi/spi-atmel.*
+ ATMEL SSC DRIVER
+ M: Bo Shen <voice.shen@atmel.com>
+ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+ S: Supported
+ F: drivers/misc/atmel-ssc.c
+ F: include/linux/atmel-ssc.h
+
ATMEL Timer Counter (TC) AND CLOCKSOURCE DRIVERS
M: Nicolas Ferre <nicolas.ferre@atmel.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
F: drivers/mmc/host/sdhci-bcm-kona.c
F: drivers/clocksource/bcm_kona_timer.c
- BROADCOM BCM2835 ARM ARCHICTURE
+ BROADCOM BCM2835 ARM ARCHITECTURE
M: Stephen Warren <swarren@wwwdotorg.org>
L: linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-rpi.git
S: Maintained
- F: arch/arm/mach-bcm/board_bcm2835.c
- F: arch/arm/boot/dts/bcm2835*
- F: arch/arm/configs/bcm2835_defconfig
- F: drivers/*/*bcm2835*
+ N: bcm2835
- BROADCOM BCM5301X ARM ARCHICTURE
+ BROADCOM BCM5301X ARM ARCHITECTURE
M: Hauke Mehrtens <hauke@hauke-m.de>
L: linux-arm-kernel@lists.infradead.org
S: Maintained
F: drivers/crypto/nx/
IBM Power 842 compression accelerator
- M: Nathan Fontenot <nfont@linux.vnet.ibm.com>
+ M: Dan Streetman <ddstreet@us.ibm.com>
S: Supported
F: drivers/crypto/nx/nx-842.c
F: include/linux/nx842.h
F: drivers/net/macvlan.c
F: include/linux/if_macvlan.h
+ MAILBOX API
+ M: Jassi Brar <jassisinghbrar@gmail.com>
+ L: linux-kernel@vger.kernel.org
+ S: Maintained
+ F: drivers/mailbox/
+ F: include/linux/mailbox_client.h
+ F: include/linux/mailbox_controller.h
+
MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7
M: Michael Kerrisk <mtk.manpages@gmail.com>
W: http://www.kernel.org/doc/man-pages
S: Maintained
F: drivers/gpu/drm/armada/
+MARVELL 88E6352 DSA support
+M: Guenter Roeck <linux@roeck-us.net>
+S: Maintained
+F: drivers/net/dsa/mv88e6352.c
+
MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
M: Mirko Lindner <mlindner@marvell.com>
M: Stephen Hemminger <stephen@networkplumber.org>
OMAP GPIO DRIVER
M: Javier Martinez Canillas <javier@dowhile0.org>
- M: Santosh Shilimkar <santosh.shilimkar@ti.com>
+ M: Santosh Shilimkar <ssantosh@kernel.org>
M: Kevin Hilman <khilman@deeprootsystems.com>
L: linux-omap@vger.kernel.org
S: Maintained
F: drivers/net/wireless/orinoco/
OSD LIBRARY and FILESYSTEM
- M: Boaz Harrosh <bharrosh@panasas.com>
+ M: Boaz Harrosh <ooo@electrozaur.com>
M: Benny Halevy <bhalevy@primarydata.com>
L: osd-dev@open-osd.org
W: http://open-osd.org
F: include/scsi/osd_*
F: fs/exofs/
+ OVERLAYFS FILESYSTEM
+ M: Miklos Szeredi <miklos@szeredi.hu>
+ L: linux-fsdevel@vger.kernel.org
+ S: Supported
+ F: fs/overlayfs/*
+ F: Documentation/filesystems/overlayfs.txt
+
P54 WIRELESS DRIVER
M: Christian Lamparter <chunkeey@googlemail.com>
L: linux-wireless@vger.kernel.org
F: include/linux/tifm.h
TI KEYSTONE MULTICORE NAVIGATOR DRIVERS
- M: Santosh Shilimkar <santosh.shilimkar@ti.com>
+ M: Santosh Shilimkar <ssantosh@kernel.org>
L: linux-kernel@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 0
-#define DRV_VERSION_BUILD 11
+#define DRV_VERSION_BUILD 21
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
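For reference, the version string is built purely by preprocessor pasting; with the build number bumped to 21, and assuming DRV_KERN is the usual "-k" suffix this driver uses, the expansion works out as:
/* __stringify(1) "." __stringify(0) "." __stringify(21) DRV_KERN
 *   -> "1" "." "0" "." "21" "-k"
 *   -> "1.0.21-k"
 */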
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
/* required last entry */
{0, }
};
struct i40e_eth_stats *oes;
struct i40e_eth_stats *es; /* device's eth stats */
u32 tx_restart, tx_busy;
+ struct i40e_ring *p;
u32 rx_page, rx_buf;
+ u64 bytes, packets;
+ unsigned int start;
u64 rx_p, rx_b;
u64 tx_p, tx_b;
u16 q;
rx_buf = 0;
rcu_read_lock();
for (q = 0; q < vsi->num_queue_pairs; q++) {
- struct i40e_ring *p;
- u64 bytes, packets;
- unsigned int start;
-
/* locate Tx ring */
p = ACCESS_ONCE(vsi->tx_rings[q]);
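The hoisted locals (p, bytes, packets, start) feed the usual u64_stats snapshot loop inside the rcu_read_lock() section; the body following each ring lookup is, roughly, the standard retry pattern (a sketch, not necessarily the driver's exact code):
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;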
if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
break;
- udelay(10);
+ usleep_range(10, 20);
}
if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
return -ETIMEDOUT;
/* warn the TX unit of coming changes */
i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
if (!enable)
- udelay(10);
+ usleep_range(10, 20);
for (j = 0; j < 50; j++) {
tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
break;
- udelay(10);
+ usleep_range(10, 20);
}
if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
return -ETIMEDOUT;
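The udelay() -> usleep_range() swaps matter because these register polls run in process context: udelay() busy-waits on the CPU, while usleep_range() lets the scheduler run something else and gives the timer subsystem slack to coalesce wakeups. The resulting loop shape (sketch, built from the hunk above):
	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
		rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			break;
		usleep_range(10, 20);	/* sleep 10-20us instead of spinning */
	}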
case I40E_LINK_SPEED_1GB:
strlcpy(speed, "1000 Mbps", SPEED_SIZE);
break;
+ case I40E_LINK_SPEED_100MB:
+ strlcpy(speed, "100 Mbps", SPEED_SIZE);
+ break;
default:
break;
}
static int i40e_up_complete(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- u8 set_fc_aq_fail = 0;
int err;
- /* force flow control off */
- i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
-
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
i40e_vsi_configure_msix(vsi);
else
{
bool new_link, old_link;
- new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP);
+ /* set this to force the get_link_status call to refresh state */
+ pf->hw.phy.get_link_info = true;
+
old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
+ new_link = i40e_get_link_status(&pf->hw);
- if (new_link == old_link)
+ if (new_link == old_link &&
+ new_link == netif_carrier_ok(pf->vsi[pf->lan_vsi]->netdev))
return;
if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
i40e_print_link_message(pf->vsi[pf->lan_vsi], new_link);
memcpy(&pf->hw.phy.link_info_old, hw_link_info,
sizeof(pf->hw.phy.link_info_old));
+ /* Do a new status request to re-enable LSE reporting
+ * and load new status information into the hw struct
+ * This completely ignores any state information
+ * in the ARQ event info, instead choosing to always
+ * issue the AQ update link status command.
+ */
+ i40e_link_event(pf);
+
/* check for unqualified module, if link is down */
if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
(!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
(!(status->link_info & I40E_AQ_LINK_UP)))
dev_err(&pf->pdev->dev,
"The driver failed to link because an unqualified module was detected.\n");
-
- /* update link status */
- hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
- hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
- hw_link_info->link_info = status->link_info;
- hw_link_info->an_info = status->an_info;
- hw_link_info->ext_info = status->ext_info;
- hw_link_info->lse_enable =
- le16_to_cpu(status->command_flags) &
- I40E_AQ_LSE_ENABLE;
-
- /* process the event */
- i40e_link_event(pf);
-
- /* Do a new status request to re-enable LSE reporting
- * and load new status information into the hw struct,
- * then see if the status changed while processing the
- * initial event.
- */
- i40e_update_link_info(&pf->hw, true);
- i40e_link_event(pf);
}
/**
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
{
struct i40e_hw *hw = &pf->hw;
+ u8 set_fc_aq_fail = 0;
i40e_status ret;
u32 v;
if (ret)
goto end_core_reset;
+ /* driver is only interested in link up/down and module qualification
+ * reports from firmware
+ */
+ ret = i40e_aq_set_phy_int_mask(&pf->hw,
+ I40E_AQ_EVENT_LINK_UPDOWN |
+ I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
+ if (ret)
+ dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret);
+
+ /* make sure our flow control settings are restored */
+ ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
+ if (ret)
+ dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret);
+
/* Rebuild the VSIs and VEBs that existed before reset.
* They are still in our local switch element arrays, so only
* need to rebuild the switch model in the HW.
}
}
+ msleep(75);
+ ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ }
+
/* reinit the misc interrupt */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
ret = i40e_setup_misc_vector(pf);
I40E_GL_MDET_TX_PF_NUM_SHIFT;
u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
I40E_GL_MDET_TX_VF_NUM_SHIFT;
- u8 event = (reg & I40E_GL_MDET_TX_EVENT_SHIFT) >>
+ u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
I40E_GL_MDET_TX_EVENT_SHIFT;
u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
I40E_GL_MDET_TX_QUEUE_SHIFT;
if (reg & I40E_GL_MDET_RX_VALID_MASK) {
u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
I40E_GL_MDET_RX_FUNCTION_SHIFT;
- u8 event = (reg & I40E_GL_MDET_RX_EVENT_SHIFT) >>
+ u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
I40E_GL_MDET_RX_EVENT_SHIFT;
u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
I40E_GL_MDET_RX_QUEUE_SHIFT;
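Both hunks fix the same copy-paste bug: the field was masked with the *_SHIFT constant instead of the *_MASK constant. A minimal self-contained illustration of why that extracts garbage (register layout below is hypothetical):
#define EVENT_MASK	0x1e000000u	/* bits 28:25, hypothetical */
#define EVENT_SHIFT	25

static inline unsigned int event_get(unsigned int reg)
{
	/* correct: isolate the field, then shift it down */
	return (reg & EVENT_MASK) >> EVENT_SHIFT;
	/* broken: (reg & EVENT_SHIFT) == (reg & 0x19) tests bits 0, 3
	 * and 4 of the register, and shifting that right by 25 always
	 * yields zero
	 */
}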
#endif
i40e_clean_adminq_subtask(pf);
+ i40e_link_event(pf);
+
i40e_service_event_complete(pf);
/* If the tasks have taken longer than one timer cycle or there
i40e_update_link_info(&pf->hw, true);
i40e_link_event(pf);
/* Initialize user-specific link properties */
pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
I40E_AQ_AN_COMPLETED) ? true : false);
}
}
+ /* driver is only interested in link up/down and module qualification
+ * reports from firmware
+ */
+ err = i40e_aq_set_phy_int_mask(&pf->hw,
+ I40E_AQ_EVENT_LINK_UPDOWN |
+ I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
+ if (err)
+ dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
+
+ msleep(75);
+ err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+ if (err) {
+ dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ }
+
/* The main driver is (mostly) up and happy. We need to set this state
* before setting up the misc vector or we get a race and the vector
* ends up disabled forever.
struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
struct mlx4_en_priv *priv = netdev_priv(cq->dev);
- if (priv->port_up)
- napi_schedule(&cq->napi);
+ if (likely(priv->port_up))
+ napi_schedule_irqoff(&cq->napi);
else
mlx4_en_arm_cq(priv, cq);
}
* whether LSO is used */
tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
- tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
- MLX4_WQE_CTRL_TCP_UDP_CSUM);
+ if (!skb->encapsulation)
+ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
+ MLX4_WQE_CTRL_TCP_UDP_CSUM);
+ else
+ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);
ring->tx_csum++;
}
#define MII_M1145_PHY_EXT_CR 0x14
#define MII_M1145_RGMII_RX_DELAY 0x0080
#define MII_M1145_RGMII_TX_DELAY 0x0002
+#define MII_M1145_HWCFG_MODE_SGMII_NO_CLK 0x4
+#define MII_M1145_HWCFG_MODE_MASK 0xf
+#define MII_M1145_HWCFG_FIBER_COPPER_AUTO 0x8000
+
#define MII_M1111_PHY_LED_CONTROL 0x18
#define MII_M1111_PHY_LED_DIRECT 0x4100
#define MII_M1111_PHY_LED_COMBINE 0x411c
#define MII_M1116R_CONTROL_REG_MAC 21
+#define MII_88E3016_PHY_SPEC_CTRL 0x10
+#define MII_88E3016_DISABLE_SCRAMBLER 0x0200
+#define MII_88E3016_AUTO_MDIX_CROSSOVER 0x0030
MODULE_DESCRIPTION("Marvell PHY driver");
MODULE_AUTHOR("Andy Fleming");
return 0;
}
+static int m88e3016_config_init(struct phy_device *phydev)
+{
+ int reg;
+
+ /* Enable Scrambler and Auto-Crossover */
+ reg = phy_read(phydev, MII_88E3016_PHY_SPEC_CTRL);
+ if (reg < 0)
+ return reg;
+
+ reg &= ~MII_88E3016_DISABLE_SCRAMBLER;
+ reg |= MII_88E3016_AUTO_MDIX_CROSSOVER;
+
+ reg = phy_write(phydev, MII_88E3016_PHY_SPEC_CTRL, reg);
+ if (reg < 0)
+ return reg;
+
+ return 0;
+}
+
static int m88e1111_config_init(struct phy_device *phydev)
{
int err;
static int m88e1145_config_init(struct phy_device *phydev)
{
int err;
+ int temp;
/* Take care of errata E0 & E1 */
err = phy_write(phydev, 0x1d, 0x001b);
}
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
- int temp = phy_read(phydev, MII_M1145_PHY_EXT_SR);
+ temp = phy_read(phydev, MII_M1145_PHY_EXT_SR);
if (temp < 0)
return temp;
- temp &= ~(MII_M1145_HWCFG_MODE_MASK);
+ temp &= ~MII_M1145_HWCFG_MODE_MASK;
temp |= MII_M1145_HWCFG_MODE_SGMII_NO_CLK;
temp |= MII_M1145_HWCFG_FIBER_COPPER_AUTO;
return 0;
}
+static int marvell_aneg_done(struct phy_device *phydev)
+{
+ int retval = phy_read(phydev, MII_M1011_PHY_STATUS);
+
+ return (retval < 0) ? retval : (retval & MII_M1011_PHY_STATUS_RESOLVED);
+}
+
static int m88e1121_did_interrupt(struct phy_device *phydev)
{
int imask;
.suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
+ {
+ .phy_id = MARVELL_PHY_ID_88E3016,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88E3016",
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = &genphy_config_aneg,
+ .config_init = &m88e3016_config_init,
+ .aneg_done = &marvell_aneg_done,
+ .read_status = &marvell_read_status,
+ .ack_interrupt = &marvell_ack_interrupt,
+ .config_intr = &marvell_config_intr,
+ .did_interrupt = &m88e1121_did_interrupt,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
+ .driver = { .owner = THIS_MODULE },
+ },
};
static int __init marvell_init(void)
{ MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
{ }
};
__le64 rx_broadcast;
__le32 rx_multicast;
__le16 tx_aborted;
- __le16 tx_underun;
+ __le16 tx_underrun;
};
struct rx_desc {
}
}
+ if (ret == -ENODEV)
+ set_bit(RTL8152_UNPLUG, &tp->flags);
+
return ret;
}
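The same -ENODEV handling is added to every control-transfer error path in this file; the intent appears to be the following (worth a comment in the code itself):
/* Once a transfer returns -ENODEV the adapter is physically gone, so
 * latching RTL8152_UNPLUG lets the later teardown paths skip USB I/O
 * that could only time out.
 */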
}
error1:
+ if (ret == -ENODEV)
+ set_bit(RTL8152_UNPLUG, &tp->flags);
+
return ret;
}
result = r8152_submit_rx(tp, agg, GFP_ATOMIC);
if (result == -ENODEV) {
+ set_bit(RTL8152_UNPLUG, &tp->flags);
netif_device_detach(tp->netdev);
} else if (result) {
spin_lock(&tp->rx_lock);
case -ESHUTDOWN:
netif_device_detach(tp->netdev);
case -ENOENT:
+ case -EPROTO:
+ netif_info(tp, intr, tp->netdev,
+ "Stop submitting intr, status %d\n", status);
return;
case -EOVERFLOW:
netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n");
resubmit:
res = usb_submit_urb(urb, GFP_ATOMIC);
- if (res == -ENODEV)
+ if (res == -ENODEV) {
+ set_bit(RTL8152_UNPLUG, &tp->flags);
netif_device_detach(tp->netdev);
- else if (res)
+ } else if (res) {
netif_err(tp, intr, tp->netdev,
"can't resubmit intr, status %d\n", res);
+ }
}
static inline void *rx_agg_align(void *data)
struct net_device *netdev = tp->netdev;
if (res == -ENODEV) {
+ set_bit(RTL8152_UNPLUG, &tp->flags);
netif_device_detach(netdev);
} else {
struct net_device_stats *stats = &netdev->stats;
if (res)
goto out;
+ /* set speed to 0 to avoid autoresume trying to submit rx */
+ tp->speed = 0;
+
res = usb_autopm_get_interface(tp->intf);
if (res < 0) {
free_all_mem(tp);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
+
+ /* disable the tx/rx, if the workqueue has enabled them. */
if (tp->speed & LINK_STATUS)
tp->rtl_ops.disable(tp);
}
* be disabled when autoresume occurs, because the
* netif_running() would be false.
*/
- if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
- rtl_runtime_suspend_enable(tp, false);
- clear_bit(SELECTIVE_SUSPEND, &tp->flags);
- }
+ rtl_runtime_suspend_enable(tp, false);
tasklet_disable(&tp->tl);
tp->rtl_ops.down(tp);
netif_device_detach(netdev);
}
- if (netif_running(netdev)) {
+ if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
tasklet_disable(&tp->tl);
set_bit(WORK_ENABLE, &tp->flags);
}
usb_submit_urb(tp->intr_urb, GFP_KERNEL);
+ } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
}
mutex_unlock(&tp->control);
data[9] = le64_to_cpu(tally.rx_broadcast);
data[10] = le32_to_cpu(tally.rx_multicast);
data[11] = le16_to_cpu(tally.tx_aborted);
- data[12] = le16_to_cpu(tally.tx_underun);
+ data[12] = le16_to_cpu(tally.tx_underrun);
}
static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
return ret;
}
+static int rtl8152_nway_reset(struct net_device *dev)
+{
+ struct r8152 *tp = netdev_priv(dev);
+ int ret;
+
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ goto out;
+
+ mutex_lock(&tp->control);
+
+ ret = mii_nway_restart(&tp->mii);
+
+ mutex_unlock(&tp->control);
+
+ usb_autopm_put_interface(tp->intf);
+
+out:
+ return ret;
+}
+
static struct ethtool_ops ops = {
.get_drvinfo = rtl8152_get_drvinfo,
.get_settings = rtl8152_get_settings,
.set_settings = rtl8152_set_settings,
.get_link = ethtool_op_get_link,
+ .nway_reset = rtl8152_nway_reset,
.get_msglevel = rtl8152_get_msglevel,
.set_msglevel = rtl8152_set_msglevel,
.get_wol = rtl8152_get_wol,
#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT 64
+ /* Number of bytes allowed on the internal guest Rx queue. */
+ #define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
+
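Worked out for the common configuration (assumption: 256-entry netif rings with 4 KiB pages), the new cap is:
	/* XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE
	 *   = 256/2 * 4096
	 *   = 524288 bytes (512 KiB) of queued skbs per queue
	 */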
/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
* increasing the inflight counter. We need to increase the inflight
* counter because core driver calls into xenvif_zerocopy_callback
atomic_dec(&queue->inflight_packets);
}
- static inline void xenvif_stop_queue(struct xenvif_queue *queue)
- {
- struct net_device *dev = queue->vif->dev;
-
- if (!queue->vif->can_queue)
- return;
-
- netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
- }
-
int xenvif_schedulable(struct xenvif *vif)
{
return netif_running(vif->dev) &&
- test_bit(VIF_STATUS_CONNECTED, &vif->status);
+ test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
+ !vif->disabled;
}
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
struct xenvif_queue *queue = dev_id;
- struct netdev_queue *net_queue =
- netdev_get_tx_queue(queue->vif->dev, queue->id);
- /* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR
- * the carrier went down and this queue was previously blocked
- */
- if (unlikely(netif_tx_queue_stopped(net_queue) ||
- (!netif_carrier_ok(queue->vif->dev) &&
- test_bit(QUEUE_STATUS_RX_STALLED, &queue->status))))
- set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
xenvif_kick_thread(queue);
return IRQ_HANDLED;
netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}
- /* Callback to wake the queue's thread and turn the carrier off on timeout */
- static void xenvif_rx_stalled(unsigned long data)
- {
- struct xenvif_queue *queue = (struct xenvif_queue *)data;
-
- if (xenvif_queue_stopped(queue)) {
- set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
- xenvif_kick_thread(queue);
- }
- }
-
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
struct xenvif_queue *queue = NULL;
unsigned int num_queues = vif->num_queues;
u16 index;
- int min_slots_needed;
+ struct xenvif_rx_cb *cb;
BUG_ON(skb->dev != dev);
!xenvif_schedulable(vif))
goto drop;
- /* At best we'll need one slot for the header and one for each
- * frag.
- */
- min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
-
- /* If the skb is GSO then we'll also need an extra slot for the
- * metadata.
- */
- if (skb_is_gso(skb))
- min_slots_needed++;
+ cb = XENVIF_RX_CB(skb);
+ cb->expires = jiffies + rx_drain_timeout_jiffies;
- /* If the skb can't possibly fit in the remaining slots
- * then turn off the queue to give the ring a chance to
- * drain.
- */
- if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
- queue->rx_stalled.function = xenvif_rx_stalled;
- queue->rx_stalled.data = (unsigned long)queue;
- xenvif_stop_queue(queue);
- mod_timer(&queue->rx_stalled,
- jiffies + rx_drain_timeout_jiffies);
- }
-
- skb_queue_tail(&queue->rx_queue, skb);
+ xenvif_rx_queue_tail(queue, skb);
xenvif_kick_thread(queue);
return NETDEV_TX_OK;
for (queue_index = 0; queue_index < num_queues; ++queue_index) {
queue = &vif->queues[queue_index];
- napi_disable(&queue->napi);
disable_irq(queue->tx_irq);
if (queue->tx_irq != queue->rx_irq)
disable_irq(queue->rx_irq);
+ napi_disable(&queue->napi);
del_timer_sync(&queue->credit_timeout);
}
}
vif->queues = NULL;
vif->num_queues = 0;
+ spin_lock_init(&vif->lock);
+
dev->netdev_ops = &xenvif_netdev_ops;
dev->hw_features = NETIF_F_SG |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
init_timer(&queue->credit_timeout);
queue->credit_window_start = get_jiffies_64();
+ queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;
+
skb_queue_head_init(&queue->rx_queue);
skb_queue_head_init(&queue->tx_queue);
queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
}
- init_timer(&queue->rx_stalled);
-
return 0;
}
dev_set_mtu(vif->dev, ETH_DATA_LEN);
netdev_update_features(vif->dev);
set_bit(VIF_STATUS_CONNECTED, &vif->status);
- netif_carrier_on(vif->dev);
if (netif_running(vif->dev))
xenvif_up(vif);
rtnl_unlock();
disable_irq(queue->rx_irq);
}
+ queue->stalled = true;
+
task = kthread_create(xenvif_kthread_guest_rx,
(void *)queue, "%s-guest-rx", queue->name);
if (IS_ERR(task)) {
netif_napi_del(&queue->napi);
if (queue->task) {
- del_timer_sync(&queue->rx_stalled);
kthread_stop(queue->task);
queue->task = NULL;
}
bool separate_tx_rx_irq = 1;
module_param(separate_tx_rx_irq, bool, 0644);
- /* When guest ring is filled up, qdisc queues the packets for us, but we have
- * to timeout them, otherwise other guests' packets can get stuck there
+ /* The time that packets can stay on the guest Rx internal queue
+ * before they are dropped.
*/
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);
unsigned int rx_drain_timeout_jiffies;
+ /* The length of time before the frontend is considered unresponsive
+ * because it isn't providing Rx slots.
+ */
+ static unsigned int rx_stall_timeout_msecs = 60000;
+ module_param(rx_stall_timeout_msecs, uint, 0444);
+ static unsigned int rx_stall_timeout_jiffies;
+
unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
s8 st);
static inline int tx_work_todo(struct xenvif_queue *queue);
- static inline int rx_work_todo(struct xenvif_queue *queue);
static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
u16 id,
return false;
}
+ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
+ {
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->rx_queue.lock, flags);
+
+ __skb_queue_tail(&queue->rx_queue, skb);
+
+ queue->rx_queue_len += skb->len;
+ if (queue->rx_queue_len > queue->rx_queue_max)
+ netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
+
+ spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+ }
+
+ static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
+ {
+ struct sk_buff *skb;
+
+ spin_lock_irq(&queue->rx_queue.lock);
+
+ skb = __skb_dequeue(&queue->rx_queue);
+ if (skb)
+ queue->rx_queue_len -= skb->len;
+
+ spin_unlock_irq(&queue->rx_queue.lock);
+
+ return skb;
+ }
+
+ static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
+ {
+ spin_lock_irq(&queue->rx_queue.lock);
+
+ if (queue->rx_queue_len < queue->rx_queue_max)
+ netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
+
+ spin_unlock_irq(&queue->rx_queue.lock);
+ }
+
+ static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
+ {
+ struct sk_buff *skb;
+
+ while ((skb = xenvif_rx_dequeue(queue)) != NULL)
+ kfree_skb(skb);
+ }
+
+ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
+ {
+ struct sk_buff *skb;
+
+ for (;;) {
+ skb = skb_peek(&queue->rx_queue);
+ if (!skb)
+ break;
+ if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
+ break;
+ xenvif_rx_dequeue(queue);
+ kfree_skb(skb);
+ }
+ }
+
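Note how this pairs with the producer side in the xenvif_start_xmit() hunk above, which stamps every skb on enqueue:
	/* cb->expires = jiffies + rx_drain_timeout_jiffies;
	 *
	 * so a packet (and any foreign grant pages it carries) sits on
	 * the internal queue for at most rx_drain_timeout_msecs.
	 */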
/*
* Returns true if we should start a new receive buffer instead of
* adding 'size' bytes to a buffer which currently contains 'offset'
return meta;
}
- struct xenvif_rx_cb {
- int meta_slots_used;
- bool full_coalesce;
- };
-
- #define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
-
/*
* Set up the grant operations for this fragment. If it's a flipping
* interface, we also set up the unmap request from here.
skb_queue_head_init(&rxq);
- while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) {
+ while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
+ && (skb = xenvif_rx_dequeue(queue)) != NULL) {
RING_IDX max_slots_needed;
RING_IDX old_req_cons;
RING_IDX ring_slots_used;
int i;
+ queue->last_rx_time = jiffies;
+
/* We need a cheap worse case estimate for the number of
* slots we'll use.
*/
skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
max_slots_needed++;
- /* If the skb may not fit then bail out now */
- if (!xenvif_rx_ring_slots_available(queue, max_slots_needed)) {
- skb_queue_head(&queue->rx_queue, skb);
- need_to_notify = true;
- queue->rx_last_skb_slots = max_slots_needed;
- break;
- } else
- queue->rx_last_skb_slots = 0;
-
old_req_cons = queue->rx.req_cons;
XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
ring_slots_used = queue->rx.req_cons - old_req_cons;
unsigned int len;
BUG_ON(i >= MAX_SKB_FRAGS);
- page = alloc_page(GFP_ATOMIC|__GFP_COLD);
+ page = alloc_page(GFP_ATOMIC);
if (!page) {
int j;
skb->truesize += skb->data_len;
}
}
- static inline int rx_work_todo(struct xenvif_queue *queue)
- {
- return (!skb_queue_empty(&queue->rx_queue) &&
- xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots));
- }
-
static inline int tx_work_todo(struct xenvif_queue *queue)
{
if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
return err;
}
- static void xenvif_start_queue(struct xenvif_queue *queue)
+ static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
- if (xenvif_schedulable(queue->vif))
- xenvif_wake_queue(queue);
+ struct xenvif *vif = queue->vif;
+
+ queue->stalled = true;
+
+ /* At least one queue has stalled? Disable the carrier. */
+ spin_lock(&vif->lock);
+ if (vif->stalled_queues++ == 0) {
+ netdev_info(vif->dev, "Guest Rx stalled");
+ netif_carrier_off(vif->dev);
+ }
+ spin_unlock(&vif->lock);
}
- /* Only called from the queue's thread, it handles the situation when the guest
- * doesn't post enough requests on the receiving ring.
- * First xenvif_start_xmit disables QDisc and start a timer, and then either the
- * timer fires, or the guest send an interrupt after posting new request. If it
- * is the timer, the carrier is turned off here.
- * */
- static void xenvif_rx_purge_event(struct xenvif_queue *queue)
+ static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
- /* Either the last unsuccesful skb or at least 1 slot should fit */
- int needed = queue->rx_last_skb_slots ?
- queue->rx_last_skb_slots : 1;
+ struct xenvif *vif = queue->vif;
- /* It is assumed that if the guest post new slots after this, the RX
- * interrupt will set the QUEUE_STATUS_RX_PURGE_EVENT bit and wake up
- * the thread again
- */
- set_bit(QUEUE_STATUS_RX_STALLED, &queue->status);
- if (!xenvif_rx_ring_slots_available(queue, needed)) {
- rtnl_lock();
- if (netif_carrier_ok(queue->vif->dev)) {
- /* Timer fired and there are still no slots. Turn off
- * everything except the interrupts
- */
- netif_carrier_off(queue->vif->dev);
- skb_queue_purge(&queue->rx_queue);
- queue->rx_last_skb_slots = 0;
- if (net_ratelimit())
- netdev_err(queue->vif->dev, "Carrier off due to lack of guest response on queue %d\n", queue->id);
- } else {
- /* Probably an another queue already turned the carrier
- * off, make sure nothing is stucked in the internal
- * queue of this queue
- */
- skb_queue_purge(&queue->rx_queue);
- queue->rx_last_skb_slots = 0;
- }
- rtnl_unlock();
- } else if (!netif_carrier_ok(queue->vif->dev)) {
- unsigned int num_queues = queue->vif->num_queues;
- unsigned int i;
- /* The carrier was down, but an interrupt kicked
- * the thread again after new requests were
- * posted
- */
- clear_bit(QUEUE_STATUS_RX_STALLED,
- &queue->status);
- rtnl_lock();
- netif_carrier_on(queue->vif->dev);
- netif_tx_wake_all_queues(queue->vif->dev);
- rtnl_unlock();
+ queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
+ queue->stalled = false;
- for (i = 0; i < num_queues; i++) {
- struct xenvif_queue *temp = &queue->vif->queues[i];
+ /* All queues are ready? Enable the carrier. */
+ spin_lock(&vif->lock);
+ if (--vif->stalled_queues == 0) {
+ netdev_info(vif->dev, "Guest Rx ready");
+ netif_carrier_on(vif->dev);
+ }
+ spin_unlock(&vif->lock);
+ }
- xenvif_napi_schedule_or_enable_events(temp);
- }
- if (net_ratelimit())
- netdev_err(queue->vif->dev, "Carrier on again\n");
- } else {
- /* Queuing were stopped, but the guest posted
- * new requests and sent an interrupt
- */
- clear_bit(QUEUE_STATUS_RX_STALLED,
- &queue->status);
- del_timer_sync(&queue->rx_stalled);
- xenvif_start_queue(queue);
+ static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
+ {
+ RING_IDX prod, cons;
+
+ prod = queue->rx.sring->req_prod;
+ cons = queue->rx.req_cons;
+
+ return !queue->stalled
+ && prod - cons < XEN_NETBK_RX_SLOTS_MAX
+ && time_after(jiffies,
+ queue->last_rx_time + rx_stall_timeout_jiffies);
+ }
+
+ static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
+ {
+ RING_IDX prod, cons;
+
+ prod = queue->rx.sring->req_prod;
+ cons = queue->rx.req_cons;
+
+ return queue->stalled
+ && prod - cons >= XEN_NETBK_RX_SLOTS_MAX;
+ }
+
+ static bool xenvif_have_rx_work(struct xenvif_queue *queue)
+ {
+ return (!skb_queue_empty(&queue->rx_queue)
+ && xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
+ || xenvif_rx_queue_stalled(queue)
+ || xenvif_rx_queue_ready(queue)
+ || kthread_should_stop()
+ || queue->vif->disabled;
+ }
+
+ static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
+ {
+ struct sk_buff *skb;
+ long timeout;
+
+ skb = skb_peek(&queue->rx_queue);
+ if (!skb)
+ return MAX_SCHEDULE_TIMEOUT;
+
+ timeout = XENVIF_RX_CB(skb)->expires - jiffies;
+ return timeout < 0 ? 0 : timeout;
+ }
+
+ /* Wait until the guest Rx thread has work.
+ *
+ * The timeout needs to be adjusted based on the current head of the
+ * queue (and not just the head at the beginning). In particular, if
+ * the queue is initially empty an infinite timeout is used and this
+ * needs to be reduced when a skb is queued.
+ *
+ * This cannot be done with wait_event_timeout() because it only
+ * calculates the timeout once.
+ */
+ static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
+ {
+ DEFINE_WAIT(wait);
+
+ if (xenvif_have_rx_work(queue))
+ return;
+
+ for (;;) {
+ long ret;
+
+ prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
+ if (xenvif_have_rx_work(queue))
+ break;
+ ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
+ if (!ret)
+ break;
}
+ finish_wait(&queue->wq, &wait);
}
int xenvif_kthread_guest_rx(void *data)
{
struct xenvif_queue *queue = data;
- struct sk_buff *skb;
+ struct xenvif *vif = queue->vif;
- while (!kthread_should_stop()) {
- wait_event_interruptible(queue->wq,
- rx_work_todo(queue) ||
- queue->vif->disabled ||
- test_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status) ||
- kthread_should_stop());
+ for (;;) {
+ xenvif_wait_for_rx_work(queue);
if (kthread_should_stop())
break;
* context so we defer it here, if this thread is
* associated with queue 0.
*/
- if (unlikely(queue->vif->disabled && queue->id == 0)) {
- xenvif_carrier_off(queue->vif);
- } else if (unlikely(queue->vif->disabled)) {
- /* kthread_stop() would be called upon this thread soon,
- * be a bit proactive
- */
- skb_queue_purge(&queue->rx_queue);
- queue->rx_last_skb_slots = 0;
- } else if (unlikely(test_and_clear_bit(QUEUE_STATUS_RX_PURGE_EVENT,
- &queue->status))) {
- xenvif_rx_purge_event(queue);
- } else if (!netif_carrier_ok(queue->vif->dev)) {
- /* Another queue stalled and turned the carrier off, so
- * purge the internal queue of queues which were not
- * blocked
- */
- skb_queue_purge(&queue->rx_queue);
- queue->rx_last_skb_slots = 0;
+ if (unlikely(vif->disabled && queue->id == 0)) {
+ xenvif_carrier_off(vif);
+ xenvif_rx_queue_purge(queue);
+ continue;
}
if (!skb_queue_empty(&queue->rx_queue))
xenvif_rx_action(queue);
+ /* If the guest hasn't provided any Rx slots for a
+ * while it's probably not responsive, drop the
+ * carrier so packets are dropped earlier.
+ */
+ if (xenvif_rx_queue_stalled(queue))
+ xenvif_queue_carrier_off(queue);
+ else if (xenvif_rx_queue_ready(queue))
+ xenvif_queue_carrier_on(queue);
+
+ /* Queued packets may have foreign pages from other
+ * domains. These cannot be queued indefinitely as
+ * this would starve guests of grant refs and transmit
+ * slots.
+ */
+ xenvif_rx_queue_drop_expired(queue);
+
+ xenvif_rx_queue_maybe_wake(queue);
+
cond_resched();
}
/* Bin any remaining skbs */
- while ((skb = skb_dequeue(&queue->rx_queue)) != NULL)
- dev_kfree_skb(skb);
+ xenvif_rx_queue_purge(queue);
return 0;
}
goto failed_init;
rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
+ rx_stall_timeout_jiffies = msecs_to_jiffies(rx_stall_timeout_msecs);
#ifdef CONFIG_DEBUG_FS
xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
kfree_skb(skb);
}
+ EXPORT_SYMBOL_GPL(br_deliver);
/* called with rcu_read_lock */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
/* Do not flood unicast traffic to ports that turn it off */
if (unicast && !(p->flags & BR_FLOOD))
continue;
+
+ /* Do not flood to ports that enable proxy ARP */
+ if (p->flags & BR_PROXYARP)
+ continue;
+
prev = maybe_deliver(prev, p, skb, __packet_hook);
if (IS_ERR(prev))
goto out;
static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
+ if (unlikely(skb->pfmemalloc)) {
+ consume_skb(skb);
+ return;
+ }
__skb_pull(skb, skb_headlen(skb));
/* restore the reserve we had after netdev_alloc_skb_ip_align() */
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
* __napi_schedule - schedule for receive
* @n: entry to schedule
*
- * The entry's receive function will be scheduled to run
+ * The entry's receive function will be scheduled to run.
+ * Consider using __napi_schedule_irqoff() if hard irqs are masked.
*/
void __napi_schedule(struct napi_struct *n)
{
}
EXPORT_SYMBOL(__napi_schedule);
+/**
+ * __napi_schedule_irqoff - schedule for receive
+ * @n: entry to schedule
+ *
+ * Variant of __napi_schedule() assuming hard irqs are masked
+ */
+void __napi_schedule_irqoff(struct napi_struct *n)
+{
+ ____napi_schedule(this_cpu_ptr(&softnet_data), n);
+}
+EXPORT_SYMBOL(__napi_schedule_irqoff);
+
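A hedged sketch of the intended caller, mirroring the mlx4 hunk earlier in this series: an MSI-X handler already runs with hard irqs masked, so the local_irq_save()/restore() pair inside napi_schedule() is pure overhead there. Driver and names below are illustrative:
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct foo_ring {
	struct napi_struct napi;
	/* ... */
};

static irqreturn_t foo_msix_handler(int irq, void *data)
{
	struct foo_ring *ring = data;

	napi_schedule_irqoff(&ring->napi);	/* no local_irq_save() */
	return IRQ_HANDLED;
}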
void __napi_complete(struct napi_struct *n)
{
BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
* (at your option) any later version.
*/
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
+#include <linux/sysfs.h>
#include "dsa_priv.h"
char dsa_driver_version[] = "0.1";
return ret;
}
+/* hwmon support ************************************************************/
+
+#ifdef CONFIG_NET_DSA_HWMON
+
+static ssize_t temp1_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dsa_switch *ds = dev_get_drvdata(dev);
+ int temp, ret;
+
+ ret = ds->drv->get_temp(ds, &temp);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", temp * 1000);
+}
+static DEVICE_ATTR_RO(temp1_input);
+
+static ssize_t temp1_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dsa_switch *ds = dev_get_drvdata(dev);
+ int temp, ret;
+
+ ret = ds->drv->get_temp_limit(ds, &temp);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", temp * 1000);
+}
+
+static ssize_t temp1_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct dsa_switch *ds = dev_get_drvdata(dev);
+ int temp, ret;
+
+ ret = kstrtoint(buf, 0, &temp);
+ if (ret < 0)
+ return ret;
+
+ ret = ds->drv->set_temp_limit(ds, DIV_ROUND_CLOSEST(temp, 1000));
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR(temp1_max, S_IRUGO, temp1_max_show, temp1_max_store);
+
+static ssize_t temp1_max_alarm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dsa_switch *ds = dev_get_drvdata(dev);
+ bool alarm;
+ int ret;
+
+ ret = ds->drv->get_temp_alarm(ds, &alarm);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", alarm);
+}
+static DEVICE_ATTR_RO(temp1_max_alarm);
+
+static struct attribute *dsa_hwmon_attrs[] = {
+ &dev_attr_temp1_input.attr, /* 0 */
+ &dev_attr_temp1_max.attr, /* 1 */
+ &dev_attr_temp1_max_alarm.attr, /* 2 */
+ NULL
+};
+
+static umode_t dsa_hwmon_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct dsa_switch *ds = dev_get_drvdata(dev);
+ struct dsa_switch_driver *drv = ds->drv;
+ umode_t mode = attr->mode;
+
+ if (index == 1) {
+ if (!drv->get_temp_limit)
+ mode = 0;
+ else if (drv->set_temp_limit)
+ mode |= S_IWUSR;
+ } else if (index == 2 && !drv->get_temp_alarm) {
+ mode = 0;
+ }
+ return mode;
+}
+
+static const struct attribute_group dsa_hwmon_group = {
+ .attrs = dsa_hwmon_attrs,
+ .is_visible = dsa_hwmon_attrs_visible,
+};
+__ATTRIBUTE_GROUPS(dsa_hwmon);
+
+#endif /* CONFIG_NET_DSA_HWMON */
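For context, the group above only surfaces what the switch driver opts into via dsa_switch_driver; a minimal provider might look like the hypothetical hook below. With only .get_temp set, temp1_max and temp1_max_alarm stay hidden courtesy of dsa_hwmon_attrs_visible().
#include <net/dsa.h>

/* Hypothetical driver hook: temperatures are reported in degrees C,
 * which the show() routines above scale to millidegrees for hwmon.
 */
static int example_get_temp(struct dsa_switch *ds, int *temp)
{
	*temp = 42;		/* would be read from switch registers */
	return 0;
}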
/* basic switch operations **************************************************/
static struct dsa_switch *
dst->rcv = brcm_netdev_ops.rcv;
break;
#endif
- default:
+ case DSA_TAG_PROTO_NONE:
break;
+ default:
+ ret = -ENOPROTOOPT;
+ goto out;
}
dst->tag_protocol = drv->tag_protocol;
ds->ports[i] = slave_dev;
}
+#ifdef CONFIG_NET_DSA_HWMON
+ /* If the switch provides a temperature sensor,
+ * register with hardware monitoring subsystem.
+ * Treat registration error as non-fatal and ignore it.
+ */
+ if (drv->get_temp) {
+ const char *netname = netdev_name(dst->master_netdev);
+ char hname[IFNAMSIZ + 1];
+ int i, j;
+
+ /* Create valid hwmon 'name' attribute */
+ for (i = j = 0; i < IFNAMSIZ && netname[i]; i++) {
+ if (isalnum(netname[i]))
+ hname[j++] = netname[i];
+ }
+ hname[j] = '\0';
+ scnprintf(ds->hwmon_name, sizeof(ds->hwmon_name), "%s_dsa%d",
+ hname, index);
+ ds->hwmon_dev = hwmon_device_register_with_groups(NULL,
+ ds->hwmon_name, ds, dsa_hwmon_groups);
+ if (IS_ERR(ds->hwmon_dev))
+ ds->hwmon_dev = NULL;
+ }
+#endif /* CONFIG_NET_DSA_HWMON */
+
return ds;
out_free:
static void dsa_switch_destroy(struct dsa_switch *ds)
{
+#ifdef CONFIG_NET_DSA_HWMON
+ if (ds->hwmon_dev)
+ hwmon_device_unregister(ds->hwmon_dev);
+#endif
}
#ifdef CONFIG_PM_SLEEP
const char *port_name;
int chip_index, port_index;
const unsigned int *sw_addr, *port_reg;
+ u32 eeprom_len;
int ret;
mdio = of_parse_phandle(np, "dsa,mii-bus", 0);
if (cd->sw_addr > PHY_MAX_ADDR)
continue;
+ if (!of_property_read_u32(np, "eeprom-length", &eeprom_len))
+ cd->eeprom_len = eeprom_len;
+
for_each_available_child_of_node(child, port) {
port_reg = of_get_property(port, "reg", NULL);
if (!port_reg)
IPV6_SADDR_RULE_PRIVACY,
IPV6_SADDR_RULE_ORCHID,
IPV6_SADDR_RULE_PREFIX,
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+ IPV6_SADDR_RULE_NOT_OPTIMISTIC,
+#endif
IPV6_SADDR_RULE_MAX
};
return 0;
}
+static inline bool ipv6_use_optimistic_addr(struct inet6_dev *idev)
+{
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+ return idev && idev->cnf.optimistic_dad && idev->cnf.use_optimistic;
+#else
+ return false;
+#endif
+}
+
static int ipv6_get_saddr_eval(struct net *net,
struct ipv6_saddr_score *score,
struct ipv6_saddr_dst *dst,
score->scopedist = ret;
break;
case IPV6_SADDR_RULE_PREFERRED:
+ {
/* Rule 3: Avoid deprecated and optimistic addresses */
+ u8 avoid = IFA_F_DEPRECATED;
+
+ if (!ipv6_use_optimistic_addr(score->ifa->idev))
+ avoid |= IFA_F_OPTIMISTIC;
ret = ipv6_saddr_preferred(score->addr_type) ||
- !(score->ifa->flags & (IFA_F_DEPRECATED|IFA_F_OPTIMISTIC));
+ !(score->ifa->flags & avoid);
break;
+ }
#ifdef CONFIG_IPV6_MIP6
case IPV6_SADDR_RULE_HOA:
{
ret = score->ifa->prefix_len;
score->matchlen = ret;
break;
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+ case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
+ /* Optimistic addresses still have lower precedence than other
+ * preferred addresses.
+ */
+ ret = !(score->ifa->flags & IFA_F_OPTIMISTIC);
+ break;
+#endif
default:
ret = 0;
}
else
stored_lft = 0;
if (!update_lft && !create && stored_lft) {
- const u32 minimum_lft = min(
- stored_lft, (u32)MIN_VALID_LIFETIME);
+ const u32 minimum_lft = min_t(u32,
+ stored_lft, MIN_VALID_LIFETIME);
valid_lft = max(valid_lft, minimum_lft);
/* RFC4862 Section 5.5.3e:
* Optimistic nodes can start receiving
* Frames right away
*/
- if (ifp->flags & IFA_F_OPTIMISTIC)
+ if (ifp->flags & IFA_F_OPTIMISTIC) {
ip6_ins_rt(ifp->rt);
+ if (ipv6_use_optimistic_addr(idev)) {
+ /* Because optimistic nodes can use this address,
+ * notify listeners. If DAD fails, RTM_DELADDR is sent.
+ */
+ ipv6_ifa_notify(RTM_NEWADDR, ifp);
+ }
+ }
addrconf_dad_kick(ifp);
out:
array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
+ array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
#endif
#ifdef CONFIG_IPV6_MROUTE
array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding;
}
write_unlock_bh(&idev->lock);
+ inet6_ifinfo_notify(RTM_NEWLINK, idev);
addrconf_verify_rtnl();
return 0;
}
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "use_optimistic",
+ .data = &ipv6_devconf.use_optimistic,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
#endif
#ifdef CONFIG_IPV6_MROUTE
{