@@ ... @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 	local_irq_save(flags);

 	rps_lock(sd);
+	if (!netif_running(skb->dev))
+		goto drop;
 	if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
 		if (skb_queue_len(&sd->input_pkt_queue)) {
 enqueue:
@@ ... @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 		goto enqueue;
 	}

+drop:
 	sd->dropped++;
 	rps_unlock(sd);
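
The two hunks above make enqueue_to_backlog() refuse packets for a device that has left the running state, and the check happens under rps_lock(), the same lock the flush path takes, so a packet can never slip into a backlog after that backlog has been flushed. A minimal userspace sketch of the pattern, assuming nothing kernel-specific: a pthread mutex stands in for rps_lock(), a plain bool for netif_running(), and every name here (struct backlog, backlog_shutdown(), ...) is invented for illustration.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt {
        struct pkt *next;
        int id;
};

struct backlog {
        pthread_mutex_t lock;   /* stands in for rps_lock() */
        bool running;           /* stands in for netif_running() */
        struct pkt *head, **tail;
        unsigned long dropped;
};

static void backlog_init(struct backlog *q)
{
        pthread_mutex_init(&q->lock, NULL);
        q->running = true;
        q->head = NULL;
        q->tail = &q->head;
        q->dropped = 0;
}

/* Like enqueue_to_backlog(): test the running flag under the queue lock. */
static int backlog_enqueue(struct backlog *q, struct pkt *p)
{
        pthread_mutex_lock(&q->lock);
        if (!q->running) {              /* the new netif_running() check */
                q->dropped++;
                pthread_mutex_unlock(&q->lock);
                free(p);
                return -1;
        }
        p->next = NULL;
        *q->tail = p;
        q->tail = &p->next;
        pthread_mutex_unlock(&q->lock);
        return 0;
}

/* Like flush_backlog() on the unregister path: the flag is cleared under
 * the same lock, so no later enqueue can repopulate the queue. */
static void backlog_shutdown(struct backlog *q)
{
        struct pkt *p, *next;

        pthread_mutex_lock(&q->lock);
        q->running = false;
        p = q->head;
        q->head = NULL;
        q->tail = &q->head;
        pthread_mutex_unlock(&q->lock);

        for (; p != NULL; p = next) {
                next = p->next;
                free(p);
        }
}

int main(void)
{
        struct backlog q;
        struct pkt *p;

        backlog_init(&q);
        p = calloc(1, sizeof(*p));
        backlog_enqueue(&q, p);
        backlog_shutdown(&q);

        p = calloc(1, sizeof(*p));
        if (backlog_enqueue(&q, p) < 0)
                printf("dropped after shutdown, drops=%lu\n", q.dropped);
        return 0;
}

Built with gcc -pthread, the second enqueue is rejected because the flag was cleared under the very lock the enqueue path takes, which is the property the kernel change relies on.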
@@ ... @@ static int __netif_receive_skb(struct sk_buff *skb)
 	pt_prev = NULL;

-	rcu_read_lock();
-
 another_round:

 	__this_cpu_inc(softnet_data.processed);
@@ ... @@ static int __netif_receive_skb(struct sk_buff *skb)
 	}

 out:
-	rcu_read_unlock();
 	return ret;
 }
@@ ... @@ int netif_receive_skb(struct sk_buff *skb)
  */
 int netif_receive_skb(struct sk_buff *skb)
 {
+	int ret;
+
 	if (netdev_tstamp_prequeue)
 		net_timestamp_check(skb);

 	if (skb_defer_rx_timestamp(skb))
 		return NET_RX_SUCCESS;

+	rcu_read_lock();
+
 #ifdef CONFIG_RPS
 	{
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
-		int cpu, ret;
-
-		rcu_read_lock();
-
-		cpu = get_rps_cpu(skb->dev, skb, &rflow);
+		int cpu = get_rps_cpu(skb->dev, skb, &rflow);

 		if (cpu >= 0) {
 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 			rcu_read_unlock();
-		} else {
-			rcu_read_unlock();
-			ret = __netif_receive_skb(skb);
+			return ret;
 		}
-
-		return ret;
 	}
-#else
-	return __netif_receive_skb(skb);
 #endif
+	ret = __netif_receive_skb(skb);
+	rcu_read_unlock();
+	return ret;
 }
 EXPORT_SYMBOL(netif_receive_skb);
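
The rewritten netif_receive_skb() takes the RCU read lock once, before the RPS lookup, and releases it exactly once on every exit path; previously the lock lived only inside the CONFIG_RPS block while __netif_receive_skb() took its own. A sketch of the same refactor shape in plain C, with a pthread_rwlock_t read lock standing in for rcu_read_lock() and invented names (receive(), steer(), enqueue(), deliver()) for netif_receive_skb(), get_rps_cpu(), enqueue_to_backlog() and __netif_receive_skb():

#include <pthread.h>

static pthread_rwlock_t cfg_lock = PTHREAD_RWLOCK_INITIALIZER;

/* steer() plays get_rps_cpu(): >= 0 means "hand off to that CPU's queue". */
static int steer(int pkt)   { return (pkt % 2) ? 0 : -1; }
static int enqueue(int pkt) { (void)pkt; return 0; }  /* enqueue_to_backlog() */
static int deliver(int pkt) { (void)pkt; return 0; }  /* __netif_receive_skb() */

int receive(int pkt)
{
        int ret;

        pthread_rwlock_rdlock(&cfg_lock);        /* was taken per-branch before */

        int cpu = steer(pkt);
        if (cpu >= 0) {
                ret = enqueue(pkt);
                pthread_rwlock_unlock(&cfg_lock);
                return ret;                       /* early exit: one unlock */
        }

        ret = deliver(pkt);
        pthread_rwlock_unlock(&cfg_lock);         /* common exit: one unlock */
        return ret;
}

int main(void)
{
        return receive(1) | receive(2);
}

The point of the shape is that the steering result stays valid for as long as it is used: the read side is never dropped and re-taken between the steering decision and the hand-off or delivery.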
@@ ... @@ static int process_backlog(struct napi_struct *napi, int quota)
 		unsigned int qlen;

 		while ((skb = __skb_dequeue(&sd->process_queue))) {
+			rcu_read_lock();
 			local_irq_enable();
 			__netif_receive_skb(skb);
+			rcu_read_unlock();
 			local_irq_disable();
 			input_queue_head_incr(sd);
 			if (++work >= quota) {
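
Since __netif_receive_skb() no longer enters the read-side section itself, process_backlog() must supply it, and it does so per packet: the lock is acquired just before interrupts are re-enabled for the handler and dropped before they are disabled again. A short sketch of that per-item discipline, again using pthread_rwlock_t as a stand-in for the RCU read side and hypothetical names (drain_queue(), handle()):

#include <pthread.h>

static pthread_rwlock_t cfg_lock = PTHREAD_RWLOCK_INITIALIZER;

static void handle(int pkt) { (void)pkt; }      /* __netif_receive_skb() */

/* One short read-side section per dequeued item, as in the hunk above. */
static void drain_queue(const int *pkts, int n)
{
        for (int i = 0; i < n; i++) {
                pthread_rwlock_rdlock(&cfg_lock);
                handle(pkts[i]);
                pthread_rwlock_unlock(&cfg_lock);
        }
}

int main(void)
{
        int pkts[8] = { 0 };

        drain_queue(pkts, 8);
        return 0;
}

Keeping the critical section to one item means a writer waiting for readers is only ever held up by the packet currently in flight, never by a whole quota's worth of queued packets.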
@@ ... @@ static void rollback_registered_many(struct list_head *head)
 		unlist_netdevice(dev);

 		dev->reg_state = NETREG_UNREGISTERING;
+		on_each_cpu(flush_backlog, dev, 1);
 	}

 	synchronize_net();
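
With the hunk above, the per-CPU backlogs are flushed while the device is still marked NETREG_UNREGISTERING, before synchronize_net() runs; combined with the netif_running() check earlier in this patch, no CPU's backlog can hold a packet for the dying device once the grace period has elapsed. A sketch of that ordering with invented names, where taking and releasing a write lock models waiting out the grace period:

#include <pthread.h>
#include <stdbool.h>

#define NR_QUEUES 4

static pthread_rwlock_t cfg_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool dev_running = true;                 /* netif_running() stand-in */
static int backlog_len[NR_QUEUES];              /* per-CPU backlog stand-ins */

static void flush_backlog(int cpu)
{
        backlog_len[cpu] = 0;
}

static void unregister_device(void)
{
        /* 1. Stop new packets: enqueue paths now see !running and drop
         *    (in the kernel this is checked under the queue lock). */
        dev_running = false;

        /* 2. Empty what is already queued, like on_each_cpu(flush_backlog, ...). */
        for (int cpu = 0; cpu < NR_QUEUES; cpu++)
                flush_backlog(cpu);

        /* 3. Wait out current readers, like synchronize_net(): after this,
         *    nobody can still be processing a packet for the device. */
        pthread_rwlock_wrlock(&cfg_lock);
        pthread_rwlock_unlock(&cfg_lock);
}

int main(void)
{
        unregister_device();
        return 0;
}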
@@ ... @@ int __netdev_update_features(struct net_device *dev)
 		netdev_err(dev,
 			"set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
 			err, features, dev->features);
-		return -1;
+		return 0;
 	}

 	if (!err)
 		dev->features = features;
@@ ... @@ void netdev_run_todo(void)
 		dev->reg_state = NETREG_UNREGISTERED;

-		on_each_cpu(flush_backlog, dev, 1);
-
 		netdev_wait_allrefs(dev);

 		/* paranoia */