#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
+#include <net/ip.h> /* for IP_MF, IP_OFFSET */
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
}
-void __netif_schedule(struct Qdisc *q)
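+/* Core of qdisc scheduling: chain @q onto this CPU's softnet
+ * output_queue and raise the TX softirq.  Split out of
+ * __netif_schedule() so net_tx_action() can requeue a qdisc whose
+ * __QDISC_STATE_SCHED bit is already set without re-testing it.
+ */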
+static inline void __netif_reschedule(struct Qdisc *q)
{
- if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
- struct softnet_data *sd;
- unsigned long flags;
+ struct softnet_data *sd;
+ unsigned long flags;
- local_irq_save(flags);
- sd = &__get_cpu_var(softnet_data);
- q->next_sched = sd->output_queue;
- sd->output_queue = q;
- raise_softirq_irqoff(NET_TX_SOFTIRQ);
- local_irq_restore(flags);
- }
+ local_irq_save(flags);
+ sd = &__get_cpu_var(softnet_data);
+ q->next_sched = sd->output_queue;
+ sd->output_queue = q;
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
+}
+
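+/* test_and_set_bit() makes __QDISC_STATE_SCHED a "queued once" guard:
+ * a qdisc sits on at most one CPU's output_queue at a time. */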
+void __netif_schedule(struct Qdisc *q)
+{
+ if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
+ __netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
{
u32 addr1, addr2, ports;
u32 hash, ihl;
- u8 ip_proto;
+ u8 ip_proto = 0;
if (unlikely(!simple_tx_hashrnd_initialized)) {
get_random_bytes(&simple_tx_hashrnd, 4);
switch (skb->protocol) {
case __constant_htons(ETH_P_IP):
- ip_proto = ip_hdr(skb)->protocol;
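+ /* IP fragments past the first carry no L4 header, so for any
+ * fragment (MF set or nonzero offset) leave ip_proto at 0;
+ * ports then stay 0 and every fragment of a datagram hashes
+ * to the same TX queue. */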
+ if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
+ ip_proto = ip_hdr(skb)->protocol;
addr1 = ip_hdr(skb)->saddr;
addr2 = ip_hdr(skb)->daddr;
ihl = ip_hdr(skb)->ihl;
spin_lock(root_lock);
- rc = qdisc_enqueue_root(skb, q);
- qdisc_run(q);
-
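+ /* dev_deactivate() may have marked the qdisc dead between the
+ * qdisc lookup and taking its root lock; once DEACTIVATED is
+ * set the queue will never be run again, so drop the packet
+ * instead of enqueueing it onto a dead qdisc. */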
+ if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+ kfree_skb(skb);
+ rc = NET_XMIT_DROP;
+ } else {
+ rc = qdisc_enqueue_root(skb, q);
+ qdisc_run(q);
+ }
spin_unlock(root_lock);
goto out;
head = head->next_sched;
- smp_mb__before_clear_bit();
- clear_bit(__QDISC_STATE_SCHED, &q->state);
-
root_lock = qdisc_lock(q);
if (spin_trylock(root_lock)) {
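+ /* We own the root lock: clear SCHED before running, so a
+ * concurrent __netif_schedule() can requeue the qdisc for
+ * work that arrives while we drain it. */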
+ smp_mb__before_clear_bit();
+ clear_bit(__QDISC_STATE_SCHED,
+ &q->state);
qdisc_run(q);
spin_unlock(root_lock);
} else {
- __netif_schedule(q);
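+ /* Lost the trylock: requeue for a later softirq pass,
+ * unless the qdisc is being deactivated, in which case
+ * just drop the SCHED bit. */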
+ if (!test_bit(__QDISC_STATE_DEACTIVATED,
+ &q->state)) {
+ __netif_reschedule(q);
+ } else {
+ smp_mb__before_clear_bit();
+ clear_bit(__QDISC_STATE_SCHED,
+ &q->state);
+ }
}
}
}
q = rxq->qdisc;
if (q != &noop_qdisc) {
spin_lock(qdisc_lock(q));
- result = qdisc_enqueue_root(skb, q);
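+ /* Don't feed packets to an ingress qdisc that is being
+ * deactivated. */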
+ if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
+ result = qdisc_enqueue_root(skb, q);
spin_unlock(qdisc_lock(q));
}
return 0;
}
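+
+/* Wrapper for the driver ->change_rx_flags() hook: call it only while
+ * the device is up, since drivers may program hardware there, which is
+ * unsafe (and pointless) on a downed interface.
+ */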
+static void dev_change_rx_flags(struct net_device *dev, int flags)
+{
+ if ((dev->flags & IFF_UP) && dev->change_rx_flags)
+ dev->change_rx_flags(dev, flags);
+}
+
static int __dev_set_promiscuity(struct net_device *dev, int inc)
{
unsigned short old_flags = dev->flags;
current->uid, current->gid,
audit_get_sessionid(current));
- if (dev->change_rx_flags)
- dev->change_rx_flags(dev, IFF_PROMISC);
+ dev_change_rx_flags(dev, IFF_PROMISC);
}
return 0;
}
}
}
if (dev->flags ^ old_flags) {
- if (dev->change_rx_flags)
- dev->change_rx_flags(dev, IFF_ALLMULTI);
+ dev_change_rx_flags(dev, IFF_ALLMULTI);
dev_set_rx_mode(dev);
}
return 0;
* Load in the correct multicast list now the flags have changed.
*/
- if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
- dev->change_rx_flags(dev, IFF_MULTICAST);
+ if ((old_flags ^ flags) & IFF_MULTICAST)
+ dev_change_rx_flags(dev, IFF_MULTICAST);
dev_set_rx_mode(dev);
}
/* Delayed registration/unregistration */
-static DEFINE_SPINLOCK(net_todo_list_lock);
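+/* net_todo_list is only manipulated with the RTNL lock held:
+ * net_set_todo() runs from (un)registration paths and
+ * netdev_run_todo() from rtnl_unlock(), so no separate spinlock
+ * is needed. */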
static LIST_HEAD(net_todo_list);
static void net_set_todo(struct net_device *dev)
{
- spin_lock(&net_todo_list_lock);
list_add_tail(&dev->todo_list, &net_todo_list);
- spin_unlock(&net_todo_list_lock);
}
static void rollback_registered(struct net_device *dev)
* free_netdev(y1);
* free_netdev(y2);
*
- * We are invoked by rtnl_unlock() after it drops the semaphore.
+ * We are invoked by rtnl_unlock().
* This allows us to deal with two problems:
* 1) We can delete sysfs objects which invoke hotplug
* without deadlocking with linkwatch via keventd.
* 2) Since we run with the RTNL lock not held, we can sleep
* safely in order to wait for the netdev refcnt to drop to zero.
+ *
+ * We must not return until all unregister events added during
+ * the interval the lock was held have been completed.
*/
-static DEFINE_MUTEX(net_todo_run_mutex);
void netdev_run_todo(void)
{
struct list_head list;
- /* Need to guard against multiple cpu's getting out of order. */
- mutex_lock(&net_todo_run_mutex);
-
- /* Not safe to do outside the semaphore. We must not return
- * until all unregister events invoked by the local processor
- * have been completed (either by this todo run, or one on
- * another cpu).
- */
- if (list_empty(&net_todo_list))
- goto out;
-
/* Snapshot list, allow later requests */
- spin_lock(&net_todo_list_lock);
list_replace_init(&net_todo_list, &list);
- spin_unlock(&net_todo_list_lock);
+
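+ /* The snapshot above was taken with the RTNL lock held; todo
+ * entries added after we drop it below belong to a later
+ * rtnl_unlock() and will be handled by that call's todo run. */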
+ __rtnl_unlock();
while (!list_empty(&list)) {
struct net_device *dev
/* Free network device */
kobject_put(&dev->dev.kobj);
}
-
-out:
- mutex_unlock(&net_todo_run_mutex);
}
static struct net_device_stats *internal_stats(struct net_device *dev)