#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
+#include <linux/lockdep.h>
#include <net/net_namespace.h>
#include <net/sock.h>
return NULL;
}
+/*
+ * This lock is needed until some qdiscs stop calling qdisc_tree_decrease_qlen()
+ * without rtnl_lock(); currently hfsc_dequeue(), netem_dequeue(), tbf_dequeue()
+ */
+static DEFINE_SPINLOCK(qdisc_list_lock);
+
+static void qdisc_list_add(struct Qdisc *q)
+{
+	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+		spin_lock_bh(&qdisc_list_lock);
+		list_add_tail(&q->list, &qdisc_root_sleeping(q)->list);
+		spin_unlock_bh(&qdisc_list_lock);
+	}
+}
+
+void qdisc_list_del(struct Qdisc *q)
+{
+	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+		spin_lock_bh(&qdisc_list_lock);
+		list_del(&q->list);
+		spin_unlock_bh(&qdisc_list_lock);
+	}
+}
+EXPORT_SYMBOL(qdisc_list_del);
+
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	unsigned int i;
+	struct Qdisc *q;
+
+	spin_lock_bh(&qdisc_list_lock);
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		struct Qdisc *q, *txq_root = txq->qdisc_sleeping;
+		struct Qdisc *txq_root = txq->qdisc_sleeping;
		q = qdisc_match_from_root(txq_root, handle);
		if (q)
-			return q;
+			goto unlock;
	}
-	return qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+
+	q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+
+unlock:
+	spin_unlock_bh(&qdisc_list_lock);
+
+	return q;
}
static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
	if (!s || tsize != s->tsize || (!tab && tsize > 0))
		return ERR_PTR(-EINVAL);
-	spin_lock_bh(&qdisc_stab_lock);
+	spin_lock(&qdisc_stab_lock);
	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
			continue;
		stab->refcnt++;
-		spin_unlock_bh(&qdisc_stab_lock);
+		spin_unlock(&qdisc_stab_lock);
		return stab;
	}
-	spin_unlock_bh(&qdisc_stab_lock);
+	spin_unlock(&qdisc_stab_lock);
	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);
	if (tsize > 0)
		memcpy(stab->data, tab, tsize * sizeof(u16));
-	spin_lock_bh(&qdisc_stab_lock);
+	spin_lock(&qdisc_stab_lock);
	list_add_tail(&stab->list, &qdisc_stab_list);
-	spin_unlock_bh(&qdisc_stab_lock);
+	spin_unlock(&qdisc_stab_lock);
	return stab;
}
	if (!tab)
		return;
-	spin_lock_bh(&qdisc_stab_lock);
+	spin_lock(&qdisc_stab_lock);
	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		kfree(tab);
	}
-	spin_unlock_bh(&qdisc_stab_lock);
+	spin_unlock(&qdisc_stab_lock);
}
EXPORT_SYMBOL(qdisc_put_stab);
	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
	smp_wmb();
-	__netif_schedule(wd->qdisc);
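+	/* wd->qdisc may be an inner qdisc; the TX softirq only runs root
+	 * qdiscs, so reschedule the root of this tree
+	 */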
+	__netif_schedule(qdisc_root(wd->qdisc));
	return HRTIMER_NORESTART;
}
{
	ktime_t time;
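+	/* don't mark the qdisc throttled or arm the timer once the root
+	 * qdisc has been deactivated
+	 */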
+	if (test_bit(__QDISC_STATE_DEACTIVATED,
+		     &qdisc_root_sleeping(wd->qdisc)->state))
+		return;
+
	wd->qdisc->flags |= TCQ_F_THROTTLED;
	time = ktime_set(0, 0);
	time = ktime_add_ns(time, PSCHED_US2NS(expires));
	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
	spinlock_t *root_lock;
-	root_lock = qdisc_root_lock(oqdisc);
+	root_lock = qdisc_lock(oqdisc);
	spin_lock_bh(root_lock);
	/* Prune old scheduler */
	if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
		qdisc_reset(oqdisc);
	/* ... and graft new one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	dev_queue->qdisc_sleeping = qdisc;
-	dev_queue->qdisc = &noop_qdisc;
+	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
	spin_unlock_bh(root_lock);
	if (new || old)
		qdisc_notify(skb, n, clid, old, new);
-	if (old) {
-		spin_lock_bh(&old->q.lock);
+	if (old)
		qdisc_destroy(old);
-		spin_unlock_bh(&old->q.lock);
-	}
}
/* Graft qdisc "new" to class "classid" of qdisc "parent" or
return err;
}
+/* lockdep annotation is needed for ingress; egress gets it only for name */
+static struct lock_class_key qdisc_tx_lock;
+static struct lock_class_key qdisc_rx_lock;
+
/*
Allocate and initialize new qdisc.
	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
+		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			if (handle == 0)
				goto err_out3;
		}
+		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
	}
	sch->handle = handle;
			sch->stab = stab;
		}
		if (tca[TCA_RATE]) {
+			spinlock_t *root_lock;
+
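+			/* stats of inner qdiscs are updated under the root
+			 * qdisc's lock, so give the estimator the sleeping
+			 * root's lock; root and ingress qdiscs use their own
+			 */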
+			if ((sch->parent != TC_H_ROOT) &&
+			    !(sch->flags & TCQ_F_INGRESS))
+				root_lock = qdisc_root_sleeping_lock(sch);
+			else
+				root_lock = qdisc_lock(sch);
+
			err = gen_new_estimator(&sch->bstats, &sch->rate_est,
-						qdisc_root_lock(sch),
-						tca[TCA_RATE]);
+						root_lock, tca[TCA_RATE]);
			if (err) {
				/*
				 * Any broken qdiscs that would require
				 * a ops->reset() here? The qdisc was never
				 * in action so it shouldn't be necessary.
				 */
				if (ops->destroy)
					ops->destroy(sch);
				goto err_out3;
			}
		}
-		if ((parent != TC_H_ROOT) && !(sch->flags & TCQ_F_INGRESS))
-			list_add_tail(&sch->list, &dev_queue->qdisc_sleeping->list);
+
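+		/* inner (non-root, non-ingress) qdiscs go on the root's list,
+		 * under qdisc_list_lock
+		 */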
+		qdisc_list_add(sch);
		return sch;
	}
	if (tca[TCA_RATE])
		gen_replace_estimator(&sch->bstats, &sch->rate_est,
-				      qdisc_root_lock(sch), tca[TCA_RATE]);
+				      qdisc_root_sleeping_lock(sch),
+				      tca[TCA_RATE]);
	return 0;
}
}
graft:
-	if (1) {
-		spinlock_t *root_lock;
-
-		err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
-		if (err) {
-			if (q) {
-				root_lock = qdisc_root_lock(q);
-				spin_lock_bh(root_lock);
-				qdisc_destroy(q);
-				spin_unlock_bh(root_lock);
-			}
-			return err;
-		}
+	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
+	if (err) {
+		if (q)
+			qdisc_destroy(q);
+		return err;
	}
+
	return 0;
}
	if (q->stab && qdisc_dump_stab(skb, q->stab) < 0)
		goto nla_put_failure;
-	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
-					 TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
+	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
+					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;
	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;
-	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
-					 TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
+	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
+					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;
	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)