/* Main transmission queue. */
/* Modifications to data participating in scheduling must be protected with
- * qdisc_root_lock(qdisc) spinlock.
+ * qdisc_lock(qdisc) spinlock.
*
* The idea is the following:
* - enqueue, dequeue are serialized via qdisc root lock
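/* For orientation, a minimal sketch of the caller side of this
 * contract, assuming the 2.6.27-era helpers qdisc_lock(),
 * qdisc_enqueue_root() and qdisc_run(); illustrative only, not a
 * verbatim copy of dev_queue_xmit(), and xmit_one_sketch() is a
 * made-up name:
 */
static int xmit_one_sketch(struct sk_buff *skb, struct Qdisc *q)
{
	spinlock_t *root_lock = qdisc_lock(q);
	int rc;

	spin_lock(root_lock);		/* serializes enqueue vs. dequeue */
	rc = qdisc_enqueue_root(skb, q);
	qdisc_run(q);			/* dequeues under the same lock; it is
					 * dropped around the driver call, as
					 * qdisc_restart() shows below */
	spin_unlock(root_lock);
	return rc;
}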
if (unlikely((skb = dequeue_skb(q)) == NULL))
return 0;
- root_lock = qdisc_root_lock(q);
+ root_lock = qdisc_lock(q);
/* And release the qdisc lock. */
spin_unlock(root_lock);
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
HARD_TX_LOCK(dev, txq, smp_processor_id());
- if (!netif_subqueue_stopped(dev, skb))
+ if (!netif_tx_queue_stopped(txq) &&
+ !netif_tx_queue_frozen(txq))
ret = dev_hard_start_xmit(skb, dev, txq);
HARD_TX_UNLOCK(dev, txq);
break;
}
- if (ret && netif_tx_queue_stopped(txq))
+ if (ret && (netif_tx_queue_stopped(txq) ||
+ netif_tx_queue_frozen(txq)))
ret = 0;
return ret;
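/* The zero return above ends the transmit loop. For context, the
 * 2.6.27-era __qdisc_run() that consumes this return value looks
 * roughly as follows (sketch from memory, not part of this patch):
 */
void __qdisc_run(struct Qdisc *q)
{
	unsigned long start_time = jiffies;

	while (qdisc_restart(q)) {
		/* Postpone processing if another task needs the CPU or
		 * we have held it for a full jiffy; finish via softirq.
		 */
		if (need_resched() || jiffies != start_time) {
			__netif_schedule(q);
			break;
		}
	}

	clear_bit(__QDISC_STATE_RUNNING, &q->state);
}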
if (some_queue_stopped &&
time_after(jiffies, (dev->trans_start +
dev->watchdog_timeo))) {
- printk(KERN_INFO "NETDEV WATCHDOG: %s: "
- "transmit timed out\n",
- dev->name);
+ char drivername[64];
+ printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
+ dev->name, netdev_drivername(dev, drivername, 64));
dev->tx_timeout(dev);
WARN_ON_ONCE(1);
}
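/* After the timeout check, dev_watchdog() normally re-arms itself for
 * another watchdog_timeo interval; a sketch of the pattern only, the
 * exact conditions and refcounting are omitted:
 *
 *	mod_timer(&dev->watchdog_timer,
 *		  round_jiffies(jiffies + dev->watchdog_timeo));
 */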
};
-static int fifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
+static const u8 prio2band[TC_PRIO_MAX+1] =
+	{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };
+
+/* 3-band FIFO queue: old style, but should be a bit faster than
+   the generic prio+fifo combination.
+ */
+
+#define PFIFO_FAST_BANDS 3
+
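/* Worked example of the mapping, reading from prio2band[] above:
 * TC_PRIO_BESTEFFORT (0) selects band 1 (normal), TC_PRIO_BULK (2)
 * band 2 (lowest), TC_PRIO_INTERACTIVE (6) band 0 (highest). An
 * out-of-range skb->priority is masked first: 22 & TC_PRIO_MAX == 6,
 * so it also selects band 0. In code:
 *
 *	int band = prio2band[skb->priority & TC_PRIO_MAX];
 */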
+static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
+ struct Qdisc *qdisc)
+{
+ struct sk_buff_head *list = qdisc_priv(qdisc);
+ return list + prio2band[skb->priority & TC_PRIO_MAX];
+}
+
+static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
{
- struct sk_buff_head *list = &qdisc->q;
+ struct sk_buff_head *list = prio2list(skb, qdisc);
- if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len)
+ if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) {
+ qdisc->q.qlen++;
return __qdisc_enqueue_tail(skb, qdisc, list);
+ }
return qdisc_drop(skb, qdisc);
}
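/* Why the explicit qdisc->q.qlen++ above: the per-band lists live in
 * the qdisc's private area, so the generic helpers no longer account
 * through qdisc->q. From memory, __qdisc_enqueue_tail() in this era
 * amounts to:
 *
 *	__skb_queue_tail(list, skb);
 *	sch->qstats.backlog += qdisc_pkt_len(skb);
 *	sch->bstats.bytes += qdisc_pkt_len(skb);
 *	sch->bstats.packets++;
 *	return NET_XMIT_SUCCESS;
 *
 * i.e. it updates stats but not sch->q.qlen, which pfifo_fast must now
 * maintain by hand.
 */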
-static struct sk_buff *fifo_fast_dequeue(struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
{
- struct sk_buff_head *list = &qdisc->q;
+ int prio;
+ struct sk_buff_head *list = qdisc_priv(qdisc);
- if (!skb_queue_empty(list))
- return __qdisc_dequeue_head(qdisc, list);
+ for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
+ if (!skb_queue_empty(list + prio)) {
+ qdisc->q.qlen--;
+ return __qdisc_dequeue_head(qdisc, list + prio);
+ }
+ }
return NULL;
}
-static int fifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
+static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
{
- return __qdisc_requeue(skb, qdisc, &qdisc->q);
+ qdisc->q.qlen++;
+ return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
}
-static void fifo_fast_reset(struct Qdisc* qdisc)
+static void pfifo_fast_reset(struct Qdisc* qdisc)
{
- __qdisc_reset_queue(qdisc, &qdisc->q);
+ int prio;
+ struct sk_buff_head *list = qdisc_priv(qdisc);
+
+ for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
+ __qdisc_reset_queue(qdisc, list + prio);
+
qdisc->qstats.backlog = 0;
+ qdisc->q.qlen = 0;
}
-static struct Qdisc_ops fifo_fast_ops __read_mostly = {
- .id = "fifo_fast",
- .priv_size = 0,
- .enqueue = fifo_fast_enqueue,
- .dequeue = fifo_fast_dequeue,
- .requeue = fifo_fast_requeue,
- .reset = fifo_fast_reset,
+static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
+{
+ struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
+
+ memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
+ NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+ return skb->len;
+
+nla_put_failure:
+ return -1;
+}
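/* With the dump op wired up, "tc qdisc show" can render the qdisc in
 * the usual prio format, along the lines of (illustrative output):
 *
 *	qdisc pfifo_fast 0: bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1
 */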
+
+static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
+{
+ int prio;
+ struct sk_buff_head *list = qdisc_priv(qdisc);
+
+ for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
+ skb_queue_head_init(list + prio);
+
+ return 0;
+}
+
+static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
+ .id = "pfifo_fast",
+ .priv_size = PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
+ .enqueue = pfifo_fast_enqueue,
+ .dequeue = pfifo_fast_dequeue,
+ .requeue = pfifo_fast_requeue,
+ .init = pfifo_fast_init,
+ .reset = pfifo_fast_reset,
+ .dump = pfifo_fast_dump,
.owner = THIS_MODULE,
};
}
EXPORT_SYMBOL(qdisc_create_dflt);
-/* Under qdisc_root_lock(qdisc) and BH! */
+/* Under qdisc_lock(qdisc) and BH! */
void qdisc_reset(struct Qdisc *qdisc)
{
kfree((char *) qdisc - qdisc->padded);
}
-/* Under qdisc_root_lock(qdisc) and BH! */
+/* Under qdisc_lock(qdisc) and BH! */
void qdisc_destroy(struct Qdisc *qdisc)
{
if (dev->tx_queue_len) {
qdisc = qdisc_create_dflt(dev, dev_queue,
- &fifo_fast_ops, TC_H_ROOT);
+ &pfifo_fast_ops, TC_H_ROOT);
if (!qdisc) {
printk(KERN_INFO "%s: activation failed\n", dev->name);
return;
int *need_watchdog_p = _need_watchdog;
rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
- if (new_qdisc != &noqueue_qdisc)
+ if (need_watchdog_p && new_qdisc != &noqueue_qdisc)
*need_watchdog_p = 1;
}
int need_watchdog;
/* No queueing discipline is attached to device;
- * create default one i.e. fifo_fast for devices,
- * which need queueing and noqueue_qdisc for
- * virtual interfaces.
+   create a default one, i.e. pfifo_fast for devices
+   which need queueing, and noqueue_qdisc for
+   virtual interfaces.
*/
if (dev_all_qdisc_sleeping_noop(dev))
need_watchdog = 0;
netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
+ transition_one_qdisc(dev, &dev->rx_queue, NULL);
if (need_watchdog) {
dev->trans_start = jiffies;
dev_queue = netdev_get_tx_queue(dev, i);
q = dev_queue->qdisc;
- root_lock = qdisc_root_lock(q);
+ root_lock = qdisc_lock(q);
if (lock)
spin_lock_bh(root_lock);
bool running;
netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
+ dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);
dev_watchdog_down(dev);
void dev_init_scheduler(struct net_device *dev)
{
netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
- dev_init_scheduler_queue(dev, &dev->rx_queue, NULL);
+ dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}
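/* For context, dev_init_scheduler_queue() just stores the qdisc it is
 * handed into both pointers (2.6.27-era sketch from memory):
 *
 *	static void dev_init_scheduler_queue(struct net_device *dev,
 *					     struct netdev_queue *dev_queue,
 *					     void *_qdisc)
 *	{
 *		struct Qdisc *qdisc = _qdisc;
 *
 *		dev_queue->qdisc = qdisc;
 *		dev_queue->qdisc_sleeping = qdisc;
 *	}
 *
 * Seeding dev->rx_queue with &noop_qdisc rather than NULL presumably
 * keeps any code that dereferences dev_queue->qdisc from seeing NULL.
 */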
struct Qdisc *qdisc_default = _qdisc_default;
if (qdisc) {
- spinlock_t *root_lock = qdisc_root_lock(qdisc);
+ spinlock_t *root_lock = qdisc_lock(qdisc);
dev_queue->qdisc = qdisc_default;
dev_queue->qdisc_sleeping = qdisc_default;
- spin_lock(root_lock);
+ spin_lock_bh(root_lock);
qdisc_destroy(qdisc);
- spin_unlock(root_lock);
+ spin_unlock_bh(root_lock);
}
}
void dev_shutdown(struct net_device *dev)
{
netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
- shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
- BUG_TRAP(!timer_pending(&dev->watchdog_timer));
+ shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
+ WARN_ON(timer_pending(&dev->watchdog_timer));
}