/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:  ECN support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>
/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen >qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than ram size).
	Really, this limit will never be reached
	if RED works correctly.
 */
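
/*
 * Illustrative only (not part of the original source): under the
 * constraint described above, a plausible userspace configuration via
 * tc(8) could look like the following; the device name and all
 * byte/packet values are made-up example numbers.
 *
 *	tc qdisc add dev eth0 root red limit 400000 \
 *		min 30000 max 90000 avpkt 1000 burst 55 \
 *		probability 0.02 bandwidth 10Mbit ecn
 *
 * Here limit (400000 bytes) sits well above qth_max (90000 bytes) plus
 * the burst allowance, matching the "must be > qth_max + burst" rule.
 */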
struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;		/* TC_RED_ECN / TC_RED_HARDDROP */
	struct red_parms	parms;
	struct red_stats	stats;
	struct Qdisc		*qdisc;		/* attached child (bfifo) qdisc */
};
static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	/* Update the moving average of the queue length from the
	 * child's current backlog. */
	q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	switch (red_action(&q->parms, q->parms.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}
		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	ret = qdisc_enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		sch->qstats.drops++;
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
	} else if (!red_is_idling(&q->parms)) {
		/* Nothing to send: note when the idle period began. */
		red_start_of_idle_period(&q->parms);
	}
	return skb;
}
static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}
static unsigned int red_drop(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;

	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
		q->stats.other++;
		sch->qstats.drops++;
		sch->q.qlen--;
		return len;
	}

	if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return 0;
}
static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	red_restart(&q->parms);
}
static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_destroy(q->qdisc);
}
static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
};
static int red_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	struct Qdisc *child = NULL;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy);
	if (err < 0)
		return err;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	ctl = nla_data(tb[TCA_RED_PARMS]);

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
		if (IS_ERR(child))
			return PTR_ERR(child);
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_RED_STAB]));

	if (skb_queue_empty(&sch->q))
		red_end_of_idle_period(&q->parms);

	sch_tree_unlock(sch);
	return 0;
}
static int red_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return red_change(sch, opt);
}
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}
static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);
	return 0;
}
static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}
static unsigned long red_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}
static void red_put(struct Qdisc *sch, unsigned long arg)
{
}
static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}
static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.get		=	red_get,
	.put		=	red_put,
	.walk		=	red_walk,
	.dump		=	red_dump_class,
};
static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.drop		=	red_drop,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};
static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}
module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");