/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim <hadi@nortel.com> 980914: computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim <hadi@nortelnetworks.com> 980816: ECN support
 */

#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/dsfield.h>
#include <net/red.h>	/* red_parms, red_stats and the red_*() helpers */

/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than RAM size).
	Really, this limit will never be reached
	if RED works correctly.
 */
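
/*	Illustrative configuration from user space (hypothetical values;
	the exact option names depend on the installed iproute2 version):

	    tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
		avpkt 1000 burst 55 probability 0.02 bandwidth 10Mbit ecn

	"limit" is the hard byte limit described above; min/max become
	qth_min/qth_max, and "ecn" sets TC_RED_ECN in q->flags.
 */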

struct red_sched_data
{
	u32			limit;		/* HARD maximal queue length	*/
	unsigned char		flags;
	struct red_parms	parms;
	struct red_stats	stats;
};
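
/* ECN marking is attempted only when the user enabled it via the
 * TC_RED_ECN flag; otherwise an over-threshold packet is dropped. */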
static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}
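
/* On every enqueue the average queue size is refreshed and fed to
 * red_action(), which yields one of three outcomes:
 *
 *   RED_DONT_MARK - qavg below qth_min, accept the packet unchanged;
 *   RED_PROB_MARK - qavg between the thresholds, probabilistically
 *                   mark (ECN) or drop;
 *   RED_HARD_MARK - qavg above qth_max, always mark or drop.
 *
 * red_calc_qavg() (net/red.h) keeps qavg as a fixed point EWMA with
 * Wlog fractional bits, i.e. avg <- avg + (backlog - avg) * 2^(-Wlog).
 */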
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->parms.qavg = red_calc_qavg(&q->parms, sch->qstats.backlog);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	switch (red_action(&q->parms, q->parms.qavg)) {
	case RED_DONT_MARK:
		break;
	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}
		q->stats.prob_mark++;
		break;
	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	if (sch->qstats.backlog + skb->len <= q->limit) {
		__skb_queue_tail(&sch->q, skb);
		sch->qstats.backlog += skb->len;
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	kfree_skb(skb);
	sch->qstats.drops++;
	return NET_XMIT_DROP;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}
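
/* A requeued packet means the device could not take it after all, so
 * the link is not idle; close any idle period before putting the
 * packet back at the head of the queue. */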
static int red_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	__skb_queue_head(&sch->q, skb);
	sch->qstats.backlog += skb->len;
	sch->qstats.requeues++;
	return 0;
}
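
/* When the queue runs empty the idle period starts, so that the EWMA
 * can later be aged down for the time the link stayed unused. */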
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);

	skb = __skb_dequeue(&sch->q);
	if (skb) {
		sch->qstats.backlog -= skb->len;
		return skb;
	}

	red_start_of_idle_period(&q->parms);
	return NULL;
}
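
/* ->drop() removes from the tail, where the damage done by dropping
 * is smallest; such drops are accounted as "other". */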
static unsigned int red_drop(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = __skb_dequeue_tail(&sch->q);

	if (skb) {
		unsigned int len = skb->len;
		sch->qstats.backlog -= len;
		q->stats.other++;
		sch->qstats.drops++;
		kfree_skb(skb);
		return len;
	}

	red_start_of_idle_period(&q->parms);
	return 0;
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	__skb_queue_purge(&sch->q);
	sch->qstats.backlog = 0;
	red_restart(&q->parms);
}
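
/* Configuration arrives as nested netlink attributes: TCA_RED_PARMS
 * carries a struct tc_red_qopt, TCA_RED_STAB a 256 byte lookup table
 * used to age qavg across idle periods. */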
static int red_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_RED_STAB];
	struct tc_red_qopt *ctl;

	if (opt == NULL ||
	    rtattr_parse_nested(tb, TCA_RED_STAB, opt) ||
	    tb[TCA_RED_PARMS-1] == NULL || tb[TCA_RED_STAB-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_RED_PARMS-1]) < sizeof(*ctl) ||
	    RTA_PAYLOAD(tb[TCA_RED_STAB-1]) < 256)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_RED_PARMS-1]);

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      RTA_DATA(tb[TCA_RED_STAB-1]));

	if (skb_queue_empty(&sch->q))
		red_end_of_idle_period(&q->parms);

	sch_tree_unlock(sch);
	return 0;
}

static int red_init(struct Qdisc *sch, struct rtattr *opt)
{
	return red_change(sch, opt);
}
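
/* qth_min/qth_max are stored scaled by 2^Wlog so they can be compared
 * against the fixed point qavg directly; shift them back before
 * reporting the user-visible values. */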
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb->tail;
	struct rtattr *rta;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	rta = (struct rtattr *)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
	RTA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
	rta->rta_len = skb->tail - b;

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops red_qdisc_ops = {
	.id		= "red",
	.priv_size	= sizeof(struct red_sched_data),
	.enqueue	= red_enqueue,
	.dequeue	= red_dequeue,
	.requeue	= red_requeue,
	.drop		= red_drop,
	.init		= red_init,
	.reset		= red_reset,
	.change		= red_change,
	.dump		= red_dump,
	.dump_stats	= red_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)
MODULE_LICENSE("GPL");