/*
 * net/sched/sch_choke.c	CHOKE scheduler
 *
 * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/reciprocal_div.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
/*
   CHOKe stateless AQM for fair bandwidth allocation
   =================================================

   CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
   unresponsive flows) is a variant of RED that penalizes misbehaving flows
   but maintains no flow state. The difference from RED is an additional
   step during the enqueuing process. If the average queue size is over the
   low threshold (qmin), a packet is chosen at random from the queue.
   If both the new and the chosen packet are from the same flow, both
   are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because
   it needs to access packets in the queue randomly. It has a minimal class
   interface to allow overriding the builtin flow classifier with
   filters.

   Source:
   R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
   Active Queue Management Scheme for Approximating Fair Bandwidth
   Allocation", Proc. IEEE INFOCOM, 2000.

   A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
   Characteristics", IEEE/ACM Transactions on Networking, 2004
 */
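/*
 * Usage sketch (not part of this file; assumes an iproute2 build with
 * CHOKe support, see tc-choke(8) for the exact option names): the qdisc
 * takes RED-style parameters, with limit/min/max counted in packets, e.g.
 *
 *	tc qdisc add dev eth0 root choke limit 1000 bandwidth 10mbit ecn
 */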
/* Upper bound on size of sk_buff table (packets) */
#define CHOKE_MAX_QUEUE	(128*1024 - 1)
struct choke_sched_data {
	/* Parameters */
	u32		 limit;
	unsigned char	 flags;
	struct red_parms parms;
	/* Variables */
	struct tcf_proto *filter_list;
	struct {
		u32	prob_drop;	/* Early probability drops */
		u32	prob_mark;	/* Early probability marks */
		u32	forced_drop;	/* Forced drops, qavg > max_thresh */
		u32	forced_mark;	/* Forced marks, qavg > max_thresh */
		u32	pdrop;		/* Drops due to queue limits */
		u32	other;		/* Drops due to drop() calls */
		u32	matched;	/* Drops due to flow match */
	} stats;
	unsigned int	 head;
	unsigned int	 tail;
	unsigned int	 tab_mask;	/* size - 1 */
	struct sk_buff **tab;
};
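/*
 * The packet table tab[] is a power-of-two ring buffer: head indexes the
 * oldest packet, tail is one slot past the newest, and indices wrap via
 * "& tab_mask". Random drops leave NULL "holes" in the ring, which the
 * zap helpers below skip over.
 */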
/* deliver a random number between 0 and N - 1 */
static u32 random_N(unsigned int N)
{
	return reciprocal_divide(random32(), N);
}
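/*
 * reciprocal_divide(x, N) computes ((u64)x * N) >> 32, so a uniform
 * 32-bit random32() value maps to an (almost) uniform index in [0, N).
 * For example, random_N(8) with random32() == 0x80000000 yields 4.
 */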
/* number of elements in queue including holes */
static unsigned int choke_len(const struct choke_sched_data *q)
{
	return (q->tail - q->head) & q->tab_mask;
}
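/*
 * Example: with tab_mask == 7 (table of 8), head == 6 and tail == 2,
 * choke_len() returns (2 - 6) & 7 == 4, covering slots 6, 7, 0 and 1.
 */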
/* Is ECN parameter configured */
static int use_ecn(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* Should packets over max just be dropped (versus marked) */
static int use_harddrop(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}
/* Move head pointer forward to skip over holes */
static void choke_zap_head_holes(struct choke_sched_data *q)
{
	do {
		q->head = (q->head + 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->head] == NULL);
}

/* Move tail pointer backwards to reuse holes */
static void choke_zap_tail_holes(struct choke_sched_data *q)
{
	do {
		q->tail = (q->tail - 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->tail] == NULL);
}
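/*
 * Example: if the ring holds { A, NULL, NULL, B } with head == 0, then
 * after A's slot is cleared, choke_zap_head_holes() advances head past
 * both holes so it lands directly on B.
 */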
/* Drop packet from queue array by creating a "hole" */
static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = q->tab[idx];

	q->tab[idx] = NULL;

	if (idx == q->head)
		choke_zap_head_holes(q);
	if (idx == q->tail)
		choke_zap_tail_holes(q);

	sch->qstats.backlog -= qdisc_pkt_len(skb);
	qdisc_drop(skb, sch);
	qdisc_tree_decrease_qlen(sch, 1);
	--sch->q.qlen;
}
/*
 * Compare flow of two packets
 *  Returns true only if source and destination address and port match.
 *          false for special cases
 */
static bool choke_match_flow(struct sk_buff *skb1,
			     struct sk_buff *skb2)
{
	int off1, off2, poff;
	const u32 *ports1, *ports2;
	u8 ip_proto;
	__u32 hash1;

	if (skb1->protocol != skb2->protocol)
		return false;

	/* Use hash value as quick check
	 * Assumes that __skb_get_rxhash makes IP header and ports linear
	 */
	hash1 = skb_get_rxhash(skb1);
	if (!hash1 || hash1 != skb_get_rxhash(skb2))
		return false;

	/* Probably match, but be sure to avoid hash collisions */
	off1 = skb_network_offset(skb1);
	off2 = skb_network_offset(skb2);

	switch (skb1->protocol) {
	case __constant_htons(ETH_P_IP): {
		const struct iphdr *ip1, *ip2;

		ip1 = (const struct iphdr *) (skb1->data + off1);
		ip2 = (const struct iphdr *) (skb2->data + off2);

		ip_proto = ip1->protocol;
		if (ip_proto != ip2->protocol ||
		    ip1->saddr != ip2->saddr || ip1->daddr != ip2->daddr)
			return false;

		if (ip_is_fragment(ip1) | ip_is_fragment(ip2))
			ip_proto = 0;
		off1 += ip1->ihl * 4;
		off2 += ip2->ihl * 4;
		break;
	}

	case __constant_htons(ETH_P_IPV6): {
		const struct ipv6hdr *ip1, *ip2;

		ip1 = (const struct ipv6hdr *) (skb1->data + off1);
		ip2 = (const struct ipv6hdr *) (skb2->data + off2);

		ip_proto = ip1->nexthdr;
		if (ip_proto != ip2->nexthdr ||
		    ipv6_addr_cmp(&ip1->saddr, &ip2->saddr) ||
		    ipv6_addr_cmp(&ip1->daddr, &ip2->daddr))
			return false;
		off1 += 40;
		off2 += 40;
		break;
	}

	default: /* Maybe compare MAC header here? */
		return false;
	}

	poff = proto_ports_offset(ip_proto);
	if (poff < 0)
		return true;

	off1 += poff;
	off2 += poff;

	ports1 = (__force u32 *)(skb1->data + off1);
	ports2 = (__force u32 *)(skb2->data + off2);
	return *ports1 == *ports2;
}
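/*
 * The comparison is thus a cheap rxhash check first, then a full 5-tuple
 * verification (protocol, addresses, then the 32-bit word holding both
 * ports) to rule out hash collisions. Fragments skip the port compare by
 * forcing ip_proto to 0, for which proto_ports_offset() returns a
 * negative offset, so only the addresses decide the match.
 */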
struct choke_skb_cb {
	u16 classid;
};

static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
	return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
}

static inline void choke_set_classid(struct sk_buff *skb, u16 classid)
{
	choke_skb_cb(skb)->classid = classid;
}

static u16 choke_get_classid(const struct sk_buff *skb)
{
	return choke_skb_cb(skb)->classid;
}
/*
 * Classify flow using either:
 *  1. pre-existing classification result in skb
 *  2. fast internal classification
 *  3. use TC filter based classification
 */
static bool choke_classify(struct sk_buff *skb,
			   struct Qdisc *sch, int *qerr)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return false;
		}
#endif
		choke_set_classid(skb, TC_H_MIN(res.classid));
		return true;
	}

	return false;
}
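/*
 * Sketch of overriding the builtin classifier (exact syntax depends on
 * the installed tc version): packets given the same minor classid by a
 * filter are treated as one flow, e.g.
 *
 *	tc filter add dev eth0 parent 800: protocol ip u32 \
 *		match ip src 10.0.0.0/24 flowid :1
 */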
/*
 * Select a packet at random from queue
 * HACK: since queue can have holes from previous deletion; retry several
 *	 times to find a random skb but then just give up and return the head
 * Will return NULL if queue is empty (q->head == q->tail)
 */
static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
					 unsigned int *pidx)
{
	struct sk_buff *skb;
	int retrys = 3;

	do {
		*pidx = (q->head + random_N(choke_len(q))) & q->tab_mask;
		skb = q->tab[*pidx];
		if (skb)
			return skb;
	} while (--retrys > 0);

	return q->tab[*pidx = q->head];
}
/*
 * Compare new packet with random packet in queue
 * returns true if matched and sets *pidx
 */
static bool choke_match_random(const struct choke_sched_data *q,
			       struct sk_buff *nskb,
			       unsigned int *pidx)
{
	struct sk_buff *oskb;

	if (q->head == q->tail)
		return false;

	oskb = choke_peek_random(q, pidx);
	if (q->filter_list)
		return choke_get_classid(nskb) == choke_get_classid(oskb);

	return choke_match_flow(oskb, nskb);
}
static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct red_parms *p = &q->parms;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (q->filter_list) {
		/* If using external classifiers, get result and record it. */
		if (!choke_classify(skb, sch, &ret))
			goto other_drop;	/* Packet was eaten by filter */
	}

	/* Compute average queue usage (see RED) */
	p->qavg = red_calc_qavg(p, sch->q.qlen);
	if (red_is_idling(p))
		red_end_of_idle_period(p);

	/* Is queue small? */
	if (p->qavg <= p->qth_min)
		p->qcount = -1;
	else {
		unsigned int idx;

		/* Draw a packet at random from queue and compare flow */
		if (choke_match_random(q, skb, &idx)) {
			q->stats.matched++;
			choke_drop_by_idx(sch, idx);
			goto congestion_drop;
		}

		/* Queue is large, always mark/drop */
		if (p->qavg > p->qth_max) {
			p->qcount = -1;

			sch->qstats.overlimits++;
			if (use_harddrop(q) || !use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
		} else if (++p->qcount) {
			if (red_mark_probability(p, p->qavg)) {
				p->qcount = 0;
				p->qR = red_random(p);

				sch->qstats.overlimits++;
				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
					q->stats.prob_drop++;
					goto congestion_drop;
				}

				q->stats.prob_mark++;
			}
		} else
			p->qR = red_random(p);
	}

	/* Admit new packet */
	if (sch->q.qlen < q->limit) {
		q->tab[q->tail] = skb;
		q->tail = (q->tail + 1) & q->tab_mask;
		++sch->q.qlen;
		sch->qstats.backlog += qdisc_pkt_len(skb);
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	sch->qstats.drops++;
	kfree_skb(skb);
	return NET_XMIT_DROP;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;

other_drop:
	if (ret & __NET_XMIT_BYPASS)
		sch->qstats.drops++;
	kfree_skb(skb);
	return ret;
}
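/*
 * Between qth_min and qth_max, RED marks a packet with probability roughly
 *
 *	max_P * (qavg - qth_min) / (qth_max - qth_min)
 *
 * CHOKe adds the random flow-match test on top of that, so an unresponsive
 * flow occupying much of the queue is increasingly likely to have its own
 * packets drawn and dropped in pairs.
 */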
static struct sk_buff *choke_dequeue(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (q->head == q->tail) {
		if (!red_is_idling(&q->parms))
			red_start_of_idle_period(&q->parms);
		return NULL;
	}

	skb = q->tab[q->head];
	q->tab[q->head] = NULL;
	choke_zap_head_holes(q);
	--sch->q.qlen;
	sch->qstats.backlog -= qdisc_pkt_len(skb);
	qdisc_bstats_update(sch, skb);

	return skb;
}
static unsigned int choke_drop(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	unsigned int len;

	len = qdisc_queue_drop(sch);
	if (len > 0)
		q->stats.other++;
	else if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return len;
}
static void choke_reset(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	red_restart(&q->parms);
}
static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
	[TCA_CHOKE_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_CHOKE_STAB]	= { .len = RED_STAB_SIZE },
};
static void choke_free(void *addr)
{
	if (addr) {
		if (is_vmalloc_addr(addr))
			vfree(addr);
		else
			kfree(addr);
	}
}
static int choke_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CHOKE_MAX + 1];
	const struct tc_red_qopt *ctl;
	struct sk_buff **old = NULL;
	unsigned int mask;
	int err;

	if (opt == NULL)
		return -EINVAL;
	err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
	if (err < 0)
		return err;
	if (tb[TCA_CHOKE_PARMS] == NULL || tb[TCA_CHOKE_STAB] == NULL)
		return -EINVAL;

	ctl = nla_data(tb[TCA_CHOKE_PARMS]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
		return -EINVAL;
	if (ctl->limit > CHOKE_MAX_QUEUE)
		return -EINVAL;

	mask = roundup_pow_of_two(ctl->limit + 1) - 1;
	if (mask != q->tab_mask) {
		struct sk_buff **ntab;

		ntab = kcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
		if (!ntab)
			ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
		if (!ntab)
			return -ENOMEM;

		sch_tree_lock(sch);
		old = q->tab;
		if (old) {
			unsigned int oqlen = sch->q.qlen, tail = 0;

			/* Move packets into the new table; drop any overflow */
			while (q->head != q->tail) {
				struct sk_buff *skb = q->tab[q->head];

				q->head = (q->head + 1) & q->tab_mask;
				if (!skb)	/* skip holes */
					continue;
				if (tail < mask) {
					ntab[tail++] = skb;
					continue;
				}
				sch->qstats.backlog -= qdisc_pkt_len(skb);
				--sch->q.qlen;
				qdisc_drop(skb, sch);
			}
			qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
			q->head = 0;
			q->tail = tail;
		}
		q->tab_mask = mask;
		q->tab = ntab;
	} else
		sch_tree_lock(sch);

	q->flags = ctl->flags;
	q->limit = ctl->limit;
	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_CHOKE_STAB]));
	if (q->head == q->tail)
		red_end_of_idle_period(&q->parms);

	sch_tree_unlock(sch);
	choke_free(old);
	return 0;
}
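/*
 * Example: limit == 1000 gives roundup_pow_of_two(1001) == 1024, so
 * tab_mask == 1023. The ring can hold at most tab_mask packets, since
 * head == tail must remain reserved to mean "empty".
 */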
static int choke_init(struct Qdisc *sch, struct nlattr *opt)
{
	return choke_change(sch, opt);
}
static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt);
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tc_choke_xstats st = {
		.early	 = q->stats.prob_drop + q->stats.forced_drop,
		.marked	 = q->stats.prob_mark + q->stats.forced_mark,
		.pdrop	 = q->stats.pdrop,
		.other	 = q->stats.other,
		.matched = q->stats.matched,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
static void choke_destroy(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	choke_free(q->tab);
}
static struct Qdisc *choke_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long choke_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static void choke_put(struct Qdisc *q, unsigned long cl)
{
}

static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent,
				u32 classid)
{
	return 0;
}

static struct tcf_proto **choke_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int choke_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static void choke_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	if (!arg->stop) {
		if (arg->fn(sch, 1, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}
}
static const struct Qdisc_class_ops choke_class_ops = {
	.leaf		=	choke_leaf,
	.get		=	choke_get,
	.put		=	choke_put,
	.tcf_chain	=	choke_find_tcf,
	.bind_tcf	=	choke_bind,
	.unbind_tcf	=	choke_put,
	.dump		=	choke_dump_class,
	.walk		=	choke_walk,
};
static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	return (q->head != q->tail) ? q->tab[q->head] : NULL;
}
static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
	.id		=	"choke",
	.priv_size	=	sizeof(struct choke_sched_data),
	.cl_ops		=	&choke_class_ops,
	.enqueue	=	choke_enqueue,
	.dequeue	=	choke_dequeue,
	.peek		=	choke_peek_head,
	.drop		=	choke_drop,
	.init		=	choke_init,
	.destroy	=	choke_destroy,
	.reset		=	choke_reset,
	.change		=	choke_change,
	.dump		=	choke_dump,
	.dump_stats	=	choke_dump_stats,
	.owner		=	THIS_MODULE,
};
static int __init choke_module_init(void)
{
	return register_qdisc(&choke_qdisc_ops);
}

static void __exit choke_module_exit(void)
{
	unregister_qdisc(&choke_qdisc_ops);
}

module_init(choke_module_init)
module_exit(choke_module_exit)

MODULE_LICENSE("GPL");