/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: - Bug fix with grio mode
 *                     - a better single AvgQ mode with Grio (WRED)
 *                     - A finer grained VQ dequeue based on a suggestion
 *                       from Ren Liu
 *                     - More error checks
 *
 * For all the glorious comments look at include/net/red.h
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)

struct gred_sched_data;
struct gred_sched;

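/* Per-virtual-queue (VQ) state, one instance per drop parameter set (DP). */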
struct gred_sched_data
{
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* the drop parameters */
	u32		bytesin;	/* bytes seen on virtualQ so far */
	u32		packetsin;	/* packets seen on virtualQ so far */
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_stats stats;
};

enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

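/* Qdisc private data: the table of up to MAX_DPs virtual queues. */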
struct gred_sched
{
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_parms wred_set;
};

static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}

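/*
 * WRED mode is only meaningful if at least two VQs share a priority;
 * return 1 if such a pair exists so the caller may enable WRED mode.
 */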
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2), but it shouldn't run too frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = 0; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n] != q &&
			    table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}

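/*
 * In WRED mode all VQs share the qdisc-wide backlog; otherwise each VQ
 * accounts for its own bytes.
 */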
static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}

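/* The low order bits of skb->tc_index select the virtual queue (DP). */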
static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}

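/*
 * In WRED mode a single shared average queue state (wred_set) is kept in
 * the table; it is loaded into the VQ before the qavg computation and the
 * result is stored back afterwards.
 */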
static inline void gred_load_wred_set(struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->parms.qavg = table->wred_set.qavg;
	q->parms.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->parms.qavg;
}

static inline int gred_use_ecn(struct gred_sched *t)
{
	return t->red_flags & TC_RED_ECN;
}

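/*
 * Classify the packet into a VQ, update the average queue length and let
 * the RED algorithm decide whether to queue, mark (ECN) or drop it.
 */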
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		if ((q = t->tab[dp]) == NULL) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (skb_queue_len(&sch->q) < sch->dev->tx_queue_len)
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controversial but needed for
		   requeueing */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up the qavgs of all VQs with prios strictly lower than ours
	 * to get the new qavg */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->parms))
				qavg += t->tab[i]->parms.qavg;
		}
	}

	q->packetsin++;
	q->bytesin += skb->len;

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, q->parms.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	if (q->backlog + skb->len <= q->limit) {
		q->backlog += skb->len;
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}

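/*
 * Requeue a packet at the head after a failed transmit; restore the VQ
 * backlog that gred_dequeue() already subtracted.
 */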
static int gred_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct gred_sched *t = qdisc_priv(sch);
	struct gred_sched_data *q;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		if (net_ratelimit())
			printk(KERN_WARNING "GRED: Unable to relocate VQ 0x%x "
			       "for requeue, screwing up backlog.\n",
			       tc_index_to_dp(skb));
	} else {
		if (red_is_idling(&q->parms))
			red_end_of_idle_period(&q->parms);
		q->backlog += skb->len;
	}

	return qdisc_requeue(skb, sch);
}

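/*
 * Dequeue from the head of the shared queue and charge the packet to its
 * VQ; start the RED idle period once the VQ (or, in WRED mode, the whole
 * qdisc) drains empty.
 */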
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				printk(KERN_WARNING "GRED: Unable to relocate "
				       "VQ 0x%x after dequeue, screwing up "
				       "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= skb->len;

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->parms);
		}

		return skb;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return NULL;
}

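/*
 * ->drop callback: reclaim space by dropping one packet from the tail,
 * accounting the drop against the owning VQ.
 */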
static unsigned int gred_drop(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_tail(sch);
	if (skb) {
		unsigned int len = skb->len;
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				printk(KERN_WARNING "GRED: Unable to relocate "
				       "VQ 0x%x while dropping, screwing up "
				       "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= len;
			q->stats.other++;

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->parms);
		}

		qdisc_drop(skb, sch);
		return len;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return 0;
}

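/* Flush the queue and restart the RED state of every configured VQ. */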
static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->parms);
		q->backlog = 0;
	}
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}

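/*
 * Apply a table-wide "setup" request: set the number of DPs, the default
 * DP, the RED flags and the operating mode, then destroy any VQs that are
 * now shadowed (beyond the new DP count).
 */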
static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (dps == NULL || RTA_PAYLOAD(dps) < sizeof(*sopt))
		return -EINVAL;

	sopt = RTA_DATA(dps);

	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			printk(KERN_WARNING "GRED: Warning: Destroying "
			       "shadowed VQ 0x%x\n", i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	return 0;
}

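/*
 * (Re)configure a single VQ. Called under sch_tree_lock(), hence the
 * atomic allocation below.
 */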
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio, u8 *stab)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q;

	if (table->tab[dp] == NULL) {
		/* must not sleep here: the caller holds sch_tree_lock() */
		table->tab[dp] = kmalloc(sizeof(*q), GFP_ATOMIC);
		if (table->tab[dp] == NULL)
			return -ENOMEM;
		memset(table->tab[dp], 0, sizeof(*q));
	}

	q = table->tab[dp];
	q->DP = dp;
	q->prio = prio;
	q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->parms);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab);

	return 0;
}

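/*
 * Parse a change request: either a table-wide setup (no PARMS/STAB
 * attributes, the tc_gred_sopt carried directly as the TCA_OPTIONS
 * payload) or an update of one VQ's RED parameters.
 */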
static int gred_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct rtattr *tb[TCA_GRED_MAX];
	int err = -EINVAL, prio = GRED_DEF_PRIO;
	u8 *stab;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_GRED_PARMS-1] == NULL && tb[TCA_GRED_STAB-1] == NULL)
		return gred_change_table_def(sch, opt);

	if (tb[TCA_GRED_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
	    tb[TCA_GRED_STAB-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
	stab = RTA_DATA(tb[TCA_GRED_STAB-1]);

	if (ctl->DP >= table->DPs)
		goto errout;

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
	if (err < 0)
		goto errout_locked;

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	err = 0;

errout_locked:
	sch_tree_unlock(sch);
errout:
	return err;
}

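/* Initial setup; only the table-wide defaults may be given at init time. */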
static int gred_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct rtattr *tb[TCA_GRED_MAX];

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_GRED_PARMS-1] || tb[TCA_GRED_STAB-1])
		return -EINVAL;

	return gred_change_table_def(sch, tb[TCA_GRED_DPS-1]);
}

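/*
 * Dump the table setup and one tc_gred_qopt per possible DP; unconfigured
 * DPs are flagged by reporting a DP value of MAX_DPs + i.
 */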
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct rtattr *parms, *opts = NULL;
	int i;
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	opts = RTA_NEST(skb, TCA_OPTIONS);
	RTA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
	parms = RTA_NEST(skb, TCA_GRED_PARMS);

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with a proper message.
			   This is how we indicate to tc that there is no VQ
			   at this DP */

			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= q->backlog;
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table)) {
			q->parms.qidlestart =
				table->tab[table->def]->parms.qidlestart;
			q->parms.qavg = table->tab[table->def]->parms.qavg;
		}

		opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);

append_opt:
		RTA_APPEND(skb, sizeof(opt), &opt);
	}

	RTA_NEST_END(skb, parms);

	return RTA_NEST_END(skb, opts);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, opts);
}

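/* Free every VQ when the qdisc itself is torn down. */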
static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}

static struct Qdisc_ops gred_qdisc_ops = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.requeue	=	gred_requeue,
	.drop		=	gred_drop,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");