drivers/net/eql.c
/*
 * Equalizer Load-balancer for serial network interfaces.
 *
 * (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
 * NCM: Network and Communications Management, Inc.
 *
 * (c) Copyright 2002 David S. Miller (davem@redhat.com)
 *
 *      This software may be used and distributed according to the terms
 *      of the GNU General Public License, incorporated herein by reference.
 *
 * The author may be reached as simon@ncm.com, or C/O
 *    NCM
 *    Attn: Simon Janes
 *    6803 Whittier Ave
 *    McLean VA 22101
 *    Phone: 1-703-847-0040 ext 103
 */

/*
 * Sources:
 *   skeleton.c by Donald Becker.
 * Inspirations:
 *   The Harried and Overworked Alan Cox
 * Conspiracies:
 *   The Alan Cox and Mike McLagan plot to get someone else to do the code,
 *   which turned out to be me.
 */

/*
 * $Log: eql.c,v $
 * Revision 1.2  1996/04/11 17:51:52  guru
 * Added one-line eql_remove_slave patch.
 *
 * Revision 1.1  1996/04/11 17:44:17  guru
 * Initial revision
 *
 * Revision 3.13  1996/01/21  15:17:18  alan
 * tx_queue_len changes.
 * reformatted.
 *
 * Revision 3.12  1995/03/22  21:07:51  anarchy
 * Added capable() checks on configuration.
 * Moved header file.
 *
 * Revision 3.11  1995/01/19  23:14:31  guru
 *                    slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *                      (priority_Bps) + bytes_queued * 8;
 *
 * Revision 3.10  1995/01/19  23:07:53  guru
 * back to
 *                    slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *                      (priority_Bps) + bytes_queued;
 *
 * Revision 3.9  1995/01/19  22:38:20  guru
 *                    slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *                      (priority_Bps) + bytes_queued * 4;
 *
 * Revision 3.8  1995/01/19  22:30:55  guru
 *       slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *                      (priority_Bps) + bytes_queued * 2;
 *
 * Revision 3.7  1995/01/19  21:52:35  guru
 * printk's trimmed out.
 *
 * Revision 3.6  1995/01/19  21:49:56  guru
 * This is working pretty well. I gained 1 K/s in speed.. now it's just
 * robustness and printk's to be diked out.
 *
 * Revision 3.5  1995/01/18  22:29:59  guru
 * still crashes the kernel when the lock_wait thing is woken up.
 *
 * Revision 3.4  1995/01/18  21:59:47  guru
 * Broken set-bit locking snapshot
 *
 * Revision 3.3  1995/01/17  22:09:18  guru
 * infinite sleep in a lock somewhere..
 *
 * Revision 3.2  1995/01/15  16:46:06  guru
 * Log trimmed of non-pertinent 1.x branch messages
 *
 * Revision 3.1  1995/01/15  14:41:45  guru
 * New Scheduler and timer stuff...
 *
 * Revision 1.15  1995/01/15  14:29:02  guru
 * Will make 1.14 (now 1.15) the 3.0 branch, and the 1.12 the 2.0 branch, the one
 * with the dumber scheduler
 *
 * Revision 1.14  1995/01/15  02:37:08  guru
 * shock.. the kept-new-versions could have zonked working
 * stuff.. shudder
 *
 * Revision 1.13  1995/01/15  02:36:31  guru
 * big changes
 *
 *      scheduler was torn out and replaced with something smarter
 *
 *      global names not prefixed with eql_ were renamed to protect
 *      against namespace collisions
 *
 *      a few more abstract interfaces were added to facilitate any
 *      potential change of datastructure.  the driver is still using
 *      a linked list of slaves.  going to a heap would be a bit of
 *      an overkill.
 *
 *      this compiles fine with no warnings.
 *
 *      the locking mechanism and timer stuff must be written however,
 *      this version will not work otherwise
 *
 * Sorry, I had to rewrite most of this for 2.5.x -DaveM
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>

#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_eql.h>

#include <asm/uaccess.h>

static int eql_open(struct net_device *dev);
static int eql_close(struct net_device *dev);
static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);

#define eql_is_slave(dev)       ((dev->flags & IFF_SLAVE) == IFF_SLAVE)
#define eql_is_master(dev)      ((dev->flags & IFF_MASTER) == IFF_MASTER)

static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);

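/*
 * Periodic housekeeping for the master: every EQL_DEFAULT_RESCHED_IVAL
 * jiffies the timer walks the slave list, decays each live slave's
 * bytes_queued by its priority_Bps (modelling the link draining its
 * backlog at the configured rate) and reaps any slave whose device is
 * no longer up.  The timer re-arms itself for as long as the master
 * device stays open.
 */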
static void eql_timer(unsigned long param)
{
        equalizer_t *eql = (equalizer_t *) param;
        struct list_head *this, *tmp, *head;

        spin_lock_bh(&eql->queue.lock);
        head = &eql->queue.all_slaves;
        list_for_each_safe(this, tmp, head) {
                slave_t *slave = list_entry(this, slave_t, list);

                if ((slave->dev->flags & IFF_UP) == IFF_UP) {
                        slave->bytes_queued -= slave->priority_Bps;
                        if (slave->bytes_queued < 0)
                                slave->bytes_queued = 0;
                } else {
                        eql_kill_one_slave(&eql->queue, slave);
                }

        }
        spin_unlock_bh(&eql->queue.lock);

        eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;
        add_timer(&eql->timer);
}

static const char version[] __initconst =
        "Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)";

static const struct net_device_ops eql_netdev_ops = {
        .ndo_open       = eql_open,
        .ndo_stop       = eql_close,
        .ndo_do_ioctl   = eql_ioctl,
        .ndo_start_xmit = eql_slave_xmit,
};

static void __init eql_setup(struct net_device *dev)
{
        equalizer_t *eql = netdev_priv(dev);

        init_timer(&eql->timer);
        eql->timer.data         = (unsigned long) eql;
        eql->timer.expires      = jiffies + EQL_DEFAULT_RESCHED_IVAL;
        eql->timer.function     = eql_timer;

        spin_lock_init(&eql->queue.lock);
        INIT_LIST_HEAD(&eql->queue.all_slaves);
        eql->queue.master_dev   = dev;

        dev->netdev_ops         = &eql_netdev_ops;

        /*
         *      Now we undo some of the things that eth_setup does
         *      that we don't like
         */

        dev->mtu                = EQL_DEFAULT_MTU;      /* set to 576 in if_eql.h */
        dev->flags              = IFF_MASTER;

        dev->type               = ARPHRD_SLIP;
        dev->tx_queue_len       = 5;            /* Hands them off fast */
        dev->priv_flags        &= ~IFF_XMIT_DST_RELEASE;
}

static int eql_open(struct net_device *dev)
{
        equalizer_t *eql = netdev_priv(dev);

        /* XXX We should force this off automatically for the user. */
        netdev_info(dev,
                    "remember to turn off Van-Jacobson compression on your slave devices\n");

        BUG_ON(!list_empty(&eql->queue.all_slaves));

        eql->min_slaves = 1;
        eql->max_slaves = EQL_DEFAULT_MAX_SLAVES; /* 4 usually... */

        add_timer(&eql->timer);

        return 0;
}

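/*
 * Unlink a slave: remove it from the queue, clear IFF_SLAVE on the
 * underlying device, drop the reference taken at enslave time and free
 * the slave_t.  Callers must hold queue->lock.
 */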
static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)
{
        list_del(&slave->list);
        queue->num_slaves--;
        slave->dev->flags &= ~IFF_SLAVE;
        dev_put(slave->dev);
        kfree(slave);
}

static void eql_kill_slave_queue(slave_queue_t *queue)
{
        struct list_head *head, *tmp, *this;

        spin_lock_bh(&queue->lock);

        head = &queue->all_slaves;
        list_for_each_safe(this, tmp, head) {
                slave_t *s = list_entry(this, slave_t, list);

                eql_kill_one_slave(queue, s);
        }

        spin_unlock_bh(&queue->lock);
}

static int eql_close(struct net_device *dev)
{
        equalizer_t *eql = netdev_priv(dev);

        /*
         *      The timer has to be stopped first before we start hacking away
         *      at the data structure it scans every so often...
         */

        del_timer_sync(&eql->timer);

        eql_kill_slave_queue(&eql->queue);

        return 0;
}

static int eql_enslave(struct net_device *dev,  slaving_request_t __user *srq);
static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq);

static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *sc);
static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *sc);

static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mc);
static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mc);

static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        if (cmd != EQL_GETMASTRCFG && cmd != EQL_GETSLAVECFG &&
            !capable(CAP_NET_ADMIN))
                return -EPERM;

        switch (cmd) {
                case EQL_ENSLAVE:
                        return eql_enslave(dev, ifr->ifr_data);
                case EQL_EMANCIPATE:
                        return eql_emancipate(dev, ifr->ifr_data);
                case EQL_GETSLAVECFG:
                        return eql_g_slave_cfg(dev, ifr->ifr_data);
                case EQL_SETSLAVECFG:
                        return eql_s_slave_cfg(dev, ifr->ifr_data);
                case EQL_GETMASTRCFG:
                        return eql_g_master_cfg(dev, ifr->ifr_data);
                case EQL_SETMASTRCFG:
                        return eql_s_master_cfg(dev, ifr->ifr_data);
                default:
                        return -EOPNOTSUPP;
        }
}

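/*
 * Slave selection.  Each pass computes, for every live slave,
 *
 *      slave_load = (~0UL - ~0UL / 2) - priority_Bps + bytes_queued * 8;
 *
 * i.e. a fixed mid-range baseline, lowered by the configured rate and
 * raised by eight per byte still charged against the slave, and returns
 * the slave with the smallest value: the fastest link with the least
 * outstanding data.  Slaves whose device has gone down are reaped during
 * the scan.
 */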
/* queue->lock must be held */
static slave_t *__eql_schedule_slaves(slave_queue_t *queue)
{
        unsigned long best_load = ~0UL;
        struct list_head *this, *tmp, *head;
        slave_t *best_slave;

        best_slave = NULL;

        /* Make a pass to set the best slave. */
        head = &queue->all_slaves;
        list_for_each_safe(this, tmp, head) {
                slave_t *slave = list_entry(this, slave_t, list);
                unsigned long slave_load, bytes_queued, priority_Bps;

                /* Go through the slave list once, updating best_slave
                 * whenever a new best_load is found.
                 */
                bytes_queued = slave->bytes_queued;
                priority_Bps = slave->priority_Bps;
                if ((slave->dev->flags & IFF_UP) == IFF_UP) {
                        slave_load = (~0UL - (~0UL / 2)) -
                                (priority_Bps) + bytes_queued * 8;

                        if (slave_load < best_load) {
                                best_load = slave_load;
                                best_slave = slave;
                        }
                } else {
                        /* We found a dead slave, kill it. */
                        eql_kill_one_slave(queue, slave);
                }
        }
        return best_slave;
}

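/*
 * Transmit on the master: pick the least loaded live slave, retarget the
 * skb at its device, charge skb->len to that slave's bytes_queued and pass
 * the skb on via dev_queue_xmit().  With no usable slave the packet is
 * dropped and accounted in tx_dropped.
 */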
static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
        equalizer_t *eql = netdev_priv(dev);
        slave_t *slave;

        spin_lock(&eql->queue.lock);

        slave = __eql_schedule_slaves(&eql->queue);
        if (slave) {
                struct net_device *slave_dev = slave->dev;

                skb->dev = slave_dev;
                skb->priority = 1;
                slave->bytes_queued += skb->len;
                dev_queue_xmit(skb);
                dev->stats.tx_packets++;
        } else {
                dev->stats.tx_dropped++;
                dev_kfree_skb(skb);
        }

        spin_unlock(&eql->queue.lock);

        return NETDEV_TX_OK;
}

/*
 *      Private ioctl functions
 */

/* queue->lock must be held */
static slave_t *__eql_find_slave_dev(slave_queue_t *queue, struct net_device *dev)
{
        struct list_head *this, *head;

        head = &queue->all_slaves;
        list_for_each(this, head) {
                slave_t *slave = list_entry(this, slave_t, list);

                if (slave->dev == dev)
                        return slave;
        }

        return NULL;
}

static inline int eql_is_full(slave_queue_t *queue)
{
        equalizer_t *eql = netdev_priv(queue->master_dev);

        if (queue->num_slaves >= eql->max_slaves)
                return 1;
        return 0;
}

/* queue->lock must be held */
static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
{
        if (!eql_is_full(queue)) {
                slave_t *duplicate_slave = NULL;

                duplicate_slave = __eql_find_slave_dev(queue, slave->dev);
                if (duplicate_slave)
                        eql_kill_one_slave(queue, duplicate_slave);

                list_add(&slave->list, &queue->all_slaves);
                queue->num_slaves++;
                slave->dev->flags |= IFF_SLAVE;

                return 0;
        }

        return -ENOSPC;
}

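/*
 * EQL_ENSLAVE: attach a device to the master.  The master must be up, and
 * the candidate may be neither a master nor already a slave.  The requested
 * priority is the slave's line rate in bits per second; it is stored both
 * as priority_bps and, divided by eight, as priority_Bps, which is what the
 * scheduler and the decay timer actually use.
 */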
static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *srqp)
{
        struct net_device *slave_dev;
        slaving_request_t srq;

        if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
                return -EFAULT;

        slave_dev  = dev_get_by_name(&init_net, srq.slave_name);
        if (slave_dev) {
                if ((master_dev->flags & IFF_UP) == IFF_UP) {
                        /* slave is not a master & not already a slave: */
                        if (!eql_is_master(slave_dev) &&
                            !eql_is_slave(slave_dev)) {
                                slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL);
                                equalizer_t *eql = netdev_priv(master_dev);
                                int ret;

                                if (!s) {
                                        dev_put(slave_dev);
                                        return -ENOMEM;
                                }

                                memset(s, 0, sizeof(*s));
                                s->dev = slave_dev;
                                s->priority = srq.priority;
                                s->priority_bps = srq.priority;
                                s->priority_Bps = srq.priority / 8;

                                spin_lock_bh(&eql->queue.lock);
                                ret = __eql_insert_slave(&eql->queue, s);
                                if (ret) {
                                        dev_put(slave_dev);
                                        kfree(s);
                                }
                                spin_unlock_bh(&eql->queue.lock);

                                return ret;
                        }
                }
                dev_put(slave_dev);
        }

        return -EINVAL;
}

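/*
 * Illustration only, not part of the driver: a minimal userspace sketch of
 * how a management tool would drive EQL_ENSLAVE (EQL_EMANCIPATE works the
 * same way, naming the slave to detach).  It assumes the definitions from
 * <linux/if_eql.h> and a master interface named "eql"; the surrounding
 * program and the device names are hypothetical.
 *
 *      struct ifreq ifr;
 *      slaving_request_t srq;
 *      int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *      memset(&ifr, 0, sizeof(ifr));
 *      memset(&srq, 0, sizeof(srq));
 *      strncpy(ifr.ifr_name, "eql", IFNAMSIZ);
 *      strncpy(srq.slave_name, "ppp0", IFNAMSIZ);
 *      srq.priority = 57600;                   /* slave line rate, bits/s */
 *      ifr.ifr_data = (caddr_t) &srq;
 *      if (ioctl(fd, EQL_ENSLAVE, &ifr) < 0)
 *              perror("EQL_ENSLAVE");
 */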
static int eql_emancipate(struct net_device *master_dev, slaving_request_t __user *srqp)
{
        equalizer_t *eql = netdev_priv(master_dev);
        struct net_device *slave_dev;
        slaving_request_t srq;
        int ret;

        if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
                return -EFAULT;

        slave_dev = dev_get_by_name(&init_net, srq.slave_name);
        ret = -EINVAL;
        if (slave_dev) {
                spin_lock_bh(&eql->queue.lock);

                if (eql_is_slave(slave_dev)) {
                        slave_t *slave = __eql_find_slave_dev(&eql->queue,
                                                              slave_dev);

                        if (slave) {
                                eql_kill_one_slave(&eql->queue, slave);
                                ret = 0;
                        }
                }
                dev_put(slave_dev);

                spin_unlock_bh(&eql->queue.lock);
        }

        return ret;
}

static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
        equalizer_t *eql = netdev_priv(dev);
        slave_t *slave;
        struct net_device *slave_dev;
        slave_config_t sc;
        int ret;

        if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
                return -EFAULT;

        slave_dev = dev_get_by_name(&init_net, sc.slave_name);
        if (!slave_dev)
                return -ENODEV;

        ret = -EINVAL;

        spin_lock_bh(&eql->queue.lock);
        if (eql_is_slave(slave_dev)) {
                slave = __eql_find_slave_dev(&eql->queue, slave_dev);
                if (slave) {
                        sc.priority = slave->priority;
                        ret = 0;
                }
        }
        spin_unlock_bh(&eql->queue.lock);

        dev_put(slave_dev);

        if (!ret && copy_to_user(scp, &sc, sizeof (slave_config_t)))
                ret = -EFAULT;

        return ret;
}

static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
        slave_t *slave;
        equalizer_t *eql;
        struct net_device *slave_dev;
        slave_config_t sc;
        int ret;

        if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
                return -EFAULT;

        slave_dev = dev_get_by_name(&init_net, sc.slave_name);
        if (!slave_dev)
                return -ENODEV;

        ret = -EINVAL;

        eql = netdev_priv(dev);
        spin_lock_bh(&eql->queue.lock);
        if (eql_is_slave(slave_dev)) {
                slave = __eql_find_slave_dev(&eql->queue, slave_dev);
                if (slave) {
                        slave->priority = sc.priority;
                        slave->priority_bps = sc.priority;
                        slave->priority_Bps = sc.priority / 8;
                        ret = 0;
                }
        }
        spin_unlock_bh(&eql->queue.lock);

        dev_put(slave_dev);

        return ret;
}

static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
        equalizer_t *eql;
        master_config_t mc;

        memset(&mc, 0, sizeof(master_config_t));

        if (eql_is_master(dev)) {
                eql = netdev_priv(dev);
                mc.max_slaves = eql->max_slaves;
                mc.min_slaves = eql->min_slaves;
                if (copy_to_user(mcp, &mc, sizeof (master_config_t)))
                        return -EFAULT;
                return 0;
        }
        return -EINVAL;
}

static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
        equalizer_t *eql;
        master_config_t mc;

        if (copy_from_user(&mc, mcp, sizeof (master_config_t)))
                return -EFAULT;

        if (eql_is_master(dev)) {
                eql = netdev_priv(dev);
                eql->max_slaves = mc.max_slaves;
                eql->min_slaves = mc.min_slaves;
                return 0;
        }
        return -EINVAL;
}

static struct net_device *dev_eql;

static int __init eql_init_module(void)
{
        int err;

        pr_info("%s\n", version);

        dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup);
        if (!dev_eql)
                return -ENOMEM;

        err = register_netdev(dev_eql);
        if (err)
                free_netdev(dev_eql);
        return err;
}

static void __exit eql_cleanup_module(void)
{
        unregister_netdev(dev_eql);
        free_netdev(dev_eql);
}

module_init(eql_init_module);
module_exit(eql_cleanup_module);
MODULE_LICENSE("GPL");