/*
 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
 * James Leu (jleu@mindspring.net).
 * Copyright (C) 2001 by various other people who didn't put their name here.
 * Licensed under the GPL.
 */

#include "linux/config.h"
#include "linux/kernel.h"
#include "linux/netdevice.h"
#include "linux/rtnetlink.h"
#include "linux/skbuff.h"
#include "linux/socket.h"
#include "linux/spinlock.h"
#include "linux/module.h"
#include "linux/init.h"
#include "linux/etherdevice.h"
#include "linux/list.h"
#include "linux/inetdevice.h"
#include "linux/ctype.h"
#include "linux/bootmem.h"
#include "linux/ethtool.h"
#include "linux/platform_device.h"
#include "asm/uaccess.h"
#include "user_util.h"
#include "kern_util.h"
#include "net_kern.h"
#include "net_user.h"
#include "mconsole_kern.h"
#include "init.h"
#include "irq_user.h"
#include "irq_kern.h"

#define DRIVER_NAME "uml-netdev"

static DEFINE_SPINLOCK(opened_lock);
static LIST_HEAD(opened);

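/*
 * Pull one packet from the host-side file descriptor into a freshly
 * allocated skb and hand it to the network stack.  Returns the packet
 * length, 0 if nothing was read (or the skb allocation failed), or a
 * negative error code from the transport's read routine.
 */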
static int uml_net_rx(struct net_device *dev)
{
	struct uml_net_private *lp = dev->priv;
	int pkt_len;
	struct sk_buff *skb;

	/* If we can't allocate memory, try again next round. */
	skb = dev_alloc_skb(dev->mtu);
	if (skb == NULL) {
		lp->stats.rx_dropped++;
		return 0;
	}

	skb->dev = dev;
	skb_put(skb, dev->mtu);
	skb->mac.raw = skb->data;
	pkt_len = (*lp->read)(lp->fd, &skb, lp);

	if (pkt_len > 0) {
		skb_trim(skb, pkt_len);
		skb->protocol = (*lp->protocol)(skb);

		/* Account the packet before netif_rx() - the skb must not
		 * be touched once the stack owns it.
		 */
		lp->stats.rx_bytes += skb->len;
		lp->stats.rx_packets++;
		netif_rx(skb);
		return pkt_len;
	}

	kfree_skb(skb);
	return pkt_len;
}

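/*
 * SIGIO interrupt handler - drain all pending packets from the host
 * file descriptor and re-enable the interrupt.  A negative return from
 * uml_net_rx() shuts the device down.
 */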
irqreturn_t uml_net_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct uml_net_private *lp = dev->priv;
	int err;

	if(!netif_running(dev))
		return(IRQ_NONE);

	spin_lock(&lp->lock);
	while((err = uml_net_rx(dev)) > 0) ;
	if(err < 0) {
		printk(KERN_ERR
		       "Device '%s' read returned %d, shutting it down\n",
		       dev->name, err);
		dev_close(dev);
		goto out;
	}
	reactivate_fd(lp->fd, UM_ETH_IRQ);

 out:
	spin_unlock(&lp->lock);
	return(IRQ_HANDLED);
}

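/*
 * Open the host-side transport, derive a MAC from the interface's IP
 * address if none was given on the command line, and register the read
 * IRQ.  Any packets already queued on the host side are drained so
 * that SIGIOs keep arriving.
 */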
static int uml_net_open(struct net_device *dev)
{
	struct uml_net_private *lp = dev->priv;
	int err;

	spin_lock(&lp->lock);

	if(lp->fd >= 0){
		err = -ENXIO;
		goto out;
	}

	if(!lp->have_mac){
		dev_ip_addr(dev, &lp->mac[2]);
		set_ether_mac(dev, lp->mac);
	}

	lp->fd = (*lp->open)(&lp->user);
	if(lp->fd < 0){
		err = lp->fd;
		goto out;
	}

	err = um_request_irq(dev->irq, lp->fd, IRQ_READ, uml_net_interrupt,
			     SA_INTERRUPT | SA_SHIRQ, dev->name, dev);
	if(err != 0){
		printk(KERN_ERR "uml_net_open: failed to get irq(%d)\n", err);
		if(lp->close != NULL) (*lp->close)(lp->fd, &lp->user);
		lp->fd = -1;
		err = -ENETUNREACH;
		goto out;
	}

	lp->tl.data = (unsigned long) &lp->user;
	netif_start_queue(dev);

	/* clear buffer - it can happen that the host side of the interface
	 * is full when we get here.  In this case, new data is never queued,
	 * SIGIOs never arrive, and the net never works.
	 */
	while((err = uml_net_rx(dev)) > 0) ;

 out:
	spin_unlock(&lp->lock);
	return(err);
}

static int uml_net_close(struct net_device *dev)
{
	struct uml_net_private *lp = dev->priv;

	netif_stop_queue(dev);
	spin_lock(&lp->lock);

	free_irq(dev->irq, dev);
	if(lp->close != NULL)
		(*lp->close)(lp->fd, &lp->user);
	lp->fd = -1;
	list_del(&lp->list);

	spin_unlock(&lp->lock);
	return 0;
}

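/*
 * Hand one skb to the transport's write routine.  A short or failed
 * write is not retried - the packet is dropped and the queue is woken
 * again, since the host file descriptor has no completion interrupt.
 */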
static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct uml_net_private *lp = dev->priv;
	unsigned long flags;
	int len;

	netif_stop_queue(dev);

	spin_lock_irqsave(&lp->lock, flags);

	len = (*lp->write)(lp->fd, &skb, lp);

	if(len == skb->len) {
		lp->stats.tx_packets++;
		lp->stats.tx_bytes += skb->len;
		dev->trans_start = jiffies;
		netif_start_queue(dev);

		/* this is normally done in the interrupt when tx finishes */
		netif_wake_queue(dev);
	}
	else if(len == 0){
		netif_start_queue(dev);
		lp->stats.tx_dropped++;
	}
	else {
		netif_start_queue(dev);
		printk(KERN_ERR "uml_net_start_xmit: failed(%d)\n", len);
	}

	spin_unlock_irqrestore(&lp->lock, flags);

	dev_kfree_skb(skb);

	return 0;
}

static struct net_device_stats *uml_net_get_stats(struct net_device *dev)
{
	struct uml_net_private *lp = dev->priv;
	return &lp->stats;
}

static void uml_net_set_multicast_list(struct net_device *dev)
{
	if (dev->flags & IFF_PROMISC) return;
	else if (dev->mc_count) dev->flags |= IFF_ALLMULTI;
	else dev->flags &= ~IFF_ALLMULTI;
}

static void uml_net_tx_timeout(struct net_device *dev)
{
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}

static int uml_net_set_mac(struct net_device *dev, void *addr)
{
	struct uml_net_private *lp = dev->priv;
	struct sockaddr *hwaddr = addr;

	spin_lock(&lp->lock);
	memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
	spin_unlock(&lp->lock);

	return(0);
}

static int uml_net_change_mtu(struct net_device *dev, int new_mtu)
{
	struct uml_net_private *lp = dev->priv;
	int err = 0;

	spin_lock(&lp->lock);

	new_mtu = (*lp->set_mtu)(new_mtu, &lp->user);
	if(new_mtu < 0){
		err = new_mtu;
		goto out;
	}

	dev->mtu = new_mtu;

 out:
	spin_unlock(&lp->lock);
	return err;
}

static void uml_net_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRIVER_NAME);
	strcpy(info->version, "42");
}

static struct ethtool_ops uml_net_ethtool_ops = {
	.get_drvinfo	= uml_net_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

void uml_net_user_timer_expire(unsigned long _conn)
{
#ifdef undef
	struct connection *conn = (struct connection *)_conn;

	dprintk(KERN_INFO "uml_net_user_timer_expire [%p]\n", conn);
	do_connect(conn);
#endif
}

static DEFINE_SPINLOCK(devices_lock);
static LIST_HEAD(devices);

static struct platform_driver uml_net_driver = {
	.driver = {
		.name  = DRIVER_NAME,
	},
};
static int driver_registered;

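/*
 * Create and register one ethN device for the given transport: allocate
 * the uml_net and net_device structures, register the platform device,
 * wire up the net_device methods, and initialize the per-device
 * uml_net_private state.  Returns 0 on success, 1 on failure.
 */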
static int eth_configure(int n, void *init, char *mac,
			 struct transport *transport)
{
	struct uml_net *device;
	struct net_device *dev;
	struct uml_net_private *lp;
	int save, err, size;

	size = transport->private_size + sizeof(struct uml_net_private) +
		sizeof(((struct uml_net_private *) 0)->user);

	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (device == NULL) {
		printk(KERN_ERR "eth_configure failed to allocate uml_net\n");
		return(1);
	}

	memset(device, 0, sizeof(*device));
	INIT_LIST_HEAD(&device->list);
	device->index = n;

	spin_lock(&devices_lock);
	list_add(&device->list, &devices);
	spin_unlock(&devices_lock);

	if (setup_etheraddr(mac, device->mac))
		device->have_mac = 1;

	printk(KERN_INFO "Netdevice %d ", n);
	if (device->have_mac)
		printk("(%02x:%02x:%02x:%02x:%02x:%02x) ",
		       device->mac[0], device->mac[1],
		       device->mac[2], device->mac[3],
		       device->mac[4], device->mac[5]);
	printk(": ");
	dev = alloc_etherdev(size);
	if (dev == NULL) {
		printk(KERN_ERR "eth_configure: failed to allocate device\n");
		return 1;
	}

	/* sysfs register */
	if (!driver_registered) {
		platform_driver_register(&uml_net_driver);
		driver_registered = 1;
	}
	device->pdev.id = n;
	device->pdev.name = DRIVER_NAME;
	platform_device_register(&device->pdev);
	SET_NETDEV_DEV(dev,&device->pdev.dev);

	/* If this name ends up conflicting with an existing registered
	 * netdevice, that is OK, register_netdev{,ice}() will notice this
	 * and fail.
	 */
	snprintf(dev->name, sizeof(dev->name), "eth%d", n);
	device->dev = dev;

	(*transport->kern->init)(dev, init);

	dev->mtu = transport->user->max_packet;
	dev->open = uml_net_open;
	dev->hard_start_xmit = uml_net_start_xmit;
	dev->stop = uml_net_close;
	dev->get_stats = uml_net_get_stats;
	dev->set_multicast_list = uml_net_set_multicast_list;
	dev->tx_timeout = uml_net_tx_timeout;
	dev->set_mac_address = uml_net_set_mac;
	dev->change_mtu = uml_net_change_mtu;
	dev->ethtool_ops = &uml_net_ethtool_ops;
	dev->watchdog_timeo = (HZ >> 1);
	dev->irq = UM_ETH_IRQ;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	if (err) {
		device->dev = NULL;
		/* XXX: should we call ->remove() here? */
		free_netdev(dev);
		return 1;
	}
	lp = dev->priv;

	/* lp.user is the first four bytes of the transport data, which
	 * has already been initialized.  This structure assignment will
	 * overwrite that, so we make sure that .user gets overwritten with
	 * what it already has.
	 */
	save = lp->user[0];
	*lp = ((struct uml_net_private)
		{ .list			= LIST_HEAD_INIT(lp->list),
		  .dev			= dev,
		  .fd			= -1,
		  .mac			= { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0},
		  .have_mac		= device->have_mac,
		  .protocol		= transport->kern->protocol,
		  .open			= transport->user->open,
		  .close		= transport->user->close,
		  .remove		= transport->user->remove,
		  .read			= transport->kern->read,
		  .write		= transport->kern->write,
		  .add_address		= transport->user->add_address,
		  .delete_address	= transport->user->delete_address,
		  .set_mtu		= transport->user->set_mtu,
		  .user			= { save } });

	init_timer(&lp->tl);
	spin_lock_init(&lp->lock);
	lp->tl.function = uml_net_user_timer_expire;
	if (lp->have_mac)
		memcpy(lp->mac, device->mac, sizeof(lp->mac));

	if (transport->user->init)
		(*transport->user->init)(&lp->user, dev);

	if (device->have_mac)
		set_ether_mac(dev, device->mac);

	spin_lock(&opened_lock);
	list_add(&lp->list, &opened);
	spin_unlock(&opened_lock);

	return(0);
}

static struct uml_net *find_device(int n)
{
	struct uml_net *device;
	struct list_head *ele;

	spin_lock(&devices_lock);
	list_for_each(ele, &devices){
		device = list_entry(ele, struct uml_net, list);
		if(device->index == n)
			goto out;
	}
	device = NULL;
 out:
	spin_unlock(&devices_lock);
	return(device);
}

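/*
 * Parse the leading "<n>=" of an ethN= command line option.  Returns 0
 * with the device number in *index_out and the rest of the string in
 * *str_out, or 1 on a parse error or if the device already exists.
 */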
static int eth_parse(char *str, int *index_out, char **str_out)
{
	char *end;
	int n;

	n = simple_strtoul(str, &end, 0);
	if(end == str){
		printk(KERN_ERR "eth_setup: Failed to parse '%s'\n", str);
		return(1);
	}
	if(n < 0){
		printk(KERN_ERR "eth_setup: device %d is negative\n", n);
		return(1);
	}
	str = end;
	if(*str != '='){
		printk(KERN_ERR
		       "eth_setup: expected '=' after device number\n");
		return(1);
	}
	str++;
	if(find_device(n)){
		printk(KERN_ERR "eth_setup: Device %d already configured\n",
		       n);
		return(1);
	}
	if(index_out) *index_out = n;
	*str_out = str;
	return(0);
}

struct eth_init {
	struct list_head list;
	char *init;
	int index;
};

/* Filled in at boot time.  Will need locking if the transports become
 * modular.
 */
struct list_head transports = LIST_HEAD_INIT(transports);

/* Filled in during early boot */
struct list_head eth_cmd_line = LIST_HEAD_INIT(eth_cmd_line);

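/*
 * Match a command line specification against one transport by name.
 * Returns 0 if the name doesn't match; otherwise returns 1, with
 * *init_out pointing at the transport's parsed setup data, or NULL if
 * allocation or the transport's setup routine failed.
 */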
static int check_transport(struct transport *transport, char *eth, int n,
			   void **init_out, char **mac_out)
{
	int len;

	len = strlen(transport->name);
	if(strncmp(eth, transport->name, len))
		return(0);

	eth += len;
	if(*eth == ',')
		eth++;
	else if(*eth != '\0')
		return(0);

	*init_out = kmalloc(transport->setup_size, GFP_KERNEL);
	if(*init_out == NULL)
		return(1);

	if(!transport->setup(eth, mac_out, *init_out)){
		kfree(*init_out);
		*init_out = NULL;
	}
	return(1);
}

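/*
 * Called by each transport at boot.  Add it to the transport list and
 * configure any command line devices that were waiting for it.
 */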
void register_transport(struct transport *new)
{
	struct list_head *ele, *next;
	struct eth_init *eth;
	void *init;
	char *mac = NULL;
	int match;

	list_add(&new->list, &transports);

	list_for_each_safe(ele, next, &eth_cmd_line){
		eth = list_entry(ele, struct eth_init, list);
		match = check_transport(new, eth->init, eth->index, &init,
					&mac);
		if(!match)
			continue;
		else if(init != NULL){
			eth_configure(eth->index, init, mac, new);
			kfree(init);
		}
		list_del(&eth->list);
	}
}

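/*
 * Try every registered transport against a device specification and
 * configure the device with the first one that matches.  Returns 1 if
 * a transport matched, 0 otherwise.
 */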
static int eth_setup_common(char *str, int index)
{
	struct list_head *ele;
	struct transport *transport;
	void *init;
	char *mac = NULL;

	list_for_each(ele, &transports){
		transport = list_entry(ele, struct transport, list);
		if(!check_transport(transport, str, index, &init, &mac))
			continue;
		if(init != NULL){
			eth_configure(index, init, mac, transport);
			kfree(init);
		}
		return(1);
	}
	return(0);
}

static int eth_setup(char *str)
{
	struct eth_init *new;
	int n, err;

	err = eth_parse(str, &n, &str);
	if(err) return(1);

	new = alloc_bootmem(sizeof(*new));
	if (new == NULL){
		printk("eth_init : alloc_bootmem failed\n");
		return(1);
	}

	INIT_LIST_HEAD(&new->list);
	new->index = n;
	new->init = str;

	list_add_tail(&new->list, &eth_cmd_line);
	return(1);
}

__setup("eth", eth_setup);
__uml_help(eth_setup,
"eth[0-9]+=<transport>,<options>\n"
"    Configure a network device.\n\n"
);

#if 0
static int eth_init(void)
{
	struct list_head *ele, *next;
	struct eth_init *eth;

	list_for_each_safe(ele, next, &eth_cmd_line){
		eth = list_entry(ele, struct eth_init, list);

		if(eth_setup_common(eth->init, eth->index))
			list_del(&eth->list);
	}

	return(1);
}
__initcall(eth_init);
#endif

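/*
 * mconsole "config eth<n>=..." handler - parse the specification and
 * hot-add the device using the already registered transports.
 */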
static int net_config(char *str)
{
	int n, err;

	err = eth_parse(str, &n, &str);
	if(err) return(err);

	str = kstrdup(str, GFP_KERNEL);
	if(str == NULL){
		printk(KERN_ERR "net_config failed to strdup string\n");
		return(-1);
	}
	err = !eth_setup_common(str, n);
	if(err)
		kfree(str);
	return(err);
}

static int net_id(char **str, int *start_out, int *end_out)
{
	char *end;
	int n;

	n = simple_strtoul(*str, &end, 0);
	if((*end != '\0') || (end == *str))
		return -1;

	*start_out = n;
	*end_out = n;
	*str = end;
	return n;
}

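/*
 * mconsole "remove eth<n>" handler - refuse if the interface is still
 * open, otherwise unregister and free the device.
 */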
static int net_remove(int n)
{
	struct uml_net *device;
	struct net_device *dev;
	struct uml_net_private *lp;

	device = find_device(n);
	if(device == NULL)
		return -ENODEV;

	dev = device->dev;
	lp = dev->priv;
	if(lp->fd > 0)
		return -EBUSY;
	if(lp->remove != NULL) (*lp->remove)(&lp->user);
	unregister_netdev(dev);
	platform_device_unregister(&device->pdev);

	list_del(&device->list);
	kfree(device);
	free_netdev(dev);
	return 0;
}

static struct mc_device net_mc = {
	.name		= "eth",
	.config		= net_config,
	.get_config	= NULL,
	.id		= net_id,
	.remove		= net_remove,
};

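/*
 * Inetaddr notifier - forward IP address additions and removals on UML
 * ethernet devices to the transport's add_address/delete_address
 * handlers.
 */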
static int uml_inetaddr_event(struct notifier_block *this, unsigned long event,
			      void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct uml_net_private *lp;
	void (*proc)(unsigned char *, unsigned char *, void *);
	unsigned char addr_buf[4], netmask_buf[4];

	if(dev->open != uml_net_open) return(NOTIFY_DONE);

	lp = dev->priv;

	proc = NULL;
	switch (event){
	case NETDEV_UP:
		proc = lp->add_address;
		break;
	case NETDEV_DOWN:
		proc = lp->delete_address;
		break;
	}
	if(proc != NULL){
		memcpy(addr_buf, &ifa->ifa_address, sizeof(addr_buf));
		memcpy(netmask_buf, &ifa->ifa_mask, sizeof(netmask_buf));
		(*proc)(addr_buf, netmask_buf, &lp->user);
	}
	return(NOTIFY_DONE);
}

struct notifier_block uml_inetaddr_notifier = {
	.notifier_call		= uml_inetaddr_event,
};

static int uml_net_init(void)
{
	struct list_head *ele;
	struct uml_net_private *lp;
	struct in_device *ip;
	struct in_ifaddr *in;

	mconsole_register_dev(&net_mc);
	register_inetaddr_notifier(&uml_inetaddr_notifier);

	/* Devices may have been opened already, so the uml_inetaddr_notifier
	 * didn't get a chance to run for them.  This fakes it so that
	 * addresses which have already been set up get handled properly.
	 */
	list_for_each(ele, &opened){
		lp = list_entry(ele, struct uml_net_private, list);
		ip = lp->dev->ip_ptr;
		if(ip == NULL) continue;
		in = ip->ifa_list;
		while(in != NULL){
			uml_inetaddr_event(NULL, NETDEV_UP, in);
			in = in->ifa_next;
		}
	}

	return(0);
}

__initcall(uml_net_init);

static void close_devices(void)
{
	struct list_head *ele;
	struct uml_net_private *lp;

	list_for_each(ele, &opened){
		lp = list_entry(ele, struct uml_net_private, list);
		free_irq(lp->dev->irq, lp->dev);
		if((lp->close != NULL) && (lp->fd >= 0))
			(*lp->close)(lp->fd, &lp->user);
		if(lp->remove != NULL) (*lp->remove)(&lp->user);
	}
}

__uml_exitcall(close_devices);

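/*
 * Parse a colon (or comma) separated MAC address string into addr[].
 * Returns 1 on success, 0 if the string is missing, malformed, or a
 * multicast/broadcast address.
 */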
int setup_etheraddr(char *str, unsigned char *addr)
{
	char *end;
	int i;

	if(str == NULL)
		return(0);
	for(i=0;i<6;i++){
		addr[i] = simple_strtoul(str, &end, 16);
		if((end == str) ||
		   ((*end != ':') && (*end != ',') && (*end != '\0'))){
			printk(KERN_ERR
			       "setup_etheraddr: failed to parse '%s' "
			       "as an ethernet address\n", str);
			return(0);
		}
		str = end + 1;
	}
	if(addr[0] & 1){
		printk(KERN_ERR
		       "Attempt to assign a broadcast ethernet address to a "
		       "device disallowed\n");
		return(0);
	}
	return(1);
}

void dev_ip_addr(void *d, unsigned char *bin_buf)
{
	struct net_device *dev = d;
	struct in_device *ip = dev->ip_ptr;
	struct in_ifaddr *in;

	if((ip == NULL) || ((in = ip->ifa_list) == NULL)){
		printk(KERN_WARNING "dev_ip_addr - device not assigned an "
		       "IP address\n");
		return;
	}
	memcpy(bin_buf, &in->ifa_address, sizeof(in->ifa_address));
}

void set_ether_mac(void *d, unsigned char *addr)
{
	struct net_device *dev = d;

	memcpy(dev->dev_addr, addr, ETH_ALEN);
}

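/*
 * Make sure an skb has at least 'extra' bytes of tailroom, reallocating
 * it if necessary, and extend its data area by that amount.  Returns
 * NULL if the copy fails.
 */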
struct sk_buff *ether_adjust_skb(struct sk_buff *skb, int extra)
{
	if((skb != NULL) && (skb_tailroom(skb) < extra)){
		struct sk_buff *skb2;

		skb2 = skb_copy_expand(skb, 0, extra, GFP_ATOMIC);
		dev_kfree_skb(skb);
		skb = skb2;
	}
	if(skb != NULL) skb_put(skb, extra);
	return(skb);
}

void iter_addresses(void *d, void (*cb)(unsigned char *, unsigned char *,
					void *),
		    void *arg)
{
	struct net_device *dev = d;
	struct in_device *ip = dev->ip_ptr;
	struct in_ifaddr *in;
	unsigned char address[4], netmask[4];

	if(ip == NULL) return;
	in = ip->ifa_list;
	while(in != NULL){
		memcpy(address, &in->ifa_address, sizeof(address));
		memcpy(netmask, &in->ifa_mask, sizeof(netmask));
		(*cb)(address, netmask, arg);
		in = in->ifa_next;
	}
}

int dev_netmask(void *d, void *m)
{
	struct net_device *dev = d;
	struct in_device *ip = dev->ip_ptr;
	struct in_ifaddr *in;
	__u32 *mask_out = m;

	if(ip == NULL)
		return(1);

	in = ip->ifa_list;
	if(in == NULL)
		return(1);

	*mask_out = in->ifa_mask;
	return(0);
}

void *get_output_buffer(int *len_out)
{
	void *ret;

	ret = (void *) __get_free_pages(GFP_KERNEL, 0);
	if(ret) *len_out = PAGE_SIZE;
	else *len_out = 0;
	return(ret);
}

void free_output_buffer(void *buffer)
{
	free_pages((unsigned long) buffer, 0);
}

int tap_setup_common(char *str, char *type, char **dev_name, char **mac_out,
		     char **gate_addr)
{
	char *remain;

	remain = split_if_spec(str, dev_name, mac_out, gate_addr, NULL);
	if(remain != NULL){
		printk("tap_setup_common - Extra garbage on specification : "
		       "'%s'\n", remain);
		return(1);
	}

	return(0);
}

unsigned short eth_protocol(struct sk_buff *skb)
{
	return(eth_type_trans(skb, skb->dev));
}

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */