2 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
3 * James Leu (jleu@mindspring.net).
4 * Copyright (C) 2001 by various other people who didn't put their name here.
5 * Licensed under the GPL.
8 #include "linux/config.h"
9 #include "linux/kernel.h"
10 #include "linux/netdevice.h"
11 #include "linux/rtnetlink.h"
12 #include "linux/skbuff.h"
13 #include "linux/socket.h"
14 #include "linux/spinlock.h"
15 #include "linux/module.h"
16 #include "linux/init.h"
17 #include "linux/etherdevice.h"
18 #include "linux/list.h"
19 #include "linux/inetdevice.h"
20 #include "linux/ctype.h"
21 #include "linux/bootmem.h"
22 #include "linux/ethtool.h"
23 #include "linux/platform_device.h"
24 #include "asm/uaccess.h"
25 #include "user_util.h"
26 #include "kern_util.h"
29 #include "mconsole_kern.h"
34 #define DRIVER_NAME "uml-netdev"
/* List of all devices whose host fd has been opened, so late-running
 * init/exit code (uml_net_init, close_devices) can walk them.
 * Guarded by opened_lock. */
36 static DEFINE_SPINLOCK(opened_lock);
37 static LIST_HEAD(opened);
/* uml_net_rx - pull one packet from the host-side fd into a fresh skb
 * and feed it to the network stack, updating rx statistics.
 * NOTE(review): this listing is a sampled subset of the original
 * function; the allocation-failure and read-error paths (and the final
 * netif_rx()/return) are elided between the visible lines. */
39 static int uml_net_rx(struct net_device *dev)
41 struct uml_net_private *lp = dev->priv;
45 /* If we can't allocate memory, try again next round. */
46 skb = dev_alloc_skb(dev->mtu);
48 lp->stats.rx_dropped++;
/* Size the buffer to the full MTU before the transport read fills it. */
53 skb_put(skb, dev->mtu);
54 skb->mac.raw = skb->data;
/* Transport-specific read; returns the actual packet length. */
55 pkt_len = (*lp->read)(lp->fd, &skb, lp);
58 skb_trim(skb, pkt_len);
59 skb->protocol = (*lp->protocol)(skb);
62 lp->stats.rx_bytes += skb->len;
63 lp->stats.rx_packets++;
/* Deferred-work wrapper: dev_close() cannot run in interrupt context,
 * so the interrupt handler schedules this via a work item instead. */
71 static void uml_dev_close(void* dev)
73 dev_close( (struct net_device *) dev);
/* IRQ handler for the UML ethernet read interrupt: drain all pending
 * packets via uml_net_rx(); on a fatal read error, schedule a deferred
 * dev_close() (see comment below).
 * NOTE(review): sampled listing — the lock acquisition matching the
 * spin_unlock below, and the final return, are elided. */
76 irqreturn_t uml_net_interrupt(int irq, void *dev_id, struct pt_regs *regs)
78 struct net_device *dev = dev_id;
79 struct uml_net_private *lp = dev->priv;
/* Ignore spurious interrupts on a device that is not up. */
82 if(!netif_running(dev))
/* Drain the host fd until uml_net_rx() reports no more data. */
86 while((err = uml_net_rx(dev)) > 0) ;
88 DECLARE_WORK(close_work, uml_dev_close, dev);
90 "Device '%s' read returned %d, shutting it down\n",
92 /* dev_close can't be called in interrupt context, and takes
94 * And dev_close() can be safely called multiple times on the
95 * same device, since it tests for (dev->flags & IFF_UP). So
96 * there's no harm in delaying the device shutdown. */
97 schedule_work(&close_work);
/* Re-arm the descriptor so we get another SIGIO for new data. */
100 reactivate_fd(lp->fd, UM_ETH_IRQ);
103 spin_unlock(&lp->lock);
/* net_device->open: set the MAC from the device IP if needed, open the
 * host-side transport fd, request the read IRQ, and start the queue.
 * NOTE(review): sampled listing — error-return paths and the final
 * return value are elided between the visible lines. */
107 static int uml_net_open(struct net_device *dev)
109 struct uml_net_private *lp = dev->priv;
112 spin_lock(&lp->lock);
/* Derive the low 4 MAC bytes from the interface's IP address. */
120 dev_ip_addr(dev, &lp->mac[2]);
121 set_ether_mac(dev, lp->mac);
/* Transport-specific open; returns the host file descriptor. */
124 lp->fd = (*lp->open)(&lp->user);
130 err = um_request_irq(dev->irq, lp->fd, IRQ_READ, uml_net_interrupt,
131 SA_INTERRUPT | SA_SHIRQ, dev->name, dev);
133 printk(KERN_ERR "uml_net_open: failed to get irq(%d)\n", err);
/* Undo the transport open on IRQ-request failure. */
134 if(lp->close != NULL) (*lp->close)(lp->fd, &lp->user);
139 lp->tl.data = (unsigned long) &lp->user;
140 netif_start_queue(dev);
142 /* clear buffer - it can happen that the host side of the interface
143 * is full when we get here. In this case, new data is never queued,
144 * SIGIOs never arrive, and the net never works.
146 while((err = uml_net_rx(dev)) > 0) ;
149 spin_unlock(&lp->lock);
/* net_device->stop: stop the TX queue, release the IRQ and let the
 * transport close its host-side descriptor. */
153 static int uml_net_close(struct net_device *dev)
155 struct uml_net_private *lp = dev->priv;
157 netif_stop_queue(dev);
158 spin_lock(&lp->lock);
160 free_irq(dev->irq, dev);
161 if(lp->close != NULL)
162 (*lp->close)(lp->fd, &lp->user);
166 spin_unlock(&lp->lock);
/* net_device->hard_start_xmit: hand one skb to the transport's write
 * hook and account the result. Three outcomes are visible: full write
 * (success), len == 0 (treated as a drop), and a negative/short write
 * (logged as an error). The queue is restarted in every case.
 * NOTE(review): sampled listing — the branch keywords and the final
 * dev_kfree_skb()/return are elided. */
170 static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
172 struct uml_net_private *lp = dev->priv;
/* Stop the queue while this packet is in flight. */
176 netif_stop_queue(dev);
178 spin_lock_irqsave(&lp->lock, flags);
180 len = (*lp->write)(lp->fd, &skb, lp);
182 if(len == skb->len) {
183 lp->stats.tx_packets++;
184 lp->stats.tx_bytes += skb->len;
185 dev->trans_start = jiffies;
186 netif_start_queue(dev);
188 /* this is normally done in the interrupt when tx finishes */
189 netif_wake_queue(dev);
192 netif_start_queue(dev);
193 lp->stats.tx_dropped++;
196 netif_start_queue(dev);
197 printk(KERN_ERR "uml_net_start_xmit: failed(%d)\n", len);
200 spin_unlock_irqrestore(&lp->lock, flags);
/* net_device->get_stats: return the per-device statistics structure
 * (presumably &lp->stats — the return line is elided here). */
207 static struct net_device_stats *uml_net_get_stats(struct net_device *dev)
209 struct uml_net_private *lp = dev->priv;
/* net_device->set_multicast_list: no real multicast filtering — just
 * enable IFF_ALLMULTI whenever any multicast address is configured
 * (irrelevant while in promiscuous mode). */
213 static void uml_net_set_multicast_list(struct net_device *dev)
215 if (dev->flags & IFF_PROMISC) return;
216 else if (dev->mc_count) dev->flags |= IFF_ALLMULTI;
217 else dev->flags &= ~IFF_ALLMULTI;
/* net_device->tx_timeout: nothing to reset on the host side, so just
 * refresh the timestamp and kick the queue back into life. */
220 static void uml_net_tx_timeout(struct net_device *dev)
222 dev->trans_start = jiffies;
223 netif_wake_queue(dev);
/* net_device->set_mac_address: copy the new hardware address into the
 * device under lp->lock. */
226 static int uml_net_set_mac(struct net_device *dev, void *addr)
228 struct uml_net_private *lp = dev->priv;
229 struct sockaddr *hwaddr = addr;
231 spin_lock(&lp->lock);
232 memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
233 spin_unlock(&lp->lock);
/* net_device->change_mtu: let the transport clamp/validate the MTU via
 * its set_mtu hook (the dev->mtu assignment is elided here). */
238 static int uml_net_change_mtu(struct net_device *dev, int new_mtu)
240 struct uml_net_private *lp = dev->priv;
243 spin_lock(&lp->lock);
245 new_mtu = (*lp->set_mtu)(new_mtu, &lp->user);
254 spin_unlock(&lp->lock);
/* ethtool get_drvinfo: report the driver name and a fixed version. */
258 static void uml_net_get_drvinfo(struct net_device *dev,
259 struct ethtool_drvinfo *info)
261 strcpy(info->driver, DRIVER_NAME);
262 strcpy(info->version, "42");
/* ethtool operations: only drvinfo and link state are supported. */
265 static struct ethtool_ops uml_net_ethtool_ops = {
266 .get_drvinfo = uml_net_get_drvinfo,
267 .get_link = ethtool_op_get_link,
/* Timer callback installed as lp->tl.function; currently only emits a
 * debug trace for the expiring connection. */
270 void uml_net_user_timer_expire(unsigned long _conn)
273 struct connection *conn = (struct connection *)_conn;
275 dprintk(KERN_INFO "uml_net_user_timer_expire [%p]\n", conn);
/* Registry of every configured uml_net device (guarded by
 * devices_lock), plus the platform driver used to parent the
 * per-device platform_device registrations. driver_registered makes
 * the platform_driver_register() call one-shot. */
280 static DEFINE_SPINLOCK(devices_lock);
281 static LIST_HEAD(devices);
283 static struct platform_driver uml_net_driver = {
288 static int driver_registered;
/* eth_configure - create and register ethN for transport `transport`.
 * Allocates the uml_net bookkeeping struct and the net_device (with
 * room for uml_net_private plus the transport's private data), wires
 * up all net_device operations, registers the platform and net
 * devices, initializes uml_net_private from the transport's hook
 * table, and finally adds the device to the `opened` list.
 * NOTE(review): sampled listing — error-handling/cleanup paths and
 * several assignments are elided between the visible lines. */
290 static int eth_configure(int n, void *init, char *mac,
291 struct transport *transport)
293 struct uml_net *device;
294 struct net_device *dev;
295 struct uml_net_private *lp;
/* Private area = transport private data + uml_net_private, minus the
 * overlapping `user` member counted once. */
298 size = transport->private_size + sizeof(struct uml_net_private) +
299 sizeof(((struct uml_net_private *) 0)->user);
301 device = kmalloc(sizeof(*device), GFP_KERNEL);
302 if (device == NULL) {
303 printk(KERN_ERR "eth_configure failed to allocate uml_net\n");
307 memset(device, 0, sizeof(*device));
308 INIT_LIST_HEAD(&device->list);
/* Publish the device in the global registry. */
311 spin_lock(&devices_lock);
312 list_add(&device->list, &devices);
313 spin_unlock(&devices_lock);
/* Parse the user-supplied MAC, if any, into device->mac. */
315 if (setup_etheraddr(mac, device->mac))
316 device->have_mac = 1;
318 printk(KERN_INFO "Netdevice %d ", n);
319 if (device->have_mac)
320 printk("(%02x:%02x:%02x:%02x:%02x:%02x) ",
321 device->mac[0], device->mac[1],
322 device->mac[2], device->mac[3],
323 device->mac[4], device->mac[5]);
325 dev = alloc_etherdev(size);
327 printk(KERN_ERR "eth_configure: failed to allocate device\n");
332 /* This points to the transport private data. It's still clear, but we
333 * must memset it to 0 *now*. Let's help the drivers. */
/* One-shot platform driver registration, then a per-device
 * platform_device so sysfs has a proper parent. */
337 if (!driver_registered) {
338 platform_driver_register(&uml_net_driver);
339 driver_registered = 1;
342 device->pdev.name = DRIVER_NAME;
343 platform_device_register(&device->pdev);
344 SET_NETDEV_DEV(dev,&device->pdev.dev);
346 /* If this name ends up conflicting with an existing registered
347 * netdevice, that is OK, register_netdev{,ice}() will notice this
350 snprintf(dev->name, sizeof(dev->name), "eth%d", n);
/* Transport's kernel-side init, then wire up every netdev op. */
353 (*transport->kern->init)(dev, init);
355 dev->mtu = transport->user->max_packet;
356 dev->open = uml_net_open;
357 dev->hard_start_xmit = uml_net_start_xmit;
358 dev->stop = uml_net_close;
359 dev->get_stats = uml_net_get_stats;
360 dev->set_multicast_list = uml_net_set_multicast_list;
361 dev->tx_timeout = uml_net_tx_timeout;
362 dev->set_mac_address = uml_net_set_mac;
363 dev->change_mtu = uml_net_change_mtu;
364 dev->ethtool_ops = &uml_net_ethtool_ops;
365 dev->watchdog_timeo = (HZ >> 1);
366 dev->irq = UM_ETH_IRQ;
369 err = register_netdevice(dev);
373 /* XXX: should we call ->remove() here? */
378 /* lp.user is the first four bytes of the transport data, which
379 * has already been initialized. This structure assignment will
380 * overwrite that, so we make sure that .user gets overwritten with
381 * what it already has.
/* Bulk-initialize uml_net_private from the transport hook table. */
384 *lp = ((struct uml_net_private)
385 { .list = LIST_HEAD_INIT(lp->list),
388 .mac = { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0},
389 .have_mac = device->have_mac,
390 .protocol = transport->kern->protocol,
391 .open = transport->user->open,
392 .close = transport->user->close,
393 .remove = transport->user->remove,
394 .read = transport->kern->read,
395 .write = transport->kern->write,
396 .add_address = transport->user->add_address,
397 .delete_address = transport->user->delete_address,
398 .set_mtu = transport->user->set_mtu,
402 spin_lock_init(&lp->lock);
403 lp->tl.function = uml_net_user_timer_expire;
405 memcpy(lp->mac, device->mac, sizeof(lp->mac));
407 if (transport->user->init)
408 (*transport->user->init)(&lp->user, dev);
410 if (device->have_mac)
411 set_ether_mac(dev, device->mac);
/* Track the device so uml_net_init/close_devices can find it. */
413 spin_lock(&opened_lock);
414 list_add(&lp->list, &opened);
415 spin_unlock(&opened_lock);
/* Look up a configured device by index in the `devices` registry;
 * the found/not-found return lines are elided in this listing. */
420 static struct uml_net *find_device(int n)
422 struct uml_net *device;
423 struct list_head *ele;
425 spin_lock(&devices_lock);
426 list_for_each(ele, &devices){
427 device = list_entry(ele, struct uml_net, list);
428 if(device->index == n)
433 spin_unlock(&devices_lock);
/* Parse an "ethN=..." specification: extract the device index N,
 * validate it (non-negative, '=' separator, not already configured)
 * and hand back the index and the remainder of the string. */
437 static int eth_parse(char *str, int *index_out, char **str_out)
442 n = simple_strtoul(str, &end, 0);
444 printk(KERN_ERR "eth_setup: Failed to parse '%s'\n", str);
448 printk(KERN_ERR "eth_setup: device %d is negative\n", n);
454 "eth_setup: expected '=' after device number\n");
459 printk(KERN_ERR "eth_setup: Device %d already configured\n",
463 if(index_out) *index_out = n;
/* (struct eth_init member — the struct head is elided in this listing)
 * Per-command-line-entry node linked into eth_cmd_line below. */
469 struct list_head list;
474 /* Filled in at boot time. Will need locking if the transports become
477 struct list_head transports = LIST_HEAD_INIT(transports);
479 /* Filled in during early boot */
480 struct list_head eth_cmd_line = LIST_HEAD_INIT(eth_cmd_line);
/* Test whether command-line string `eth` selects `transport` (by name
 * prefix); if so, allocate and parse the transport's setup structure
 * into *init_out / *mac_out. Return conventions for match/no-match
 * are elided in this listing. */
482 static int check_transport(struct transport *transport, char *eth, int n,
483 void **init_out, char **mac_out)
487 len = strlen(transport->name);
488 if(strncmp(eth, transport->name, len))
494 else if(*eth != '\0')
497 *init_out = kmalloc(transport->setup_size, GFP_KERNEL);
498 if(*init_out == NULL)
/* Transport-specific option parsing; failure frees/clears init. */
501 if(!transport->setup(eth, mac_out, *init_out)){
/* Register a new transport, then retroactively configure any queued
 * command-line entries (eth_cmd_line) that name it, removing each
 * matched entry from the pending list. */
508 void register_transport(struct transport *new)
510 struct list_head *ele, *next;
511 struct eth_init *eth;
516 list_add(&new->list, &transports);
518 list_for_each_safe(ele, next, &eth_cmd_line){
519 eth = list_entry(ele, struct eth_init, list);
520 match = check_transport(new, eth->init, eth->index, &init,
524 else if(init != NULL){
525 eth_configure(eth->index, init, mac, new);
528 list_del(&eth->list);
/* Try every registered transport against spec `str`; configure the
 * device with the first transport that matches. */
532 static int eth_setup_common(char *str, int index)
534 struct list_head *ele;
535 struct transport *transport;
539 list_for_each(ele, &transports){
540 transport = list_entry(ele, struct transport, list);
541 if(!check_transport(transport, str, index, &init, &mac))
544 eth_configure(index, init, mac, transport);
/* Early-boot "eth=" parameter handler: parse the spec and queue it on
 * eth_cmd_line for later matching in register_transport()/eth_init().
 * NOTE(review): `alloc_bootmem(sizeof(new))` allocates the size of the
 * POINTER, not of *new — looks like a latent bug in the original;
 * cannot fix here since surrounding lines are elided. */
552 static int eth_setup(char *str)
554 struct eth_init *new;
557 err = eth_parse(str, &n, &str);
560 new = alloc_bootmem(sizeof(new));
562 printk("eth_init : alloc_bootmem failed\n");
566 INIT_LIST_HEAD(&new->list);
570 list_add_tail(&new->list, &eth_cmd_line);
574 __setup("eth", eth_setup);
575 __uml_help(eth_setup,
576 "eth[0-9]+=<transport>,<options>\n"
577 " Configure a network device.\n\n"
/* Initcall: configure any command-line devices still pending after all
 * transports have registered, removing the ones that succeed. */
581 static int eth_init(void)
583 struct list_head *ele, *next;
584 struct eth_init *eth;
586 list_for_each_safe(ele, next, &eth_cmd_line){
587 eth = list_entry(ele, struct eth_init, list);
589 if(eth_setup_common(eth->init, eth->index))
590 list_del(&eth->list);
595 __initcall(eth_init);
/* mconsole "config" handler: parse the spec, duplicate it (the
 * mconsole buffer is transient) and configure the device. */
598 static int net_config(char *str)
602 err = eth_parse(str, &n, &str);
605 str = kstrdup(str, GFP_KERNEL);
607 printk(KERN_ERR "net_config failed to strdup string\n");
610 err = !eth_setup_common(str, n);
/* mconsole id handler: parse the device number from *str; rejects
 * trailing garbage or an empty number. */
616 static int net_id(char **str, int *start_out, int *end_out)
621 n = simple_strtoul(*str, &end, 0);
622 if((*end != '\0') || (end == *str))
/* mconsole "remove" handler: find device n, run the transport's
 * remove hook, then unregister the netdevice and platform device and
 * drop it from the registry. */
631 static int net_remove(int n)
633 struct uml_net *device;
634 struct net_device *dev;
635 struct uml_net_private *lp;
637 device = find_device(n);
645 if(lp->remove != NULL) (*lp->remove)(&lp->user);
646 unregister_netdev(dev);
647 platform_device_unregister(&device->pdev);
649 list_del(&device->list);
/* mconsole device descriptor hooking up config/remove handlers. */
655 static struct mc_device net_mc = {
657 .config = net_config,
660 .remove = net_remove,
/* inetaddr notifier: when an IP address is added/removed on one of OUR
 * devices (identified by dev->open == uml_net_open), forward the
 * address and netmask to the transport's add_address/delete_address
 * hook. NOTE(review): sampled listing — the switch on `event` and the
 * NULL-proc check are elided. */
663 static int uml_inetaddr_event(struct notifier_block *this, unsigned long event,
666 struct in_ifaddr *ifa = ptr;
667 struct net_device *dev = ifa->ifa_dev->dev;
668 struct uml_net_private *lp;
669 void (*proc)(unsigned char *, unsigned char *, void *);
670 unsigned char addr_buf[4], netmask_buf[4];
/* Only react to events on devices driven by this driver. */
672 if(dev->open != uml_net_open) return(NOTIFY_DONE);
679 proc = lp->add_address;
682 proc = lp->delete_address;
686 memcpy(addr_buf, &ifa->ifa_address, sizeof(addr_buf));
687 memcpy(netmask_buf, &ifa->ifa_mask, sizeof(netmask_buf));
688 (*proc)(addr_buf, netmask_buf, &lp->user);
/* Notifier block registered with register_inetaddr_notifier(). */
693 struct notifier_block uml_inetaddr_notifier = {
694 .notifier_call = uml_inetaddr_event,
/* Initcall: register the mconsole device and inetaddr notifier, then
 * replay NETDEV_UP address events for any device opened before the
 * notifier existed, so already-configured addresses reach the
 * transports. NOTE(review): iteration over the per-device ifa_list
 * is elided between the visible lines. */
697 static int uml_net_init(void)
699 struct list_head *ele;
700 struct uml_net_private *lp;
701 struct in_device *ip;
702 struct in_ifaddr *in;
704 mconsole_register_dev(&net_mc);
705 register_inetaddr_notifier(&uml_inetaddr_notifier);
707 /* Devices may have been opened already, so the uml_inetaddr_notifier
708 * didn't get a chance to run for them. This fakes it so that
709 * addresses which have already been set up get handled properly.
711 list_for_each(ele, &opened){
712 lp = list_entry(ele, struct uml_net_private, list);
713 ip = lp->dev->ip_ptr;
714 if(ip == NULL) continue;
717 uml_inetaddr_event(NULL, NETDEV_UP, in);
725 __initcall(uml_net_init);
/* Exitcall: tear down every opened device — release its IRQ, close
 * its host fd via the transport, and run the remove hook. */
727 static void close_devices(void)
729 struct list_head *ele;
730 struct uml_net_private *lp;
732 list_for_each(ele, &opened){
733 lp = list_entry(ele, struct uml_net_private, list);
734 free_irq(lp->dev->irq, lp->dev);
735 if((lp->close != NULL) && (lp->fd >= 0))
736 (*lp->close)(lp->fd, &lp->user);
737 if(lp->remove != NULL) (*lp->remove)(&lp->user);
741 __uml_exitcall(close_devices);
/* Parse a colon/comma-separated hex MAC string into addr[6],
 * rejecting malformed bytes and broadcast addresses.
 * NOTE(review): the loop construct and return statements are elided
 * in this listing. */
743 int setup_etheraddr(char *str, unsigned char *addr)
751 addr[i] = simple_strtoul(str, &end, 16);
753 ((*end != ':') && (*end != ',') && (*end != '\0'))){
755 "setup_etheraddr: failed to parse '%s' "
756 "as an ethernet address\n", str);
763 "Attempt to assign a broadcast ethernet address to a "
764 "device disallowed\n");
/* Copy the device's first IPv4 address (4 bytes) into bin_buf; warns
 * if the device has no address assigned yet. */
770 void dev_ip_addr(void *d, unsigned char *bin_buf)
772 struct net_device *dev = d;
773 struct in_device *ip = dev->ip_ptr;
774 struct in_ifaddr *in;
776 if((ip == NULL) || ((in = ip->ifa_list) == NULL)){
777 printk(KERN_WARNING "dev_ip_addr - device not assigned an "
781 memcpy(bin_buf, &in->ifa_address, sizeof(in->ifa_address));
/* Install a 6-byte MAC into the net_device's hardware address. */
784 void set_ether_mac(void *d, unsigned char *addr)
786 struct net_device *dev = d;
788 memcpy(dev->dev_addr, addr, ETH_ALEN);
/* Ensure the skb has `extra` bytes of tailroom, copy-expanding it if
 * needed, then reserve that tail space with skb_put. The free of the
 * original skb after a successful copy is elided in this listing. */
791 struct sk_buff *ether_adjust_skb(struct sk_buff *skb, int extra)
793 if((skb != NULL) && (skb_tailroom(skb) < extra)){
794 struct sk_buff *skb2;
796 skb2 = skb_copy_expand(skb, 0, extra, GFP_ATOMIC);
800 if(skb != NULL) skb_put(skb, extra);
/* Invoke cb(address, netmask, arg) for each IPv4 address configured
 * on the device (loop construct elided in this listing). */
804 void iter_addresses(void *d, void (*cb)(unsigned char *, unsigned char *,
808 struct net_device *dev = d;
809 struct in_device *ip = dev->ip_ptr;
810 struct in_ifaddr *in;
811 unsigned char address[4], netmask[4];
813 if(ip == NULL) return;
816 memcpy(address, &in->ifa_address, sizeof(address));
817 memcpy(netmask, &in->ifa_mask, sizeof(netmask));
818 (*cb)(address, netmask, arg);
/* Fetch the device's first IPv4 netmask into *m (the mask_out
 * declaration and NULL checks are elided in this listing). */
823 int dev_netmask(void *d, void *m)
825 struct net_device *dev = d;
826 struct in_device *ip = dev->ip_ptr;
827 struct in_ifaddr *in;
837 *mask_out = in->ifa_mask;
/* Allocate a single page as a scratch output buffer; reports its
 * usable size through *len_out on success. */
841 void *get_output_buffer(int *len_out)
845 ret = (void *) __get_free_pages(GFP_KERNEL, 0);
846 if(ret) *len_out = PAGE_SIZE;
/* Release a buffer obtained from get_output_buffer(). */
851 void free_output_buffer(void *buffer)
853 free_pages((unsigned long) buffer, 0);
/* Shared option parser for tap-style transports: split the spec into
 * device name, MAC and gateway address; complains about any trailing
 * fields beyond those. */
856 int tap_setup_common(char *str, char *type, char **dev_name, char **mac_out,
861 remain = split_if_spec(str, dev_name, mac_out, gate_addr, NULL);
863 printk("tap_setup_common - Extra garbage on specification : "
/* Default protocol hook: classify the frame with eth_type_trans(). */
871 unsigned short eth_protocol(struct sk_buff *skb)
873 return(eth_type_trans(skb, skb->dev));
877 * Overrides for Emacs so that we follow Linus's tabbing style.
878 * Emacs will notice this stuff at the end of the file and automatically
879 * adjust the settings for this buffer only. This must remain at the end
881 * ---------------------------------------------------------------------------
883 * c-file-style: "linux"