/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>

#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)
/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *			--BLG
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);
#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline void *skb_gro_mac_header(struct sk_buff *skb)
{
	return skb_mac_header(skb) < skb->data ? skb_mac_header(skb) :
	       page_address(skb_shinfo(skb)->frags[0].page) +
	       skb_shinfo(skb)->frags[0].page_offset;
}
/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}
static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_VOID, ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 */
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
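/*
 * Illustrative sketch (not part of the original file): a module can tap
 * every IPv4 frame by registering a &packet_type.  The handler and
 * variable names below are hypothetical.
 *
 *	static int my_ipv4_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		// we own this reference; consume it when done
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_ptype = {
 *		.type = __constant_htons(ETH_P_IP),
 *		.func = my_ipv4_rcv,
 *	};
 *
 *	dev_add_pack(&my_ptype);	// e.g. from module_init
 *	...
 *	dev_remove_pack(&my_ptype);	// from module_exit; sleeps, see above
 */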
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}
/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
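/*
 * For example (hypothetical values), booting with
 *
 *	netdev=9,0x300,0,0,eth0
 *
 * stores irq 9 and I/O base 0x300 for "eth0" via netdev_boot_setup_add();
 * a probing driver can later pick them up with netdev_boot_setup_check().
 */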
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
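/*
 * Illustrative sketch (not part of the original file): a typical caller
 * pairs dev_get_by_name() with dev_put() once it is done with the device.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		// use dev; the held reference keeps it from being freed
 *		dev_put(dev);
 *	}
 */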
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
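/*
 * For example, with "eth0" and "eth2" already registered,
 * dev_alloc_name(dev, "eth%d") writes "eth1" into dev->name and
 * returns 1 (the unit number chosen from the free bitmap).
 */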
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net == &init_net) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return 0;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);
/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}
/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret = 0;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}
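/*
 * Illustrative sketch (not part of the original file): bringing an
 * interface up and down from process context.  Both calls must be made
 * with the rtnl semaphore held.
 *
 *	rtnl_lock();
 *	err = dev_open(dev);	// nop if the device is already IFF_UP
 *	...
 *	dev_close(dev);		// see below; always returns 0
 *	rtnl_unlock();
 */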
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 * Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */
/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 * 	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
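/*
 * Illustrative sketch (not part of the original file): a subsystem that
 * wants to track interface events registers a notifier_block; the
 * callback and variable names are hypothetical.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);
 *	// existing devices are replayed as NETDEV_REGISTER / NETDEV_UP
 */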
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	net_timestamp(skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}
static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = sd->output_queue;
	sd->output_queue = q;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
/**
 *	netif_device_detach - mark device as removed
 *	@dev: network device
 *
 *	Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 *	netif_device_attach - mark device as attached
 *	@dev: network device
 *
 *	Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
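/*
 * Illustrative sketch (not part of the original file): drivers typically
 * call these from their suspend/resume handlers, e.g. (hypothetical
 * driver code)
 *
 *	static int my_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(dev);	// stops the queue if running
 *		...
 *	}
 *
 * with netif_device_attach(dev) in the matching resume path.
 */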
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		struct net_device *dev = skb->dev;
		struct ethtool_drvinfo info = {};

		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
			dev->ethtool_ops->get_drvinfo(dev, &info);

		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
			"ip_summed=%d",
		     info.driver, dev ? dev->features : 0L,
		     skb->sk ? skb->sk->sk_route_caps : 0L,
		     skb->len, skb->data_len, skb->ip_summed);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}

EXPORT_SYMBOL(skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
			return 1;

#endif
	return 0;
}
struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	prefetch(&dev->netdev_ops->ndo_start_xmit);
	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		}

		return ops->ndo_start_xmit(skb, dev);
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;
		int rc;

		skb->next = nskb->next;
		nskb->next = NULL;
		rc = ops->ndo_start_xmit(nskb, dev);
		if (unlikely(rc)) {
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

	skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
	kfree_skb(skb);
	return 0;
}
static u32 skb_tx_hashrnd;
static int skb_tx_hashrnd_initialized = 0;

static u16 skb_tx_hash(struct net_device *dev, struct sk_buff *skb)
{
	u32 hash;

	if (unlikely(!skb_tx_hashrnd_initialized)) {
		get_random_bytes(&skb_tx_hashrnd, 4);
		skb_tx_hashrnd_initialized = 1;
	}

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
	} else if (skb->sk && skb->sk->sk_hash) {
		hash = skb->sk->sk_hash;
	} else
		hash = skb->protocol;

	hash = jhash_1word(hash, skb_tx_hashrnd);

	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}
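/*
 * The final line maps the 32-bit hash uniformly onto
 * [0, real_num_tx_queues) without a modulo.  For example (illustrative
 * numbers): with 4 tx queues and hash = 0x80000000,
 * ((u64)0x80000000 * 4) >> 32 == 2, i.e. queue 2.
 */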
static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	u16 queue_index = 0;

	if (ops->ndo_select_queue)
		queue_index = ops->ndo_select_queue(dev, skb);
	else if (dev->real_num_tx_queues > 1)
		queue_index = skb_tx_hash(dev, skb);

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* GSO will handle the following emulations directly. */
	if (netif_needs_gso(dev, skb))
		goto gso;

	if (skb_shinfo(skb)->frag_list &&
	    !(dev->features & NETIF_F_FRAGLIST) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* Fragmented skb is linearized if device does not support SG,
	 * or if at least one of fragments is in highmem and device
	 * does not support DMA from it.
	 */
	if (skb_shinfo(skb)->nr_frags &&
	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* If packet is not checksummed and device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));
		if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
			goto out_kfree_skb;
	}

gso:
	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	txq = dev_pick_tx(dev, skb);
	q = rcu_dereference(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	if (q->enqueue) {
		spinlock_t *root_lock = qdisc_lock(q);

		spin_lock(root_lock);

		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
			kfree_skb(skb);
			rc = NET_XMIT_DROP;
		} else {
			rc = qdisc_enqueue_root(skb, q);
			qdisc_run(q);
		}
		spin_unlock(root_lock);

		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and shot the lock. It is not prone from deadlocks.
	   Either shot noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_tx_queue_stopped(txq)) {
				rc = 0;
				if (!dev_hard_start_xmit(skb, dev, txq)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

out_kfree_skb:
	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
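/*
 * Illustrative sketch (not part of the original file): a protocol layer
 * hands a fully built skb to the device layer like so.
 *
 *	skb->dev = dev;			// chosen by routing
 *	skb->protocol = htons(ETH_P_IP);
 *	err = dev_queue_xmit(skb);	// consumes skb either way
 *	if (err)			// per the note above, err may be a
 *		...			// negative errno or a NET_XMIT_* code
 */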
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	struct softnet_data *queue;
	unsigned long flags;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	/*
	 * The code is rearranged so that the path is the most
	 * short when CPU is congested, but is still operating.
	 */
	local_irq_save(flags);
	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
enqueue:
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		napi_schedule(&queue->backlog);
		goto enqueue;
	}

	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}
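/*
 * Illustrative sketch (not part of the original file): a non-NAPI driver
 * pushes a received frame up from its interrupt handler like so.
 *
 *	skb_put(skb, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);			// queues on this CPU's backlog
 */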
int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}

EXPORT_SYMBOL(netif_rx_ni);
static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
/* These hooks defined here for ATM */
struct net_bridge;
struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
						unsigned char *addr);
void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;

/*
 * If bridge module is loaded call bridging hook.
 *  returns NULL if packet was consumed.
 */
struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
					struct sk_buff *skb) __read_mostly;
static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
					    struct packet_type **pt_prev, int *ret,
					    struct net_device *orig_dev)
{
	struct net_bridge_port *port;

	if (skb->pkt_type == PACKET_LOOPBACK ||
	    (port = rcu_dereference(skb->dev->br_port)) == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	return br_handle_frame_hook(port, skb);
}
#else
#define handle_bridge(skb, pt_prev, ret, orig_dev)	(skb)
#endif

#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);

static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
					     struct packet_type **pt_prev,
					     int *ret,
					     struct net_device *orig_dev)
{
	if (skb->dev->macvlan_port == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}
	return macvlan_handle_frame_hook(skb);
}
#else
#define handle_macvlan(skb, pt_prev, ret, orig_dev)	(skb)
#endif
#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
 * a compare and 2 stores extra right now if we don't have it on
 * but have CONFIG_NET_CLS_ACT
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 *
 */
static int ing_filter(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	struct netdev_queue *rxq;
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (MAX_RED_LOOP < ttl++) {
		printk(KERN_WARNING
		       "Redir loop detected Dropping packet (%d->%d)\n",
		       skb->iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	rxq = &dev->rx_queue;

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	if (skb->dev->rx_queue.qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	} else {
		/* Huh? Why does turning on AF_PACKET affect this? */
		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
	}

	switch (ing_filter(skb)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif
/*
 * 	netif_nit_deliver - deliver received packets to network taps
 * 	@skb: buffer
 *
 * 	This function is used to deliver incoming packets to network
 * 	taps. It should be used when the normal netif_receive_skb path
 * 	is bypassed, for example because of VLAN acceleration.
 */
void netif_nit_deliver(struct sk_buff *skb)
{
	struct packet_type *ptype;

	if (list_empty(&ptype_all))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev)
			deliver_skb(skb, ptype, skb->dev);
	}
	rcu_read_unlock();
}
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	struct net_device *orig_dev;
	struct net_device *null_or_orig;
	int ret = NET_RX_DROP;
	__be16 type;

	if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
		return NET_RX_SUCCESS;

	/* if we've gotten here through NAPI, check netpoll */
	if (netpoll_receive_skb(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	if (!skb->iif)
		skb->iif = skb->dev->ifindex;

	null_or_orig = NULL;
	orig_dev = skb->dev;
	if (orig_dev->master) {
		if (skb_bond_should_drop(skb))
			null_or_orig = orig_dev; /* deliver only exact match */
		else
			skb->dev = orig_dev->master;
	}

	__get_cpu_var(netdev_rx_stat).total++;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	pt_prev = NULL;

	rcu_read_lock();

	/* Don't receive packets in an exiting network namespace */
	if (!net_alive(dev_net(skb->dev))) {
		kfree_skb(skb);
		goto out;
	}

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
		    ptype->dev == orig_dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
ncls:
#endif

	skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
	skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type &&
		    (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
		     ptype->dev == orig_dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	rcu_read_unlock();
	return ret;
}
/* Network device is going away, flush any packets still pending  */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
		if (skb->dev == dev) {
			__skb_unlink(skb, &queue->input_pkt_queue);
			kfree_skb(skb);
		}
}
static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int err = -ENOENT;

	if (NAPI_GRO_CB(skb)->count == 1)
		goto out;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || ptype->dev || !ptype->gro_complete)
			continue;

		err = ptype->gro_complete(skb);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	skb_shinfo(skb)->gso_size = 0;
	return netif_receive_skb(skb);
}
void napi_gro_flush(struct napi_struct *napi)
{
	struct sk_buff *skb, *next;

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		napi_gro_complete(skb);
	}

	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);
void *skb_gro_header(struct sk_buff *skb, unsigned int hlen)
{
	unsigned int offset = skb_gro_offset(skb);

	hlen += offset;
	if (hlen <= skb_headlen(skb))
		return skb->data + offset;

	if (unlikely(!skb_shinfo(skb)->nr_frags ||
		     skb_shinfo(skb)->frags[0].size <=
		     hlen - skb_headlen(skb) ||
		     PageHighMem(skb_shinfo(skb)->frags[0].page)))
		return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;

	return page_address(skb_shinfo(skb)->frags[0].page) +
	       skb_shinfo(skb)->frags[0].page_offset +
	       offset - skb_headlen(skb);
}
EXPORT_SYMBOL(skb_gro_header);
int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int count = 0;
	int same_flow;
	int mac_len;
	int ret;

	if (!(skb->dev->features & NETIF_F_GRO))
		goto normal;

	if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
		goto normal;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		struct sk_buff *p;
		void *mac;

		if (ptype->type != type || ptype->dev || !ptype->gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		mac = skb_gro_mac_header(skb);
		mac_len = skb->network_header - skb->mac_header;
		skb->mac_len = mac_len;
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;

		for (p = napi->gro_list; p; p = p->next) {
			count++;

			if (!NAPI_GRO_CB(p)->same_flow)
				continue;

			if (p->mac_len != mac_len ||
			    memcmp(skb_mac_header(p), mac, mac_len))
				NAPI_GRO_CB(p)->same_flow = 0;
		}

		pp = ptype->gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush || count >= MAX_GRO_SKBS)
		goto normal;

	NAPI_GRO_CB(skb)->count = 1;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	if (unlikely(!pskb_may_pull(skb, skb_gro_offset(skb)))) {
		if (napi->gro_list == skb)
			napi->gro_list = skb->next;
		ret = GRO_DROP;
	}

ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}
EXPORT_SYMBOL(dev_gro_receive);
2491 static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2495 for (p = napi->gro_list; p; p = p->next) {
2496 NAPI_GRO_CB(p)->same_flow = 1;
2497 NAPI_GRO_CB(p)->flush = 0;
2500 return dev_gro_receive(napi, skb);
2503 int napi_skb_finish(int ret, struct sk_buff *skb)
2505 int err = NET_RX_SUCCESS;
2509 return netif_receive_skb(skb);
2515 case GRO_MERGED_FREE:
2522 EXPORT_SYMBOL(napi_skb_finish);
2524 int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2526 skb_gro_reset_offset(skb);
2528 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
2530 EXPORT_SYMBOL(napi_gro_receive);
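/*
 * Illustrative sketch (not part of this file): a hypothetical driver's
 * NAPI poll routine feeds received buffers through napi_gro_receive()
 * and completes the poll when the ring runs dry. mydrv_rx_next(),
 * mydrv_enable_rx_irq() and struct mydrv_priv are assumed helpers.
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct mydrv_priv *priv = container_of(napi,
 *						struct mydrv_priv, napi);
 *		struct sk_buff *skb;
 *		int work = 0;
 *
 *		while (work < budget && (skb = mydrv_rx_next(priv))) {
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget) {
 *			napi_complete(napi);
 *			mydrv_enable_rx_irq(priv);
 *		}
 *		return work;
 *	}
 */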
2532 void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2534 __skb_pull(skb, skb_headlen(skb));
2535 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2539 EXPORT_SYMBOL(napi_reuse_skb);
2541 struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
2542 struct napi_gro_fraginfo *info)
2544 struct net_device *dev = napi->dev;
2545 struct sk_buff *skb = napi->skb;
2553 skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
2557 skb_reserve(skb, NET_IP_ALIGN);
2560 BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
2561 frag = &info->frags[info->nr_frags - 1];
2563 for (i = skb_shinfo(skb)->nr_frags; i < info->nr_frags; i++) {
2564 skb_fill_page_desc(skb, i, frag->page, frag->page_offset,
2568 skb_shinfo(skb)->nr_frags = info->nr_frags;
2570 skb->data_len = info->len;
2571 skb->len += info->len;
2572 skb->truesize += info->len;
2574 skb_reset_mac_header(skb);
2575 skb_gro_reset_offset(skb);
2577 eth = skb_gro_header(skb, sizeof(*eth));
2579 napi_reuse_skb(napi, skb);
2584 skb_gro_pull(skb, sizeof(*eth));
2587 * This works because the only protocols we care about don't require
2588 * special handling. We'll fix it up properly at the end.
2590 skb->protocol = eth->h_proto;
2592 skb->ip_summed = info->ip_summed;
2593 skb->csum = info->csum;
2598 EXPORT_SYMBOL(napi_fraginfo_skb);
2600 int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
2602 int err = NET_RX_SUCCESS;
2607 skb->protocol = eth_type_trans(skb, napi->dev);
2609 if (ret == GRO_NORMAL)
2610 return netif_receive_skb(skb);
2612 skb_gro_pull(skb, -ETH_HLEN);
2619 case GRO_MERGED_FREE:
2620 napi_reuse_skb(napi, skb);
2626 EXPORT_SYMBOL(napi_frags_finish);
2628 int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
2630 struct sk_buff *skb = napi_fraginfo_skb(napi, info);
2635 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
2637 EXPORT_SYMBOL(napi_gro_frags);
2639 static int process_backlog(struct napi_struct *napi, int quota)
2642 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2643 unsigned long start_time = jiffies;
2645 napi->weight = weight_p;
2647 struct sk_buff *skb;
2649 local_irq_disable();
2650 skb = __skb_dequeue(&queue->input_pkt_queue);
2652 __napi_complete(napi);
2658 napi_gro_receive(napi, skb);
2659 } while (++work < quota && jiffies == start_time);
2661 napi_gro_flush(napi);
2667 * __napi_schedule - schedule for receive
2668 * @n: entry to schedule
2670 * The entry's receive function will be scheduled to run
2672 void __napi_schedule(struct napi_struct *n)
2674 unsigned long flags;
2676 local_irq_save(flags);
2677 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2678 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2679 local_irq_restore(flags);
2681 EXPORT_SYMBOL(__napi_schedule);
2683 void __napi_complete(struct napi_struct *n)
2685 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2686 BUG_ON(n->gro_list);
2688 list_del(&n->poll_list);
2689 smp_mb__before_clear_bit();
2690 clear_bit(NAPI_STATE_SCHED, &n->state);
2692 EXPORT_SYMBOL(__napi_complete);
2694 void napi_complete(struct napi_struct *n)
2696 unsigned long flags;
2699 * don't let napi dequeue from the cpu poll list
2700 * just in case it's running on a different cpu
2702 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2706 local_irq_save(flags);
2708 local_irq_restore(flags);
2710 EXPORT_SYMBOL(napi_complete);
2712 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2713 int (*poll)(struct napi_struct *, int), int weight)
2715 INIT_LIST_HEAD(&napi->poll_list);
2716 napi->gro_list = NULL;
2719 napi->weight = weight;
2720 list_add(&napi->dev_list, &dev->napi_list);
2722 #ifdef CONFIG_NETPOLL
2723 spin_lock_init(&napi->poll_lock);
2724 napi->poll_owner = -1;
2726 set_bit(NAPI_STATE_SCHED, &napi->state);
2728 EXPORT_SYMBOL(netif_napi_add);
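/*
 * Illustrative sketch (not part of this file): a hypothetical driver
 * registers its poll routine once at probe time, then schedules NAPI
 * from its interrupt handler; napi_schedule_prep()/__napi_schedule()
 * are the lock-free scheduling pair. Names prefixed mydrv_ are
 * assumptions.
 *
 *	netif_napi_add(dev, &priv->napi, mydrv_poll, 64);
 *
 *	static irqreturn_t mydrv_interrupt(int irq, void *data)
 *	{
 *		struct mydrv_priv *priv = data;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			mydrv_disable_rx_irq(priv);
 *			__napi_schedule(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */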
2730 void netif_napi_del(struct napi_struct *napi)
2732 struct sk_buff *skb, *next;
2734 list_del_init(&napi->dev_list);
2737 for (skb = napi->gro_list; skb; skb = next) {
2743 napi->gro_list = NULL;
2745 EXPORT_SYMBOL(netif_napi_del);
2748 static void net_rx_action(struct softirq_action *h)
2750 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
2751 unsigned long time_limit = jiffies + 2;
2752 int budget = netdev_budget;
2755 local_irq_disable();
2757 while (!list_empty(list)) {
2758 struct napi_struct *n;
2761 /* If the softirq window is exhausted then punt.
2762 * Allow this to run for 2 jiffies, which allows
2763 * an average latency of 1.5/HZ.
2765 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
2770 /* Even though interrupts have been re-enabled, this
2771 * access is safe because interrupts can only add new
2772 * entries to the tail of this list, and only ->poll()
2773 * calls can remove this head entry from the list.
2775 n = list_entry(list->next, struct napi_struct, poll_list);
2777 have = netpoll_poll_lock(n);
2781 /* This NAPI_STATE_SCHED test is for avoiding a race
2782 * with netpoll's poll_napi(). Only the entity which
2783 * obtains the lock and sees NAPI_STATE_SCHED set will
2784 * actually make the ->poll() call. Therefore we avoid
2785 * accidentally calling ->poll() when NAPI is not scheduled.
2788 if (test_bit(NAPI_STATE_SCHED, &n->state))
2789 work = n->poll(n, weight);
2791 WARN_ON_ONCE(work > weight);
2795 local_irq_disable();
2797 /* Drivers must not modify the NAPI state if they
2798 * consume the entire weight. In such cases this code
2799 * still "owns" the NAPI instance and therefore can
2800 * move the instance around on the list at-will.
2802 if (unlikely(work == weight)) {
2803 if (unlikely(napi_disable_pending(n)))
2806 list_move_tail(&n->poll_list, list);
2809 netpoll_poll_unlock(have);
2814 #ifdef CONFIG_NET_DMA
2816 * There may not be any more sk_buffs coming right now, so push
2817 * any pending DMA copies to hardware
2819 dma_issue_pending_all();
2825 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2826 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2830 static gifconf_func_t * gifconf_list [NPROTO];
2833 * register_gifconf - register a SIOCGIFCONF handler
2834 * @family: Address family
2835 * @gifconf: Function handler
2837 * Register protocol dependent address dumping routines. The handler
2838 * that is passed must not be freed or reused until it has been replaced
2839 * by another handler.
2841 int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
2843 if (family >= NPROTO)
2845 gifconf_list[family] = gifconf;
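/*
 * Illustrative sketch (not part of this file): address families hook in
 * their SIOCGIFCONF dumpers at init time; a handler is called once with
 * a NULL buffer to size the output and again to fill it (see
 * dev_ifconf() below). PF_MYPROTO and myproto_gifconf are hypothetical.
 *
 *	static int __init myproto_init(void)
 *	{
 *		return register_gifconf(PF_MYPROTO, myproto_gifconf);
 *	}
 */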
2851 * Map an interface index to its name (SIOCGIFNAME)
2855 * We need this ioctl for efficient implementation of the
2856 * if_indextoname() function required by the IPv6 API. Without
2857 * it, we would have to search all the interfaces to find a
2861 static int dev_ifname(struct net *net, struct ifreq __user *arg)
2863 struct net_device *dev;
2867 * Fetch the caller's info block.
2870 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2873 read_lock(&dev_base_lock);
2874 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
2876 read_unlock(&dev_base_lock);
2880 strcpy(ifr.ifr_name, dev->name);
2881 read_unlock(&dev_base_lock);
2883 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2889 * Perform a SIOCGIFCONF call. This structure will change
2890 * size eventually, and there is nothing I can do about it.
2891 * Thus we will need a 'compatibility mode'.
2894 static int dev_ifconf(struct net *net, char __user *arg)
2897 struct net_device *dev;
2904 * Fetch the caller's info block.
2907 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2914 * Loop over the interfaces, and write an info block for each.
2918 for_each_netdev(net, dev) {
2919 for (i = 0; i < NPROTO; i++) {
2920 if (gifconf_list[i]) {
2923 done = gifconf_list[i](dev, NULL, 0);
2925 done = gifconf_list[i](dev, pos + total,
2935 * All done. Write the updated control block back to the caller.
2937 ifc.ifc_len = total;
2940 * Both BSD and Solaris return 0 here, so we do too.
2942 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2945 #ifdef CONFIG_PROC_FS
2947 * This is invoked by the /proc filesystem handler to display a device
2950 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
2951 __acquires(dev_base_lock)
2953 struct net *net = seq_file_net(seq);
2955 struct net_device *dev;
2957 read_lock(&dev_base_lock);
2959 return SEQ_START_TOKEN;
2962 for_each_netdev(net, dev)
2969 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2971 struct net *net = seq_file_net(seq);
2973 return v == SEQ_START_TOKEN ?
2974 first_net_device(net) : next_net_device((struct net_device *)v);
2977 void dev_seq_stop(struct seq_file *seq, void *v)
2978 __releases(dev_base_lock)
2980 read_unlock(&dev_base_lock);
2983 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2985 const struct net_device_stats *stats = dev_get_stats(dev);
2987 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2988 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2989 dev->name, stats->rx_bytes, stats->rx_packets,
2991 stats->rx_dropped + stats->rx_missed_errors,
2992 stats->rx_fifo_errors,
2993 stats->rx_length_errors + stats->rx_over_errors +
2994 stats->rx_crc_errors + stats->rx_frame_errors,
2995 stats->rx_compressed, stats->multicast,
2996 stats->tx_bytes, stats->tx_packets,
2997 stats->tx_errors, stats->tx_dropped,
2998 stats->tx_fifo_errors, stats->collisions,
2999 stats->tx_carrier_errors +
3000 stats->tx_aborted_errors +
3001 stats->tx_window_errors +
3002 stats->tx_heartbeat_errors,
3003 stats->tx_compressed);
3007 * Called from the PROCfs module. This now uses the new arbitrary sized
3008 * /proc/net interface to create /proc/net/dev
3010 static int dev_seq_show(struct seq_file *seq, void *v)
3012 if (v == SEQ_START_TOKEN)
3013 seq_puts(seq, "Inter-| Receive "
3015 " face |bytes packets errs drop fifo frame "
3016 "compressed multicast|bytes packets errs "
3017 "drop fifo colls carrier compressed\n");
3019 dev_seq_printf_stats(seq, v);
3023 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3025 struct netif_rx_stats *rc = NULL;
3027 while (*pos < nr_cpu_ids)
3028 if (cpu_online(*pos)) {
3029 rc = &per_cpu(netdev_rx_stat, *pos);
3036 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3038 return softnet_get_online(pos);
3041 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3044 return softnet_get_online(pos);
3047 static void softnet_seq_stop(struct seq_file *seq, void *v)
3051 static int softnet_seq_show(struct seq_file *seq, void *v)
3053 struct netif_rx_stats *s = v;
3055 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
3056 s->total, s->dropped, s->time_squeeze, 0,
3057 0, 0, 0, 0, /* was fastroute */
3062 static const struct seq_operations dev_seq_ops = {
3063 .start = dev_seq_start,
3064 .next = dev_seq_next,
3065 .stop = dev_seq_stop,
3066 .show = dev_seq_show,
3069 static int dev_seq_open(struct inode *inode, struct file *file)
3071 return seq_open_net(inode, file, &dev_seq_ops,
3072 sizeof(struct seq_net_private));
3075 static const struct file_operations dev_seq_fops = {
3076 .owner = THIS_MODULE,
3077 .open = dev_seq_open,
3079 .llseek = seq_lseek,
3080 .release = seq_release_net,
3083 static const struct seq_operations softnet_seq_ops = {
3084 .start = softnet_seq_start,
3085 .next = softnet_seq_next,
3086 .stop = softnet_seq_stop,
3087 .show = softnet_seq_show,
3090 static int softnet_seq_open(struct inode *inode, struct file *file)
3092 return seq_open(file, &softnet_seq_ops);
3095 static const struct file_operations softnet_seq_fops = {
3096 .owner = THIS_MODULE,
3097 .open = softnet_seq_open,
3099 .llseek = seq_lseek,
3100 .release = seq_release,
3103 static void *ptype_get_idx(loff_t pos)
3105 struct packet_type *pt = NULL;
3109 list_for_each_entry_rcu(pt, &ptype_all, list) {
3115 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
3116 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3125 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
3129 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3132 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3134 struct packet_type *pt;
3135 struct list_head *nxt;
3139 if (v == SEQ_START_TOKEN)
3140 return ptype_get_idx(0);
3143 nxt = pt->list.next;
3144 if (pt->type == htons(ETH_P_ALL)) {
3145 if (nxt != &ptype_all)
3148 nxt = ptype_base[0].next;
3150 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
3152 while (nxt == &ptype_base[hash]) {
3153 if (++hash >= PTYPE_HASH_SIZE)
3155 nxt = ptype_base[hash].next;
3158 return list_entry(nxt, struct packet_type, list);
3161 static void ptype_seq_stop(struct seq_file *seq, void *v)
3167 static int ptype_seq_show(struct seq_file *seq, void *v)
3169 struct packet_type *pt = v;
3171 if (v == SEQ_START_TOKEN)
3172 seq_puts(seq, "Type Device Function\n");
3173 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
3174 if (pt->type == htons(ETH_P_ALL))
3175 seq_puts(seq, "ALL ");
3177 seq_printf(seq, "%04x", ntohs(pt->type));
3179 seq_printf(seq, " %-8s %pF\n",
3180 pt->dev ? pt->dev->name : "", pt->func);
3186 static const struct seq_operations ptype_seq_ops = {
3187 .start = ptype_seq_start,
3188 .next = ptype_seq_next,
3189 .stop = ptype_seq_stop,
3190 .show = ptype_seq_show,
3193 static int ptype_seq_open(struct inode *inode, struct file *file)
3195 return seq_open_net(inode, file, &ptype_seq_ops,
3196 sizeof(struct seq_net_private));
3199 static const struct file_operations ptype_seq_fops = {
3200 .owner = THIS_MODULE,
3201 .open = ptype_seq_open,
3203 .llseek = seq_lseek,
3204 .release = seq_release_net,
3208 static int __net_init dev_proc_net_init(struct net *net)
3212 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
3214 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
3216 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
3219 if (wext_proc_init(net))
3225 proc_net_remove(net, "ptype");
3227 proc_net_remove(net, "softnet_stat");
3229 proc_net_remove(net, "dev");
3233 static void __net_exit dev_proc_net_exit(struct net *net)
3235 wext_proc_exit(net);
3237 proc_net_remove(net, "ptype");
3238 proc_net_remove(net, "softnet_stat");
3239 proc_net_remove(net, "dev");
3242 static struct pernet_operations __net_initdata dev_proc_ops = {
3243 .init = dev_proc_net_init,
3244 .exit = dev_proc_net_exit,
3247 static int __init dev_proc_init(void)
3249 return register_pernet_subsys(&dev_proc_ops);
3252 #define dev_proc_init() 0
3253 #endif /* CONFIG_PROC_FS */
3257 * netdev_set_master - set up master/slave pair
3258 * @slave: slave device
3259 * @master: new master device
3261 * Changes the master device of the slave. Pass %NULL to break the
3262 * bonding. The caller must hold the RTNL semaphore. On a failure
3263 * a negative errno code is returned. On success the reference counts
3264 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3265 * function returns zero.
3267 int netdev_set_master(struct net_device *slave, struct net_device *master)
3269 struct net_device *old = slave->master;
3279 slave->master = master;
3287 slave->flags |= IFF_SLAVE;
3289 slave->flags &= ~IFF_SLAVE;
3291 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3295 static void dev_change_rx_flags(struct net_device *dev, int flags)
3297 const struct net_device_ops *ops = dev->netdev_ops;
3299 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3300 ops->ndo_change_rx_flags(dev, flags);
3303 static int __dev_set_promiscuity(struct net_device *dev, int inc)
3305 unsigned short old_flags = dev->flags;
3311 dev->flags |= IFF_PROMISC;
3312 dev->promiscuity += inc;
3313 if (dev->promiscuity == 0) {
3316 * If inc causes overflow, untouch promisc and return error.
3319 dev->flags &= ~IFF_PROMISC;
3321 dev->promiscuity -= inc;
3322 printk(KERN_WARNING "%s: promiscuity touches roof, "
3323 "set promiscuity failed, promiscuity feature "
3324 "of device might be broken.\n", dev->name);
3328 if (dev->flags != old_flags) {
3329 printk(KERN_INFO "device %s %s promiscuous mode\n",
3330 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3332 if (audit_enabled) {
3333 current_uid_gid(&uid, &gid);
3334 audit_log(current->audit_context, GFP_ATOMIC,
3335 AUDIT_ANOM_PROMISCUOUS,
3336 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3337 dev->name, (dev->flags & IFF_PROMISC),
3338 (old_flags & IFF_PROMISC),
3339 audit_get_loginuid(current),
3341 audit_get_sessionid(current));
3344 dev_change_rx_flags(dev, IFF_PROMISC);
3350 * dev_set_promiscuity - update promiscuity count on a device
3354 * Add or remove promiscuity from a device. While the count in the device
3355 * remains above zero the interface remains promiscuous. Once it hits zero
3356 * the device reverts back to normal filtering operation. A negative inc
3357 * value is used to drop promiscuity on the device.
3358 * Return 0 if successful or a negative errno code on error.
3360 int dev_set_promiscuity(struct net_device *dev, int inc)
3362 unsigned short old_flags = dev->flags;
3365 err = __dev_set_promiscuity(dev, inc);
3368 if (dev->flags != old_flags)
3369 dev_set_rx_mode(dev);
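/*
 * Illustrative sketch (not part of this file): a packet-capture style
 * user pairs its increment with a matching decrement so other users of
 * the counted flag are unaffected; such calls are typically made under
 * the rtnl lock.
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);	... on attach ...
 *	...
 *	dev_set_promiscuity(dev, -1);		... on detach ...
 *	rtnl_unlock();
 */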
3374 * dev_set_allmulti - update allmulti count on a device
3378 * Add or remove reception of all multicast frames to a device. While the
3379 * count in the device remains above zero the interface continues
3380 * receiving all multicast frames. Once it hits zero the device reverts back to normal
3381 * filtering operation. A negative @inc value is used to drop the counter
3382 * when releasing a resource needing all multicasts.
3383 * Return 0 if successful or a negative errno code on error.
3386 int dev_set_allmulti(struct net_device *dev, int inc)
3388 unsigned short old_flags = dev->flags;
3392 dev->flags |= IFF_ALLMULTI;
3393 dev->allmulti += inc;
3394 if (dev->allmulti == 0) {
3397 * If inc causes overflow, untouch allmulti and return error.
3400 dev->flags &= ~IFF_ALLMULTI;
3402 dev->allmulti -= inc;
3403 printk(KERN_WARNING "%s: allmulti touches roof, "
3404 "set allmulti failed, allmulti feature of "
3405 "device might be broken.\n", dev->name);
3409 if (dev->flags ^ old_flags) {
3410 dev_change_rx_flags(dev, IFF_ALLMULTI);
3411 dev_set_rx_mode(dev);
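/*
 * Illustrative sketch (not part of this file): stacked drivers mirror
 * their own allmulti state down to a lower device the same way, e.g.
 * roughly what bonding does when enslaving while IFF_ALLMULTI is set:
 *
 *	if (master->flags & IFF_ALLMULTI)
 *		dev_set_allmulti(slave_dev, 1);
 */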
3417 * Upload unicast and multicast address lists to device and
3418 * configure RX filtering. When the device doesn't support unicast
3419 * filtering it is put in promiscuous mode while unicast addresses
3422 void __dev_set_rx_mode(struct net_device *dev)
3424 const struct net_device_ops *ops = dev->netdev_ops;
3426 /* dev_open will call this function so the list will stay sane. */
3427 if (!(dev->flags&IFF_UP))
3430 if (!netif_device_present(dev))
3433 if (ops->ndo_set_rx_mode)
3434 ops->ndo_set_rx_mode(dev);
3436 /* Unicast addresses changes may only happen under the rtnl,
3437 * therefore calling __dev_set_promiscuity here is safe.
3439 if (dev->uc_count > 0 && !dev->uc_promisc) {
3440 __dev_set_promiscuity(dev, 1);
3441 dev->uc_promisc = 1;
3442 } else if (dev->uc_count == 0 && dev->uc_promisc) {
3443 __dev_set_promiscuity(dev, -1);
3444 dev->uc_promisc = 0;
3447 if (ops->ndo_set_multicast_list)
3448 ops->ndo_set_multicast_list(dev);
3452 void dev_set_rx_mode(struct net_device *dev)
3454 netif_addr_lock_bh(dev);
3455 __dev_set_rx_mode(dev);
3456 netif_addr_unlock_bh(dev);
3459 int __dev_addr_delete(struct dev_addr_list **list, int *count,
3460 void *addr, int alen, int glbl)
3462 struct dev_addr_list *da;
3464 for (; (da = *list) != NULL; list = &da->next) {
3465 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3466 alen == da->da_addrlen) {
3468 int old_glbl = da->da_gusers;
3485 int __dev_addr_add(struct dev_addr_list **list, int *count,
3486 void *addr, int alen, int glbl)
3488 struct dev_addr_list *da;
3490 for (da = *list; da != NULL; da = da->next) {
3491 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3492 da->da_addrlen == alen) {
3494 int old_glbl = da->da_gusers;
3504 da = kzalloc(sizeof(*da), GFP_ATOMIC);
3507 memcpy(da->da_addr, addr, alen);
3508 da->da_addrlen = alen;
3510 da->da_gusers = glbl ? 1 : 0;
3518 * dev_unicast_delete - Release secondary unicast address.
3520 * @addr: address to delete
3521 * @alen: length of @addr
3523 * Release reference to a secondary unicast address and remove it
3524 * from the device if the reference count drops to zero.
3526 * The caller must hold the rtnl_mutex.
3528 int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
3534 netif_addr_lock_bh(dev);
3535 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3537 __dev_set_rx_mode(dev);
3538 netif_addr_unlock_bh(dev);
3541 EXPORT_SYMBOL(dev_unicast_delete);
3544 * dev_unicast_add - add a secondary unicast address
3546 * @addr: address to add
3547 * @alen: length of @addr
3549 * Add a secondary unicast address to the device or increase
3550 * the reference count if it already exists.
3552 * The caller must hold the rtnl_mutex.
3554 int dev_unicast_add(struct net_device *dev, void *addr, int alen)
3560 netif_addr_lock_bh(dev);
3561 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3563 __dev_set_rx_mode(dev);
3564 netif_addr_unlock_bh(dev);
3567 EXPORT_SYMBOL(dev_unicast_add);
3569 int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3570 struct dev_addr_list **from, int *from_count)
3572 struct dev_addr_list *da, *next;
3576 while (da != NULL) {
3578 if (!da->da_synced) {
3579 err = __dev_addr_add(to, to_count,
3580 da->da_addr, da->da_addrlen, 0);
3585 } else if (da->da_users == 1) {
3586 __dev_addr_delete(to, to_count,
3587 da->da_addr, da->da_addrlen, 0);
3588 __dev_addr_delete(from, from_count,
3589 da->da_addr, da->da_addrlen, 0);
3596 void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3597 struct dev_addr_list **from, int *from_count)
3599 struct dev_addr_list *da, *next;
3602 while (da != NULL) {
3604 if (da->da_synced) {
3605 __dev_addr_delete(to, to_count,
3606 da->da_addr, da->da_addrlen, 0);
3608 __dev_addr_delete(from, from_count,
3609 da->da_addr, da->da_addrlen, 0);
3616 * dev_unicast_sync - Synchronize device's unicast list to another device
3617 * @to: destination device
3618 * @from: source device
3620 * Add newly added addresses to the destination device and release
3621 * addresses that have no users left. The source device must be
3622 * locked by netif_tx_lock_bh.
3624 * This function is intended to be called from the dev->set_rx_mode
3625 * function of layered software devices.
3627 int dev_unicast_sync(struct net_device *to, struct net_device *from)
3631 netif_addr_lock_bh(to);
3632 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3633 &from->uc_list, &from->uc_count);
3635 __dev_set_rx_mode(to);
3636 netif_addr_unlock_bh(to);
3639 EXPORT_SYMBOL(dev_unicast_sync);
3642 * dev_unicast_unsync - Remove synchronized addresses from the destination device
3643 * @to: destination device
3644 * @from: source device
3646 * Remove all addresses that were added to the destination device by
3647 * dev_unicast_sync(). This function is intended to be called from the
3648 * dev->stop function of layered software devices.
3650 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3652 netif_addr_lock_bh(from);
3653 netif_addr_lock(to);
3655 __dev_addr_unsync(&to->uc_list, &to->uc_count,
3656 &from->uc_list, &from->uc_count);
3657 __dev_set_rx_mode(to);
3659 netif_addr_unlock(to);
3660 netif_addr_unlock_bh(from);
3662 EXPORT_SYMBOL(dev_unicast_unsync);
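/*
 * Illustrative sketch (not part of this file): a layered device wires
 * the sync/unsync pair into its rx-mode and stop paths, roughly in the
 * style of macvlan-like drivers; struct mydrv_priv and its lowerdev
 * member are assumptions.
 *
 *	static void mydrv_set_rx_mode(struct net_device *dev)
 *	{
 *		struct mydrv_priv *priv = netdev_priv(dev);
 *
 *		dev_unicast_sync(priv->lowerdev, dev);
 *	}
 *
 *	static int mydrv_stop(struct net_device *dev)
 *	{
 *		struct mydrv_priv *priv = netdev_priv(dev);
 *
 *		dev_unicast_unsync(priv->lowerdev, dev);
 *		return 0;
 *	}
 */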
3664 static void __dev_addr_discard(struct dev_addr_list **list)
3666 struct dev_addr_list *tmp;
3668 while (*list != NULL) {
3671 if (tmp->da_users > tmp->da_gusers)
3672 printk("__dev_addr_discard: address leakage! "
3673 "da_users=%d\n", tmp->da_users);
3678 static void dev_addr_discard(struct net_device *dev)
3680 netif_addr_lock_bh(dev);
3682 __dev_addr_discard(&dev->uc_list);
3685 __dev_addr_discard(&dev->mc_list);
3688 netif_addr_unlock_bh(dev);
3692 * dev_get_flags - get flags reported to userspace
3695 * Get the combination of flag bits exported through APIs to userspace.
3697 unsigned dev_get_flags(const struct net_device *dev)
3701 flags = (dev->flags & ~(IFF_PROMISC |
3706 (dev->gflags & (IFF_PROMISC |
3709 if (netif_running(dev)) {
3710 if (netif_oper_up(dev))
3711 flags |= IFF_RUNNING;
3712 if (netif_carrier_ok(dev))
3713 flags |= IFF_LOWER_UP;
3714 if (netif_dormant(dev))
3715 flags |= IFF_DORMANT;
3722 * dev_change_flags - change device settings
3724 * @flags: device state flags
3726 * Change settings on device based state flags. The flags are
3727 * in the userspace exported format.
3729 int dev_change_flags(struct net_device *dev, unsigned flags)
3732 int old_flags = dev->flags;
3737 * Set the flags on our device.
3740 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
3741 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
3743 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
3747 * Load in the correct multicast list now the flags have changed.
3750 if ((old_flags ^ flags) & IFF_MULTICAST)
3751 dev_change_rx_flags(dev, IFF_MULTICAST);
3753 dev_set_rx_mode(dev);
3756 * Have we downed the interface? We handle IFF_UP ourselves
3757 * according to user attempts to set it, rather than blindly
3762 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
3763 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
3766 dev_set_rx_mode(dev);
3769 if (dev->flags & IFF_UP &&
3770 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
3772 call_netdevice_notifiers(NETDEV_CHANGE, dev);
3774 if ((flags ^ dev->gflags) & IFF_PROMISC) {
3775 int inc = (flags & IFF_PROMISC) ? +1 : -1;
3776 dev->gflags ^= IFF_PROMISC;
3777 dev_set_promiscuity(dev, inc);
3780 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
3781 is important. Some (broken) drivers set IFF_PROMISC when
3782 IFF_ALLMULTI is requested, without asking us and without reporting.
3784 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
3785 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
3786 dev->gflags ^= IFF_ALLMULTI;
3787 dev_set_allmulti(dev, inc);
3790 /* Exclude state transition flags, already notified */
3791 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
3793 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
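/*
 * Illustrative sketch (not part of this file): bringing an interface up
 * from kernel code mirrors what the SIOCSIFFLAGS ioctl does; the caller
 * must hold the rtnl lock.
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
 *	rtnl_unlock();
 */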
3799 * dev_set_mtu - Change maximum transfer unit
3801 * @new_mtu: new transfer unit
3803 * Change the maximum transfer size of the network device.
3805 int dev_set_mtu(struct net_device *dev, int new_mtu)
3807 const struct net_device_ops *ops = dev->netdev_ops;
3810 if (new_mtu == dev->mtu)
3813 /* MTU must be positive. */
3817 if (!netif_device_present(dev))
3821 if (ops->ndo_change_mtu)
3822 err = ops->ndo_change_mtu(dev, new_mtu);
3826 if (!err && dev->flags & IFF_UP)
3827 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
3832 * dev_set_mac_address - Change Media Access Control Address
3836 * Change the hardware (MAC) address of the device
3838 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3840 const struct net_device_ops *ops = dev->netdev_ops;
3843 if (!ops->ndo_set_mac_address)
3845 if (sa->sa_family != dev->type)
3847 if (!netif_device_present(dev))
3849 err = ops->ndo_set_mac_address(dev, sa);
3851 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3856 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
3858 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
3861 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3867 case SIOCGIFFLAGS: /* Get interface flags */
3868 ifr->ifr_flags = dev_get_flags(dev);
3871 case SIOCGIFMETRIC: /* Get the metric on the interface
3872 (currently unused) */
3873 ifr->ifr_metric = 0;
3876 case SIOCGIFMTU: /* Get the MTU of a device */
3877 ifr->ifr_mtu = dev->mtu;
3882 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
3884 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
3885 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3886 ifr->ifr_hwaddr.sa_family = dev->type;
3894 ifr->ifr_map.mem_start = dev->mem_start;
3895 ifr->ifr_map.mem_end = dev->mem_end;
3896 ifr->ifr_map.base_addr = dev->base_addr;
3897 ifr->ifr_map.irq = dev->irq;
3898 ifr->ifr_map.dma = dev->dma;
3899 ifr->ifr_map.port = dev->if_port;
3903 ifr->ifr_ifindex = dev->ifindex;
3907 ifr->ifr_qlen = dev->tx_queue_len;
3911 /* dev_ioctl() should ensure this case
3923 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
3925 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
3928 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3929 const struct net_device_ops *ops;
3934 ops = dev->netdev_ops;
3937 case SIOCSIFFLAGS: /* Set interface flags */
3938 return dev_change_flags(dev, ifr->ifr_flags);
3940 case SIOCSIFMETRIC: /* Set the metric on the interface
3941 (currently unused) */
3944 case SIOCSIFMTU: /* Set the MTU of a device */
3945 return dev_set_mtu(dev, ifr->ifr_mtu);
3948 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
3950 case SIOCSIFHWBROADCAST:
3951 if (ifr->ifr_hwaddr.sa_family != dev->type)
3953 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
3954 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3955 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3959 if (ops->ndo_set_config) {
3960 if (!netif_device_present(dev))
3962 return ops->ndo_set_config(dev, &ifr->ifr_map);
3967 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
3968 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3970 if (!netif_device_present(dev))
3972 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
3976 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
3977 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3979 if (!netif_device_present(dev))
3981 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
3985 if (ifr->ifr_qlen < 0)
3987 dev->tx_queue_len = ifr->ifr_qlen;
3991 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
3992 return dev_change_name(dev, ifr->ifr_newname);
3995 * Unknown or private ioctl
3999 if ((cmd >= SIOCDEVPRIVATE &&
4000 cmd <= SIOCDEVPRIVATE + 15) ||
4001 cmd == SIOCBONDENSLAVE ||
4002 cmd == SIOCBONDRELEASE ||
4003 cmd == SIOCBONDSETHWADDR ||
4004 cmd == SIOCBONDSLAVEINFOQUERY ||
4005 cmd == SIOCBONDINFOQUERY ||
4006 cmd == SIOCBONDCHANGEACTIVE ||
4007 cmd == SIOCGMIIPHY ||
4008 cmd == SIOCGMIIREG ||
4009 cmd == SIOCSMIIREG ||
4010 cmd == SIOCBRADDIF ||
4011 cmd == SIOCBRDELIF ||
4012 cmd == SIOCWANDEV) {
4014 if (ops->ndo_do_ioctl) {
4015 if (netif_device_present(dev))
4016 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4028 * This function handles all "interface"-type I/O control requests. The actual
4029 * 'doing' part of this is dev_ifsioc above.
4033 * dev_ioctl - network device ioctl
4034 * @net: the applicable net namespace
4035 * @cmd: command to issue
4036 * @arg: pointer to a struct ifreq in user space
4038 * Issue ioctl functions to devices. This is normally called by the
4039 * user space syscall interfaces but can sometimes be useful for
4040 * other purposes. The return value is the return from the syscall if
4041 * positive or a negative errno code on error.
4044 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4050 /* One special case: SIOCGIFCONF takes ifconf argument
4051 and requires shared lock, because it sleeps writing
4055 if (cmd == SIOCGIFCONF) {
4057 ret = dev_ifconf(net, (char __user *) arg);
4061 if (cmd == SIOCGIFNAME)
4062 return dev_ifname(net, (struct ifreq __user *)arg);
4064 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4067 ifr.ifr_name[IFNAMSIZ-1] = 0;
4069 colon = strchr(ifr.ifr_name, ':');
4074 * See which interface the caller is talking about.
4079 * These ioctl calls:
4080 * - can be done by all.
4081 * - atomic and do not require locking.
4092 dev_load(net, ifr.ifr_name);
4093 read_lock(&dev_base_lock);
4094 ret = dev_ifsioc_locked(net, &ifr, cmd);
4095 read_unlock(&dev_base_lock);
4099 if (copy_to_user(arg, &ifr,
4100 sizeof(struct ifreq)))
4106 dev_load(net, ifr.ifr_name);
4108 ret = dev_ethtool(net, &ifr);
4113 if (copy_to_user(arg, &ifr,
4114 sizeof(struct ifreq)))
4120 * These ioctl calls:
4121 * - require superuser power.
4122 * - require strict serialization.
4128 if (!capable(CAP_NET_ADMIN))
4130 dev_load(net, ifr.ifr_name);
4132 ret = dev_ifsioc(net, &ifr, cmd);
4137 if (copy_to_user(arg, &ifr,
4138 sizeof(struct ifreq)))
4144 * These ioctl calls:
4145 * - require superuser power.
4146 * - require strict serialization.
4147 * - do not return a value
4157 case SIOCSIFHWBROADCAST:
4160 case SIOCBONDENSLAVE:
4161 case SIOCBONDRELEASE:
4162 case SIOCBONDSETHWADDR:
4163 case SIOCBONDCHANGEACTIVE:
4166 if (!capable(CAP_NET_ADMIN))
4169 case SIOCBONDSLAVEINFOQUERY:
4170 case SIOCBONDINFOQUERY:
4171 dev_load(net, ifr.ifr_name);
4173 ret = dev_ifsioc(net, &ifr, cmd);
4178 /* Get the per device memory space. We can add this but
4179 * currently do not support it */
4181 /* Set the per device memory buffer space.
4182 * Not applicable in our case */
4187 * Unknown or private ioctl.
4190 if (cmd == SIOCWANDEV ||
4191 (cmd >= SIOCDEVPRIVATE &&
4192 cmd <= SIOCDEVPRIVATE + 15)) {
4193 dev_load(net, ifr.ifr_name);
4195 ret = dev_ifsioc(net, &ifr, cmd);
4197 if (!ret && copy_to_user(arg, &ifr,
4198 sizeof(struct ifreq)))
4202 /* Take care of Wireless Extensions */
4203 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4204 return wext_handle_ioctl(net, &ifr, cmd, arg);
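/*
 * Illustrative sketch (not part of this file): the matching userspace
 * side of the simple get ioctls handled above, here SIOCGIFMTU on a
 * hypothetical interface name:
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *		printf("mtu %d\n", ifr.ifr_mtu);
 */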
4211 * dev_new_index - allocate an ifindex
4212 * @net: the applicable net namespace
4214 * Returns a suitable unique value for a new device interface
4215 * number. The caller must hold the rtnl semaphore or the
4216 * dev_base_lock to be sure it remains unique.
4218 static int dev_new_index(struct net *net)
4224 if (!__dev_get_by_index(net, ifindex))
4229 /* Delayed registration/unregisteration */
4230 static LIST_HEAD(net_todo_list);
4232 static void net_set_todo(struct net_device *dev)
4234 list_add_tail(&dev->todo_list, &net_todo_list);
4237 static void rollback_registered(struct net_device *dev)
4239 BUG_ON(dev_boot_phase);
4242 /* Some devices call without registering for initialization unwind. */
4243 if (dev->reg_state == NETREG_UNINITIALIZED) {
4244 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
4245 "was registered\n", dev->name, dev);
4251 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4253 /* If device is running, close it first. */
4256 /* And unlink it from device chain. */
4257 unlist_netdevice(dev);
4259 dev->reg_state = NETREG_UNREGISTERING;
4263 /* Shutdown queueing discipline. */
4267 /* Notify protocols, that we are about to destroy
4268 this device. They should clean all the things.
4270 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4273 * Flush the unicast and multicast chains
4275 dev_addr_discard(dev);
4277 if (dev->netdev_ops->ndo_uninit)
4278 dev->netdev_ops->ndo_uninit(dev);
4280 /* Notifier chain MUST detach us from master device. */
4281 WARN_ON(dev->master);
4283 /* Remove entries from kobject tree */
4284 netdev_unregister_kobject(dev);
4291 static void __netdev_init_queue_locks_one(struct net_device *dev,
4292 struct netdev_queue *dev_queue,
4295 spin_lock_init(&dev_queue->_xmit_lock);
4296 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
4297 dev_queue->xmit_lock_owner = -1;
4300 static void netdev_init_queue_locks(struct net_device *dev)
4302 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4303 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
4306 unsigned long netdev_fix_features(unsigned long features, const char *name)
4308 /* Fix illegal SG+CSUM combinations. */
4309 if ((features & NETIF_F_SG) &&
4310 !(features & NETIF_F_ALL_CSUM)) {
4312 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4313 "checksum feature.\n", name);
4314 features &= ~NETIF_F_SG;
4317 /* TSO requires that SG is present as well. */
4318 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4320 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4321 "SG feature.\n", name);
4322 features &= ~NETIF_F_TSO;
4325 if (features & NETIF_F_UFO) {
4326 if (!(features & NETIF_F_GEN_CSUM)) {
4328 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4329 "since no NETIF_F_HW_CSUM feature.\n",
4331 features &= ~NETIF_F_UFO;
4334 if (!(features & NETIF_F_SG)) {
4336 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4337 "since no NETIF_F_SG feature.\n", name);
4338 features &= ~NETIF_F_UFO;
4344 EXPORT_SYMBOL(netdev_fix_features);
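/*
 * Illustrative sketch (not part of this file): a driver (or ethtool
 * code) can run a requested feature set through the fixups before
 * committing it, so inconsistent combinations are downgraded rather
 * than accepted:
 *
 *	unsigned long wanted = dev->features | NETIF_F_TSO;
 *
 *	dev->features = netdev_fix_features(wanted, dev->name);
 */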
4347 * register_netdevice - register a network device
4348 * @dev: device to register
4350 * Take a completed network device structure and add it to the kernel
4351 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4352 * chain. 0 is returned on success. A negative errno code is returned
4353 * on a failure to set up the device, or if the name is a duplicate.
4355 * Callers must hold the rtnl semaphore. You may want
4356 * register_netdev() instead of this.
4359 * The locking appears insufficient to guarantee two parallel registers
4360 * will not get the same name.
4363 int register_netdevice(struct net_device *dev)
4365 struct hlist_head *head;
4366 struct hlist_node *p;
4368 struct net *net = dev_net(dev);
4370 BUG_ON(dev_boot_phase);
4375 /* When net_device's are persistent, this will be fatal. */
4376 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
4379 spin_lock_init(&dev->addr_list_lock);
4380 netdev_set_addr_lockdep_class(dev);
4381 netdev_init_queue_locks(dev);
4385 #ifdef CONFIG_COMPAT_NET_DEV_OPS
4386 /* Netdevice_ops API compatibility support.
4387 * This is temporary until all network devices are converted.
4389 if (dev->netdev_ops) {
4390 const struct net_device_ops *ops = dev->netdev_ops;
4392 dev->init = ops->ndo_init;
4393 dev->uninit = ops->ndo_uninit;
4394 dev->open = ops->ndo_open;
4395 dev->change_rx_flags = ops->ndo_change_rx_flags;
4396 dev->set_rx_mode = ops->ndo_set_rx_mode;
4397 dev->set_multicast_list = ops->ndo_set_multicast_list;
4398 dev->set_mac_address = ops->ndo_set_mac_address;
4399 dev->validate_addr = ops->ndo_validate_addr;
4400 dev->do_ioctl = ops->ndo_do_ioctl;
4401 dev->set_config = ops->ndo_set_config;
4402 dev->change_mtu = ops->ndo_change_mtu;
4403 dev->tx_timeout = ops->ndo_tx_timeout;
4404 dev->get_stats = ops->ndo_get_stats;
4405 dev->vlan_rx_register = ops->ndo_vlan_rx_register;
4406 dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
4407 dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
4408 #ifdef CONFIG_NET_POLL_CONTROLLER
4409 dev->poll_controller = ops->ndo_poll_controller;
4412 char drivername[64];
4413 pr_info("%s (%s): not using net_device_ops yet\n",
4414 dev->name, netdev_drivername(dev, drivername, 64));
4416 /* This works only because net_device_ops and the
4417 compatibility structure are the same. */
4418 dev->netdev_ops = (void *) &(dev->init);
4422 /* Init, if this function is available */
4423 if (dev->netdev_ops->ndo_init) {
4424 ret = dev->netdev_ops->ndo_init(dev);
4432 if (!dev_valid_name(dev->name)) {
4437 dev->ifindex = dev_new_index(net);
4438 if (dev->iflink == -1)
4439 dev->iflink = dev->ifindex;
4441 /* Check for existence of name */
4442 head = dev_name_hash(net, dev->name);
4443 hlist_for_each(p, head) {
4444 struct net_device *d
4445 = hlist_entry(p, struct net_device, name_hlist);
4446 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4452 /* Fix illegal checksum combinations */
4453 if ((dev->features & NETIF_F_HW_CSUM) &&
4454 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4455 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4457 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4460 if ((dev->features & NETIF_F_NO_CSUM) &&
4461 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4462 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4464 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4467 dev->features = netdev_fix_features(dev->features, dev->name);
4469 /* Enable software GSO if SG is supported. */
4470 if (dev->features & NETIF_F_SG)
4471 dev->features |= NETIF_F_GSO;
4473 netdev_initialize_kobject(dev);
4474 ret = netdev_register_kobject(dev);
4477 dev->reg_state = NETREG_REGISTERED;
4480 * Default initial state at registry is that the
4481 * device is present.
4484 set_bit(__LINK_STATE_PRESENT, &dev->state);
4486 dev_init_scheduler(dev);
4488 list_netdevice(dev);
4490 /* Notify protocols, that a new device appeared. */
4491 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
4492 ret = notifier_to_errno(ret);
4494 rollback_registered(dev);
4495 dev->reg_state = NETREG_UNREGISTERED;
4502 if (dev->netdev_ops->ndo_uninit)
4503 dev->netdev_ops->ndo_uninit(dev);
4508 * init_dummy_netdev - init a dummy network device for NAPI
4509 * @dev: device to init
4511 * This takes a network device structure and initializes the minimal
4512 * set of fields so it can be used to schedule NAPI polls without
4513 * registering a full-blown interface. This is to be used by drivers
4514 * that need to tie several hardware interfaces to a single NAPI
4515 * poll scheduler due to HW limitations.
4517 int init_dummy_netdev(struct net_device *dev)
4519 /* Clear everything. Note we don't initialize spinlocks
4520 * as they aren't supposed to be taken by any of the
4521 * NAPI code and this dummy netdev is supposed to be
4522 * only ever used for NAPI polls
4524 memset(dev, 0, sizeof(struct net_device));
4526 /* make sure we BUG if trying to hit standard
4527 * register/unregister code path
4529 dev->reg_state = NETREG_DUMMY;
4531 /* initialize the ref count */
4532 atomic_set(&dev->refcnt, 1);
4534 /* NAPI wants this */
4535 INIT_LIST_HEAD(&dev->napi_list);
4537 /* a dummy interface is started by default */
4538 set_bit(__LINK_STATE_PRESENT, &dev->state);
4539 set_bit(__LINK_STATE_START, &dev->state);
4543 EXPORT_SYMBOL_GPL(init_dummy_netdev);
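/*
 * Illustrative sketch (not part of this file): a driver multiplexing
 * several hardware units onto one poller embeds a dummy netdev purely
 * as a NAPI anchor; it is never registered or freed through the normal
 * netdev paths. struct mydrv_priv and mydrv_poll are assumptions.
 *
 *	init_dummy_netdev(&priv->dummy_dev);
 *	netif_napi_add(&priv->dummy_dev, &priv->napi, mydrv_poll, 64);
 */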
4547 * register_netdev - register a network device
4548 * @dev: device to register
4550 * Take a completed network device structure and add it to the kernel
4551 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4552 * chain. 0 is returned on success. A negative errno code is returned
4553 * on a failure to set up the device, or if the name is a duplicate.
4555 * This is a wrapper around register_netdevice that takes the rtnl semaphore
4556 * and expands the device name if you passed a format string to
4559 int register_netdev(struct net_device *dev)
4566 * If the name is a format string the caller wants us to do a
4569 if (strchr(dev->name, '%')) {
4570 err = dev_alloc_name(dev, dev->name);
4575 err = register_netdevice(dev);
4580 EXPORT_SYMBOL(register_netdev);
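/*
 * Illustrative sketch (not part of this file): the usual probe-time
 * sequence for an Ethernet driver, with the matching teardown in the
 * remove path; struct mydrv_priv is an assumption.
 *
 *	dev = alloc_etherdev(sizeof(struct mydrv_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	...
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *
 *	... and on removal ...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */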
4583 * netdev_wait_allrefs - wait until all references are gone.
4585 * This is called when unregistering network devices.
4587 * Any protocol or device that holds a reference should register
4588 * for netdevice notification, and cleanup and put back the
4589 * reference if they receive an UNREGISTER event.
4590 * We can get stuck here if buggy protocols don't correctly
4593 static void netdev_wait_allrefs(struct net_device *dev)
4595 unsigned long rebroadcast_time, warning_time;
4597 rebroadcast_time = warning_time = jiffies;
4598 while (atomic_read(&dev->refcnt) != 0) {
4599 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
4602 /* Rebroadcast unregister notification */
4603 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4605 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4607 /* We must not have linkwatch events
4608 * pending on unregister. If this
4609 * happens, we simply run the queue
4610 * unscheduled, resulting in a noop
4613 linkwatch_run_queue();
4618 rebroadcast_time = jiffies;
4623 if (time_after(jiffies, warning_time + 10 * HZ)) {
4624 printk(KERN_EMERG "unregister_netdevice: "
4625 "waiting for %s to become free. Usage "
4627 dev->name, atomic_read(&dev->refcnt));
4628 warning_time = jiffies;
4637 * register_netdevice(x1);
4638 * register_netdevice(x2);
4640 * unregister_netdevice(y1);
4641 * unregister_netdevice(y2);
4647 * We are invoked by rtnl_unlock().
4648 * This allows us to deal with problems:
4649 * 1) We can delete sysfs objects which invoke hotplug
4650 * without deadlocking with linkwatch via keventd.
4651 * 2) Since we run with the RTNL semaphore not held, we can sleep
4652 * safely in order to wait for the netdev refcnt to drop to zero.
4654 * We must not return until all unregister events added during
4655 * the interval the lock was held have been completed.
4657 void netdev_run_todo(void)
4659 struct list_head list;
4661 /* Snapshot list, allow later requests */
4662 list_replace_init(&net_todo_list, &list);
4666 while (!list_empty(&list)) {
4667 struct net_device *dev
4668 = list_entry(list.next, struct net_device, todo_list);
4669 list_del(&dev->todo_list);
4671 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
4672 printk(KERN_ERR "network todo '%s' but state %d\n",
4673 dev->name, dev->reg_state);
4678 dev->reg_state = NETREG_UNREGISTERED;
4680 on_each_cpu(flush_backlog, dev, 1);
4682 netdev_wait_allrefs(dev);
4685 BUG_ON(atomic_read(&dev->refcnt));
4686 WARN_ON(dev->ip_ptr);
4687 WARN_ON(dev->ip6_ptr);
4688 WARN_ON(dev->dn_ptr);
4690 if (dev->destructor)
4691 dev->destructor(dev);
4693 /* Free network device */
4694 kobject_put(&dev->dev.kobj);
4699 * dev_get_stats - get network device statistics
4700 * @dev: device to get statistics from
4702 * Get network statistics from device. The device driver may provide
4703 * its own method by setting dev->netdev_ops->get_stats; otherwise
4704 * the internal statistics structure is used.
4706 const struct net_device_stats *dev_get_stats(struct net_device *dev)
4708 const struct net_device_ops *ops = dev->netdev_ops;
4710 if (ops->ndo_get_stats)
4711 return ops->ndo_get_stats(dev);
4715 EXPORT_SYMBOL(dev_get_stats);
4717 static void netdev_init_one_queue(struct net_device *dev,
4718 struct netdev_queue *queue,
4724 static void netdev_init_queues(struct net_device *dev)
4726 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
4727 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
4728 spin_lock_init(&dev->tx_global_lock);
4732 * alloc_netdev_mq - allocate network device
4733 * @sizeof_priv: size of private data to allocate space for
4734 * @name: device name format string
4735 * @setup: callback to initialize device
4736 * @queue_count: the number of subqueues to allocate
4738 * Allocates a struct net_device with private data area for driver use
4739 * and performs basic initialization. Also allocates subqueue structs
4740 * for each queue on the device at the end of the netdevice.
4742 struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4743 void (*setup)(struct net_device *), unsigned int queue_count)
4745 struct netdev_queue *tx;
4746 struct net_device *dev;
4750 BUG_ON(strlen(name) >= sizeof(dev->name));
4752 alloc_size = sizeof(struct net_device);
4754 /* ensure 32-byte alignment of private area */
4755 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
4756 alloc_size += sizeof_priv;
4758 /* ensure 32-byte alignment of whole construct */
4759 alloc_size += NETDEV_ALIGN_CONST;
4761 p = kzalloc(alloc_size, GFP_KERNEL);
4763 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
4767 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
4769 printk(KERN_ERR "alloc_netdev: Unable to allocate "
4775 dev = (struct net_device *)
4776 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4777 dev->padded = (char *)dev - (char *)p;
4778 dev_net_set(dev, &init_net);
4781 dev->num_tx_queues = queue_count;
4782 dev->real_num_tx_queues = queue_count;
4784 dev->gso_max_size = GSO_MAX_SIZE;
4786 netdev_init_queues(dev);
4788 INIT_LIST_HEAD(&dev->napi_list);
4790 strcpy(dev->name, name);
4793 EXPORT_SYMBOL(alloc_netdev_mq);
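/*
 * Illustrative sketch (not part of this file): a multiqueue-capable
 * driver passes its transmit queue count here instead of using the
 * single-queue alloc_netdev() wrapper; "myeth%d" lets registration
 * pick the unit number. mydrv_setup is an assumed setup callback.
 *
 *	dev = alloc_netdev_mq(sizeof(struct mydrv_priv), "myeth%d",
 *			      mydrv_setup, 4);
 */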
4796 * free_netdev - free network device
4799 * This function does the last stage of destroying an allocated device
4800 * interface. The reference to the device object is released.
4801 * If this is the last reference then it will be freed.
4803 void free_netdev(struct net_device *dev)
4805 struct napi_struct *p, *n;
4807 release_net(dev_net(dev));
4811 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
4814 /* Compatibility with error handling in drivers */
4815 if (dev->reg_state == NETREG_UNINITIALIZED) {
4816 kfree((char *)dev - dev->padded);
4820 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
4821 dev->reg_state = NETREG_RELEASED;
4823 /* will free via device release */
4824 put_device(&dev->dev);
4828 * synchronize_net - Synchronize with packet receive processing
4830 * Wait for packets currently being received to be done.
4831 * Does not block later packets from starting.
4833 void synchronize_net(void)
4840 * unregister_netdevice - remove device from the kernel
4843 * This function shuts down a device interface and removes it
4844 * from the kernel tables.
4846 * Callers must hold the rtnl semaphore. You may want
4847 * unregister_netdev() instead of this.
4850 void unregister_netdevice(struct net_device *dev)
4854 rollback_registered(dev);
4855 /* Finish processing unregister after unlock */
4860 * unregister_netdev - remove device from the kernel
4863 * This function shuts down a device interface and removes it
4864 * from the kernel tables.
4866 * This is just a wrapper for unregister_netdevice that takes
4867 * the rtnl semaphore. In general you want to use this and not
4868 * unregister_netdevice.
4870 void unregister_netdev(struct net_device *dev)
4873 unregister_netdevice(dev);
4877 EXPORT_SYMBOL(unregister_netdev);
4880 * dev_change_net_namespace - move device to a different network namespace
4882 * @net: network namespace
4883 * @pat: If not NULL name pattern to try if the current device name
4884 * is already taken in the destination network namespace.
4886 * This function shuts down a device interface and moves it
4887 * to a new network namespace. On success 0 is returned, on
4888 * a failure a negative errno code is returned.
4890 * Callers must hold the rtnl semaphore.
4893 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
4896 const char *destname;
4901 /* Don't allow namespace local devices to be moved. */
4903 if (dev->features & NETIF_F_NETNS_LOCAL)
4907 /* Don't allow real devices to be moved when sysfs
4911 if (dev->dev.parent)
4915 /* Ensure the device has been registered */
4917 if (dev->reg_state != NETREG_REGISTERED)
4920 /* Get out if there is nothing to do */
4922 if (net_eq(dev_net(dev), net))
4925 /* Pick the destination device name, and ensure
4926 * we can use it in the destination network namespace.
4929 destname = dev->name;
4930 if (__dev_get_by_name(net, destname)) {
4931 /* We get here if we can't use the current device name */
4934 if (!dev_valid_name(pat))
4936 if (strchr(pat, '%')) {
4937 if (__dev_alloc_name(net, pat, buf) < 0)
4942 if (__dev_get_by_name(net, destname))
4947 * And now a mini version of register_netdevice and unregister_netdevice.
4950 /* If device is running close it first. */
4953 /* And unlink it from device chain */
4955 unlist_netdevice(dev);
4959 /* Shutdown queueing discipline. */
4962 /* Notify protocols, that we are about to destroy
4963 this device. They should clean all the things.
4965 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4968 * Flush the unicast and multicast chains
4970 dev_addr_discard(dev);
4972 netdev_unregister_kobject(dev);
4974 /* Actually switch the network namespace */
4975 dev_net_set(dev, net);
4977 /* Assign the new device name */
4978 if (destname != dev->name)
4979 strcpy(dev->name, destname);
4981 /* If there is an ifindex conflict assign a new one */
4982 if (__dev_get_by_index(net, dev->ifindex)) {
4983 int iflink = (dev->iflink == dev->ifindex);
4984 dev->ifindex = dev_new_index(net);
4986 dev->iflink = dev->ifindex;
4989 /* Fixup kobjects */
4990 err = netdev_register_kobject(dev);
4993 /* Add the device back in the hashes */
4994 list_netdevice(dev);
4996 /* Notify protocols, that a new device appeared. */
4997 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5005 static int dev_cpu_callback(struct notifier_block *nfb,
5006 unsigned long action,
5009 struct sk_buff **list_skb;
5010 struct Qdisc **list_net;
5011 struct sk_buff *skb;
5012 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5013 struct softnet_data *sd, *oldsd;
5015 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
5018 local_irq_disable();
5019 cpu = smp_processor_id();
5020 sd = &per_cpu(softnet_data, cpu);
5021 oldsd = &per_cpu(softnet_data, oldcpu);
5023 /* Find end of our completion_queue. */
5024 list_skb = &sd->completion_queue;
5026 list_skb = &(*list_skb)->next;
5027 /* Append completion queue from offline CPU. */
5028 *list_skb = oldsd->completion_queue;
5029 oldsd->completion_queue = NULL;
5031 /* Find end of our output_queue. */
5032 list_net = &sd->output_queue;
5034 list_net = &(*list_net)->next_sched;
5035 /* Append output queue from offline CPU. */
5036 *list_net = oldsd->output_queue;
5037 oldsd->output_queue = NULL;
5039 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5042 /* Process offline CPU's input_pkt_queue */
5043 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
5051 * netdev_increment_features - increment feature set by one
5052 * @all: current feature set
5053 * @one: new feature set
5054 * @mask: mask feature set
5056 * Computes a new feature set after adding a device with feature set
5057 * @one to the master device with current feature set @all. Will not
5058 * enable anything that is off in @mask. Returns the new feature set.
5060 unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5063 /* If device needs checksumming, downgrade to it. */
5064 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
5065 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5066 else if (mask & NETIF_F_ALL_CSUM) {
5067 /* If one device supports v4/v6 checksumming, set for all. */
5068 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5069 !(all & NETIF_F_GEN_CSUM)) {
5070 all &= ~NETIF_F_ALL_CSUM;
5071 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5074 /* If one device supports hw checksumming, set for all. */
5075 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5076 all &= ~NETIF_F_ALL_CSUM;
5077 all |= NETIF_F_HW_CSUM;
5081 one |= NETIF_F_ALL_CSUM;
5083 one |= all & NETIF_F_ONE_FOR_ALL;
5084 all &= one | NETIF_F_LLTX | NETIF_F_GSO;
5085 all |= one & mask & NETIF_F_ONE_FOR_ALL;
5089 EXPORT_SYMBOL(netdev_increment_features);
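/*
 * Illustrative sketch (not part of this file): a master device folds in
 * each slave's feature set, which is roughly how bonding recomputes its
 * features when slaves come and go; the slave list iteration details
 * are assumptions.
 *
 *	features = mask = master->features;
 *	list_for_each_entry(slave, &slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     mask);
 *	master->features = netdev_fix_features(features, master->name);
 */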
5091 static struct hlist_head *netdev_create_hash(void)
5094 struct hlist_head *hash;
5096 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5098 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5099 INIT_HLIST_HEAD(&hash[i]);
5104 /* Initialize per network namespace state */
5105 static int __net_init netdev_init(struct net *net)
5107 INIT_LIST_HEAD(&net->dev_base_head);
5109 net->dev_name_head = netdev_create_hash();
5110 if (net->dev_name_head == NULL)
5113 net->dev_index_head = netdev_create_hash();
5114 if (net->dev_index_head == NULL)
5120 kfree(net->dev_name_head);
/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *	@buffer: buffer for resulting name
 *	@len: size of buffer
 *
 *	Determine the network driver for the device and copy its name
 *	into @buffer. Returns @buffer, which is left empty if the device
 *	has no parent or no bound driver.
 */
char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
{
	const struct device_driver *driver;
	const struct device *parent;

	if (len <= 0 || !buffer)
		return buffer;
	buffer[0] = 0;

	parent = dev->dev.parent;
	if (!parent)
		return buffer;

	driver = parent->driver;
	if (driver && driver->name)
		strlcpy(buffer, driver->name, len);
	return buffer;
}
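/*
 * Typical caller sketch: the transmit watchdog in net/sched/sch_generic.c
 * reports a hung queue using a small on-stack buffer, along the lines of:
 *
 *	char drivername[64];
 *
 *	printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
 *	       dev->name, netdev_drivername(dev, drivername, 64));
 */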
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}
static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
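/*
 * register_pernet_subsys(&netdev_net_ops) in net_dev_init() below runs
 * netdev_init() for the initial namespace right away and again for each
 * namespace created later; netdev_exit() runs as a namespace is torn down.
 */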
static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace.
	 */
	rtnl_lock();
restart:
	for_each_netdev(net, dev) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Delete virtual devices */
		if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
			dev->rtnl_link_ops->dellink(dev);
			goto restart;
		}

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
			       __func__, dev->name, err);
			BUG();
		}
		goto restart;
	}
	rtnl_unlock();
}
static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
};
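/*
 * Net effect on namespace teardown: virtual devices that provide a
 * ->dellink() (e.g. veth) are destroyed outright, NETIF_F_NETNS_LOCAL
 * devices such as loopback stay behind to be freed with the namespace,
 * and everything else (typically physical NICs) is renamed "dev<ifindex>"
 * and pushed back to init_net so the hardware is never stranded.
 */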
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */
	for_each_possible_cpu(i) {
		struct softnet_data *queue;

		queue = &per_cpu(softnet_data, i);
		skb_queue_head_init(&queue->input_pkt_queue);
		queue->completion_queue = NULL;
		INIT_LIST_HEAD(&queue->poll_list);

		queue->backlog.poll = process_backlog;
		queue->backlog.weight = weight_p;
		queue->backlog.gro_list = NULL;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, maintain this invariant by keeping the
	 * loopback device first on the list of network devices, so it
	 * is the first device that appears and the last network device
	 * that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);
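/*
 * subsys_initcall() places net_dev_init() before device- and late-level
 * initcalls, so the softirq handlers, per-cpu queues and pernet
 * infrastructure above exist before any network driver can register.
 */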
EXPORT_SYMBOL(__dev_get_by_index);
EXPORT_SYMBOL(__dev_get_by_name);
EXPORT_SYMBOL(__dev_remove_pack);
EXPORT_SYMBOL(dev_valid_name);
EXPORT_SYMBOL(dev_add_pack);
EXPORT_SYMBOL(dev_alloc_name);
EXPORT_SYMBOL(dev_close);
EXPORT_SYMBOL(dev_get_by_flags);
EXPORT_SYMBOL(dev_get_by_index);
EXPORT_SYMBOL(dev_get_by_name);
EXPORT_SYMBOL(dev_open);
EXPORT_SYMBOL(dev_queue_xmit);
EXPORT_SYMBOL(dev_remove_pack);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
EXPORT_SYMBOL(dev_change_flags);
EXPORT_SYMBOL(dev_set_mtu);
EXPORT_SYMBOL(dev_set_mac_address);
EXPORT_SYMBOL(free_netdev);
EXPORT_SYMBOL(netdev_boot_setup_check);
EXPORT_SYMBOL(netdev_set_master);
EXPORT_SYMBOL(netdev_state_change);
EXPORT_SYMBOL(netif_receive_skb);
EXPORT_SYMBOL(netif_rx);
EXPORT_SYMBOL(register_gifconf);
EXPORT_SYMBOL(register_netdevice);
EXPORT_SYMBOL(register_netdevice_notifier);
EXPORT_SYMBOL(skb_checksum_help);
EXPORT_SYMBOL(synchronize_net);
EXPORT_SYMBOL(unregister_netdevice);
EXPORT_SYMBOL(unregister_netdevice_notifier);
EXPORT_SYMBOL(net_enable_timestamp);
EXPORT_SYMBOL(net_disable_timestamp);
EXPORT_SYMBOL(dev_get_flags);
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
EXPORT_SYMBOL(br_handle_frame_hook);
EXPORT_SYMBOL(br_fdb_get_hook);
EXPORT_SYMBOL(br_fdb_put_hook);
#endif

EXPORT_SYMBOL(dev_load);

EXPORT_PER_CPU_SYMBOL(softnet_data);