/*
 *      NET3    Protocol independent device support routines.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *      Derived from the non IP parts of dev.c 1.0.19
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *      Additional Authors:
 *              Florian la Roche <rzsfl@rz.uni-sb.de>
 *              Alan Cox <gw4pts@gw4pts.ampr.org>
 *              David Hinds <dahinds@users.sourceforge.net>
 *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *              Adam Sulmicki <adam@cfar.umd.edu>
 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *      Changes:
 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
 *                                      to 2 if register_netdev gets called
 *                                      before net_dev_init & also removed a
 *                                      few lines of code in the process.
 *              Alan Cox        :       device private ioctl copies fields back.
 *              Alan Cox        :       Transmit queue code does relevant
 *                                      stunts to keep the queue safe.
 *              Alan Cox        :       Fixed double lock.
 *              Alan Cox        :       Fixed promisc NULL pointer trap
 *              ????????        :       Support the full private ioctl range
 *              Alan Cox        :       Moved ioctl permission check into
 *                                      drivers
 *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
 *              Alan Cox        :       100 backlog just doesn't cut it when
 *                                      you start doing multicast video 8)
 *              Alan Cox        :       Rewrote net_bh and list manager.
 *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
 *              Alan Cox        :       Took out transmit every packet pass
 *                                      Saved a few bytes in the ioctl handler
 *              Alan Cox        :       Network driver sets packet type before
 *                                      calling netif_rx. Saves a function
 *                                      call a packet.
 *              Alan Cox        :       Hashed net_bh()
 *              Richard Kooijman:       Timestamp fixes.
 *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
 *              Alan Cox        :       Device lock protection.
 *              Alan Cox        :       Fixed nasty side effect of device close
 *                                      changes.
 *              Rudi Cilibrasi  :       Pass the right thing to
 *                                      set_mac_address()
 *              Dave Miller     :       32bit quantity for the device lock to
 *                                      make it work out on a Sparc.
 *              Bjorn Ekwall    :       Added KERNELD hack.
 *              Alan Cox        :       Cleaned up the backlog initialise.
 *              Craig Metz      :       SIOCGIFCONF fix if space for under
 *                                      1 device.
 *          Thomas Bogendoerfer :       Return ENODEV for dev_open, if there
 *                                      is no device open function.
 *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
 *          Michael Chastain    :       Fix signed/unsigned for SIOCGIFCONF
 *              Cyrus Durgin    :       Cleaned for KMOD
 *              Adam Sulmicki   :       Bug Fix : Network Device Unload
 *                                      A network device unload needs to purge
 *                                      the backlog queue.
 *      Paul Rusty Russell      :       SIOCSIFNAME
 *              Pekka Riikonen  :       Netdev boot-time settings code
 *              Andrew Morton   :       Make unregister_netdevice wait
 *                                      indefinitely on dev->refcnt
 *              J Hadi Salim    :       - Backlog queue sampling
 *                                      - netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>

#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)
/*
 *      The list of packet types we will receive (as opposed to discard)
 *      and the routines to invoke.
 *
 *      Why 16? Because with 16 the only overlap we get on a hash of the
 *      low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *             sure which should go first, but I bet it won't make much
 *             difference if we are running VLANs.  The good news is that
 *             this protocol won't be in the list unless compiled in, so
 *             the average user (w/out VLANs) will not be adversely affected.
 */
#define PTYPE_HASH_SIZE (16)
#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
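
/*
 * Worked example of the bucket math above (illustrative note, not part of
 * the original file): the low nibble of the protocol number selects the
 * bucket, which is exactly why RARP, SNAP and X.25 collide:
 *
 *      ETH_P_RARP (0x8035) & PTYPE_HASH_MASK == 5
 *      ETH_P_SNAP (0x0005) & PTYPE_HASH_MASK == 5
 *      ETH_P_X25  (0x0805) & PTYPE_HASH_MASK == 5
 */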
static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;        /* Taps */
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);

#define NETDEV_HASHBITS 8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
        unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
        return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
        return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}
/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
        struct net *net = dev_net(dev);

        write_lock_bh(&dev_base_lock);
        list_add_tail(&dev->dev_list, &net->dev_base_head);
        hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
        hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
        write_unlock_bh(&dev_base_lock);
        return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
        /* Unlink dev from the device chain */
        write_lock_bh(&dev_base_lock);
        list_del(&dev->dev_list);
        hlist_del(&dev->name_hlist);
        hlist_del(&dev->index_hlist);
        write_unlock_bh(&dev_base_lock);
}
static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *      Device drivers call our routines to queue packets here. We empty the
 *      queue in the local softnet handler.
 */
DEFINE_PER_CPU(struct softnet_data, softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
        {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
         ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
         ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
         ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
         ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
         ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
         ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
         ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
         ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
         ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
         ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
         ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
         ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
         ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
         ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, ARPHRD_IEEE802154_PHY,
         ARPHRD_VOID, ARPHRD_NONE};

static const char *netdev_lock_name[] =
        {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
         "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
         "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
         "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
         "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
         "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
         "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
         "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
         "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
         "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
         "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
         "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
         "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
         "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
         "_xmit_PHONET_PIPE", "_xmit_IEEE802154", "_xmit_IEEE802154_PHY",
         "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
                if (netdev_lock_type[i] == dev_type)
                        return i;
        /* the last key is used by default */
        return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
        int i;

        i = netdev_lock_pos(dev_type);
        lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
                                   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
        int i;

        i = netdev_lock_pos(dev->type);
        lockdep_set_class_and_name(&dev->addr_list_lock,
                                   &netdev_addr_lock_key[i],
                                   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

                Protocol management and registration routines

*******************************************************************************/
/*
 *      Add a protocol ID to the list. Now that the input handler is
 *      smarter we can dispense with all the messy stuff that used to be
 *      here.
 *
 *      BEWARE!!! Protocol handlers, mangling input packets,
 *      MUST BE last in hash buckets and checking protocol handlers
 *      MUST start from promiscuous ptype_all chain in net_bh.
 *      It is true now, do not change it.
 *      Explanation follows: if a protocol handler that mangles packets were
 *      first on the list, it could not detect that the packet is cloned and
 *      must be copied-on-write; it would change the clone, and subsequent
 *      readers would see a broken packet.
 */

/**
 *      dev_add_pack - add packet handler
 *      @pt: packet type declaration
 *
 *      Add a protocol handler to the networking stack. The passed &packet_type
 *      is linked into kernel lists and may not be freed until it has been
 *      removed from the kernel lists.
 *
 *      This call does not sleep and therefore cannot guarantee that all
 *      CPUs that are in the middle of receiving packets will see the new
 *      packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
        int hash;

        spin_lock_bh(&ptype_lock);
        if (pt->type == htons(ETH_P_ALL))
                list_add_rcu(&pt->list, &ptype_all);
        else {
                hash = ntohs(pt->type) & PTYPE_HASH_MASK;
                list_add_rcu(&pt->list, &ptype_base[hash]);
        }
        spin_unlock_bh(&ptype_lock);
}
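
/*
 * Illustrative sketch (hypothetical, not part of this file): a module that
 * taps every IPv4 frame by registering a packet handler.  The names
 * example_rcv and example_pt are made up for the example.
 */
#if 0
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
                       struct packet_type *pt, struct net_device *orig_dev)
{
        /* A handler must consume or free the skb handed to it. */
        kfree_skb(skb);
        return NET_RX_SUCCESS;
}

static struct packet_type example_pt __read_mostly = {
        .type = cpu_to_be16(ETH_P_IP),
        .func = example_rcv,
};

/* dev_add_pack(&example_pt) from module init;
 * dev_remove_pack(&example_pt) from module exit. */
#endif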
/**
 *      __dev_remove_pack - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      The packet type might still be in use by receivers
 *      and must not be freed until after all the CPUs have gone
 *      through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
        struct list_head *head;
        struct packet_type *pt1;

        spin_lock_bh(&ptype_lock);

        if (pt->type == htons(ETH_P_ALL))
                head = &ptype_all;
        else
                head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

        list_for_each_entry(pt1, head, list) {
                if (pt == pt1) {
                        list_del_rcu(&pt->list);
                        goto out;
                }
        }

        printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
        spin_unlock_bh(&ptype_lock);
}

/**
 *      dev_remove_pack - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      This call sleeps to guarantee that no CPU is looking at the packet
 *      type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
        __dev_remove_pack(pt);

        synchronize_net();
}
/******************************************************************************

                      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *      netdev_boot_setup_add - add new setup entry
 *      @name: name of the device
 *      @map: configured settings for the device
 *
 *      Adds new setup entry to the dev_boot_setup list.  The function
 *      returns 0 on error and 1 on success.  This is a generic routine to
 *      all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
        struct netdev_boot_setup *s;
        int i;

        s = dev_boot_setup;
        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
                        memset(s[i].name, 0, sizeof(s[i].name));
                        strlcpy(s[i].name, name, IFNAMSIZ);
                        memcpy(&s[i].map, map, sizeof(s[i].map));
                        break;
                }
        }

        return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *      netdev_boot_setup_check - check boot time settings
 *      @dev: the netdevice
 *
 *      Check boot time settings for the device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
        struct netdev_boot_setup *s = dev_boot_setup;
        int i;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
                    !strcmp(dev->name, s[i].name)) {
                        dev->irq        = s[i].map.irq;
                        dev->base_addr  = s[i].map.base_addr;
                        dev->mem_start  = s[i].map.mem_start;
                        dev->mem_end    = s[i].map.mem_end;
                        return 1;
                }
        }
        return 0;
}
/**
 *      netdev_boot_base - get address from boot time settings
 *      @prefix: prefix for network device
 *      @unit: id for network device
 *
 *      Check boot time settings for the base address of device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
        const struct netdev_boot_setup *s = dev_boot_setup;
        char name[IFNAMSIZ];
        int i;

        sprintf(name, "%s%d", prefix, unit);

        /*
         * If device already registered then return base of 1
         * to indicate not to probe for this interface
         */
        if (__dev_get_by_name(&init_net, name))
                return 1;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
                if (!strcmp(name, s[i].name))
                        return s[i].map.base_addr;
        return 0;
}
/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
        int ints[5];
        struct ifmap map;

        str = get_options(str, ARRAY_SIZE(ints), ints);
        if (!str || !*str)
                return 0;

        memset(&map, 0, sizeof(map));
        if (ints[0] > 0)
                map.irq = ints[1];
        if (ints[0] > 1)
                map.base_addr = ints[2];
        if (ints[0] > 2)
                map.mem_start = ints[3];
        if (ints[0] > 3)
                map.mem_end = ints[4];

        /* Add new entry to the list */
        return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
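
/*
 * Example of the boot parameter this handler parses, following the ints[]
 * mapping above (the values here are made up for illustration):
 *
 *      netdev=9,0x300,0xd0000,0xd4000,eth0
 *
 * i.e. irq=9, base_addr=0x300, mem_start=0xd0000, mem_end=0xd4000 for the
 * device that will probe as "eth0".
 */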
/*******************************************************************************

                Device Interface Subroutines

*******************************************************************************/
/**
 *      __dev_get_by_name - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. Must be called under RTNL semaphore
 *      or @dev_base_lock. If the name is found a pointer to the device
 *      is returned. If the name is not found then %NULL is returned. The
 *      reference counters are not incremented so the caller must be
 *      careful with locks.
 */
struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
        struct hlist_node *p;

        hlist_for_each(p, dev_name_hash(net, name)) {
                struct net_device *dev
                        = hlist_entry(p, struct net_device, name_hlist);
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;
        }
        return NULL;
}
/**
 *      dev_get_by_name - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. This can be called from any
 *      context and does its own locking. The returned handle has
 *      the usage count incremented and the caller must use dev_put() to
 *      release it when it is no longer needed. %NULL is returned if no
 *      matching device is found.
 */
struct net_device *dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;

        read_lock(&dev_base_lock);
        dev = __dev_get_by_name(net, name);
        if (dev)
                dev_hold(dev);
        read_unlock(&dev_base_lock);
        return dev;
}
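
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * reference obtained from dev_get_by_name() must be dropped with dev_put()
 * when the caller is done with the device.
 */
#if 0
static int example_use_loopback(struct net *net)
{
        struct net_device *dev = dev_get_by_name(net, "lo");

        if (!dev)
                return -ENODEV;
        printk(KERN_DEBUG "%s: ifindex %d\n", dev->name, dev->ifindex);
        dev_put(dev);                   /* release the reference */
        return 0;
}
#endif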
/**
 *      __dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns %NULL if the device
 *      is not found or a pointer to the device. The device has not
 *      had its reference counter increased so the caller must be careful
 *      about locking. The caller must hold either the RTNL semaphore
 *      or @dev_base_lock.
 */
struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
        struct hlist_node *p;

        hlist_for_each(p, dev_index_hash(net, ifindex)) {
                struct net_device *dev
                        = hlist_entry(p, struct net_device, index_hlist);
                if (dev->ifindex == ifindex)
                        return dev;
        }
        return NULL;
}
/**
 *      dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns NULL if the device
 *      is not found or a pointer to the device. The device returned has
 *      had a reference added and the pointer is safe until the user calls
 *      dev_put to indicate they have finished with it.
 */
struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;

        read_lock(&dev_base_lock);
        dev = __dev_get_by_index(net, ifindex);
        if (dev)
                dev_hold(dev);
        read_unlock(&dev_base_lock);
        return dev;
}
/**
 *      dev_getbyhwaddr - find a device by its hardware address
 *      @net: the applicable net namespace
 *      @type: media type of device
 *      @ha: hardware address
 *
 *      Search for an interface by MAC address. Returns NULL if the device
 *      is not found or a pointer to the device. The caller must hold the
 *      rtnl semaphore. The returned device has not had its ref count increased
 *      and the caller must therefore be careful about locking.
 *
 *      BUGS:
 *      If the API was consistent this would be __dev_get_by_hwaddr
 */
struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
        struct net_device *dev;

        for_each_netdev(net, dev)
                if (dev->type == type &&
                    !memcmp(dev->dev_addr, ha, dev->addr_len))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev;

        for_each_netdev(net, dev)
                if (dev->type == type)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev;

        rtnl_lock();
        dev = __dev_getfirstbyhwtype(net, type);
        if (dev)
                dev_hold(dev);
        rtnl_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *      dev_get_by_flags - find any device with given flags
 *      @net: the applicable net namespace
 *      @if_flags: IFF_* values
 *      @mask: bitmask of bits in if_flags to check
 *
 *      Search for any interface with the given flags. Returns NULL if a device
 *      is not found or a pointer to the device. The device returned has
 *      had a reference added and the pointer is safe until the user calls
 *      dev_put to indicate they have finished with it.
 */
struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
        struct net_device *dev, *ret;

        ret = NULL;
        read_lock(&dev_base_lock);
        for_each_netdev(net, dev) {
                if (((dev->flags ^ if_flags) & mask) == 0) {
                        dev_hold(dev);
                        ret = dev;
                        break;
                }
        }
        read_unlock(&dev_base_lock);
        return ret;
}
/**
 *      dev_valid_name - check if name is okay for network device
 *      @name: name string
 *
 *      Network device names need to be valid file names to
 *      allow sysfs to work.  We also disallow any kind of
 *      whitespace.
 */
int dev_valid_name(const char *name)
{
        if (*name == '\0' || strlen(name) >= IFNAMSIZ)
                return 0;
        if (!strcmp(name, ".") || !strcmp(name, ".."))
                return 0;

        while (*name) {
                if (*name == '/' || isspace(*name))
                        return 0;
                name++;
        }
        return 1;
}
/**
 *      __dev_alloc_name - allocate a name for a device
 *      @net: network namespace to allocate the device name in
 *      @name: name format string
 *      @buf: scratch buffer and result name string
 *
 *      Passed a format string - eg "lt%d" it will try and find a suitable
 *      id. It scans list of devices to build up a free map, then chooses
 *      the first empty slot. The caller must hold the dev_base or rtnl lock
 *      while allocating the name and adding the device in order to avoid
 *      duplicates.
 *      Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
 *      Returns the number of the unit assigned or a negative errno code.
 */
static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
        int i = 0;
        const char *p;
        const int max_netdevices = 8*PAGE_SIZE;
        unsigned long *inuse;
        struct net_device *d;

        p = strnchr(name, IFNAMSIZ-1, '%');
        if (p) {
                /*
                 * Verify the string as this thing may have come from
                 * the user.  There must be either one "%d" and no other "%"
                 * characters.
                 */
                if (p[1] != 'd' || strchr(p + 2, '%'))
                        return -EINVAL;

                /* Use one page as a bit array of possible slots */
                inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
                if (!inuse)
                        return -ENOMEM;

                for_each_netdev(net, d) {
                        if (!sscanf(d->name, name, &i))
                                continue;
                        if (i < 0 || i >= max_netdevices)
                                continue;

                        /* avoid cases where sscanf is not exact inverse of printf */
                        snprintf(buf, IFNAMSIZ, name, i);
                        if (!strncmp(buf, d->name, IFNAMSIZ))
                                set_bit(i, inuse);
                }

                i = find_first_zero_bit(inuse, max_netdevices);
                free_page((unsigned long) inuse);
        }

        snprintf(buf, IFNAMSIZ, name, i);
        if (!__dev_get_by_name(net, buf))
                return i;

        /* It is possible to run out of possible slots
         * when the name is long and there isn't enough space left
         * for the digits, or if all bits are used.
         */
        return -ENFILE;
}
/**
 *      dev_alloc_name - allocate a name for a device
 *      @dev: device
 *      @name: name format string
 *
 *      Passed a format string - eg "lt%d" it will try and find a suitable
 *      id. It scans list of devices to build up a free map, then chooses
 *      the first empty slot. The caller must hold the dev_base or rtnl lock
 *      while allocating the name and adding the device in order to avoid
 *      duplicates.
 *      Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
 *      Returns the number of the unit assigned or a negative errno code.
 */
int dev_alloc_name(struct net_device *dev, const char *name)
{
        char buf[IFNAMSIZ];
        struct net *net;
        int ret;

        BUG_ON(!dev_net(dev));
        net = dev_net(dev);
        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}
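
/*
 * Illustrative sketch (hypothetical driver fragment, not part of this
 * file): asking for the first free "eth%d" slot before registration.
 */
#if 0
        err = dev_alloc_name(dev, "eth%d");     /* e.g. picks "eth0" */
        if (err < 0)
                goto failed;
        /* dev->name now holds the chosen name; err is the unit number. */
#endif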
/**
 *      dev_change_name - change name of a device
 *      @dev: device
 *      @newname: name (or format string) must be at least IFNAMSIZ
 *
 *      Change name of a device; can pass format strings such as "eth%d".
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
        char oldname[IFNAMSIZ];
        int err = 0;
        int ret;
        struct net *net;

        BUG_ON(!dev_net(dev));

        net = dev_net(dev);
        if (dev->flags & IFF_UP)
                return -EBUSY;

        if (!dev_valid_name(newname))
                return -EINVAL;

        if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
                return 0;

        memcpy(oldname, dev->name, IFNAMSIZ);

        if (strchr(newname, '%')) {
                err = dev_alloc_name(dev, newname);
                if (err < 0)
                        return err;
        }
        else if (__dev_get_by_name(net, newname))
                return -EEXIST;
        else
                strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
        /* For now only devices in the initial network namespace
         * are in sysfs.
         */
        if (net == &init_net) {
                ret = device_rename(&dev->dev, dev->name);
                if (ret) {
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        return ret;
                }
        }

        write_lock_bh(&dev_base_lock);
        hlist_del(&dev->name_hlist);
        hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
        write_unlock_bh(&dev_base_lock);

        ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
        ret = notifier_to_errno(ret);

        if (ret) {
                if (err) {
                        printk(KERN_ERR
                               "%s: name change rollback failed: %d.\n",
                               dev->name, ret);
                } else {
                        err = ret;
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        goto rollback;
                }
        }

        return err;
}
/**
 *      dev_set_alias - change ifalias of a device
 *      @dev: device
 *      @alias: name up to IFALIASZ
 *      @len: limit of bytes to copy from info
 *
 *      Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
        if (len >= IFALIASZ)
                return -EINVAL;

        dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
        if (!dev->ifalias)
                return -ENOMEM;

        strlcpy(dev->ifalias, alias, len + 1);
        return 0;
}
/**
 *      netdev_features_change - device changes features
 *      @dev: device to cause notification
 *
 *      Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
        call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *      netdev_state_change - device changes state
 *      @dev: device to cause notification
 *
 *      Called to indicate a device has changed state. This function calls
 *      the notifier chains for netdev_chain and sends a NEWLINK message
 *      to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
        if (dev->flags & IFF_UP) {
                call_netdevice_notifiers(NETDEV_CHANGE, dev);
                rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
        }
}

void netdev_bonding_change(struct net_device *dev)
{
        call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);
/**
 *      dev_load - load a network module
 *      @net: the applicable net namespace
 *      @name: name of interface
 *
 *      If a network interface is not present and the process has suitable
 *      privileges this function loads the module. If module loading is not
 *      available in this kernel then it becomes a nop.
 */
void dev_load(struct net *net, const char *name)
{
        struct net_device *dev;

        read_lock(&dev_base_lock);
        dev = __dev_get_by_name(net, name);
        read_unlock(&dev_base_lock);

        if (!dev && capable(CAP_SYS_MODULE))
                request_module("%s", name);
}
/**
 *      dev_open - prepare an interface for use.
 *      @dev: device to open
 *
 *      Takes a device from down to up state. The device's private open
 *      function is invoked and then the multicast lists are loaded. Finally
 *      the device is moved into the up state and a %NETDEV_UP message is
 *      sent to the netdev notifier chain.
 *
 *      Calling this function on an active interface is a nop. On a failure
 *      a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        int ret;

        /*
         *      Is it already up?
         */
        if (dev->flags & IFF_UP)
                return 0;

        /*
         *      Is it even present?
         */
        if (!netif_device_present(dev))
                return -ENODEV;

        ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
        ret = notifier_to_errno(ret);
        if (ret)
                return ret;

        /*
         *      Call device private open method
         */
        set_bit(__LINK_STATE_START, &dev->state);

        if (ops->ndo_validate_addr)
                ret = ops->ndo_validate_addr(dev);

        if (!ret && ops->ndo_open)
                ret = ops->ndo_open(dev);

        /*
         *      If it went open OK then:
         */
        if (ret)
                clear_bit(__LINK_STATE_START, &dev->state);
        else {
                /*
                 *      Set the flags.
                 */
                dev->flags |= IFF_UP;

                /*
                 *      Enable NET_DMA
                 */
                net_dmaengine_get();

                /*
                 *      Initialize multicasting status
                 */
                dev_set_rx_mode(dev);

                /*
                 *      Wakeup transmit queue engine
                 */
                dev_activate(dev);

                /*
                 *      ... and announce new interface.
                 */
                call_netdevice_notifiers(NETDEV_UP, dev);
        }

        return ret;
}
/**
 *      dev_close - shutdown an interface.
 *      @dev: device to shutdown
 *
 *      This function moves an active device into down state. A
 *      %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *      is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *      chain.
 */
int dev_close(struct net_device *dev)
{
        const struct net_device_ops *ops = dev->netdev_ops;

        if (!(dev->flags & IFF_UP))
                return 0;

        /*
         *      Tell people we are going down, so that they can
         *      prepare for death while the device is still operating.
         */
        call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

        clear_bit(__LINK_STATE_START, &dev->state);

        /* Synchronize to scheduled poll. We cannot touch poll list,
         * it can be even on different cpu. So just clear netif_running().
         *
         * dev->stop() will invoke napi_disable() on all of its
         * napi_struct instances on this device.
         */
        smp_mb__after_clear_bit(); /* Commit netif_running(). */

        dev_deactivate(dev);

        /*
         *      Call the device specific close. This cannot fail.
         *      Only if device is UP
         *
         *      We allow it to be called even after a DETACH hot-plug
         *      event.
         */
        if (ops->ndo_stop)
                ops->ndo_stop(dev);

        /*
         *      Device is now down.
         */
        dev->flags &= ~IFF_UP;

        /*
         *      Tell people we are down
         */
        call_netdevice_notifiers(NETDEV_DOWN, dev);

        /*
         *      Shutdown NET_DMA
         */
        net_dmaengine_put();

        return 0;
}
/**
 *      dev_disable_lro - disable Large Receive Offload on a device
 *      @dev: device
 *
 *      Disable Large Receive Offload (LRO) on a net device.  Must be
 *      called under RTNL.  This is needed if received packets may be
 *      forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
        if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
            dev->ethtool_ops->set_flags) {
                u32 flags = dev->ethtool_ops->get_flags(dev);
                if (flags & ETH_FLAG_LRO) {
                        flags &= ~ETH_FLAG_LRO;
                        dev->ethtool_ops->set_flags(dev, flags);
                }
        }
        WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);
static int dev_boot_phase = 1;

/*
 *      Device change register/unregister. These are not inline or static
 *      as we export them to the world.
 */

/**
 *      register_netdevice_notifier - register a network notifier block
 *      @nb: notifier
 *
 *      Register a notifier to be called when network device events occur.
 *      The notifier passed is linked into the kernel structures and must
 *      not be reused until it has been unregistered. A negative errno code
 *      is returned on a failure.
 *
 *      When registered all registration and up events are replayed
 *      to the new notifier to allow device to have a race free
 *      view of the network device list.
 */
int register_netdevice_notifier(struct notifier_block *nb)
{
        struct net_device *dev;
        struct net_device *last;
        struct net *net;
        int err;

        rtnl_lock();
        err = raw_notifier_chain_register(&netdev_chain, nb);
        if (err)
                goto unlock;
        if (dev_boot_phase)
                goto unlock;
        for_each_net(net) {
                for_each_netdev(net, dev) {
                        err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
                        err = notifier_to_errno(err);
                        if (err)
                                goto rollback;

                        if (!(dev->flags & IFF_UP))
                                continue;

                        nb->notifier_call(nb, NETDEV_UP, dev);
                }
        }

unlock:
        rtnl_unlock();
        return err;

rollback:
        last = dev;
        for_each_net(net) {
                for_each_netdev(net, dev) {
                        if (dev == last)
                                break;

                        if (dev->flags & IFF_UP) {
                                nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
                                nb->notifier_call(nb, NETDEV_DOWN, dev);
                        }
                        nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
                }
        }

        raw_notifier_chain_unregister(&netdev_chain, nb);
        goto unlock;
}

/**
 *      unregister_netdevice_notifier - unregister a network notifier block
 *      @nb: notifier
 *
 *      Unregister a notifier previously registered by
 *      register_netdevice_notifier(). The notifier is unlinked from the
 *      kernel structures and may then be reused. A negative errno code
 *      is returned on a failure.
 */
int unregister_netdevice_notifier(struct notifier_block *nb)
{
        int err;

        rtnl_lock();
        err = raw_notifier_chain_unregister(&netdev_chain, nb);
        rtnl_unlock();
        return err;
}

/**
 *      call_netdevice_notifiers - call all network notifier blocks
 *      @val: value passed unmodified to notifier function
 *      @dev: net_device pointer passed unmodified to notifier function
 *
 *      Call all network notifier blocks.  Parameters and return value
 *      are as for raw_notifier_call_chain().
 */
int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
        return raw_notifier_call_chain(&netdev_chain, val, dev);
}
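
/*
 * Illustrative sketch (hypothetical module, not part of this file): a
 * minimal notifier that logs devices coming up and down.  The names
 * example_netdev_event and example_nb are made up.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        switch (event) {
        case NETDEV_UP:
                printk(KERN_INFO "%s is up\n", dev->name);
                break;
        case NETDEV_DOWN:
                printk(KERN_INFO "%s is down\n", dev->name);
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
        .notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_nb) replays NETDEV_REGISTER and
 * NETDEV_UP for already-existing devices, as described above. */
#endif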
/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
        atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
        atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
        if (atomic_read(&netstamp_needed))
                __net_timestamp(skb);
        else
                skb->tstamp.tv64 = 0;
}
/*
 *      Support routine. Sends outgoing frames to any network
 *      taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
        struct packet_type *ptype;

#ifdef CONFIG_NET_CLS_ACT
        if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
                net_timestamp(skb);
#else
        net_timestamp(skb);
#endif

        rcu_read_lock();
        list_for_each_entry_rcu(ptype, &ptype_all, list) {
                /* Never send packets back to the socket
                 * they originated from - MvS (miquels@drinkel.ow.org)
                 */
                if ((ptype->dev == dev || !ptype->dev) &&
                    (ptype->af_packet_priv == NULL ||
                     (struct sock *)ptype->af_packet_priv != skb->sk)) {
                        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
                        if (!skb2)
                                break;

                        /* skb->nh should be correctly
                         * set by sender, so that the second statement is
                         * just protection against buggy protocols.
                         */
                        skb_reset_mac_header(skb2);

                        if (skb_network_header(skb2) < skb2->data ||
                            skb2->network_header > skb2->tail) {
                                if (net_ratelimit())
                                        printk(KERN_CRIT "protocol %04x is "
                                               "buggy, dev %s\n",
                                               skb2->protocol, dev->name);
                                skb_reset_network_header(skb2);
                        }

                        skb2->transport_header = skb2->network_header;
                        skb2->pkt_type = PACKET_OUTGOING;
                        ptype->func(skb2, skb->dev, ptype, skb->dev);
                }
        }
        rcu_read_unlock();
}
static inline void __netif_reschedule(struct Qdisc *q)
{
        struct softnet_data *sd;
        unsigned long flags;

        local_irq_save(flags);
        sd = &__get_cpu_var(softnet_data);
        q->next_sched = sd->output_queue;
        sd->output_queue = q;
        raise_softirq_irqoff(NET_TX_SOFTIRQ);
        local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
        if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
                __netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
        if (atomic_dec_and_test(&skb->users)) {
                struct softnet_data *sd;
                unsigned long flags;

                local_irq_save(flags);
                sd = &__get_cpu_var(softnet_data);
                skb->next = sd->completion_queue;
                sd->completion_queue = skb;
                raise_softirq_irqoff(NET_TX_SOFTIRQ);
                local_irq_restore(flags);
        }
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
        if (in_irq() || irqs_disabled())
                dev_kfree_skb_irq(skb);
        else
                dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
/**
 *      netif_device_detach - mark device as removed
 *      @dev: network device
 *
 *      Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
        if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
            netif_running(dev)) {
                netif_tx_stop_all_queues(dev);
        }
}
EXPORT_SYMBOL(netif_device_detach);

/**
 *      netif_device_attach - mark device as attached
 *      @dev: network device
 *
 *      Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
        if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
            netif_running(dev)) {
                netif_tx_wake_all_queues(dev);
                __netdev_watchdog_up(dev);
        }
}
EXPORT_SYMBOL(netif_device_attach);
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
        return ((features & NETIF_F_GEN_CSUM) ||
                ((features & NETIF_F_IP_CSUM) &&
                 protocol == htons(ETH_P_IP)) ||
                ((features & NETIF_F_IPV6_CSUM) &&
                 protocol == htons(ETH_P_IPV6)) ||
                ((features & NETIF_F_FCOE_CRC) &&
                 protocol == htons(ETH_P_FCOE)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
        if (can_checksum_protocol(dev->features, skb->protocol))
                return true;

        if (skb->protocol == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
                if (can_checksum_protocol(dev->features & dev->vlan_features,
                                          veh->h_vlan_encapsulated_proto))
                        return true;
        }

        return false;
}
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
        __wsum csum;
        int ret = 0, offset;

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                goto out_set_summed;

        if (unlikely(skb_shinfo(skb)->gso_size)) {
                /* Let GSO fix up the checksum. */
                goto out_set_summed;
        }

        offset = skb->csum_start - skb_headroom(skb);
        BUG_ON(offset >= skb_headlen(skb));
        csum = skb_checksum(skb, offset, skb->len - offset, 0);

        offset += skb->csum_offset;
        BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

        if (skb_cloned(skb) &&
            !skb_clone_writable(skb, offset + sizeof(__sum16))) {
                ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
                if (ret)
                        goto out;
        }

        *(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
        skb->ip_summed = CHECKSUM_NONE;
out:
        return ret;
}
/**
 *      skb_gso_segment - Perform segmentation on skb.
 *      @skb: buffer to segment
 *      @features: features for the output path (see dev->features)
 *
 *      This function segments the given skb and returns a list of segments.
 *
 *      It may return NULL if the skb requires no segmentation.  This is
 *      only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
        struct packet_type *ptype;
        __be16 type = skb->protocol;
        int err;

        skb_reset_mac_header(skb);
        skb->mac_len = skb->network_header - skb->mac_header;
        __skb_pull(skb, skb->mac_len);

        if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
                struct net_device *dev = skb->dev;
                struct ethtool_drvinfo info = {};

                if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
                        dev->ethtool_ops->get_drvinfo(dev, &info);

                WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
                        "ip_summed=%d",
                     info.driver, dev ? dev->features : 0L,
                     skb->sk ? skb->sk->sk_route_caps : 0L,
                     skb->len, skb->data_len, skb->ip_summed);

                if (skb_header_cloned(skb) &&
                    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
                        return ERR_PTR(err);
        }

        rcu_read_lock();
        list_for_each_entry_rcu(ptype,
                        &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
                if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
                        if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
                                err = ptype->gso_send_check(skb);
                                segs = ERR_PTR(err);
                                if (err || skb_gso_ok(skb, features))
                                        break;
                                __skb_push(skb, (skb->data -
                                                 skb_network_header(skb)));
                        }
                        segs = ptype->gso_segment(skb, features);
                        break;
                }
        }
        rcu_read_unlock();

        __skb_push(skb, skb->data - skb_mac_header(skb));

        return segs;
}
EXPORT_SYMBOL(skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
void netdev_rx_csum_fault(struct net_device *dev)
{
        if (net_ratelimit()) {
                printk(KERN_ERR "%s: hw csum failure.\n",
                        dev ? dev->name : "<unknown>");
                dump_stack();
        }
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
        int i;

        if (dev->features & NETIF_F_HIGHDMA)
                return 0;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                if (PageHighMem(skb_shinfo(skb)->frags[i].page))
                        return 1;
#endif
        return 0;
}

struct dev_gso_cb {
        void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
        struct dev_gso_cb *cb;

        do {
                struct sk_buff *nskb = skb->next;

                skb->next = nskb->next;
                nskb->next = NULL;
                kfree_skb(nskb);
        } while (skb->next);

        cb = DEV_GSO_CB(skb);
        if (cb->destructor)
                cb->destructor(skb);
}
/**
 *      dev_gso_segment - Perform emulated hardware segmentation on skb.
 *      @skb: buffer to segment
 *
 *      This function segments the given skb and stores the list of segments
 *      in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        struct sk_buff *segs;
        int features = dev->features & ~(illegal_highdma(dev, skb) ?
                                         NETIF_F_SG : 0);

        segs = skb_gso_segment(skb, features);

        /* Verifying header integrity only. */
        if (!segs)
                return 0;

        if (IS_ERR(segs))
                return PTR_ERR(segs);

        skb->next = segs;
        DEV_GSO_CB(skb)->destructor = skb->destructor;
        skb->destructor = dev_gso_skb_destructor;

        return 0;
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                        struct netdev_queue *txq)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        int rc;

        if (likely(!skb->next)) {
                if (!list_empty(&ptype_all))
                        dev_queue_xmit_nit(skb, dev);

                if (netif_needs_gso(dev, skb)) {
                        if (unlikely(dev_gso_segment(skb)))
                                goto out_kfree_skb;
                        if (skb->next)
                                goto gso;
                }

                /*
                 * If device doesn't need skb->dst, release it right now while
                 * it's hot in this cpu cache
                 */
                if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
                        skb_dst_drop(skb);

                rc = ops->ndo_start_xmit(skb, dev);
                if (rc == NETDEV_TX_OK)
                        txq_trans_update(txq);
                /*
                 * TODO: if skb_orphan() was called by
                 * dev->hard_start_xmit() (for example, the unmodified
                 * igb driver does that; bnx2 doesn't), then
                 * skb_tx_software_timestamp() will be unable to send
                 * back the time stamp.
                 *
                 * How can this be prevented? Always create another
                 * reference to the socket before calling
                 * dev->hard_start_xmit()? Prevent that skb_orphan()
                 * does anything in dev->hard_start_xmit() by clearing
                 * the skb destructor before the call and restoring it
                 * afterwards, then doing the skb_orphan() ourselves?
                 */
                return rc;
        }

gso:
        do {
                struct sk_buff *nskb = skb->next;

                skb->next = nskb->next;
                nskb->next = NULL;
                rc = ops->ndo_start_xmit(nskb, dev);
                if (unlikely(rc != NETDEV_TX_OK)) {
                        nskb->next = skb->next;
                        skb->next = nskb;
                        return rc;
                }
                txq_trans_update(txq);
                if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
                        return NETDEV_TX_BUSY;
        } while (skb->next);

        skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
        kfree_skb(skb);
        return NETDEV_TX_OK;
}
static u32 skb_tx_hashrnd;

u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
{
        u32 hash;

        if (skb_rx_queue_recorded(skb)) {
                hash = skb_get_rx_queue(skb);
                while (unlikely(hash >= dev->real_num_tx_queues))
                        hash -= dev->real_num_tx_queues;
                return hash;
        }

        if (skb->sk && skb->sk->sk_hash)
                hash = skb->sk->sk_hash;
        else
                hash = skb->protocol;

        hash = jhash_1word(hash, skb_tx_hashrnd);

        return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}
EXPORT_SYMBOL(skb_tx_hash);
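
/*
 * Worked example of the scaling above (illustrative note, not part of the
 * original file): the 32-bit jhash result is treated as a fraction of 2^32,
 * so with real_num_tx_queues == 4:
 *
 *      hash = 0x80000000  ->  (0x80000000ULL * 4) >> 32 == 2
 *      hash = 0xffffffff  ->  (0xffffffffULL * 4) >> 32 == 3
 *
 * i.e. the result always lands in [0, real_num_tx_queues - 1] without a
 * modulo, and is evenly spread when the hash is.
 */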
static struct netdev_queue *dev_pick_tx(struct net_device *dev,
                                        struct sk_buff *skb)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        u16 queue_index = 0;

        if (ops->ndo_select_queue)
                queue_index = ops->ndo_select_queue(dev, skb);
        else if (dev->real_num_tx_queues > 1)
                queue_index = skb_tx_hash(dev, skb);

        skb_set_queue_mapping(skb, queue_index);
        return netdev_get_tx_queue(dev, queue_index);
}
/**
 *      dev_queue_xmit - transmit a buffer
 *      @skb: buffer to transmit
 *
 *      Queue a buffer for transmission to a network device. The caller must
 *      have set the device and priority and built the buffer before calling
 *      this function. The function can be called from an interrupt.
 *
 *      A negative errno code is returned on a failure. A success does not
 *      guarantee the frame will be transmitted as it may be dropped due
 *      to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 */
int dev_queue_xmit(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        struct netdev_queue *txq;
        struct Qdisc *q;
        int rc = -ENOMEM;

        /* GSO will handle the following emulations directly. */
        if (netif_needs_gso(dev, skb))
                goto gso;

        if (skb_has_frags(skb) &&
            !(dev->features & NETIF_F_FRAGLIST) &&
            __skb_linearize(skb))
                goto out_kfree_skb;

        /* Fragmented skb is linearized if device does not support SG,
         * or if at least one of fragments is in highmem and device
         * does not support DMA from it.
         */
        if (skb_shinfo(skb)->nr_frags &&
            (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
            __skb_linearize(skb))
                goto out_kfree_skb;

        /* If packet is not checksummed and device does not support
         * checksumming for this protocol, complete checksumming here.
         */
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                skb_set_transport_header(skb, skb->csum_start -
                                              skb_headroom(skb));
                if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
                        goto out_kfree_skb;
        }

gso:
        /* Disable soft irqs for various locks below. Also
         * stops preemption for RCU.
         */
        rcu_read_lock_bh();

        txq = dev_pick_tx(dev, skb);
        q = rcu_dereference(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
        skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
        if (q->enqueue) {
                spinlock_t *root_lock = qdisc_lock(q);

                spin_lock(root_lock);

                if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
                        kfree_skb(skb);
                        rc = NET_XMIT_DROP;
                } else {
                        rc = qdisc_enqueue_root(skb, q);
                        qdisc_run(q);
                }
                spin_unlock(root_lock);

                goto out;
        }

        /* The device has no queue. Common case for software devices:
           loopback, all the sorts of tunnels...

           Really, it is unlikely that netif_tx_lock protection is necessary
           here.  (f.e. loopback and IP tunnels are clean ignoring statistics
           counters.)
           However, it is possible, that they rely on protection
           made by us here.

           Check this and shot the lock. It is not prone from deadlocks.
           Either shot noqueue qdisc, it is even simpler 8)
         */
        if (dev->flags & IFF_UP) {
                int cpu = smp_processor_id(); /* ok because BHs are off */

                if (txq->xmit_lock_owner != cpu) {

                        HARD_TX_LOCK(dev, txq, cpu);

                        if (!netif_tx_queue_stopped(txq)) {
                                rc = NET_XMIT_SUCCESS;
                                if (!dev_hard_start_xmit(skb, dev, txq)) {
                                        HARD_TX_UNLOCK(dev, txq);
                                        goto out;
                                }
                        }
                        HARD_TX_UNLOCK(dev, txq);
                        if (net_ratelimit())
                                printk(KERN_CRIT "Virtual device %s asks to "
                                       "queue packet!\n", dev->name);
                } else {
                        /* Recursion is detected! It is possible,
                         * unfortunately */
                        if (net_ratelimit())
                                printk(KERN_CRIT "Dead loop on virtual device "
                                       "%s, fix it urgently!\n", dev->name);
                }
        }

        rc = -ENETDOWN;
        rcu_read_unlock_bh();

out_kfree_skb:
        kfree_skb(skb);
        return rc;
out:
        rcu_read_unlock_bh();
        return rc;
}
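
/*
 * Illustrative sketch (hypothetical caller, not part of this file): sending
 * a raw, pre-built Ethernet frame of len bytes through dev_queue_xmit().
 * Error handling is minimal; note the skb is consumed either way.
 */
#if 0
static int example_xmit(struct net_device *dev, const void *frame, size_t len)
{
        struct sk_buff *skb = alloc_skb(len + LL_RESERVED_SPACE(dev),
                                        GFP_ATOMIC);

        if (!skb)
                return -ENOMEM;
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
        memcpy(skb_put(skb, len), frame, len);
        skb_reset_mac_header(skb);
        skb->dev = dev;
        skb->protocol = eth_hdr(skb)->h_proto;  /* ethertype from the frame */
        return dev_queue_xmit(skb);             /* consumes the skb */
}
#endif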
/*=======================================================================
                        Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
/**
 *      netif_rx        -       post buffer to the network code
 *      @skb: buffer to post
 *
 *      This function receives a packet from a device driver and queues it for
 *      the upper (protocol) levels to process.  It always succeeds. The buffer
 *      may be dropped during processing for congestion control or by the
 *      protocol layers.
 *
 *      return values:
 *      NET_RX_SUCCESS  (no congestion)
 *      NET_RX_DROP     (packet was dropped)
 *
 */
int netif_rx(struct sk_buff *skb)
{
        struct softnet_data *queue;
        unsigned long flags;

        /* if netpoll wants it, pretend we never saw it */
        if (netpoll_rx(skb))
                return NET_RX_DROP;

        if (!skb->tstamp.tv64)
                net_timestamp(skb);

        /*
         * The code is rearranged so that the path is the most
         * short when CPU is congested, but is still operating.
         */
        local_irq_save(flags);
        queue = &__get_cpu_var(softnet_data);

        __get_cpu_var(netdev_rx_stat).total++;
        if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
                if (queue->input_pkt_queue.qlen) {
enqueue:
                        __skb_queue_tail(&queue->input_pkt_queue, skb);
                        local_irq_restore(flags);
                        return NET_RX_SUCCESS;
                }

                napi_schedule(&queue->backlog);
                goto enqueue;
        }

        __get_cpu_var(netdev_rx_stat).dropped++;
        local_irq_restore(flags);

        kfree_skb(skb);
        return NET_RX_DROP;
}
int netif_rx_ni(struct sk_buff *skb)
{
        int err;

        preempt_disable();
        err = netif_rx(skb);
        if (local_softirq_pending())
                do_softirq();
        preempt_enable();

        return err;
}
EXPORT_SYMBOL(netif_rx_ni);
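
/*
 * Illustrative sketch (hypothetical driver receive path, not part of this
 * file): how a non-NAPI driver typically hands a received frame to
 * netif_rx() from its interrupt handler.  example_rx is a made-up name.
 */
#if 0
static void example_rx(struct net_device *dev, const void *data, size_t len)
{
        struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

        if (!skb) {
                dev->stats.rx_dropped++;
                return;
        }
        skb_reserve(skb, NET_IP_ALIGN);         /* align the IP header */
        memcpy(skb_put(skb, len), data, len);
        skb->protocol = eth_type_trans(skb, dev);
        netif_rx(skb);                          /* queue for the softirq */
}
#endif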
static void net_tx_action(struct softirq_action *h)
{
        struct softnet_data *sd = &__get_cpu_var(softnet_data);

        if (sd->completion_queue) {
                struct sk_buff *clist;

                local_irq_disable();
                clist = sd->completion_queue;
                sd->completion_queue = NULL;
                local_irq_enable();

                while (clist) {
                        struct sk_buff *skb = clist;
                        clist = clist->next;

                        WARN_ON(atomic_read(&skb->users));
                        __kfree_skb(skb);
                }
        }

        if (sd->output_queue) {
                struct Qdisc *head;

                local_irq_disable();
                head = sd->output_queue;
                sd->output_queue = NULL;
                local_irq_enable();

                while (head) {
                        struct Qdisc *q = head;
                        spinlock_t *root_lock;

                        head = head->next_sched;

                        root_lock = qdisc_lock(q);
                        if (spin_trylock(root_lock)) {
                                smp_mb__before_clear_bit();
                                clear_bit(__QDISC_STATE_SCHED,
                                          &q->state);
                                qdisc_run(q);
                                spin_unlock(root_lock);
                        } else {
                                if (!test_bit(__QDISC_STATE_DEACTIVATED,
                                              &q->state)) {
                                        __netif_reschedule(q);
                                } else {
                                        smp_mb__before_clear_bit();
                                        clear_bit(__QDISC_STATE_SCHED,
                                                  &q->state);
                                }
                        }
                }
        }
}
static inline int deliver_skb(struct sk_buff *skb,
                              struct packet_type *pt_prev,
                              struct net_device *orig_dev)
{
        atomic_inc(&skb->users);
        return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)

#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
                             unsigned char *addr) __read_mostly;
EXPORT_SYMBOL(br_fdb_test_addr_hook);
#endif

/*
 * If bridge module is loaded call bridging hook.
 *  returns NULL if packet was consumed.
 */
struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
                                        struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL(br_handle_frame_hook);

static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
                                            struct packet_type **pt_prev, int *ret,
                                            struct net_device *orig_dev)
{
        struct net_bridge_port *port;

        if (skb->pkt_type == PACKET_LOOPBACK ||
            (port = rcu_dereference(skb->dev->br_port)) == NULL)
                return skb;

        if (*pt_prev) {
                *ret = deliver_skb(skb, *pt_prev, orig_dev);
                *pt_prev = NULL;
        }

        return br_handle_frame_hook(port, skb);
}
#else
#define handle_bridge(skb, pt_prev, ret, orig_dev)      (skb)
#endif

#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);

static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
                                             struct packet_type **pt_prev,
                                             int *ret,
                                             struct net_device *orig_dev)
{
        if (skb->dev->macvlan_port == NULL)
                return skb;

        if (*pt_prev) {
                *ret = deliver_skb(skb, *pt_prev, orig_dev);
                *pt_prev = NULL;
        }
        return macvlan_handle_frame_hook(skb);
}
#else
#define handle_macvlan(skb, pt_prev, ret, orig_dev)     (skb)
#endif
#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions:
 * a compare and 2 stores extra right now if we don't have it on
 * but have CONFIG_NET_CLS_ACT
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        u32 ttl = G_TC_RTTL(skb->tc_verd);
        struct netdev_queue *rxq;
        int result = TC_ACT_OK;
        struct Qdisc *q;

        if (MAX_RED_LOOP < ttl++) {
                printk(KERN_WARNING
                       "Redir loop detected Dropping packet (%d->%d)\n",
                       skb->iif, dev->ifindex);
                return TC_ACT_SHOT;
        }

        skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
        skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

        rxq = &dev->rx_queue;

        q = rxq->qdisc;
        if (q != &noop_qdisc) {
                spin_lock(qdisc_lock(q));
                if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
                        result = qdisc_enqueue_root(skb, q);
                spin_unlock(qdisc_lock(q));
        }

        return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
                                         struct packet_type **pt_prev,
                                         int *ret, struct net_device *orig_dev)
{
        if (skb->dev->rx_queue.qdisc == &noop_qdisc)
                goto out;

        if (*pt_prev) {
                *ret = deliver_skb(skb, *pt_prev, orig_dev);
                *pt_prev = NULL;
        } else {
                /* Huh? Why does turning on AF_PACKET affect this? */
                skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
        }

        switch (ing_filter(skb)) {
        case TC_ACT_SHOT:
        case TC_ACT_STOLEN:
                kfree_skb(skb);
                return NULL;
        }

out:
        skb->tc_verd = 0;
        return skb;
}
#endif
/**
 *      netif_nit_deliver - deliver received packets to network taps
 *      @skb: buffer
 *
 *      This function is used to deliver incoming packets to network
 *      taps. It should be used when the normal netif_receive_skb path
 *      is bypassed, for example because of VLAN acceleration.
 */
void netif_nit_deliver(struct sk_buff *skb)
{
        struct packet_type *ptype;

        if (list_empty(&ptype_all))
                return;

        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        skb->mac_len = skb->network_header - skb->mac_header;

        rcu_read_lock();
        list_for_each_entry_rcu(ptype, &ptype_all, list) {
                if (!ptype->dev || ptype->dev == skb->dev)
                        deliver_skb(skb, ptype, skb->dev);
        }
        rcu_read_unlock();
}
/**
 *      netif_receive_skb - process receive buffer from network
 *      @skb: buffer to process
 *
 *      netif_receive_skb() is the main receive data processing function.
 *      It always succeeds. The buffer may be dropped during processing
 *      for congestion control or by the protocol layers.
 *
 *      This function may only be called from softirq context and interrupts
 *      should be enabled.
 *
 *      Return values (usually ignored):
 *      NET_RX_SUCCESS: no congestion
 *      NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
        struct packet_type *ptype, *pt_prev;
        struct net_device *orig_dev;
        struct net_device *null_or_orig;
        int ret = NET_RX_DROP;
        __be16 type;

        if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
                return NET_RX_SUCCESS;

        /* if we've gotten here through NAPI, check netpoll */
        if (netpoll_receive_skb(skb))
                return NET_RX_DROP;

        if (!skb->tstamp.tv64)
                net_timestamp(skb);

        if (!skb->iif)
                skb->iif = skb->dev->ifindex;

        null_or_orig = NULL;
        orig_dev = skb->dev;
        if (orig_dev->master) {
                if (skb_bond_should_drop(skb))
                        null_or_orig = orig_dev; /* deliver only exact match */
                else
                        skb->dev = orig_dev->master;
        }

        __get_cpu_var(netdev_rx_stat).total++;

        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        skb->mac_len = skb->network_header - skb->mac_header;

        pt_prev = NULL;

        rcu_read_lock();

#ifdef CONFIG_NET_CLS_ACT
        if (skb->tc_verd & TC_NCLS) {
                skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
                goto ncls;
        }
#endif

        list_for_each_entry_rcu(ptype, &ptype_all, list) {
                if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
                    ptype->dev == orig_dev) {
                        if (pt_prev)
                                ret = deliver_skb(skb, pt_prev, orig_dev);
                        pt_prev = ptype;
                }
        }

#ifdef CONFIG_NET_CLS_ACT
        skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
        if (!skb)
                goto out;
ncls:
#endif

        skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
        if (!skb)
                goto out;
        skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
        if (!skb)
                goto out;

        type = skb->protocol;
        list_for_each_entry_rcu(ptype,
                        &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
                if (ptype->type == type &&
                    (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
                     ptype->dev == orig_dev)) {
                        if (pt_prev)
                                ret = deliver_skb(skb, pt_prev, orig_dev);
                        pt_prev = ptype;
                }
        }

        if (pt_prev) {
                ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
        } else {
                kfree_skb(skb);
                /* Jamal, now you will not able to escape explaining
                 * me how you were going to use this. :-)
                 */
                ret = NET_RX_DROP;
        }

out:
        rcu_read_unlock();
        return ret;
}
/* Network device is going away, flush any packets still pending */
static void flush_backlog(void *arg)
{
        struct net_device *dev = arg;
        struct softnet_data *queue = &__get_cpu_var(softnet_data);
        struct sk_buff *skb, *tmp;

        skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
                if (skb->dev == dev) {
                        __skb_unlink(skb, &queue->input_pkt_queue);
                        kfree_skb(skb);
                }
}
static int napi_gro_complete(struct sk_buff *skb)
{
        struct packet_type *ptype;
        __be16 type = skb->protocol;
        struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
        int err = -ENOENT;

        if (NAPI_GRO_CB(skb)->count == 1) {
                skb_shinfo(skb)->gso_size = 0;
                goto out;
        }

        rcu_read_lock();
        list_for_each_entry_rcu(ptype, head, list) {
                if (ptype->type != type || ptype->dev || !ptype->gro_complete)
                        continue;

                err = ptype->gro_complete(skb);
                break;
        }
        rcu_read_unlock();

        if (err) {
                WARN_ON(&ptype->list == head);
                kfree_skb(skb);
                return NET_RX_SUCCESS;
        }

out:
        return netif_receive_skb(skb);
}

void napi_gro_flush(struct napi_struct *napi)
{
        struct sk_buff *skb, *next;

        for (skb = napi->gro_list; skb; skb = next) {
                next = skb->next;
                skb->next = NULL;
                napi_gro_complete(skb);
        }

        napi->gro_count = 0;
        napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);
2401 int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2403 struct sk_buff **pp = NULL;
2404 struct packet_type *ptype;
2405 __be16 type = skb->protocol;
2406 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2411 if (!(skb->dev->features & NETIF_F_GRO))
2414 if (skb_is_gso(skb) || skb_has_frags(skb))
2418 list_for_each_entry_rcu(ptype, head, list) {
2419 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2422 skb_set_network_header(skb, skb_gro_offset(skb));
2423 mac_len = skb->network_header - skb->mac_header;
2424 skb->mac_len = mac_len;
2425 NAPI_GRO_CB(skb)->same_flow = 0;
2426 NAPI_GRO_CB(skb)->flush = 0;
2427 NAPI_GRO_CB(skb)->free = 0;
2429 pp = ptype->gro_receive(&napi->gro_list, skb);
2434 if (&ptype->list == head)
2437 same_flow = NAPI_GRO_CB(skb)->same_flow;
2438 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
2441 struct sk_buff *nskb = *pp;
2445 napi_gro_complete(nskb);
2452 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
2456 NAPI_GRO_CB(skb)->count = 1;
2457 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
2458 skb->next = napi->gro_list;
2459 napi->gro_list = skb;
2463 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2464 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2466 BUG_ON(skb->end - skb->tail < grow);
2468 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2471 skb->data_len -= grow;
2473 skb_shinfo(skb)->frags[0].page_offset += grow;
2474 skb_shinfo(skb)->frags[0].size -= grow;
2476 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2477 put_page(skb_shinfo(skb)->frags[0].page);
2478 memmove(skb_shinfo(skb)->frags,
2479 skb_shinfo(skb)->frags + 1,
2480 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
2491 EXPORT_SYMBOL(dev_gro_receive);
2493 static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2497 if (netpoll_rx_on(skb))
2500 for (p = napi->gro_list; p; p = p->next) {
2501 NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
2502 && !compare_ether_header(skb_mac_header(p),
2503 skb_gro_mac_header(skb));
2504 NAPI_GRO_CB(p)->flush = 0;
2507 return dev_gro_receive(napi, skb);
2510 int napi_skb_finish(int ret, struct sk_buff *skb)
2512 int err = NET_RX_SUCCESS;
2516 return netif_receive_skb(skb);
2522 case GRO_MERGED_FREE:
2529 EXPORT_SYMBOL(napi_skb_finish);
2531 void skb_gro_reset_offset(struct sk_buff *skb)
2533 NAPI_GRO_CB(skb)->data_offset = 0;
2534 NAPI_GRO_CB(skb)->frag0 = NULL;
2535 NAPI_GRO_CB(skb)->frag0_len = 0;
2537 if (skb->mac_header == skb->tail &&
2538 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
2539 NAPI_GRO_CB(skb)->frag0 =
2540 page_address(skb_shinfo(skb)->frags[0].page) +
2541 skb_shinfo(skb)->frags[0].page_offset;
2542 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
2545 EXPORT_SYMBOL(skb_gro_reset_offset);
2547 int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2549 skb_gro_reset_offset(skb);
2551 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
2553 EXPORT_SYMBOL(napi_gro_receive);
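/*
 * Usage sketch: a GRO-aware driver substitutes napi_gro_receive() for the
 * plain netif_receive_skb() call in its poll loop; held packets are
 * flushed when napi_complete() runs napi_gro_flush(). Names are
 * hypothetical.
 */
#if 0
	skb->protocol = eth_type_trans(skb, priv->netdev);
	napi_gro_receive(&priv->napi, skb);
#endif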
2555 void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2557 __skb_pull(skb, skb_headlen(skb));
2558 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2562 EXPORT_SYMBOL(napi_reuse_skb);
2564 struct sk_buff *napi_get_frags(struct napi_struct *napi)
2566 struct net_device *dev = napi->dev;
2567 struct sk_buff *skb = napi->skb;
2570 skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
2574 skb_reserve(skb, NET_IP_ALIGN);
2582 EXPORT_SYMBOL(napi_get_frags);
2584 int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
2586 int err = NET_RX_SUCCESS;
2591 skb->protocol = eth_type_trans(skb, napi->dev);
2593 if (ret == GRO_NORMAL)
2594 return netif_receive_skb(skb);
2596 skb_gro_pull(skb, -ETH_HLEN);
2603 case GRO_MERGED_FREE:
2604 napi_reuse_skb(napi, skb);
2610 EXPORT_SYMBOL(napi_frags_finish);
2612 struct sk_buff *napi_frags_skb(struct napi_struct *napi)
2614 struct sk_buff *skb = napi->skb;
2621 skb_reset_mac_header(skb);
2622 skb_gro_reset_offset(skb);
2624 off = skb_gro_offset(skb);
2625 hlen = off + sizeof(*eth);
2626 eth = skb_gro_header_fast(skb, off);
2627 if (skb_gro_header_hard(skb, hlen)) {
2628 eth = skb_gro_header_slow(skb, hlen, off);
2629 if (unlikely(!eth)) {
2630 napi_reuse_skb(napi, skb);
2636 skb_gro_pull(skb, sizeof(*eth));
2639 * This works because the only protocols we care about don't require
2640 * special handling. We'll fix it up properly at the end.
2642 skb->protocol = eth->h_proto;
2647 EXPORT_SYMBOL(napi_frags_skb);
2649 int napi_gro_frags(struct napi_struct *napi)
2651 struct sk_buff *skb = napi_frags_skb(napi);
2656 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
2658 EXPORT_SYMBOL(napi_gro_frags);
2660 static int process_backlog(struct napi_struct *napi, int quota)
2663 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2664 unsigned long start_time = jiffies;
2666 napi->weight = weight_p;
2668 struct sk_buff *skb;
2670 local_irq_disable();
2671 skb = __skb_dequeue(&queue->input_pkt_queue);
2673 __napi_complete(napi);
2679 netif_receive_skb(skb);
2680 } while (++work < quota && jiffies == start_time);
2686 * __napi_schedule - schedule for receive
2687 * @n: entry to schedule
2689 * The entry's receive function will be scheduled to run
2691 void __napi_schedule(struct napi_struct *n)
2693 unsigned long flags;
2695 local_irq_save(flags);
2696 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2697 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2698 local_irq_restore(flags);
2700 EXPORT_SYMBOL(__napi_schedule);
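/*
 * Usage sketch (hypothetical driver): an interrupt handler masks its RX
 * interrupt and defers the work to NAPI. Drivers normally use the
 * napi_schedule()/napi_schedule_prep() wrappers rather than calling
 * __napi_schedule() directly.
 */
#if 0
static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
{
	struct mydrv_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {
		mydrv_disable_rx_irq(priv);	/* hypothetical helper */
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}
#endif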
2702 void __napi_complete(struct napi_struct *n)
2704 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2705 BUG_ON(n->gro_list);
2707 list_del(&n->poll_list);
2708 smp_mb__before_clear_bit();
2709 clear_bit(NAPI_STATE_SCHED, &n->state);
2711 EXPORT_SYMBOL(__napi_complete);
2713 void napi_complete(struct napi_struct *n)
2715 unsigned long flags;
2718 * don't let napi dequeue from the cpu poll list
2719 * just in case it's running on a different cpu
2721 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2725 local_irq_save(flags);
2727 local_irq_restore(flags);
2729 EXPORT_SYMBOL(napi_complete);
2731 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2732 int (*poll)(struct napi_struct *, int), int weight)
2734 INIT_LIST_HEAD(&napi->poll_list);
2735 napi->gro_count = 0;
2736 napi->gro_list = NULL;
2739 napi->weight = weight;
2740 list_add(&napi->dev_list, &dev->napi_list);
2742 #ifdef CONFIG_NETPOLL
2743 spin_lock_init(&napi->poll_lock);
2744 napi->poll_owner = -1;
2746 set_bit(NAPI_STATE_SCHED, &napi->state);
2748 EXPORT_SYMBOL(netif_napi_add);
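/*
 * Usage sketch: NAPI contexts are registered at probe time, conventionally
 * with a weight of 64; "mydrv_poll" is the hypothetical poll routine shown
 * earlier.
 */
#if 0
	netif_napi_add(netdev, &priv->napi, mydrv_poll, 64);
#endif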
2750 void netif_napi_del(struct napi_struct *napi)
2752 struct sk_buff *skb, *next;
2754 list_del_init(&napi->dev_list);
2755 napi_free_frags(napi);
2757 for (skb = napi->gro_list; skb; skb = next) {
2763 napi->gro_list = NULL;
2764 napi->gro_count = 0;
2766 EXPORT_SYMBOL(netif_napi_del);
2769 static void net_rx_action(struct softirq_action *h)
2771 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
2772 unsigned long time_limit = jiffies + 2;
2773 int budget = netdev_budget;
2776 local_irq_disable();
2778 while (!list_empty(list)) {
2779 struct napi_struct *n;
2782 /* If the softirq window is exhausted then punt.
2783 * Allow this to run for 2 jiffies, which allows
2784 * an average latency of 1.5/HZ.
2786 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
2791 /* Even though interrupts have been re-enabled, this
2792 * access is safe because interrupts can only add new
2793 * entries to the tail of this list, and only ->poll()
2794 * calls can remove this head entry from the list.
2796 n = list_entry(list->next, struct napi_struct, poll_list);
2798 have = netpoll_poll_lock(n);
2802 /* This NAPI_STATE_SCHED test is for avoiding a race
2803 * with netpoll's poll_napi(). Only the entity which
2804 * obtains the lock and sees NAPI_STATE_SCHED set will
2805 * actually make the ->poll() call. Therefore we avoid
2806 * accidentally calling ->poll() when NAPI is not scheduled.
2809 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
2810 work = n->poll(n, weight);
2814 WARN_ON_ONCE(work > weight);
2818 local_irq_disable();
2820 /* Drivers must not modify the NAPI state if they
2821 * consume the entire weight. In such cases this code
2822 * still "owns" the NAPI instance and therefore can
2823 * move the instance around on the list at-will.
2825 if (unlikely(work == weight)) {
2826 if (unlikely(napi_disable_pending(n)))
2829 list_move_tail(&n->poll_list, list);
2832 netpoll_poll_unlock(have);
2837 #ifdef CONFIG_NET_DMA
2839 * There may not be any more sk_buffs coming right now, so push
2840 * any pending DMA copies to hardware
2842 dma_issue_pending_all();
2848 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2849 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2853 static gifconf_func_t * gifconf_list [NPROTO];
2856 * register_gifconf - register a SIOCGIF handler
2857 * @family: Address family
2858 * @gifconf: Function handler
2860 * Register protocol dependent address dumping routines. The handler
2861 * that is passed must not be freed or reused until it has been replaced
2862 * by another handler.
2864 int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
2866 if (family >= NPROTO)
2868 gifconf_list[family] = gifconf;
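/*
 * Example: an address family registers its handler once at protocol init
 * time, the way IPv4's devinet code does for PF_INET:
 */
#if 0
	register_gifconf(PF_INET, inet_gifconf);
#endif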
2874 * Map an interface index to its name (SIOCGIFNAME)
2878 * We need this ioctl for efficient implementation of the
2879 * if_indextoname() function required by the IPv6 API. Without
2880 * it, we would have to search all the interfaces to find a match.
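/*
 * Userspace view (illustrative sketch, error handling elided): resolving
 * an interface index to its name through this ioctl.
 *
 *	struct ifreq ifr = { .ifr_ifindex = 2 };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	if (ioctl(fd, SIOCGIFNAME, &ifr) == 0)
 *		printf("ifindex 2 is %s\n", ifr.ifr_name);
 */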
2884 static int dev_ifname(struct net *net, struct ifreq __user *arg)
2886 struct net_device *dev;
2890 * Fetch the caller's info block.
2893 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2896 read_lock(&dev_base_lock);
2897 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
2899 read_unlock(&dev_base_lock);
2903 strcpy(ifr.ifr_name, dev->name);
2904 read_unlock(&dev_base_lock);
2906 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2912 * Perform a SIOCGIFCONF call. This structure will change
2913 * size eventually, and there is nothing I can do about it.
2914 * Thus we will need a 'compatibility mode'.
2917 static int dev_ifconf(struct net *net, char __user *arg)
2920 struct net_device *dev;
2927 * Fetch the caller's info block.
2930 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2937 * Loop over the interfaces, and write an info block for each.
2941 for_each_netdev(net, dev) {
2942 for (i = 0; i < NPROTO; i++) {
2943 if (gifconf_list[i]) {
2946 done = gifconf_list[i](dev, NULL, 0);
2948 done = gifconf_list[i](dev, pos + total,
2958 * All done. Write the updated control block back to the caller.
2960 ifc.ifc_len = total;
2963 * Both BSD and Solaris return 0 here, so we do too.
2965 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
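/*
 * Userspace view (illustrative sketch): enumerating configured interfaces
 * with SIOCGIFCONF. A robust caller grows the buffer and retries while the
 * returned ifc_len equals the size it passed in.
 *
 *	char buf[4096];
 *	struct ifconf ifc = { .ifc_len = sizeof(buf), .ifc_buf = buf };
 *	int i, fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	if (ioctl(fd, SIOCGIFCONF, &ifc) == 0)
 *		for (i = 0; i < ifc.ifc_len / sizeof(struct ifreq); i++)
 *			printf("%s\n", ifc.ifc_req[i].ifr_name);
 */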
2968 #ifdef CONFIG_PROC_FS
2970 * This is invoked by the /proc filesystem handler to display a device in detail.
2973 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
2974 __acquires(dev_base_lock)
2976 struct net *net = seq_file_net(seq);
2978 struct net_device *dev;
2980 read_lock(&dev_base_lock);
2982 return SEQ_START_TOKEN;
2985 for_each_netdev(net, dev)
2992 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2994 struct net *net = seq_file_net(seq);
2996 return v == SEQ_START_TOKEN ?
2997 first_net_device(net) : next_net_device((struct net_device *)v);
3000 void dev_seq_stop(struct seq_file *seq, void *v)
3001 __releases(dev_base_lock)
3003 read_unlock(&dev_base_lock);
3006 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3008 const struct net_device_stats *stats = dev_get_stats(dev);
3010 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3011 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3012 dev->name, stats->rx_bytes, stats->rx_packets,
3014 stats->rx_dropped + stats->rx_missed_errors,
3015 stats->rx_fifo_errors,
3016 stats->rx_length_errors + stats->rx_over_errors +
3017 stats->rx_crc_errors + stats->rx_frame_errors,
3018 stats->rx_compressed, stats->multicast,
3019 stats->tx_bytes, stats->tx_packets,
3020 stats->tx_errors, stats->tx_dropped,
3021 stats->tx_fifo_errors, stats->collisions,
3022 stats->tx_carrier_errors +
3023 stats->tx_aborted_errors +
3024 stats->tx_window_errors +
3025 stats->tx_heartbeat_errors,
3026 stats->tx_compressed);
3030 * Called from the PROCfs module. This now uses the new arbitrary-sized
3031 * /proc/net interface to create /proc/net/dev
3033 static int dev_seq_show(struct seq_file *seq, void *v)
3035 if (v == SEQ_START_TOKEN)
3036 seq_puts(seq, "Inter-| Receive "
3038 " face |bytes packets errs drop fifo frame "
3039 "compressed multicast|bytes packets errs "
3040 "drop fifo colls carrier compressed\n");
3042 dev_seq_printf_stats(seq, v);
3046 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3048 struct netif_rx_stats *rc = NULL;
3050 while (*pos < nr_cpu_ids)
3051 if (cpu_online(*pos)) {
3052 rc = &per_cpu(netdev_rx_stat, *pos);
3059 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3061 return softnet_get_online(pos);
3064 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3067 return softnet_get_online(pos);
3070 static void softnet_seq_stop(struct seq_file *seq, void *v)
3074 static int softnet_seq_show(struct seq_file *seq, void *v)
3076 struct netif_rx_stats *s = v;
3078 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
3079 s->total, s->dropped, s->time_squeeze, 0,
3080 0, 0, 0, 0, /* was fastroute */
3085 static const struct seq_operations dev_seq_ops = {
3086 .start = dev_seq_start,
3087 .next = dev_seq_next,
3088 .stop = dev_seq_stop,
3089 .show = dev_seq_show,
3092 static int dev_seq_open(struct inode *inode, struct file *file)
3094 return seq_open_net(inode, file, &dev_seq_ops,
3095 sizeof(struct seq_net_private));
3098 static const struct file_operations dev_seq_fops = {
3099 .owner = THIS_MODULE,
3100 .open = dev_seq_open,
3102 .llseek = seq_lseek,
3103 .release = seq_release_net,
3106 static const struct seq_operations softnet_seq_ops = {
3107 .start = softnet_seq_start,
3108 .next = softnet_seq_next,
3109 .stop = softnet_seq_stop,
3110 .show = softnet_seq_show,
3113 static int softnet_seq_open(struct inode *inode, struct file *file)
3115 return seq_open(file, &softnet_seq_ops);
3118 static const struct file_operations softnet_seq_fops = {
3119 .owner = THIS_MODULE,
3120 .open = softnet_seq_open,
3122 .llseek = seq_lseek,
3123 .release = seq_release,
3126 static void *ptype_get_idx(loff_t pos)
3128 struct packet_type *pt = NULL;
3132 list_for_each_entry_rcu(pt, &ptype_all, list) {
3138 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
3139 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3148 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
3152 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3155 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3157 struct packet_type *pt;
3158 struct list_head *nxt;
3162 if (v == SEQ_START_TOKEN)
3163 return ptype_get_idx(0);
3166 nxt = pt->list.next;
3167 if (pt->type == htons(ETH_P_ALL)) {
3168 if (nxt != &ptype_all)
3171 nxt = ptype_base[0].next;
3173 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
3175 while (nxt == &ptype_base[hash]) {
3176 if (++hash >= PTYPE_HASH_SIZE)
3178 nxt = ptype_base[hash].next;
3181 return list_entry(nxt, struct packet_type, list);
3184 static void ptype_seq_stop(struct seq_file *seq, void *v)
3190 static int ptype_seq_show(struct seq_file *seq, void *v)
3192 struct packet_type *pt = v;
3194 if (v == SEQ_START_TOKEN)
3195 seq_puts(seq, "Type Device Function\n");
3196 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
3197 if (pt->type == htons(ETH_P_ALL))
3198 seq_puts(seq, "ALL ");
3200 seq_printf(seq, "%04x", ntohs(pt->type));
3202 seq_printf(seq, " %-8s %pF\n",
3203 pt->dev ? pt->dev->name : "", pt->func);
3209 static const struct seq_operations ptype_seq_ops = {
3210 .start = ptype_seq_start,
3211 .next = ptype_seq_next,
3212 .stop = ptype_seq_stop,
3213 .show = ptype_seq_show,
3216 static int ptype_seq_open(struct inode *inode, struct file *file)
3218 return seq_open_net(inode, file, &ptype_seq_ops,
3219 sizeof(struct seq_net_private));
3222 static const struct file_operations ptype_seq_fops = {
3223 .owner = THIS_MODULE,
3224 .open = ptype_seq_open,
3226 .llseek = seq_lseek,
3227 .release = seq_release_net,
3231 static int __net_init dev_proc_net_init(struct net *net)
3235 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
3237 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
3239 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
3242 if (wext_proc_init(net))
3248 proc_net_remove(net, "ptype");
3250 proc_net_remove(net, "softnet_stat");
3252 proc_net_remove(net, "dev");
3256 static void __net_exit dev_proc_net_exit(struct net *net)
3258 wext_proc_exit(net);
3260 proc_net_remove(net, "ptype");
3261 proc_net_remove(net, "softnet_stat");
3262 proc_net_remove(net, "dev");
3265 static struct pernet_operations __net_initdata dev_proc_ops = {
3266 .init = dev_proc_net_init,
3267 .exit = dev_proc_net_exit,
3270 static int __init dev_proc_init(void)
3272 return register_pernet_subsys(&dev_proc_ops);
3275 #define dev_proc_init() 0
3276 #endif /* CONFIG_PROC_FS */
3280 * netdev_set_master - set up master/slave pair
3281 * @slave: slave device
3282 * @master: new master device
3284 * Changes the master device of the slave. Pass %NULL to break the
3285 * bonding. The caller must hold the RTNL semaphore. On a failure
3286 * a negative errno code is returned. On success the reference counts
3287 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3288 * function returns zero.
3290 int netdev_set_master(struct net_device *slave, struct net_device *master)
3292 struct net_device *old = slave->master;
3302 slave->master = master;
3310 slave->flags |= IFF_SLAVE;
3312 slave->flags &= ~IFF_SLAVE;
3314 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3318 static void dev_change_rx_flags(struct net_device *dev, int flags)
3320 const struct net_device_ops *ops = dev->netdev_ops;
3322 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3323 ops->ndo_change_rx_flags(dev, flags);
3326 static int __dev_set_promiscuity(struct net_device *dev, int inc)
3328 unsigned short old_flags = dev->flags;
3334 dev->flags |= IFF_PROMISC;
3335 dev->promiscuity += inc;
3336 if (dev->promiscuity == 0) {
3339 * If inc causes overflow, untouch promisc and return error.
3342 dev->flags &= ~IFF_PROMISC;
3344 dev->promiscuity -= inc;
3345 printk(KERN_WARNING "%s: promiscuity touches roof, "
3346 "set promiscuity failed, promiscuity feature "
3347 "of device might be broken.\n", dev->name);
3351 if (dev->flags != old_flags) {
3352 printk(KERN_INFO "device %s %s promiscuous mode\n",
3353 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3355 if (audit_enabled) {
3356 current_uid_gid(&uid, &gid);
3357 audit_log(current->audit_context, GFP_ATOMIC,
3358 AUDIT_ANOM_PROMISCUOUS,
3359 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3360 dev->name, (dev->flags & IFF_PROMISC),
3361 (old_flags & IFF_PROMISC),
3362 audit_get_loginuid(current),
3364 audit_get_sessionid(current));
3367 dev_change_rx_flags(dev, IFF_PROMISC);
3373 * dev_set_promiscuity - update promiscuity count on a device
3377 * Add or remove promiscuity from a device. While the count in the device
3378 * remains above zero the interface remains promiscuous. Once it hits zero
3379 * the device reverts back to normal filtering operation. A negative inc
3380 * value is used to drop promiscuity on the device.
3381 * Return 0 if successful or a negative errno code on error.
3383 int dev_set_promiscuity(struct net_device *dev, int inc)
3385 unsigned short old_flags = dev->flags;
3388 err = __dev_set_promiscuity(dev, inc);
3391 if (dev->flags != old_flags)
3392 dev_set_rx_mode(dev);
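/*
 * Usage sketch: a capture path takes one promiscuity reference for the
 * lifetime of its session and drops it on teardown (RTNL held, as the
 * packet socket code does):
 */
#if 0
	err = dev_set_promiscuity(dev, 1);	/* while capturing */
	if (err)
		return err;
	/* ...and on teardown: */
	dev_set_promiscuity(dev, -1);
#endif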
3397 * dev_set_allmulti - update allmulti count on a device
3401 * Add or remove reception of all multicast frames to a device. While the
3402 * count in the device remains above zero the interface keeps listening
3403 * to all multicast frames. Once it hits zero the device reverts back to normal
3404 * filtering operation. A negative @inc value is used to drop the counter
3405 * when releasing a resource needing all multicasts.
3406 * Return 0 if successful or a negative errno code on error.
3409 int dev_set_allmulti(struct net_device *dev, int inc)
3411 unsigned short old_flags = dev->flags;
3415 dev->flags |= IFF_ALLMULTI;
3416 dev->allmulti += inc;
3417 if (dev->allmulti == 0) {
3420 * If inc causes overflow, untouch allmulti and return error.
3423 dev->flags &= ~IFF_ALLMULTI;
3425 dev->allmulti -= inc;
3426 printk(KERN_WARNING "%s: allmulti touches roof, "
3427 "set allmulti failed, allmulti feature of "
3428 "device might be broken.\n", dev->name);
3432 if (dev->flags ^ old_flags) {
3433 dev_change_rx_flags(dev, IFF_ALLMULTI);
3434 dev_set_rx_mode(dev);
3440 * Upload unicast and multicast address lists to device and
3441 * configure RX filtering. When the device doesn't support unicast
3442 * filtering it is put in promiscuous mode while unicast addresses are present.
3445 void __dev_set_rx_mode(struct net_device *dev)
3447 const struct net_device_ops *ops = dev->netdev_ops;
3449 /* dev_open will call this function so the list will stay sane. */
3450 if (!(dev->flags&IFF_UP))
3453 if (!netif_device_present(dev))
3456 if (ops->ndo_set_rx_mode)
3457 ops->ndo_set_rx_mode(dev);
3459 /* Unicast address changes may only happen under the rtnl,
3460 * therefore calling __dev_set_promiscuity here is safe.
3462 if (dev->uc.count > 0 && !dev->uc_promisc) {
3463 __dev_set_promiscuity(dev, 1);
3464 dev->uc_promisc = 1;
3465 } else if (dev->uc.count == 0 && dev->uc_promisc) {
3466 __dev_set_promiscuity(dev, -1);
3467 dev->uc_promisc = 0;
3470 if (ops->ndo_set_multicast_list)
3471 ops->ndo_set_multicast_list(dev);
3475 void dev_set_rx_mode(struct net_device *dev)
3477 netif_addr_lock_bh(dev);
3478 __dev_set_rx_mode(dev);
3479 netif_addr_unlock_bh(dev);
3482 /* hw address list handling functions */
3484 static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
3485 int addr_len, unsigned char addr_type)
3487 struct netdev_hw_addr *ha;
3490 if (addr_len > MAX_ADDR_LEN)
3493 list_for_each_entry(ha, &list->list, list) {
3494 if (!memcmp(ha->addr, addr, addr_len) &&
3495 ha->type == addr_type) {
3502 alloc_size = sizeof(*ha);
3503 if (alloc_size < L1_CACHE_BYTES)
3504 alloc_size = L1_CACHE_BYTES;
3505 ha = kmalloc(alloc_size, GFP_ATOMIC);
3508 memcpy(ha->addr, addr, addr_len);
3509 ha->type = addr_type;
3512 list_add_tail_rcu(&ha->list, &list->list);
3517 static void ha_rcu_free(struct rcu_head *head)
3519 struct netdev_hw_addr *ha;
3521 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3525 static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
3526 int addr_len, unsigned char addr_type)
3528 struct netdev_hw_addr *ha;
3530 list_for_each_entry(ha, &list->list, list) {
3531 if (!memcmp(ha->addr, addr, addr_len) &&
3532 (ha->type == addr_type || !addr_type)) {
3535 list_del_rcu(&ha->list);
3536 call_rcu(&ha->rcu_head, ha_rcu_free);
3544 static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
3545 struct netdev_hw_addr_list *from_list,
3547 unsigned char addr_type)
3550 struct netdev_hw_addr *ha, *ha2;
3553 list_for_each_entry(ha, &from_list->list, list) {
3554 type = addr_type ? addr_type : ha->type;
3555 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
3562 list_for_each_entry(ha2, &from_list->list, list) {
3565 type = addr_type ? addr_type : ha2->type;
3566 __hw_addr_del(to_list, ha2->addr, addr_len, type);
3571 static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
3572 struct netdev_hw_addr_list *from_list,
3574 unsigned char addr_type)
3576 struct netdev_hw_addr *ha;
3579 list_for_each_entry(ha, &from_list->list, list) {
3580 type = addr_type ? addr_type : ha->type;
3581 __hw_addr_del(to_list, ha->addr, addr_len, type);
3585 static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3586 struct netdev_hw_addr_list *from_list,
3590 struct netdev_hw_addr *ha, *tmp;
3592 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
3594 err = __hw_addr_add(to_list, ha->addr,
3595 addr_len, ha->type);
3600 } else if (ha->refcount == 1) {
3601 __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
3602 __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
3608 static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3609 struct netdev_hw_addr_list *from_list,
3612 struct netdev_hw_addr *ha, *tmp;
3614 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
3616 __hw_addr_del(to_list, ha->addr,
3617 addr_len, ha->type);
3619 __hw_addr_del(from_list, ha->addr,
3620 addr_len, ha->type);
3625 static void __hw_addr_flush(struct netdev_hw_addr_list *list)
3627 struct netdev_hw_addr *ha, *tmp;
3629 list_for_each_entry_safe(ha, tmp, &list->list, list) {
3630 list_del_rcu(&ha->list);
3631 call_rcu(&ha->rcu_head, ha_rcu_free);
3636 static void __hw_addr_init(struct netdev_hw_addr_list *list)
3638 INIT_LIST_HEAD(&list->list);
3642 /* Device addresses handling functions */
3644 static void dev_addr_flush(struct net_device *dev)
3646 /* rtnl_mutex must be held here */
3648 __hw_addr_flush(&dev->dev_addrs);
3649 dev->dev_addr = NULL;
3652 static int dev_addr_init(struct net_device *dev)
3654 unsigned char addr[MAX_ADDR_LEN];
3655 struct netdev_hw_addr *ha;
3658 /* rtnl_mutex must be held here */
3660 __hw_addr_init(&dev->dev_addrs);
3661 memset(addr, 0, sizeof(addr));
3662 err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
3663 NETDEV_HW_ADDR_T_LAN);
3666 * Get the first (previously created) address from the list
3667 * and set dev_addr pointer to this location.
3669 ha = list_first_entry(&dev->dev_addrs.list,
3670 struct netdev_hw_addr, list);
3671 dev->dev_addr = ha->addr;
3677 * dev_addr_add - Add a device address
3679 * @addr: address to add
3680 * @addr_type: address type
3682 * Add a device address to the device or increase the reference count if
3683 * it already exists.
3685 * The caller must hold the rtnl_mutex.
3687 int dev_addr_add(struct net_device *dev, unsigned char *addr,
3688 unsigned char addr_type)
3694 err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
3696 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3699 EXPORT_SYMBOL(dev_addr_add);
3702 * dev_addr_del - Release a device address.
3704 * @addr: address to delete
3705 * @addr_type: address type
3707 * Release reference to a device address and remove it from the device
3708 * if the reference count drops to zero.
3710 * The caller must hold the rtnl_mutex.
3712 int dev_addr_del(struct net_device *dev, unsigned char *addr,
3713 unsigned char addr_type)
3716 struct netdev_hw_addr *ha;
3721 * We cannot remove the first address from the list because
3722 * dev->dev_addr points to that.
3724 ha = list_first_entry(&dev->dev_addrs.list,
3725 struct netdev_hw_addr, list);
3726 if (ha->addr == dev->dev_addr && ha->refcount == 1)
3729 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
3732 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3735 EXPORT_SYMBOL(dev_addr_del);
3738 * dev_addr_add_multiple - Add device addresses from another device
3739 * @to_dev: device to which addresses will be added
3740 * @from_dev: device from which addresses will be added
3741 * @addr_type: address type - 0 means type will be used from from_dev
3743 * Add the device addresses of one device to another.
3745 * The caller must hold the rtnl_mutex.
3747 int dev_addr_add_multiple(struct net_device *to_dev,
3748 struct net_device *from_dev,
3749 unsigned char addr_type)
3755 if (from_dev->addr_len != to_dev->addr_len)
3757 err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
3758 to_dev->addr_len, addr_type);
3760 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3763 EXPORT_SYMBOL(dev_addr_add_multiple);
3766 * dev_addr_del_multiple - Delete device addresses listed on another device
3767 * @to_dev: device from which the addresses will be deleted
3768 * @from_dev: device supplying the list of addresses to delete
3769 * @addr_type: address type - 0 means the type will be taken from from_dev
3771 * Deletes those addresses in @to_dev that appear in @from_dev's address list.
3773 * The caller must hold the rtnl_mutex.
3775 int dev_addr_del_multiple(struct net_device *to_dev,
3776 struct net_device *from_dev,
3777 unsigned char addr_type)
3781 if (from_dev->addr_len != to_dev->addr_len)
3783 __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
3784 to_dev->addr_len, addr_type);
3785 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3788 EXPORT_SYMBOL(dev_addr_del_multiple);
3790 /* multicast address handling functions */
3792 int __dev_addr_delete(struct dev_addr_list **list, int *count,
3793 void *addr, int alen, int glbl)
3795 struct dev_addr_list *da;
3797 for (; (da = *list) != NULL; list = &da->next) {
3798 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3799 alen == da->da_addrlen) {
3801 int old_glbl = da->da_gusers;
3818 int __dev_addr_add(struct dev_addr_list **list, int *count,
3819 void *addr, int alen, int glbl)
3821 struct dev_addr_list *da;
3823 for (da = *list; da != NULL; da = da->next) {
3824 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3825 da->da_addrlen == alen) {
3827 int old_glbl = da->da_gusers;
3837 da = kzalloc(sizeof(*da), GFP_ATOMIC);
3840 memcpy(da->da_addr, addr, alen);
3841 da->da_addrlen = alen;
3843 da->da_gusers = glbl ? 1 : 0;
3851 * dev_unicast_delete - Release secondary unicast address.
3853 * @addr: address to delete
3855 * Release reference to a secondary unicast address and remove it
3856 * from the device if the reference count drops to zero.
3858 * The caller must hold the rtnl_mutex.
3860 int dev_unicast_delete(struct net_device *dev, void *addr)
3866 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
3867 NETDEV_HW_ADDR_T_UNICAST);
3869 __dev_set_rx_mode(dev);
3872 EXPORT_SYMBOL(dev_unicast_delete);
3875 * dev_unicast_add - add a secondary unicast address
3877 * @addr: address to add
3879 * Add a secondary unicast address to the device or increase
3880 * the reference count if it already exists.
3882 * The caller must hold the rtnl_mutex.
3884 int dev_unicast_add(struct net_device *dev, void *addr)
3890 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
3891 NETDEV_HW_ADDR_T_UNICAST);
3893 __dev_set_rx_mode(dev);
3896 EXPORT_SYMBOL(dev_unicast_add);
3898 int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3899 struct dev_addr_list **from, int *from_count)
3901 struct dev_addr_list *da, *next;
3905 while (da != NULL) {
3907 if (!da->da_synced) {
3908 err = __dev_addr_add(to, to_count,
3909 da->da_addr, da->da_addrlen, 0);
3914 } else if (da->da_users == 1) {
3915 __dev_addr_delete(to, to_count,
3916 da->da_addr, da->da_addrlen, 0);
3917 __dev_addr_delete(from, from_count,
3918 da->da_addr, da->da_addrlen, 0);
3925 void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3926 struct dev_addr_list **from, int *from_count)
3928 struct dev_addr_list *da, *next;
3931 while (da != NULL) {
3933 if (da->da_synced) {
3934 __dev_addr_delete(to, to_count,
3935 da->da_addr, da->da_addrlen, 0);
3937 __dev_addr_delete(from, from_count,
3938 da->da_addr, da->da_addrlen, 0);
3945 * dev_unicast_sync - Synchronize device's unicast list to another device
3946 * @to: destination device
3947 * @from: source device
3949 * Add newly added addresses to the destination device and release
3950 * addresses that have no users left.
3952 * This function is intended to be called from the dev->set_rx_mode
3953 * function of layered software devices.
3955 int dev_unicast_sync(struct net_device *to, struct net_device *from)
3961 if (to->addr_len != from->addr_len)
3964 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
3966 __dev_set_rx_mode(to);
3969 EXPORT_SYMBOL(dev_unicast_sync);
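/*
 * Sketch (assumption: a VLAN-style upper device stacked on a real lower
 * device): the upper device's ndo_set_rx_mode pushes its unicast list down
 * so the lower device's filter covers it.
 */
#if 0
static void upper_set_rx_mode(struct net_device *dev)
{
	struct net_device *lower = upper_get_lower_dev(dev); /* hypothetical */

	dev_unicast_sync(lower, dev);
}
#endif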
3972 * dev_unicast_unsync - Remove synchronized addresses from the destination device
3973 * @to: destination device
3974 * @from: source device
3976 * Remove all addresses that were added to the destination device by
3977 * dev_unicast_sync(). This function is intended to be called from the
3978 * dev->stop function of layered software devices.
3980 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3984 if (to->addr_len != from->addr_len)
3987 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
3988 __dev_set_rx_mode(to);
3990 EXPORT_SYMBOL(dev_unicast_unsync);
3992 static void dev_unicast_flush(struct net_device *dev)
3994 /* rtnl_mutex must be held here */
3996 __hw_addr_flush(&dev->uc);
3999 static void dev_unicast_init(struct net_device *dev)
4001 /* rtnl_mutex must be held here */
4003 __hw_addr_init(&dev->uc);
4007 static void __dev_addr_discard(struct dev_addr_list **list)
4009 struct dev_addr_list *tmp;
4011 while (*list != NULL) {
4014 if (tmp->da_users > tmp->da_gusers)
4015 printk("__dev_addr_discard: address leakage! "
4016 "da_users=%d\n", tmp->da_users);
4021 static void dev_addr_discard(struct net_device *dev)
4023 netif_addr_lock_bh(dev);
4025 __dev_addr_discard(&dev->mc_list);
4028 netif_addr_unlock_bh(dev);
4032 * dev_get_flags - get flags reported to userspace
4035 * Get the combination of flag bits exported through APIs to userspace.
4037 unsigned dev_get_flags(const struct net_device *dev)
4041 flags = (dev->flags & ~(IFF_PROMISC |
4046 (dev->gflags & (IFF_PROMISC |
4049 if (netif_running(dev)) {
4050 if (netif_oper_up(dev))
4051 flags |= IFF_RUNNING;
4052 if (netif_carrier_ok(dev))
4053 flags |= IFF_LOWER_UP;
4054 if (netif_dormant(dev))
4055 flags |= IFF_DORMANT;
4062 * dev_change_flags - change device settings
4064 * @flags: device state flags
4066 * Change device settings based on the supplied state flags. The flags are
4067 * in the userspace exported format.
4069 int dev_change_flags(struct net_device *dev, unsigned flags)
4072 int old_flags = dev->flags;
4077 * Set the flags on our device.
4080 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4081 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4083 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4087 * Load in the correct multicast list now that the flags have changed.
4090 if ((old_flags ^ flags) & IFF_MULTICAST)
4091 dev_change_rx_flags(dev, IFF_MULTICAST);
4093 dev_set_rx_mode(dev);
4096 * Have we downed the interface? We handle IFF_UP ourselves
4097 * according to user attempts to set it, rather than blindly setting it.
4102 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4103 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
4106 dev_set_rx_mode(dev);
4109 if (dev->flags & IFF_UP &&
4110 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
4112 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4114 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4115 int inc = (flags & IFF_PROMISC) ? +1 : -1;
4116 dev->gflags ^= IFF_PROMISC;
4117 dev_set_promiscuity(dev, inc);
4120 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4121 is important. Some (broken) drivers set IFF_PROMISC when
4122 IFF_ALLMULTI is requested, without asking us and without reporting it.
4124 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4125 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
4126 dev->gflags ^= IFF_ALLMULTI;
4127 dev_set_allmulti(dev, inc);
4130 /* Exclude state transition flags, already notified */
4131 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
4133 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
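/*
 * Usage sketch: bringing an interface down administratively, the same way
 * the SIOCSIFFLAGS ioctl path does (RTNL held):
 */
#if 0
	err = dev_change_flags(dev, dev_get_flags(dev) & ~IFF_UP);
#endif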
4139 * dev_set_mtu - Change maximum transfer unit
4141 * @new_mtu: new transfer unit
4143 * Change the maximum transfer size of the network device.
4145 int dev_set_mtu(struct net_device *dev, int new_mtu)
4147 const struct net_device_ops *ops = dev->netdev_ops;
4150 if (new_mtu == dev->mtu)
4153 /* MTU must be positive. */
4157 if (!netif_device_present(dev))
4161 if (ops->ndo_change_mtu)
4162 err = ops->ndo_change_mtu(dev, new_mtu);
4166 if (!err && dev->flags & IFF_UP)
4167 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
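/*
 * Usage sketch: a tunnel-style driver shrinking its MTU to leave room for
 * encapsulation overhead (the 20-byte outer header is illustrative):
 */
#if 0
	rtnl_lock();
	err = dev_set_mtu(dev, ETH_DATA_LEN - 20);
	rtnl_unlock();
#endif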
4172 * dev_set_mac_address - Change Media Access Control Address
4176 * Change the hardware (MAC) address of the device
4178 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4180 const struct net_device_ops *ops = dev->netdev_ops;
4183 if (!ops->ndo_set_mac_address)
4185 if (sa->sa_family != dev->type)
4187 if (!netif_device_present(dev))
4189 err = ops->ndo_set_mac_address(dev, sa);
4191 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4196 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
4198 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
4201 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4207 case SIOCGIFFLAGS: /* Get interface flags */
4208 ifr->ifr_flags = (short) dev_get_flags(dev);
4211 case SIOCGIFMETRIC: /* Get the metric on the interface
4212 (currently unused) */
4213 ifr->ifr_metric = 0;
4216 case SIOCGIFMTU: /* Get the MTU of a device */
4217 ifr->ifr_mtu = dev->mtu;
4222 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4224 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4225 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4226 ifr->ifr_hwaddr.sa_family = dev->type;
4234 ifr->ifr_map.mem_start = dev->mem_start;
4235 ifr->ifr_map.mem_end = dev->mem_end;
4236 ifr->ifr_map.base_addr = dev->base_addr;
4237 ifr->ifr_map.irq = dev->irq;
4238 ifr->ifr_map.dma = dev->dma;
4239 ifr->ifr_map.port = dev->if_port;
4243 ifr->ifr_ifindex = dev->ifindex;
4247 ifr->ifr_qlen = dev->tx_queue_len;
4251 /* dev_ioctl() should ensure this case is never reached */
4263 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4265 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4268 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4269 const struct net_device_ops *ops;
4274 ops = dev->netdev_ops;
4277 case SIOCSIFFLAGS: /* Set interface flags */
4278 return dev_change_flags(dev, ifr->ifr_flags);
4280 case SIOCSIFMETRIC: /* Set the metric on the interface
4281 (currently unused) */
4284 case SIOCSIFMTU: /* Set the MTU of a device */
4285 return dev_set_mtu(dev, ifr->ifr_mtu);
4288 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4290 case SIOCSIFHWBROADCAST:
4291 if (ifr->ifr_hwaddr.sa_family != dev->type)
4293 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4294 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4295 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4299 if (ops->ndo_set_config) {
4300 if (!netif_device_present(dev))
4302 return ops->ndo_set_config(dev, &ifr->ifr_map);
4307 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4308 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4310 if (!netif_device_present(dev))
4312 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4316 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4317 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4319 if (!netif_device_present(dev))
4321 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4325 if (ifr->ifr_qlen < 0)
4327 dev->tx_queue_len = ifr->ifr_qlen;
4331 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4332 return dev_change_name(dev, ifr->ifr_newname);
4335 * Unknown or private ioctl
4339 if ((cmd >= SIOCDEVPRIVATE &&
4340 cmd <= SIOCDEVPRIVATE + 15) ||
4341 cmd == SIOCBONDENSLAVE ||
4342 cmd == SIOCBONDRELEASE ||
4343 cmd == SIOCBONDSETHWADDR ||
4344 cmd == SIOCBONDSLAVEINFOQUERY ||
4345 cmd == SIOCBONDINFOQUERY ||
4346 cmd == SIOCBONDCHANGEACTIVE ||
4347 cmd == SIOCGMIIPHY ||
4348 cmd == SIOCGMIIREG ||
4349 cmd == SIOCSMIIREG ||
4350 cmd == SIOCBRADDIF ||
4351 cmd == SIOCBRDELIF ||
4352 cmd == SIOCSHWTSTAMP ||
4353 cmd == SIOCWANDEV) {
4355 if (ops->ndo_do_ioctl) {
4356 if (netif_device_present(dev))
4357 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4369 * This function handles all "interface"-type I/O control requests. The actual
4370 * 'doing' part of this is dev_ifsioc above.
4374 * dev_ioctl - network device ioctl
4375 * @net: the applicable net namespace
4376 * @cmd: command to issue
4377 * @arg: pointer to a struct ifreq in user space
4379 * Issue ioctl functions to devices. This is normally called by the
4380 * user space syscall interfaces but can sometimes be useful for
4381 * other purposes. The return value is the return from the syscall if
4382 * positive or a negative errno code on error.
4385 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4391 /* One special case: SIOCGIFCONF takes ifconf argument
4392 and requires shared lock, because it sleeps writing to user space.
4396 if (cmd == SIOCGIFCONF) {
4398 ret = dev_ifconf(net, (char __user *) arg);
4402 if (cmd == SIOCGIFNAME)
4403 return dev_ifname(net, (struct ifreq __user *)arg);
4405 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4408 ifr.ifr_name[IFNAMSIZ-1] = 0;
4410 colon = strchr(ifr.ifr_name, ':');
4415 * See which interface the caller is talking about.
4420 * These ioctl calls:
4421 * - can be done by all.
4422 * - atomic and do not require locking.
4433 dev_load(net, ifr.ifr_name);
4434 read_lock(&dev_base_lock);
4435 ret = dev_ifsioc_locked(net, &ifr, cmd);
4436 read_unlock(&dev_base_lock);
4440 if (copy_to_user(arg, &ifr,
4441 sizeof(struct ifreq)))
4447 dev_load(net, ifr.ifr_name);
4449 ret = dev_ethtool(net, &ifr);
4454 if (copy_to_user(arg, &ifr,
4455 sizeof(struct ifreq)))
4461 * These ioctl calls:
4462 * - require superuser power.
4463 * - require strict serialization.
4469 if (!capable(CAP_NET_ADMIN))
4471 dev_load(net, ifr.ifr_name);
4473 ret = dev_ifsioc(net, &ifr, cmd);
4478 if (copy_to_user(arg, &ifr,
4479 sizeof(struct ifreq)))
4485 * These ioctl calls:
4486 * - require superuser power.
4487 * - require strict serialization.
4488 * - do not return a value
4498 case SIOCSIFHWBROADCAST:
4501 case SIOCBONDENSLAVE:
4502 case SIOCBONDRELEASE:
4503 case SIOCBONDSETHWADDR:
4504 case SIOCBONDCHANGEACTIVE:
4508 if (!capable(CAP_NET_ADMIN))
4511 case SIOCBONDSLAVEINFOQUERY:
4512 case SIOCBONDINFOQUERY:
4513 dev_load(net, ifr.ifr_name);
4515 ret = dev_ifsioc(net, &ifr, cmd);
4520 /* Get the per device memory space. We can add this but
4521 * currently do not support it */
4523 /* Set the per device memory buffer space.
4524 * Not applicable in our case */
4529 * Unknown or private ioctl.
4532 if (cmd == SIOCWANDEV ||
4533 (cmd >= SIOCDEVPRIVATE &&
4534 cmd <= SIOCDEVPRIVATE + 15)) {
4535 dev_load(net, ifr.ifr_name);
4537 ret = dev_ifsioc(net, &ifr, cmd);
4539 if (!ret && copy_to_user(arg, &ifr,
4540 sizeof(struct ifreq)))
4544 /* Take care of Wireless Extensions */
4545 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4546 return wext_handle_ioctl(net, &ifr, cmd, arg);
4553 * dev_new_index - allocate an ifindex
4554 * @net: the applicable net namespace
4556 * Returns a suitable unique value for a new device interface
4557 * number. The caller must hold the rtnl semaphore or the
4558 * dev_base_lock to be sure it remains unique.
4560 static int dev_new_index(struct net *net)
4566 if (!__dev_get_by_index(net, ifindex))
4571 /* Delayed registration/unregistration */
4572 static LIST_HEAD(net_todo_list);
4574 static void net_set_todo(struct net_device *dev)
4576 list_add_tail(&dev->todo_list, &net_todo_list);
4579 static void rollback_registered(struct net_device *dev)
4581 BUG_ON(dev_boot_phase);
4584 /* Some devices call this without ever having registered, as part of initialization unwind. */
4585 if (dev->reg_state == NETREG_UNINITIALIZED) {
4586 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
4587 "was registered\n", dev->name, dev);
4593 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4595 /* If device is running, close it first. */
4598 /* And unlink it from device chain. */
4599 unlist_netdevice(dev);
4601 dev->reg_state = NETREG_UNREGISTERING;
4605 /* Shutdown queueing discipline. */
4609 /* Notify protocols that we are about to destroy
4610 this device. They should clean up all of their state.
4612 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4615 * Flush the unicast and multicast chains
4617 dev_unicast_flush(dev);
4618 dev_addr_discard(dev);
4620 if (dev->netdev_ops->ndo_uninit)
4621 dev->netdev_ops->ndo_uninit(dev);
4623 /* Notifier chain MUST detach us from master device. */
4624 WARN_ON(dev->master);
4626 /* Remove entries from kobject tree */
4627 netdev_unregister_kobject(dev);
4634 static void __netdev_init_queue_locks_one(struct net_device *dev,
4635 struct netdev_queue *dev_queue,
4638 spin_lock_init(&dev_queue->_xmit_lock);
4639 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
4640 dev_queue->xmit_lock_owner = -1;
4643 static void netdev_init_queue_locks(struct net_device *dev)
4645 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4646 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
4649 unsigned long netdev_fix_features(unsigned long features, const char *name)
4651 /* Fix illegal SG+CSUM combinations. */
4652 if ((features & NETIF_F_SG) &&
4653 !(features & NETIF_F_ALL_CSUM)) {
4655 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4656 "checksum feature.\n", name);
4657 features &= ~NETIF_F_SG;
4660 /* TSO requires that SG is present as well. */
4661 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4663 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4664 "SG feature.\n", name);
4665 features &= ~NETIF_F_TSO;
4668 if (features & NETIF_F_UFO) {
4669 if (!(features & NETIF_F_GEN_CSUM)) {
4671 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4672 "since no NETIF_F_HW_CSUM feature.\n",
4674 features &= ~NETIF_F_UFO;
4677 if (!(features & NETIF_F_SG)) {
4679 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4680 "since no NETIF_F_SG feature.\n", name);
4681 features &= ~NETIF_F_UFO;
4687 EXPORT_SYMBOL(netdev_fix_features);
4690 * register_netdevice - register a network device
4691 * @dev: device to register
4693 * Take a completed network device structure and add it to the kernel
4694 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4695 * chain. 0 is returned on success. A negative errno code is returned
4696 * on a failure to set up the device, or if the name is a duplicate.
4698 * Callers must hold the rtnl semaphore. You may want
4699 * register_netdev() instead of this.
4702 * The locking appears insufficient to guarantee two parallel registers
4703 * will not get the same name.
4706 int register_netdevice(struct net_device *dev)
4708 struct hlist_head *head;
4709 struct hlist_node *p;
4711 struct net *net = dev_net(dev);
4713 BUG_ON(dev_boot_phase);
4718 /* When net_device's are persistent, this will be fatal. */
4719 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
4722 spin_lock_init(&dev->addr_list_lock);
4723 netdev_set_addr_lockdep_class(dev);
4724 netdev_init_queue_locks(dev);
4728 /* Init, if this function is available */
4729 if (dev->netdev_ops->ndo_init) {
4730 ret = dev->netdev_ops->ndo_init(dev);
4738 if (!dev_valid_name(dev->name)) {
4743 dev->ifindex = dev_new_index(net);
4744 if (dev->iflink == -1)
4745 dev->iflink = dev->ifindex;
4747 /* Check for existence of name */
4748 head = dev_name_hash(net, dev->name);
4749 hlist_for_each(p, head) {
4750 struct net_device *d
4751 = hlist_entry(p, struct net_device, name_hlist);
4752 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4758 /* Fix illegal checksum combinations */
4759 if ((dev->features & NETIF_F_HW_CSUM) &&
4760 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4761 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4763 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4766 if ((dev->features & NETIF_F_NO_CSUM) &&
4767 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4768 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4770 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4773 dev->features = netdev_fix_features(dev->features, dev->name);
4775 /* Enable software GSO if SG is supported. */
4776 if (dev->features & NETIF_F_SG)
4777 dev->features |= NETIF_F_GSO;
4779 netdev_initialize_kobject(dev);
4780 ret = netdev_register_kobject(dev);
4783 dev->reg_state = NETREG_REGISTERED;
4786 * Default initial state at registration is that the
4787 * device is present.
4790 set_bit(__LINK_STATE_PRESENT, &dev->state);
4792 dev_init_scheduler(dev);
4794 list_netdevice(dev);
4796 /* Notify protocols that a new device appeared. */
4797 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
4798 ret = notifier_to_errno(ret);
4800 rollback_registered(dev);
4801 dev->reg_state = NETREG_UNREGISTERED;
4808 if (dev->netdev_ops->ndo_uninit)
4809 dev->netdev_ops->ndo_uninit(dev);
4814 * init_dummy_netdev - init a dummy network device for NAPI
4815 * @dev: device to init
4817 * This takes a network device structure and initializes the minimum
4818 * amount of fields so it can be used to schedule NAPI polls without
4819 * registering a full-blown interface. This is to be used by drivers
4820 * that need to tie several hardware interfaces to a single NAPI
4821 * poll scheduler due to HW limitations.
4823 int init_dummy_netdev(struct net_device *dev)
4825 /* Clear everything. Note we don't initialize spinlocks
4826 * as they aren't supposed to be taken by any of the
4827 * NAPI code and this dummy netdev is supposed to be
4828 * only ever used for NAPI polls
4830 memset(dev, 0, sizeof(struct net_device));
4832 /* make sure we BUG if trying to hit standard
4833 * register/unregister code path
4835 dev->reg_state = NETREG_DUMMY;
4837 /* initialize the ref count */
4838 atomic_set(&dev->refcnt, 1);
4840 /* NAPI wants this */
4841 INIT_LIST_HEAD(&dev->napi_list);
4843 /* a dummy interface is started by default */
4844 set_bit(__LINK_STATE_PRESENT, &dev->state);
4845 set_bit(__LINK_STATE_START, &dev->state);
4849 EXPORT_SYMBOL_GPL(init_dummy_netdev);
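/*
 * Usage sketch: a driver whose hardware funnels several interfaces through
 * a single interrupt can hang its shared NAPI context off a dummy netdev:
 */
#if 0
	init_dummy_netdev(&priv->dummy_dev);
	netif_napi_add(&priv->dummy_dev, &priv->napi, mydrv_shared_poll, 64);
#endif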
4853 * register_netdev - register a network device
4854 * @dev: device to register
4856 * Take a completed network device structure and add it to the kernel
4857 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4858 * chain. 0 is returned on success. A negative errno code is returned
4859 * on a failure to set up the device, or if the name is a duplicate.
4861 * This is a wrapper around register_netdevice that takes the rtnl semaphore
4862 * and expands the device name if you passed a format string to
4865 int register_netdev(struct net_device *dev)
4872 * If the name is a format string the caller wants us to do a name allocation.
4875 if (strchr(dev->name, '%')) {
4876 err = dev_alloc_name(dev, dev->name);
4881 err = register_netdevice(dev);
4886 EXPORT_SYMBOL(register_netdev);
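/*
 * Lifecycle sketch (hypothetical driver): alloc_netdev() is the
 * single-queue wrapper around alloc_netdev_mq() below; register_netdev()
 * expands the "%d" in the name under the RTNL.
 */
#if 0
static int mydrv_create(void)
{
	struct net_device *netdev;
	int err;

	netdev = alloc_netdev(sizeof(struct mydrv_priv), "myeth%d",
			      ether_setup);
	if (!netdev)
		return -ENOMEM;

	err = register_netdev(netdev);
	if (err)
		free_netdev(netdev);
	return err;
}
#endif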
4889 * netdev_wait_allrefs - wait until all references are gone.
4891 * This is called when unregistering network devices.
4893 * Any protocol or device that holds a reference should register
4894 * for netdevice notification, and clean up and put back the
4895 * reference if they receive an UNREGISTER event.
4896 * We can get stuck here if buggy protocols don't correctly call dev_put.
4899 static void netdev_wait_allrefs(struct net_device *dev)
4901 unsigned long rebroadcast_time, warning_time;
4903 rebroadcast_time = warning_time = jiffies;
4904 while (atomic_read(&dev->refcnt) != 0) {
4905 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
4908 /* Rebroadcast unregister notification */
4909 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4911 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4913 /* We must not have linkwatch events
4914 * pending on unregister. If this
4915 * happens, we simply run the queue
4916 * unscheduled, resulting in a noop for this device.
4919 linkwatch_run_queue();
4924 rebroadcast_time = jiffies;
4929 if (time_after(jiffies, warning_time + 10 * HZ)) {
4930 printk(KERN_EMERG "unregister_netdevice: "
4931 "waiting for %s to become free. Usage "
4933 dev->name, atomic_read(&dev->refcnt));
4934 warning_time = jiffies;
4943 * register_netdevice(x1);
4944 * register_netdevice(x2);
4946 * unregister_netdevice(y1);
4947 * unregister_netdevice(y2);
4953 * We are invoked by rtnl_unlock().
4954 * This allows us to deal with problems:
4955 * 1) We can delete sysfs objects which invoke hotplug
4956 * without deadlocking with linkwatch via keventd.
4957 * 2) Since we run with the RTNL semaphore not held, we can sleep
4958 * safely in order to wait for the netdev refcnt to drop to zero.
4960 * We must not return until all unregister events added during
4961 * the interval the lock was held have been completed.
4963 void netdev_run_todo(void)
4965 struct list_head list;
4967 /* Snapshot list, allow later requests */
4968 list_replace_init(&net_todo_list, &list);
4972 while (!list_empty(&list)) {
4973 struct net_device *dev
4974 = list_entry(list.next, struct net_device, todo_list);
4975 list_del(&dev->todo_list);
4977 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
4978 printk(KERN_ERR "network todo '%s' but state %d\n",
4979 dev->name, dev->reg_state);
4984 dev->reg_state = NETREG_UNREGISTERED;
4986 on_each_cpu(flush_backlog, dev, 1);
4988 netdev_wait_allrefs(dev);
4991 BUG_ON(atomic_read(&dev->refcnt));
4992 WARN_ON(dev->ip_ptr);
4993 WARN_ON(dev->ip6_ptr);
4994 WARN_ON(dev->dn_ptr);
4996 if (dev->destructor)
4997 dev->destructor(dev);
4999 /* Free network device */
5000 kobject_put(&dev->dev.kobj);
5005 * dev_get_stats - get network device statistics
5006 * @dev: device to get statistics from
5008 * Get network statistics from device. The device driver may provide
5009 * its own method by setting dev->netdev_ops->ndo_get_stats; otherwise
5010 * the internal statistics structure is used.
5012 const struct net_device_stats *dev_get_stats(struct net_device *dev)
5014 const struct net_device_ops *ops = dev->netdev_ops;
5016 if (ops->ndo_get_stats)
5017 return ops->ndo_get_stats(dev);
5019 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5020 struct net_device_stats *stats = &dev->stats;
5022 struct netdev_queue *txq;
5024 for (i = 0; i < dev->num_tx_queues; i++) {
5025 txq = netdev_get_tx_queue(dev, i);
5026 tx_bytes += txq->tx_bytes;
5027 tx_packets += txq->tx_packets;
5028 tx_dropped += txq->tx_dropped;
5030 if (tx_bytes || tx_packets || tx_dropped) {
5031 stats->tx_bytes = tx_bytes;
5032 stats->tx_packets = tx_packets;
5033 stats->tx_dropped = tx_dropped;
5038 EXPORT_SYMBOL(dev_get_stats);
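/*
 * Sketch of a driver-provided override (hypothetical): fold a hardware
 * drop counter into the software statistics before returning them.
 */
#if 0
static struct net_device_stats *mydrv_get_stats(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;

	stats->rx_missed_errors = mydrv_read_missed_counter(dev);
	return stats;
}
#endif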
5040 static void netdev_init_one_queue(struct net_device *dev,
5041 struct netdev_queue *queue,
5047 static void netdev_init_queues(struct net_device *dev)
5049 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5050 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5051 spin_lock_init(&dev->tx_global_lock);
5055 * alloc_netdev_mq - allocate network device
5056 * @sizeof_priv: size of private data to allocate space for
5057 * @name: device name format string
5058 * @setup: callback to initialize device
5059 * @queue_count: the number of subqueues to allocate
5061 * Allocates a struct net_device with private data area for driver use
5062 * and performs basic initialization. Also allocates subqueue structs
5063 * for each queue on the device at the end of the netdevice.
5065 struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5066 void (*setup)(struct net_device *), unsigned int queue_count)
5068 struct netdev_queue *tx;
5069 struct net_device *dev;
5071 struct net_device *p;
5073 BUG_ON(strlen(name) >= sizeof(dev->name));
5075 alloc_size = sizeof(struct net_device);
5077 /* ensure 32-byte alignment of private area */
5078 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
5079 alloc_size += sizeof_priv;
5081 /* ensure 32-byte alignment of whole construct */
5082 alloc_size += NETDEV_ALIGN - 1;
5084 p = kzalloc(alloc_size, GFP_KERNEL);
5086 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
5090 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
5092 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5097 dev = PTR_ALIGN(p, NETDEV_ALIGN);
5098 dev->padded = (char *)dev - (char *)p;
5100 if (dev_addr_init(dev))
5103 dev_unicast_init(dev);
5105 dev_net_set(dev, &init_net);
5108 dev->num_tx_queues = queue_count;
5109 dev->real_num_tx_queues = queue_count;
5111 dev->gso_max_size = GSO_MAX_SIZE;
5113 netdev_init_queues(dev);
5115 INIT_LIST_HEAD(&dev->napi_list);
5116 dev->priv_flags = IFF_XMIT_DST_RELEASE;
5118 strcpy(dev->name, name);
5128 EXPORT_SYMBOL(alloc_netdev_mq);

/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released.
 * If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
        struct napi_struct *p, *n;

        release_net(dev_net(dev));
        kfree(dev->_tx);

        /* Flush device addresses */
        dev_addr_flush(dev);

        list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
                netif_napi_del(p);

        /* Compatibility with error handling in drivers */
        if (dev->reg_state == NETREG_UNINITIALIZED) {
                kfree((char *)dev - dev->padded);
                return;
        }

        BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
        dev->reg_state = NETREG_RELEASED;

        /* will free via device release */
        put_device(&dev->dev);
}
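
/*
 * Error-path sketch (illustrative): the usual probe-time pairing with
 * the hypothetical my_create() above.  If register_netdev() fails the
 * device was never registered, so a bare free_netdev() is the correct
 * cleanup (the NETREG_UNINITIALIZED branch above).
 */
static int my_probe(void)
{
        struct net_device *dev = my_create();
        int err;

        if (!dev)
                return -ENOMEM;

        err = register_netdev(dev);
        if (err) {
                free_netdev(dev);
                return err;
        }
        return 0;
}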

/**
 * synchronize_net - Synchronize with packet receive processing
 *
 * Wait for packets currently being received to be done.
 * Does not block later packets from starting.
 */
void synchronize_net(void)
{
        might_sleep();
        synchronize_rcu();
}

/**
 * unregister_netdevice - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * Callers must hold the rtnl semaphore.  You may want
 * unregister_netdev() instead of this.
 */
void unregister_netdevice(struct net_device *dev)
{
        ASSERT_RTNL();

        rollback_registered(dev);
        /* Finish processing unregister after unlock */
        net_set_todo(dev);
}
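
/*
 * Batching sketch (illustrative): unregister_netdevice() exists so a
 * caller can remove several devices under a single rtnl_lock(); the
 * "my_devs" array is hypothetical.
 */
static void my_remove_all(struct net_device *my_devs[], int count)
{
        int i;

        rtnl_lock();
        for (i = 0; i < count; i++)
                unregister_netdevice(my_devs[i]);
        rtnl_unlock();
}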

/**
 * unregister_netdev - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * This is just a wrapper for unregister_netdevice that takes
 * the rtnl semaphore.  In general you want to use this and not
 * unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
        rtnl_lock();
        unregister_netdevice(dev);
        rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
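
/*
 * Teardown sketch (illustrative): the common driver remove path.
 * unregister_netdev() takes and releases the rtnl lock itself; the
 * final free_netdev() then drops the last reference.
 */
static void my_remove(struct net_device *dev)
{
        unregister_netdev(dev);
        free_netdev(dev);
}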

/**
 * dev_change_net_namespace - move device to different network namespace
 * @dev: device
 * @net: network namespace
 * @pat: If not NULL name pattern to try if the current device name
 *       is already taken in the destination network namespace.
 *
 * This function shuts down a device interface and moves it
 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
 *
 * Callers must hold the rtnl semaphore.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
        char buf[IFNAMSIZ];
        const char *destname;
        int err;

        ASSERT_RTNL();

        /* Don't allow namespace local devices to be moved. */
        err = -EINVAL;
        if (dev->features & NETIF_F_NETNS_LOCAL)
                goto out;

#ifdef CONFIG_SYSFS
        /* Don't allow real devices to be moved when sysfs is enabled. */
        err = -EINVAL;
        if (dev->dev.parent)
                goto out;
#endif

        /* Ensure the device has been registered */
        err = -EINVAL;
        if (dev->reg_state != NETREG_REGISTERED)
                goto out;

        /* Get out if there is nothing to do */
        err = 0;
        if (net_eq(dev_net(dev), net))
                goto out;

        /* Pick the destination device name, and ensure
         * we can use it in the destination network namespace.
         */
        err = -EEXIST;
        destname = dev->name;
        if (__dev_get_by_name(net, destname)) {
                /* We get here if we can't use the current device name */
                if (!pat)
                        goto out;
                if (!dev_valid_name(pat))
                        goto out;
                if (strchr(pat, '%')) {
                        if (__dev_alloc_name(net, pat, buf) < 0)
                                goto out;
                        destname = buf;
                } else
                        destname = pat;
                if (__dev_get_by_name(net, destname))
                        goto out;
        }

        /*
         * And now a mini version of register_netdevice and unregister_netdevice.
         */

        /* If device is running close it first. */
        dev_close(dev);

        /* And unlink it from device chain */
        err = -ENODEV;
        unlist_netdevice(dev);

        synchronize_net();

        /* Shutdown queueing discipline. */
        dev_shutdown(dev);

        /* Notify protocols, that we are about to destroy
         * this device. They should clean all the things.
         */
        call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

        /* Flush the unicast and multicast chains */
        dev_unicast_flush(dev);
        dev_addr_discard(dev);

        netdev_unregister_kobject(dev);

        /* Actually switch the network namespace */
        dev_net_set(dev, net);

        /* Assign the new device name */
        if (destname != dev->name)
                strcpy(dev->name, destname);

        /* If there is an ifindex conflict assign a new one */
        if (__dev_get_by_index(net, dev->ifindex)) {
                int iflink = (dev->iflink == dev->ifindex);
                dev->ifindex = dev_new_index(net);
                if (iflink)
                        dev->iflink = dev->ifindex;
        }

        /* Fixup kobjects */
        err = netdev_register_kobject(dev);
        WARN_ON(err);

        /* Add the device back in the hashes */
        list_netdevice(dev);

        /* Notify protocols, that a new device appeared. */
        call_netdevice_notifiers(NETDEV_REGISTER, dev);

        synchronize_net();
        err = 0;
out:
        return err;
}
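
/*
 * Usage sketch (illustrative): moving a device into another namespace
 * under the rtnl lock, falling back to a "dev%d"-style name if the
 * current one is already taken there.  "target" is a hypothetical
 * struct net obtained elsewhere.
 */
static int my_move(struct net_device *dev, struct net *target)
{
        int err;

        rtnl_lock();
        err = dev_change_net_namespace(dev, target, "dev%d");
        rtnl_unlock();
        return err;
}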

static int dev_cpu_callback(struct notifier_block *nfb,
                            unsigned long action,
                            void *ocpu)
{
        struct sk_buff **list_skb;
        struct Qdisc **list_net;
        struct sk_buff *skb;
        unsigned int cpu, oldcpu = (unsigned long)ocpu;
        struct softnet_data *sd, *oldsd;

        if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
                return NOTIFY_OK;

        local_irq_disable();
        cpu = smp_processor_id();
        sd = &per_cpu(softnet_data, cpu);
        oldsd = &per_cpu(softnet_data, oldcpu);

        /* Find end of our completion_queue. */
        list_skb = &sd->completion_queue;
        while (*list_skb)
                list_skb = &(*list_skb)->next;
        /* Append completion queue from offline CPU. */
        *list_skb = oldsd->completion_queue;
        oldsd->completion_queue = NULL;

        /* Find end of our output_queue. */
        list_net = &sd->output_queue;
        while (*list_net)
                list_net = &(*list_net)->next_sched;
        /* Append output queue from offline CPU. */
        *list_net = oldsd->output_queue;
        oldsd->output_queue = NULL;

        raise_softirq_irqoff(NET_TX_SOFTIRQ);
        local_irq_enable();

        /* Process offline CPU's input_pkt_queue */
        while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
                netif_rx(skb);

        return NOTIFY_OK;
}

/**
 * netdev_increment_features - increment feature set by one
 * @all: current feature set
 * @one: new feature set
 * @mask: mask feature set
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all.  Will not
 * enable anything that is off in @mask. Returns the new feature set.
 */
unsigned long netdev_increment_features(unsigned long all, unsigned long one,
                                        unsigned long mask)
{
        /* If device needs checksumming, downgrade to it. */
        if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
                all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
        else if (mask & NETIF_F_ALL_CSUM) {
                /* If one device supports v4/v6 checksumming, set for all. */
                if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
                    !(all & NETIF_F_GEN_CSUM)) {
                        all &= ~NETIF_F_ALL_CSUM;
                        all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
                }

                /* If one device supports hw checksumming, set for all. */
                if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
                        all &= ~NETIF_F_ALL_CSUM;
                        all |= NETIF_F_HW_CSUM;
                }
        }

        one |= NETIF_F_ALL_CSUM;

        one |= all & NETIF_F_ONE_FOR_ALL;
        all &= one | NETIF_F_LLTX | NETIF_F_GSO;
        all |= one & mask & NETIF_F_ONE_FOR_ALL;

        return all;
}
EXPORT_SYMBOL(netdev_increment_features);
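
/*
 * Aggregation sketch (illustrative): how a bonding-style master might
 * fold a newly enslaved device's features into its own set.  The mask
 * here (NETIF_F_ALL_CSUM | NETIF_F_SG) is only an example of what the
 * master is willing to enable.
 */
static void my_fold_features(struct net_device *master,
                             struct net_device *slave)
{
        master->features = netdev_increment_features(master->features,
                                                     slave->features,
                                                     NETIF_F_ALL_CSUM |
                                                     NETIF_F_SG);
}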

static struct hlist_head *netdev_create_hash(void)
{
        int i;
        struct hlist_head *hash;

        hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
        if (hash != NULL)
                for (i = 0; i < NETDEV_HASHENTRIES; i++)
                        INIT_HLIST_HEAD(&hash[i]);

        return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
        INIT_LIST_HEAD(&net->dev_base_head);

        net->dev_name_head = netdev_create_hash();
        if (net->dev_name_head == NULL)
                goto err_name;

        net->dev_index_head = netdev_create_hash();
        if (net->dev_index_head == NULL)
                goto err_idx;

        return 0;

err_idx:
        kfree(net->dev_name_head);
err_name:
        return -ENOMEM;
}

/**
 * netdev_drivername - network driver for the device
 * @dev: network device
 * @buffer: buffer for resulting name
 * @len: size of buffer
 *
 * Determine network driver for device.
 */
char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
{
        const struct device_driver *driver;
        const struct device *parent;

        if (len <= 0 || !buffer)
                return buffer;
        buffer[0] = 0;

        parent = dev->dev.parent;
        if (!parent)
                return buffer;

        driver = parent->driver;
        if (driver && driver->name)
                strlcpy(buffer, driver->name, len);
        return buffer;
}
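
/*
 * Usage sketch (illustrative): resolving the driver name for a log
 * message, much as the tx watchdog timeout report does.  The buffer
 * size is an arbitrary choice for the example.
 */
static void my_report(struct net_device *dev)
{
        char drivername[64];

        printk(KERN_INFO "%s: driver %s\n", dev->name,
               netdev_drivername(dev, drivername, sizeof(drivername)));
}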

static void __net_exit netdev_exit(struct net *net)
{
        kfree(net->dev_name_head);
        kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
        .init = netdev_init,
        .exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
        struct net_device *dev;
        /*
         * Push all migratable network devices back to the
         * initial network namespace
         */
        rtnl_lock();
restart:
        for_each_netdev(net, dev) {
                int err;
                char fb_name[IFNAMSIZ];

                /* Ignore unmovable devices (i.e. loopback) */
                if (dev->features & NETIF_F_NETNS_LOCAL)
                        continue;

                /* Delete virtual devices */
                if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
                        dev->rtnl_link_ops->dellink(dev);
                        goto restart;
                }

                /* Push remaining network devices to init_net */
                snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
                err = dev_change_net_namespace(dev, &init_net, fb_name);
                if (err) {
                        printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
                               __func__, dev->name, err);
                        BUG();
                }
                goto restart;
        }
        rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
        .exit = default_device_exit,
};
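
/*
 * Registration sketch (illustrative): how a subsystem hooks per-
 * namespace setup and teardown, mirroring netdev_net_ops above.  The
 * "my_net_*" names are hypothetical.
 */
static int __net_init my_net_init(struct net *net)
{
        return 0;               /* allocate per-namespace state here */
}

static void __net_exit my_net_exit(struct net *net)
{
        /* free per-namespace state here */
}

static struct pernet_operations my_net_ops = {
        .init = my_net_init,
        .exit = my_net_exit,
};
/* registered during boot with register_pernet_subsys(&my_net_ops) */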

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 *
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
        int i, rc = -ENOMEM;

        BUG_ON(!dev_boot_phase);

        if (dev_proc_init())
                goto out;

        if (netdev_kobject_init())
                goto out;

        INIT_LIST_HEAD(&ptype_all);
        for (i = 0; i < PTYPE_HASH_SIZE; i++)
                INIT_LIST_HEAD(&ptype_base[i]);

        if (register_pernet_subsys(&netdev_net_ops))
                goto out;

        /* Initialise the packet receive queues. */
        for_each_possible_cpu(i) {
                struct softnet_data *queue;

                queue = &per_cpu(softnet_data, i);
                skb_queue_head_init(&queue->input_pkt_queue);
                queue->completion_queue = NULL;
                INIT_LIST_HEAD(&queue->poll_list);

                queue->backlog.poll = process_backlog;
                queue->backlog.weight = weight_p;
                queue->backlog.gro_list = NULL;
                queue->backlog.gro_count = 0;
        }

        dev_boot_phase = 0;

        /* The loopback device is special: if any other network device
         * is present in a network namespace the loopback device must
         * be present.  Since we now dynamically allocate and free the
         * loopback device, ensure this invariant is maintained by
         * keeping the loopback device as the first device on the
         * list of network devices.  This also ensures the loopback device
         * is the first device that appears and the last network device
         * that disappears.
         */
        if (register_pernet_device(&loopback_net_ops))
                goto out;

        if (register_pernet_device(&default_device_ops))
                goto out;

        open_softirq(NET_TX_SOFTIRQ, net_tx_action);
        open_softirq(NET_RX_SOFTIRQ, net_rx_action);

        hotcpu_notifier(dev_cpu_callback, 0);
        dst_init();
        dev_mcast_init();
        rc = 0;
out:
        return rc;
}

subsys_initcall(net_dev_init);

static int __init initialize_hashrnd(void)
{
        get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
        return 0;
}

late_initcall_sync(initialize_hashrnd);

EXPORT_SYMBOL(__dev_get_by_index);
EXPORT_SYMBOL(__dev_get_by_name);
EXPORT_SYMBOL(__dev_remove_pack);
EXPORT_SYMBOL(dev_valid_name);
EXPORT_SYMBOL(dev_add_pack);
EXPORT_SYMBOL(dev_alloc_name);
EXPORT_SYMBOL(dev_close);
EXPORT_SYMBOL(dev_get_by_flags);
EXPORT_SYMBOL(dev_get_by_index);
EXPORT_SYMBOL(dev_get_by_name);
EXPORT_SYMBOL(dev_open);
EXPORT_SYMBOL(dev_queue_xmit);
EXPORT_SYMBOL(dev_remove_pack);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
EXPORT_SYMBOL(dev_change_flags);
EXPORT_SYMBOL(dev_set_mtu);
EXPORT_SYMBOL(dev_set_mac_address);
EXPORT_SYMBOL(free_netdev);
EXPORT_SYMBOL(netdev_boot_setup_check);
EXPORT_SYMBOL(netdev_set_master);
EXPORT_SYMBOL(netdev_state_change);
EXPORT_SYMBOL(netif_receive_skb);
EXPORT_SYMBOL(netif_rx);
EXPORT_SYMBOL(register_gifconf);
EXPORT_SYMBOL(register_netdevice);
EXPORT_SYMBOL(register_netdevice_notifier);
EXPORT_SYMBOL(skb_checksum_help);
EXPORT_SYMBOL(synchronize_net);
EXPORT_SYMBOL(unregister_netdevice);
EXPORT_SYMBOL(unregister_netdevice_notifier);
EXPORT_SYMBOL(net_enable_timestamp);
EXPORT_SYMBOL(net_disable_timestamp);
EXPORT_SYMBOL(dev_get_flags);

EXPORT_SYMBOL(dev_load);

EXPORT_PER_CPU_SYMBOL(softnet_data);