/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>

#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *		sure which should go first, but I bet it won't make much
 *		difference if we are running VLANs.  The good news is that
 *		this protocol won't be in the list unless compiled in, so
 *		the average user (w/out VLANs) will not be adversely affected.
 *		--BLG
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
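/*
 * Illustrative sketch (not part of the original file): a pure reader
 * walking the device list under RCU, per the locking rules above. The
 * function name count_running_devs is hypothetical.
 *
 *	static int count_running_devs(struct net *net)
 *	{
 *		struct net_device *dev;
 *		int n = 0;
 *
 *		rcu_read_lock();
 *		for_each_netdev_rcu(net, dev)
 *			if (dev->flags & IFF_UP)
 *				n++;
 *		rcu_read_unlock();
 *		return n;
 *	}
 */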
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}
/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}
/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
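/*
 * Illustrative sketch (not part of the original file): a module that
 * taps every received frame by registering a packet handler with
 * dev_add_pack(). The names my_ptype and my_rcv are hypothetical.
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		// inspect skb here; consume our copy when done
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_ptype __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_ALL),	// tap all protocols
 *		.func = my_rcv,
 *	};
 *
 *	dev_add_pack(&my_ptype);	// typically from module init
 *	...
 *	dev_remove_pack(&my_ptype);	// from module exit; may sleep
 */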
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);
/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}
/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
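/*
 * Illustrative example (not from the original file): given the parsing
 * above, a kernel command line of
 *
 *	netdev=9,0x300,0,0,eth0
 *
 * stores irq=9 and base_addr=0x300 (mem_start/mem_end left at 0) for the
 * device that later matches "eth0" in netdev_boot_setup_check().
 */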
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);
/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
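/*
 * Illustrative sketch (not part of the original file): the typical
 * refcounted lookup pattern with dev_get_by_name(); the caller owns a
 * reference until dev_put().
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		// ... use dev; it cannot be freed under us ...
 *		dev_put(dev);
 *	}
 */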
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);
/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);
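/*
 * Illustrative examples (not from the original file) of the rules above:
 * "eth0" and "my-tap" are accepted; the empty string, ".", "..", "a b"
 * (whitespace), "net/0" ('/'), and any name of IFNAMSIZ bytes or more
 * are rejected.
 */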
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
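/*
 * Illustrative sketch (not part of the original file): a driver asking
 * for the first free "eth%d" slot before registration.
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		goto fail;	// no free slot or bad format string
 *	// on success err is the unit number and dev->name is e.g. "eth2"
 */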
static int dev_get_valid_name(struct net_device *dev, const char *name)
{
	struct net *net;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name(dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(dev, newname);
	if (err < 0)
		return err;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return ret;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		}
	}

	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return 0;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);
/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;
	int no_module;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	no_module = !dev;
	if (no_module && capable(CAP_NET_ADMIN))
		no_module = request_module("netdev-%s", name);
	if (no_module && capable(CAP_SYS_MODULE)) {
		if (!request_module("%s", name))
			pr_err("Loading kernel module for a network device "
"with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s "
"instead\n", name);
	}
}
EXPORT_SYMBOL(dev_load);
static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		net_dmaengine_get();
		dev_set_rx_mode(dev);
		dev_activate(dev);
	}

	return ret;
}
/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, unreg_list) {
		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_clear_bit(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		net_dmaengine_put();
	}

	return 0;
}
static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);
	return retval;
}

static int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(dev, tmp, head, unreg_list)
		if (!(dev->flags & IFF_UP))
			list_move(&dev->unreg_list, &tmp_list);

	__dev_close_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	/* rollback_registered_many needs the complete original list */
	list_splice(&tmp_list, head);
	return 0;
}
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->unreg_list, &single);
		dev_close_many(&single);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	u32 flags;

	if (dev->ethtool_ops && dev->ethtool_ops->get_flags)
		flags = dev->ethtool_ops->get_flags(dev);
	else
		flags = ethtool_op_get_flags(dev);

	if (!(flags & ETH_FLAG_LRO))
		return;

	__ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO);
	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);
static int dev_boot_phase = 1;

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
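/*
 * Illustrative sketch (not part of the original file): a minimal notifier
 * that logs devices coming up. The names my_notify and my_nb are
 * hypothetical; in this kernel version the callback's void pointer is the
 * struct net_device itself.
 *
 *	static int my_notify(struct notifier_block *nb,
 *			     unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_notify,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);	// replays REGISTER/UP events
 */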
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);
static inline void net_timestamp_set(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

static inline void net_timestamp_check(struct sk_buff *skb)
{
	if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
		__net_timestamp(skb);
}

static inline bool is_skb_forwardable(struct net_device *dev,
				      struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}
/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	skb_orphan(skb);
	nf_reset(skb);

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb_set_dev(skb, dev);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
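/*
 * Illustrative sketch (not part of the original file): how a pair device,
 * in the style of veth, might hand a transmitted skb to its peer's
 * receive path. struct my_priv and the priv->peer field are hypothetical.
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		dev_forward_skb(priv->peer, skb);	// always consumes skb
 *		return NETDEV_TX_OK;
 *	}
 */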
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       ntohs(skb2->protocol),
					       dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			pt_prev = ptype;
		}
	}
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}
/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not, NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case, if
 * TC0 is invalid, nothing can be done, so disable priority mappings.
 * It is expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warning("Number of in use tx queues changed "
			   "invalidating tc mappings. Priority "
			   "traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warning("Number of in use tx queues "
				   "changed. Priority %i to tc "
				   "mapping %i is no longer valid "
				   "setting map to 0\n",
				   i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues)
			qdisc_reset_all_tx_gt(dev, txq);
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
#ifdef CONFIG_RPS
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif
static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
/**
 * skb_set_dev - assign a new device to a buffer
 * @skb: buffer for the new device
 * @dev: network device
 *
 * If an skb is owned by a device already, we have to reset
 * all data private to the namespace a device belongs to
 * before assigning it a new device.
 */
#ifdef CONFIG_NET_NS
void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb_dst_drop(skb);
	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
		secpath_reset(skb);
		nf_reset(skb);
		skb_init_secmark(skb);
		skb->mark = 0;
		skb->priority = 0;
		skb->nf_trace = 0;
		skb->ipvs_property = 0;
#ifdef CONFIG_NET_SCHED
		skb->tc_index = 0;
#endif
	}
	skb->dev = dev;
}
EXPORT_SYMBOL(skb_set_dev);
#endif /* CONFIG_NET_NS */
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int vlan_depth = ETH_HLEN;
	int err;

	while (type == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vh;

		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
			return ERR_PTR(-EINVAL);

		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
		type = vh->h_vlan_encapsulated_proto;
		vlan_depth += VLAN_HLEN;
	}

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		struct net_device *dev = skb->dev;
		struct ethtool_drvinfo info = {};

		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
			dev->ethtool_ops->get_drvinfo(dev, &info);

		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
		     info.driver, dev ? dev->features : 0L,
		     skb->sk ? skb->sk->sk_route_caps : 0L,
		     skb->len, skb->data_len, skb->ip_summed);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif
/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;
	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			if (PageHighMem(skb_shinfo(skb)->frags[i].page))
				return 1;
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}
struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}
/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *	@features: device features as applicable to this skb
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs;

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}
/*
 * Try to orphan skb early, right before transmission by the device.
 * We cannot orphan skb if tx timestamp is requested or the sk-reference
 * is needed on driver level for other reasons, e.g. see net/can/raw.c
 */
static inline void skb_orphan_try(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	if (sk && !skb_shinfo(skb)->tx_flags) {
		/* skb_tx_hash() won't be able to get sk.
		 * We copy sk_hash into skb->rxhash
		 */
		if (!skb->rxhash)
			skb->rxhash = sk->sk_hash;
		skb_orphan(skb);
	}
}
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_V4_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_V6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features)
{
	if (!can_checksum_protocol(features, protocol)) {
		features &= ~NETIF_F_ALL_CSUM;
		features &= ~NETIF_F_SG;
	} else if (illegal_highdma(skb->dev, skb)) {
		features &= ~NETIF_F_SG;
	}

	return features;
}

u32 netif_skb_features(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;
	u32 features = skb->dev->features;

	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	} else if (!vlan_tx_tag_present(skb)) {
		return harmonize_features(skb, protocol, features);
	}

	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);

	if (protocol != htons(ETH_P_8021Q)) {
		return harmonize_features(skb, protocol, features);
	} else {
		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
		return harmonize_features(skb, protocol, features);
	}
}
EXPORT_SYMBOL(netif_skb_features);
/*
 * Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG, or if
 *	   at least one of fragments is in highmem and device does not
 *	   support DMA from it.
 */
static inline int skb_needs_linearize(struct sk_buff *skb,
				      int features)
{
	return skb_is_nonlinear(skb) &&
			((skb_has_frag_list(skb) &&
				!(features & NETIF_F_FRAGLIST)) ||
			(skb_shinfo(skb)->nr_frags &&
				!(features & NETIF_F_SG)));
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc = NETDEV_TX_OK;

	if (likely(!skb->next)) {
		u32 features;

		/*
		 * If device doesn't need skb->dst, release it right now while
		 * it's hot in this cpu cache
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(skb);

		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		skb_orphan_try(skb);

		features = netif_skb_features(skb);

		if (vlan_tx_tag_present(skb) &&
		    !(features & NETIF_F_HW_VLAN_TX)) {
			skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
			if (unlikely(!skb))
				goto out;

			skb->vlan_tci = 0;
		}

		if (netif_needs_gso(skb, features)) {
			if (unlikely(dev_gso_segment(skb, features)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		} else {
			if (skb_needs_linearize(skb, features) &&
			    __skb_linearize(skb))
				goto out_kfree_skb;

			/* If packet is not checksummed and device does not
			 * support checksumming for this protocol, complete
			 * checksumming here.
			 */
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				skb_set_transport_header(skb,
					skb_checksum_start_offset(skb));
				if (!(features & NETIF_F_ALL_CSUM) &&
				     skb_checksum_help(skb))
					goto out_kfree_skb;
			}
		}

		rc = ops->ndo_start_xmit(skb, dev);
		trace_net_dev_xmit(skb, rc);
		if (rc == NETDEV_TX_OK)
			txq_trans_update(txq);
		return rc;
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;

		/*
		 * If device doesn't need nskb->dst, release it right now while
		 * it's hot in this cpu cache
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(nskb);

		rc = ops->ndo_start_xmit(nskb, dev);
		trace_net_dev_xmit(nskb, rc);
		if (unlikely(rc != NETDEV_TX_OK)) {
			if (rc & ~NETDEV_TX_MASK)
				goto out_kfree_gso_skb;
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		txq_trans_update(txq);
		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

out_kfree_gso_skb:
	if (likely(skb->next == NULL))
		skb->destructor = DEV_GSO_CB(skb)->destructor;
out_kfree_skb:
	kfree_skb(skb);
out:
	return rc;
}
static u32 hashrnd __read_mostly;

/*
 * Returns a Tx hash based on the given packet descriptor and a Tx queues'
 * number to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = (__force u16) skb->protocol ^ skb->rxhash;
	hash = jhash_1word(hash, hashrnd);

	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
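/*
 * Worked example (added for illustration): the final line maps a 32-bit
 * hash uniformly onto [0, qcount) without a modulo. With qcount = 4 and
 * hash = 0xC0000000, ((u64)hash * 4) >> 32 = 3; any hash below
 * 0x40000000 maps to queue 0, and so on, before the traffic-class
 * offset is added.
 */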
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		if (net_ratelimit()) {
			pr_warning("%s selects TX queue %d, but "
				   "real number of TX queues is %d\n",
				   dev->name, queue_index, dev->real_num_tx_queues);
		}
		return 0;
	}
	return queue_index;
}
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else {
				u32 hash;
				if (skb->sk && skb->sk->sk_hash)
					hash = skb->sk->sk_hash;
				else
					hash = (__force u16) skb->protocol ^
					    skb->rxhash;
				hash = jhash_1word(hash, hashrnd);
				queue_index = map->queues[
				    ((u64)hash * map->len) >> 32];
			}
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}
static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	int queue_index;
	const struct net_device_ops *ops = dev->netdev_ops;

	if (dev->real_num_tx_queues == 1)
		queue_index = 0;
	else if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb);
		queue_index = dev_cap_txqueue(dev, queue_index);
	} else {
		struct sock *sk = skb->sk;
		queue_index = sk_tx_queue_get(sk);

		if (queue_index < 0 || skb->ooo_okay ||
		    queue_index >= dev->real_num_tx_queues) {
			int old_index = queue_index;

			queue_index = get_xps_queue(dev, skb);
			if (queue_index < 0)
				queue_index = skb_tx_hash(dev, skb);

			if (queue_index != old_index && sk) {
				struct dst_entry *dst =
				    rcu_dereference_check(sk->sk_dst_cache, 1);

				if (dst && skb_dst(skb) == dst)
					sk_tx_queue_set(sk, queue_index);
			}
		}
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	bool contended;
	int rc;

	qdisc_skb_cb(skb)->pkt_len = skb->len;
	qdisc_calculate_pkt_len(skb, q);
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
	 * and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */
		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
			skb_dst_force(skb);

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		skb_dst_force(skb);
		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}
2373 static DEFINE_PER_CPU(int, xmit_recursion);
2374 #define RECURSION_LIMIT 10
2377 * dev_queue_xmit - transmit a buffer
2378 * @skb: buffer to transmit
2380 * Queue a buffer for transmission to a network device. The caller must
2381 * have set the device and priority and built the buffer before calling
2382 * this function. The function can be called from an interrupt.
2384 * A negative errno code is returned on a failure. A success does not
2385 * guarantee the frame will be transmitted as it may be dropped due
2386 * to congestion or traffic shaping.
2388 * -----------------------------------------------------------------------------------
2389 * I notice this method can also return errors from the queue disciplines,
2390 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2391 * be positive.
2393 * Regardless of the return value, the skb is consumed, so it is currently
2394 * difficult to retry a send to this method. (You can bump the ref count
2395 * before sending to hold a reference for retry if you are careful.)
2397 * When calling this method, interrupts MUST be enabled. This is because
2398 * the BH enable code must have IRQs enabled so that it will not deadlock.
2401 int dev_queue_xmit(struct sk_buff *skb)
2403 struct net_device *dev = skb->dev;
2404 struct netdev_queue *txq;
2408 /* Disable soft irqs for various locks below. Also
2409 * stops preemption for RCU.
2413 txq = dev_pick_tx(dev, skb);
2414 q = rcu_dereference_bh(txq->qdisc);
2416 #ifdef CONFIG_NET_CLS_ACT
2417 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2419 trace_net_dev_queue(skb);
2421 rc = __dev_xmit_skb(skb, q, dev, txq);
2425 /* The device has no queue. Common case for software devices:
2426 loopback, all sorts of tunnels...
2428 Really, it is unlikely that netif_tx_lock protection is necessary
2429 here. (f.e. loopback and IP tunnels are clean, ignoring statistics
2430 counters.)
2431 However, it is possible that they rely on protection
2432 made by us here.
2434 Check this and take the lock. It is not prone to deadlocks.
2435 Or shoot the noqueue qdisc instead, which is even simpler 8)
2437 if (dev->flags & IFF_UP) {
2438 int cpu = smp_processor_id(); /* ok because BHs are off */
2440 if (txq->xmit_lock_owner != cpu) {
2442 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2443 goto recursion_alert;
2445 HARD_TX_LOCK(dev, txq, cpu);
2447 if (!netif_tx_queue_stopped(txq)) {
2448 __this_cpu_inc(xmit_recursion);
2449 rc = dev_hard_start_xmit(skb, dev, txq);
2450 __this_cpu_dec(xmit_recursion);
2451 if (dev_xmit_complete(rc)) {
2452 HARD_TX_UNLOCK(dev, txq);
2456 HARD_TX_UNLOCK(dev, txq);
2457 if (net_ratelimit())
2458 printk(KERN_CRIT "Virtual device %s asks to "
2459 "queue packet!\n", dev->name);
2461 /* Recursion is detected! It is possible,
2462 * unfortunately. */
2465 if (net_ratelimit())
2466 printk(KERN_CRIT "Dead loop on virtual device "
2467 "%s, fix it urgently!\n", dev->name);
2472 rcu_read_unlock_bh();
2477 rcu_read_unlock_bh();
2480 EXPORT_SYMBOL(dev_queue_xmit);
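/* Editor's note: a minimal usage sketch, not part of this file. A caller
 * that builds its own frame hands it off like this; the mydrv_* name is
 * invented. Remember that dev_queue_xmit() consumes the skb regardless of
 * the return value, and that NET_XMIT_* errors are positive.
 */
#if 0
static int mydrv_send_frame(struct net_device *dev, const void *buf, size_t len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, LL_RESERVED_SPACE(dev));	/* room for link header */
	memcpy(skb_put(skb, len), buf, len);		/* frame payload */
	skb->dev = dev;
	skb->priority = 0;
	return dev_queue_xmit(skb);			/* consumes skb */
}
#endif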
2483 /*=======================================================================
2484 Receiver routines
2485 =======================================================================*/
2487 int netdev_max_backlog __read_mostly = 1000;
2488 int netdev_tstamp_prequeue __read_mostly = 1;
2489 int netdev_budget __read_mostly = 300;
2490 int weight_p __read_mostly = 64; /* old backlog weight */
2492 /* Called with irq disabled */
2493 static inline void ____napi_schedule(struct softnet_data *sd,
2494 struct napi_struct *napi)
2496 list_add_tail(&napi->poll_list, &sd->poll_list);
2497 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2501 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
2502 * and src/dst port numbers. Returns a non-zero hash number on success
2503 * and 0 on failure.
2505 __u32 __skb_get_rxhash(struct sk_buff *skb)
2507 int nhoff, hash = 0, poff;
2508 const struct ipv6hdr *ip6;
2509 const struct iphdr *ip;
2511 u32 addr1, addr2, ihl;
2517 nhoff = skb_network_offset(skb);
2519 switch (skb->protocol) {
2520 case __constant_htons(ETH_P_IP):
2521 if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
2524 ip = (const struct iphdr *) (skb->data + nhoff);
2525 if (ip->frag_off & htons(IP_MF | IP_OFFSET))
2528 ip_proto = ip->protocol;
2529 addr1 = (__force u32) ip->saddr;
2530 addr2 = (__force u32) ip->daddr;
2533 case __constant_htons(ETH_P_IPV6):
2534 if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
2537 ip6 = (const struct ipv6hdr *) (skb->data + nhoff);
2538 ip_proto = ip6->nexthdr;
2539 addr1 = (__force u32) ip6->saddr.s6_addr32[3];
2540 addr2 = (__force u32) ip6->daddr.s6_addr32[3];
2548 poff = proto_ports_offset(ip_proto);
2550 nhoff += ihl * 4 + poff;
2551 if (pskb_may_pull(skb, nhoff + 4)) {
2552 ports.v32 = * (__force u32 *) (skb->data + nhoff);
2553 if (ports.v16[1] < ports.v16[0])
2554 swap(ports.v16[0], ports.v16[1]);
2558 /* get a consistent hash (same value on both flow directions) */
2562 hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
2569 EXPORT_SYMBOL(__skb_get_rxhash);
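/* Editor's note: an illustration of why the ports are sorted above. For a
 * TCP flow A:1024 <-> B:80, one direction sees (sport=1024, dport=80) and
 * the other (sport=80, dport=1024). Putting the smaller port first makes
 * jhash_3words() see the same 32-bit port word for both directions, so
 * both map to the same rxhash (the elided lines of this function swap
 * addr1/addr2 the same way). Hedged sketch, not part of this file:
 */
#if 0
static u32 example_symmetric_hash(u32 addr1, u32 addr2, u16 p1, u16 p2)
{
	if (p2 < p1)
		swap(p1, p2);		/* canonical port order */
	if (addr2 < addr1)
		swap(addr1, addr2);	/* canonical address order */
	return jhash_3words(addr1, addr2, ((u32)p2 << 16) | p1, hashrnd);
}
#endif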
2573 /* One global table that all flow-based protocols share. */
2574 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
2575 EXPORT_SYMBOL(rps_sock_flow_table);
2577 static struct rps_dev_flow *
2578 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2579 struct rps_dev_flow *rflow, u16 next_cpu)
2583 tcpu = rflow->cpu = next_cpu;
2584 if (tcpu != RPS_NO_CPU) {
2585 #ifdef CONFIG_RFS_ACCEL
2586 struct netdev_rx_queue *rxqueue;
2587 struct rps_dev_flow_table *flow_table;
2588 struct rps_dev_flow *old_rflow;
2593 /* Should we steer this flow to a different hardware queue? */
2594 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
2595 !(dev->features & NETIF_F_NTUPLE))
2597 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
2598 if (rxq_index == skb_get_rx_queue(skb))
2601 rxqueue = dev->_rx + rxq_index;
2602 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2605 flow_id = skb->rxhash & flow_table->mask;
2606 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
2607 rxq_index, flow_id);
2611 rflow = &flow_table->flows[flow_id];
2612 rflow->cpu = next_cpu;
2614 if (old_rflow->filter == rflow->filter)
2615 old_rflow->filter = RPS_NO_FILTER;
2619 per_cpu(softnet_data, tcpu).input_queue_head;
2626 * get_rps_cpu is called from netif_receive_skb and returns the target
2627 * CPU from the RPS map of the receiving queue for a given skb.
2628 * rcu_read_lock must be held on entry.
2630 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2631 struct rps_dev_flow **rflowp)
2633 struct netdev_rx_queue *rxqueue;
2634 struct rps_map *map;
2635 struct rps_dev_flow_table *flow_table;
2636 struct rps_sock_flow_table *sock_flow_table;
2640 if (skb_rx_queue_recorded(skb)) {
2641 u16 index = skb_get_rx_queue(skb);
2642 if (unlikely(index >= dev->real_num_rx_queues)) {
2643 WARN_ONCE(dev->real_num_rx_queues > 1,
2644 "%s received packet on queue %u, but number "
2645 "of RX queues is %u\n",
2646 dev->name, index, dev->real_num_rx_queues);
2649 rxqueue = dev->_rx + index;
2653 map = rcu_dereference(rxqueue->rps_map);
2655 if (map->len == 1 &&
2656 !rcu_dereference_raw(rxqueue->rps_flow_table)) {
2657 tcpu = map->cpus[0];
2658 if (cpu_online(tcpu))
2662 } else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) {
2666 skb_reset_network_header(skb);
2667 if (!skb_get_rxhash(skb))
2670 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2671 sock_flow_table = rcu_dereference(rps_sock_flow_table);
2672 if (flow_table && sock_flow_table) {
2674 struct rps_dev_flow *rflow;
2676 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2679 next_cpu = sock_flow_table->ents[skb->rxhash &
2680 sock_flow_table->mask];
2683 * If the desired CPU (where last recvmsg was done) is
2684 * different from current CPU (one in the rx-queue flow
2685 * table entry), switch if one of the following holds:
2686 * - Current CPU is unset (equal to RPS_NO_CPU).
2687 * - Current CPU is offline.
2688 * - The current CPU's queue tail has advanced beyond the
2689 * last packet that was enqueued using this table entry.
2690 * This guarantees that all previous packets for the flow
2691 * have been dequeued, thus preserving in-order delivery.
2693 if (unlikely(tcpu != next_cpu) &&
2694 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2695 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2696 rflow->last_qtail)) >= 0))
2697 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
2699 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2707 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
2709 if (cpu_online(tcpu)) {
2719 #ifdef CONFIG_RFS_ACCEL
2722 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
2723 * @dev: Device on which the filter was set
2724 * @rxq_index: RX queue index
2725 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
2726 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
2728 * Drivers that implement ndo_rx_flow_steer() should periodically call
2729 * this function for each installed filter and remove the filters for
2730 * which it returns %true.
2732 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
2733 u32 flow_id, u16 filter_id)
2735 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
2736 struct rps_dev_flow_table *flow_table;
2737 struct rps_dev_flow *rflow;
2742 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2743 if (flow_table && flow_id <= flow_table->mask) {
2744 rflow = &flow_table->flows[flow_id];
2745 cpu = ACCESS_ONCE(rflow->cpu);
2746 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
2747 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
2748 rflow->last_qtail) <
2749 (int)(10 * flow_table->mask)))
2755 EXPORT_SYMBOL(rps_may_expire_flow);
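/* Editor's note: a minimal sketch of the periodic expiry scan described in
 * the comment above (hypothetical driver code; the mydrv_* structure and
 * helpers are invented). Typically run from a timer or service task:
 */
#if 0
static void mydrv_expire_rfs_filters(struct mydrv_priv *priv)
{
	int i;

	for (i = 0; i < priv->n_filters; i++) {
		struct mydrv_filter *f = &priv->filters[i];

		if (!f->installed)
			continue;
		if (rps_may_expire_flow(priv->netdev, f->rxq_index,
					f->flow_id, f->filter_id)) {
			mydrv_remove_hw_filter(priv, f);	/* invented */
			f->installed = false;
		}
	}
}
#endif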
2757 #endif /* CONFIG_RFS_ACCEL */
2759 /* Called from hardirq (IPI) context */
2760 static void rps_trigger_softirq(void *data)
2762 struct softnet_data *sd = data;
2764 ____napi_schedule(sd, &sd->backlog);
2768 #endif /* CONFIG_RPS */
2771 * Check whether this softnet_data structure belongs to another CPU.
2772 * If so, queue it on our IPI list and return 1.
2775 static int rps_ipi_queued(struct softnet_data *sd)
2778 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
2781 sd->rps_ipi_next = mysd->rps_ipi_list;
2782 mysd->rps_ipi_list = sd;
2784 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2787 #endif /* CONFIG_RPS */
2792 * enqueue_to_backlog is called to queue an skb on a per-CPU backlog
2793 * queue (may be a remote CPU queue).
2795 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
2796 unsigned int *qtail)
2798 struct softnet_data *sd;
2799 unsigned long flags;
2801 sd = &per_cpu(softnet_data, cpu);
2803 local_irq_save(flags);
2806 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
2807 if (skb_queue_len(&sd->input_pkt_queue)) {
2809 __skb_queue_tail(&sd->input_pkt_queue, skb);
2810 input_queue_tail_incr_save(sd, qtail);
2812 local_irq_restore(flags);
2813 return NET_RX_SUCCESS;
2816 /* Schedule NAPI for the backlog device.
2817 * We can use a non-atomic operation since we own the queue lock.
2819 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
2820 if (!rps_ipi_queued(sd))
2821 ____napi_schedule(sd, &sd->backlog);
2829 local_irq_restore(flags);
2831 atomic_long_inc(&skb->dev->rx_dropped);
2837 * netif_rx - post buffer to the network code
2838 * @skb: buffer to post
2840 * This function receives a packet from a device driver and queues it for
2841 * the upper (protocol) levels to process. It always succeeds. The buffer
2842 * may be dropped during processing for congestion control or by the
2843 * protocol layers.
2846 * NET_RX_SUCCESS (no congestion)
2847 * NET_RX_DROP (packet was dropped)
2851 int netif_rx(struct sk_buff *skb)
2855 /* if netpoll wants it, pretend we never saw it */
2856 if (netpoll_rx(skb))
2859 if (netdev_tstamp_prequeue)
2860 net_timestamp_check(skb);
2862 trace_netif_rx(skb);
2865 struct rps_dev_flow voidflow, *rflow = &voidflow;
2871 cpu = get_rps_cpu(skb->dev, skb, &rflow);
2873 cpu = smp_processor_id();
2875 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2883 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
2889 EXPORT_SYMBOL(netif_rx);
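/* Editor's note: a minimal usage sketch (hypothetical non-NAPI driver, not
 * part of this file; mydrv_* names are invented). A legacy driver calls
 * netif_rx() from its interrupt handler for every received frame:
 */
#if 0
static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct sk_buff *skb = mydrv_fetch_rx_frame(dev);	/* invented */

	if (!skb)
		return IRQ_NONE;
	skb->protocol = eth_type_trans(skb, dev);	/* also sets skb->dev */
	netif_rx(skb);		/* enqueue to the per-CPU backlog */
	return IRQ_HANDLED;
}
#endif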
2891 int netif_rx_ni(struct sk_buff *skb)
2896 err = netif_rx(skb);
2897 if (local_softirq_pending())
2903 EXPORT_SYMBOL(netif_rx_ni);
2905 static void net_tx_action(struct softirq_action *h)
2907 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2909 if (sd->completion_queue) {
2910 struct sk_buff *clist;
2912 local_irq_disable();
2913 clist = sd->completion_queue;
2914 sd->completion_queue = NULL;
2918 struct sk_buff *skb = clist;
2919 clist = clist->next;
2921 WARN_ON(atomic_read(&skb->users));
2922 trace_kfree_skb(skb, net_tx_action);
2927 if (sd->output_queue) {
2930 local_irq_disable();
2931 head = sd->output_queue;
2932 sd->output_queue = NULL;
2933 sd->output_queue_tailp = &sd->output_queue;
2937 struct Qdisc *q = head;
2938 spinlock_t *root_lock;
2940 head = head->next_sched;
2942 root_lock = qdisc_lock(q);
2943 if (spin_trylock(root_lock)) {
2944 smp_mb__before_clear_bit();
2945 clear_bit(__QDISC_STATE_SCHED,
2948 spin_unlock(root_lock);
2950 if (!test_bit(__QDISC_STATE_DEACTIVATED,
2952 __netif_reschedule(q);
2954 smp_mb__before_clear_bit();
2955 clear_bit(__QDISC_STATE_SCHED,
2963 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
2964 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
2965 /* This hook is defined here for ATM LANE */
2966 int (*br_fdb_test_addr_hook)(struct net_device *dev,
2967 unsigned char *addr) __read_mostly;
2968 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
2971 #ifdef CONFIG_NET_CLS_ACT
2972 /* TODO: Maybe we should just force sch_ingress to be compiled in
2973 * whenever CONFIG_NET_CLS_ACT is. Otherwise we currently pay for some
2974 * useless instructions (a compare and two extra stores) when ingress is
2975 * not configured but CONFIG_NET_CLS_ACT is.
2976 * NOTE: This doesn't remove any functionality; if you don't have
2977 * the ingress scheduler, you just can't add policies on ingress.
2980 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
2982 struct net_device *dev = skb->dev;
2983 u32 ttl = G_TC_RTTL(skb->tc_verd);
2984 int result = TC_ACT_OK;
2987 if (unlikely(MAX_RED_LOOP < ttl++)) {
2988 if (net_ratelimit())
2989 pr_warning("Redir loop detected, dropping packet (%d->%d)\n",
2990 skb->skb_iif, dev->ifindex);
2994 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2995 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2998 if (q != &noop_qdisc) {
2999 spin_lock(qdisc_lock(q));
3000 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3001 result = qdisc_enqueue_root(skb, q);
3002 spin_unlock(qdisc_lock(q));
3008 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3009 struct packet_type **pt_prev,
3010 int *ret, struct net_device *orig_dev)
3012 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3014 if (!rxq || rxq->qdisc == &noop_qdisc)
3018 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3022 switch (ing_filter(skb, rxq)) {
3036 * netdev_rx_handler_register - register receive handler
3037 * @dev: device to register a handler for
3038 * @rx_handler: receive handler to register
3039 * @rx_handler_data: data pointer that is used by rx handler
3041 * Register a receive handler for a device. This handler will then be
3042 * called from __netif_receive_skb. A negative errno code is returned
3043 * on a failure.
3045 * The caller must hold the rtnl_mutex.
3047 * For a general description of rx_handler, see enum rx_handler_result.
3049 int netdev_rx_handler_register(struct net_device *dev,
3050 rx_handler_func_t *rx_handler,
3051 void *rx_handler_data)
3055 if (dev->rx_handler)
3058 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3059 rcu_assign_pointer(dev->rx_handler, rx_handler);
3063 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
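/* Editor's note: a minimal registration sketch (hypothetical module code in
 * the style of bridge/bonding/macvlan; the mymod_* names and the
 * mymod_wants() predicate are invented):
 */
#if 0
static rx_handler_result_t mymod_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (mymod_wants(skb)) {			/* invented predicate */
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;	/* we ate the skb */
	}
	return RX_HANDLER_PASS;			/* continue normal delivery */
}

static int mymod_attach(struct net_device *dev, void *priv)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, mymod_handle_frame, priv);
	rtnl_unlock();
	return err;
}
#endif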
3066 * netdev_rx_handler_unregister - unregister receive handler
3067 * @dev: device to unregister a handler from
3069 * Unregister a receive handler from a device.
3071 * The caller must hold the rtnl_mutex.
3073 void netdev_rx_handler_unregister(struct net_device *dev)
3077 rcu_assign_pointer(dev->rx_handler, NULL);
3078 rcu_assign_pointer(dev->rx_handler_data, NULL);
3080 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3082 static int __netif_receive_skb(struct sk_buff *skb)
3084 struct packet_type *ptype, *pt_prev;
3085 rx_handler_func_t *rx_handler;
3086 struct net_device *orig_dev;
3087 struct net_device *null_or_dev;
3088 bool deliver_exact = false;
3089 int ret = NET_RX_DROP;
3092 if (!netdev_tstamp_prequeue)
3093 net_timestamp_check(skb);
3095 trace_netif_receive_skb(skb);
3097 /* if we've gotten here through NAPI, check netpoll */
3098 if (netpoll_receive_skb(skb))
3102 skb->skb_iif = skb->dev->ifindex;
3103 orig_dev = skb->dev;
3105 skb_reset_network_header(skb);
3106 skb_reset_transport_header(skb);
3107 skb->mac_len = skb->network_header - skb->mac_header;
3115 __this_cpu_inc(softnet_data.processed);
3117 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3118 skb = vlan_untag(skb);
3123 #ifdef CONFIG_NET_CLS_ACT
3124 if (skb->tc_verd & TC_NCLS) {
3125 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3130 list_for_each_entry_rcu(ptype, &ptype_all, list) {
3131 if (!ptype->dev || ptype->dev == skb->dev) {
3133 ret = deliver_skb(skb, pt_prev, orig_dev);
3138 #ifdef CONFIG_NET_CLS_ACT
3139 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3145 rx_handler = rcu_dereference(skb->dev->rx_handler);
3148 ret = deliver_skb(skb, pt_prev, orig_dev);
3151 switch (rx_handler(&skb)) {
3152 case RX_HANDLER_CONSUMED:
3154 case RX_HANDLER_ANOTHER:
3156 case RX_HANDLER_EXACT:
3157 deliver_exact = true;
3158 case RX_HANDLER_PASS:
3165 if (vlan_tx_tag_present(skb)) {
3167 ret = deliver_skb(skb, pt_prev, orig_dev);
3170 if (vlan_do_receive(&skb)) {
3171 ret = __netif_receive_skb(skb);
3173 } else if (unlikely(!skb))
3177 /* deliver only exact match when indicated */
3178 null_or_dev = deliver_exact ? skb->dev : NULL;
3180 type = skb->protocol;
3181 list_for_each_entry_rcu(ptype,
3182 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
3183 if (ptype->type == type &&
3184 (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3185 ptype->dev == orig_dev)) {
3187 ret = deliver_skb(skb, pt_prev, orig_dev);
3193 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3195 atomic_long_inc(&skb->dev->rx_dropped);
3197 /* Jamal, now you will not be able to escape explaining
3198 * to me how you were going to use this. :-)
3209 * netif_receive_skb - process receive buffer from network
3210 * @skb: buffer to process
3212 * netif_receive_skb() is the main receive data processing function.
3213 * It always succeeds. The buffer may be dropped during processing
3214 * for congestion control or by the protocol layers.
3216 * This function may only be called from softirq context and interrupts
3217 * should be enabled.
3219 * Return values (usually ignored):
3220 * NET_RX_SUCCESS: no congestion
3221 * NET_RX_DROP: packet was dropped
3223 int netif_receive_skb(struct sk_buff *skb)
3225 if (netdev_tstamp_prequeue)
3226 net_timestamp_check(skb);
3228 if (skb_defer_rx_timestamp(skb))
3229 return NET_RX_SUCCESS;
3233 struct rps_dev_flow voidflow, *rflow = &voidflow;
3238 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3241 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3245 ret = __netif_receive_skb(skb);
3251 return __netif_receive_skb(skb);
3254 EXPORT_SYMBOL(netif_receive_skb);
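/* Editor's note: a minimal NAPI poll sketch (hypothetical driver, not part
 * of this file; mydrv_* names are invented). This is the softirq context
 * from which netif_receive_skb() is normally called:
 */
#if 0
static int mydrv_poll(struct napi_struct *napi, int budget)
{
	struct net_device *dev = napi->dev;
	int work = 0;

	while (work < budget) {
		struct sk_buff *skb = mydrv_next_rx(dev);	/* invented */

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, dev);
		netif_receive_skb(skb);
		work++;
	}
	if (work < budget) {
		napi_complete(napi);
		mydrv_enable_rx_irq(dev);			/* invented */
	}
	return work;
}
#endif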
3256 /* Network device is going away, flush any packets still pending
3257 * Called with irqs disabled.
3259 static void flush_backlog(void *arg)
3261 struct net_device *dev = arg;
3262 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3263 struct sk_buff *skb, *tmp;
3266 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3267 if (skb->dev == dev) {
3268 __skb_unlink(skb, &sd->input_pkt_queue);
3270 input_queue_head_incr(sd);
3275 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3276 if (skb->dev == dev) {
3277 __skb_unlink(skb, &sd->process_queue);
3279 input_queue_head_incr(sd);
3284 static int napi_gro_complete(struct sk_buff *skb)
3286 struct packet_type *ptype;
3287 __be16 type = skb->protocol;
3288 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3291 if (NAPI_GRO_CB(skb)->count == 1) {
3292 skb_shinfo(skb)->gso_size = 0;
3297 list_for_each_entry_rcu(ptype, head, list) {
3298 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
3301 err = ptype->gro_complete(skb);
3307 WARN_ON(&ptype->list == head);
3309 return NET_RX_SUCCESS;
3313 return netif_receive_skb(skb);
3316 inline void napi_gro_flush(struct napi_struct *napi)
3318 struct sk_buff *skb, *next;
3320 for (skb = napi->gro_list; skb; skb = next) {
3323 napi_gro_complete(skb);
3326 napi->gro_count = 0;
3327 napi->gro_list = NULL;
3329 EXPORT_SYMBOL(napi_gro_flush);
3331 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3333 struct sk_buff **pp = NULL;
3334 struct packet_type *ptype;
3335 __be16 type = skb->protocol;
3336 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3339 enum gro_result ret;
3341 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
3344 if (skb_is_gso(skb) || skb_has_frag_list(skb))
3348 list_for_each_entry_rcu(ptype, head, list) {
3349 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
3352 skb_set_network_header(skb, skb_gro_offset(skb));
3353 mac_len = skb->network_header - skb->mac_header;
3354 skb->mac_len = mac_len;
3355 NAPI_GRO_CB(skb)->same_flow = 0;
3356 NAPI_GRO_CB(skb)->flush = 0;
3357 NAPI_GRO_CB(skb)->free = 0;
3359 pp = ptype->gro_receive(&napi->gro_list, skb);
3364 if (&ptype->list == head)
3367 same_flow = NAPI_GRO_CB(skb)->same_flow;
3368 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
3371 struct sk_buff *nskb = *pp;
3375 napi_gro_complete(nskb);
3382 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
3386 NAPI_GRO_CB(skb)->count = 1;
3387 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3388 skb->next = napi->gro_list;
3389 napi->gro_list = skb;
3393 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3394 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3396 BUG_ON(skb->end - skb->tail < grow);
3398 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3401 skb->data_len -= grow;
3403 skb_shinfo(skb)->frags[0].page_offset += grow;
3404 skb_shinfo(skb)->frags[0].size -= grow;
3406 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
3407 put_page(skb_shinfo(skb)->frags[0].page);
3408 memmove(skb_shinfo(skb)->frags,
3409 skb_shinfo(skb)->frags + 1,
3410 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
3421 EXPORT_SYMBOL(dev_gro_receive);
3423 static inline gro_result_t
3424 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3428 for (p = napi->gro_list; p; p = p->next) {
3429 unsigned long diffs;
3431 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3432 diffs |= p->vlan_tci ^ skb->vlan_tci;
3433 diffs |= compare_ether_header(skb_mac_header(p),
3434 skb_gro_mac_header(skb));
3435 NAPI_GRO_CB(p)->same_flow = !diffs;
3436 NAPI_GRO_CB(p)->flush = 0;
3439 return dev_gro_receive(napi, skb);
3442 gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
3446 if (netif_receive_skb(skb))
3451 case GRO_MERGED_FREE:
3462 EXPORT_SYMBOL(napi_skb_finish);
3464 void skb_gro_reset_offset(struct sk_buff *skb)
3466 NAPI_GRO_CB(skb)->data_offset = 0;
3467 NAPI_GRO_CB(skb)->frag0 = NULL;
3468 NAPI_GRO_CB(skb)->frag0_len = 0;
3470 if (skb->mac_header == skb->tail &&
3471 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
3472 NAPI_GRO_CB(skb)->frag0 =
3473 page_address(skb_shinfo(skb)->frags[0].page) +
3474 skb_shinfo(skb)->frags[0].page_offset;
3475 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
3478 EXPORT_SYMBOL(skb_gro_reset_offset);
3480 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3482 skb_gro_reset_offset(skb);
3484 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
3486 EXPORT_SYMBOL(napi_gro_receive);
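/* Editor's note: a GRO-aware driver simply substitutes this call for the
 * netif_receive_skb() call in its poll loop (cf. the mydrv_poll sketch
 * after netif_receive_skb above); fragment only, variables as in that
 * sketch:
 */
#if 0
	/* inside the poll loop, instead of netif_receive_skb(skb): */
	skb->protocol = eth_type_trans(skb, dev);
	napi_gro_receive(napi, skb);	/* may merge skb into napi->gro_list */
#endif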
3488 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3490 __skb_pull(skb, skb_headlen(skb));
3491 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
3493 skb->dev = napi->dev;
3499 struct sk_buff *napi_get_frags(struct napi_struct *napi)
3501 struct sk_buff *skb = napi->skb;
3504 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3510 EXPORT_SYMBOL(napi_get_frags);
3512 gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3518 skb->protocol = eth_type_trans(skb, skb->dev);
3520 if (ret == GRO_HELD)
3521 skb_gro_pull(skb, -ETH_HLEN);
3522 else if (netif_receive_skb(skb))
3527 case GRO_MERGED_FREE:
3528 napi_reuse_skb(napi, skb);
3537 EXPORT_SYMBOL(napi_frags_finish);
3539 struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3541 struct sk_buff *skb = napi->skb;
3548 skb_reset_mac_header(skb);
3549 skb_gro_reset_offset(skb);
3551 off = skb_gro_offset(skb);
3552 hlen = off + sizeof(*eth);
3553 eth = skb_gro_header_fast(skb, off);
3554 if (skb_gro_header_hard(skb, hlen)) {
3555 eth = skb_gro_header_slow(skb, hlen, off);
3556 if (unlikely(!eth)) {
3557 napi_reuse_skb(napi, skb);
3563 skb_gro_pull(skb, sizeof(*eth));
3566 * This works because the only protocols we care about don't require
3567 * special handling. We'll fix it up properly at the end.
3569 skb->protocol = eth->h_proto;
3574 EXPORT_SYMBOL(napi_frags_skb);
3576 gro_result_t napi_gro_frags(struct napi_struct *napi)
3578 struct sk_buff *skb = napi_frags_skb(napi);
3583 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
3585 EXPORT_SYMBOL(napi_gro_frags);
3588 * net_rps_action sends any pending IPIs for RPS.
3589 * Note: called with local irq disabled, but exits with local irq enabled.
3591 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3594 struct softnet_data *remsd = sd->rps_ipi_list;
3597 sd->rps_ipi_list = NULL;
3601 /* Send pending IPIs to kick RPS processing on remote CPUs. */
3603 struct softnet_data *next = remsd->rps_ipi_next;
3605 if (cpu_online(remsd->cpu))
3606 __smp_call_function_single(remsd->cpu,
3615 static int process_backlog(struct napi_struct *napi, int quota)
3618 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
3621 /* Check if we have pending IPIs; it's better to send them now
3622 * than to wait for net_rx_action() to end.
3624 if (sd->rps_ipi_list) {
3625 local_irq_disable();
3626 net_rps_action_and_irq_enable(sd);
3629 napi->weight = weight_p;
3630 local_irq_disable();
3631 while (work < quota) {
3632 struct sk_buff *skb;
3635 while ((skb = __skb_dequeue(&sd->process_queue))) {
3637 __netif_receive_skb(skb);
3638 local_irq_disable();
3639 input_queue_head_incr(sd);
3640 if (++work >= quota) {
3647 qlen = skb_queue_len(&sd->input_pkt_queue);
3649 skb_queue_splice_tail_init(&sd->input_pkt_queue,
3650 &sd->process_queue);
3652 if (qlen < quota - work) {
3654 * Inline a custom version of __napi_complete().
3655 * Only the current CPU owns and manipulates this NAPI,
3656 * and NAPI_STATE_SCHED is the only possible flag set on the backlog,
3657 * so we can use a plain write instead of clear_bit(),
3658 * and we don't need an smp_mb() memory barrier.
3660 list_del(&napi->poll_list);
3663 quota = work + qlen;
3673 * __napi_schedule - schedule for receive
3674 * @n: entry to schedule
3676 * The entry's receive function will be scheduled to run.
3678 void __napi_schedule(struct napi_struct *n)
3680 unsigned long flags;
3682 local_irq_save(flags);
3683 ____napi_schedule(&__get_cpu_var(softnet_data), n);
3684 local_irq_restore(flags);
3686 EXPORT_SYMBOL(__napi_schedule);
3688 void __napi_complete(struct napi_struct *n)
3690 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3691 BUG_ON(n->gro_list);
3693 list_del(&n->poll_list);
3694 smp_mb__before_clear_bit();
3695 clear_bit(NAPI_STATE_SCHED, &n->state);
3697 EXPORT_SYMBOL(__napi_complete);
3699 void napi_complete(struct napi_struct *n)
3701 unsigned long flags;
3704 * don't let NAPI dequeue from the CPU poll list,
3705 * just in case it's running on a different CPU
3707 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3711 local_irq_save(flags);
3713 local_irq_restore(flags);
3715 EXPORT_SYMBOL(napi_complete);
3717 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3718 int (*poll)(struct napi_struct *, int), int weight)
3720 INIT_LIST_HEAD(&napi->poll_list);
3721 napi->gro_count = 0;
3722 napi->gro_list = NULL;
3725 napi->weight = weight;
3726 list_add(&napi->dev_list, &dev->napi_list);
3728 #ifdef CONFIG_NETPOLL
3729 spin_lock_init(&napi->poll_lock);
3730 napi->poll_owner = -1;
3732 set_bit(NAPI_STATE_SCHED, &napi->state);
3734 EXPORT_SYMBOL(netif_napi_add);
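/* Editor's note: the typical NAPI lifecycle around netif_napi_add(), as a
 * hedged sketch (hypothetical driver; mydrv_poll is the handler sketched
 * earlier, priv and the weight of 64 are invented). Fragments only:
 */
#if 0
	/* at probe time: */
	netif_napi_add(dev, &priv->napi, mydrv_poll, 64);

	/* in ndo_open: */
	napi_enable(&priv->napi);

	/* in the RX interrupt handler, after masking the RX interrupt: */
	napi_schedule(&priv->napi);

	/* in ndo_stop: */
	napi_disable(&priv->napi);

	/* at remove time: */
	netif_napi_del(&priv->napi);
#endif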
3736 void netif_napi_del(struct napi_struct *napi)
3738 struct sk_buff *skb, *next;
3740 list_del_init(&napi->dev_list);
3741 napi_free_frags(napi);
3743 for (skb = napi->gro_list; skb; skb = next) {
3749 napi->gro_list = NULL;
3750 napi->gro_count = 0;
3752 EXPORT_SYMBOL(netif_napi_del);
3754 static void net_rx_action(struct softirq_action *h)
3756 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3757 unsigned long time_limit = jiffies + 2;
3758 int budget = netdev_budget;
3761 local_irq_disable();
3763 while (!list_empty(&sd->poll_list)) {
3764 struct napi_struct *n;
3767 /* If the softirq window is exhausted then punt.
3768 * Allow this to run for 2 jiffies, which allows
3769 * an average latency of 1.5/HZ.
3771 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
3776 /* Even though interrupts have been re-enabled, this
3777 * access is safe because interrupts can only add new
3778 * entries to the tail of this list, and only ->poll()
3779 * calls can remove this head entry from the list.
3781 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
3783 have = netpoll_poll_lock(n);
3787 /* This NAPI_STATE_SCHED test is for avoiding a race
3788 * with netpoll's poll_napi(). Only the entity which
3789 * obtains the lock and sees NAPI_STATE_SCHED set will
3790 * actually make the ->poll() call. Therefore we avoid
3791 * accidentally calling ->poll() when NAPI is not scheduled.
3794 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
3795 work = n->poll(n, weight);
3799 WARN_ON_ONCE(work > weight);
3803 local_irq_disable();
3805 /* Drivers must not modify the NAPI state if they
3806 * consume the entire weight. In such cases this code
3807 * still "owns" the NAPI instance and therefore can
3808 * move the instance around on the list at-will.
3810 if (unlikely(work == weight)) {
3811 if (unlikely(napi_disable_pending(n))) {
3814 local_irq_disable();
3816 list_move_tail(&n->poll_list, &sd->poll_list);
3819 netpoll_poll_unlock(have);
3822 net_rps_action_and_irq_enable(sd);
3824 #ifdef CONFIG_NET_DMA
3826 * There may not be any more sk_buffs coming right now, so push
3827 * any pending DMA copies to hardware
3829 dma_issue_pending_all();
3836 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3840 static gifconf_func_t *gifconf_list[NPROTO];
3843 * register_gifconf - register a SIOCGIF handler
3844 * @family: Address family
3845 * @gifconf: Function handler
3847 * Register protocol dependent address dumping routines. The handler
3848 * that is passed must not be freed or reused until it has been replaced
3849 * by another handler.
3851 int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
3853 if (family >= NPROTO)
3855 gifconf_list[family] = gifconf;
3858 EXPORT_SYMBOL(register_gifconf);
3862 * Map an interface index to its name (SIOCGIFNAME)
3866 * We need this ioctl for efficient implementation of the
3867 * if_indextoname() function required by the IPv6 API. Without
3868 * it, we would have to search all the interfaces to find a
3869 * match.
3872 static int dev_ifname(struct net *net, struct ifreq __user *arg)
3874 struct net_device *dev;
3878 * Fetch the caller's info block.
3881 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3885 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
3891 strcpy(ifr.ifr_name, dev->name);
3894 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3900 * Perform a SIOCGIFCONF call. This structure will change
3901 * size eventually, and there is nothing I can do about it.
3902 * Thus we will need a 'compatibility mode'.
3905 static int dev_ifconf(struct net *net, char __user *arg)
3908 struct net_device *dev;
3915 * Fetch the caller's info block.
3918 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3925 * Loop over the interfaces, and write an info block for each.
3929 for_each_netdev(net, dev) {
3930 for (i = 0; i < NPROTO; i++) {
3931 if (gifconf_list[i]) {
3934 done = gifconf_list[i](dev, NULL, 0);
3936 done = gifconf_list[i](dev, pos + total,
3946 * All done. Write the updated control block back to the caller.
3948 ifc.ifc_len = total;
3951 * Both BSD and Solaris return 0 here, so we do too.
3953 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
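/* Editor's note: the matching userspace convention, as a hedged sketch
 * (ordinary application code, not kernel code; shown only to document the
 * ABI, error handling omitted). Passing a NULL buffer first asks only for
 * the required length, mirroring the gifconf_list[i](dev, NULL, 0) sizing
 * call above:
 */
#if 0
	struct ifconf ifc;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	ifc.ifc_buf = NULL;			/* pass 1: size only */
	ifc.ifc_len = 0;
	ioctl(fd, SIOCGIFCONF, &ifc);
	ifc.ifc_buf = malloc(ifc.ifc_len);	/* pass 2: fetch the list */
	ioctl(fd, SIOCGIFCONF, &ifc);
#endif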
3956 #ifdef CONFIG_PROC_FS
3958 * This is invoked by the /proc filesystem handler to display a device
3959 * in detail.
3961 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
3964 struct net *net = seq_file_net(seq);
3966 struct net_device *dev;
3970 return SEQ_START_TOKEN;
3973 for_each_netdev_rcu(net, dev)
3980 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3982 struct net_device *dev = v;
3984 if (v == SEQ_START_TOKEN)
3985 dev = first_net_device_rcu(seq_file_net(seq));
3987 dev = next_net_device_rcu(dev);
3993 void dev_seq_stop(struct seq_file *seq, void *v)
3999 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
4001 struct rtnl_link_stats64 temp;
4002 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
4004 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
4005 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
4006 dev->name, stats->rx_bytes, stats->rx_packets,
4008 stats->rx_dropped + stats->rx_missed_errors,
4009 stats->rx_fifo_errors,
4010 stats->rx_length_errors + stats->rx_over_errors +
4011 stats->rx_crc_errors + stats->rx_frame_errors,
4012 stats->rx_compressed, stats->multicast,
4013 stats->tx_bytes, stats->tx_packets,
4014 stats->tx_errors, stats->tx_dropped,
4015 stats->tx_fifo_errors, stats->collisions,
4016 stats->tx_carrier_errors +
4017 stats->tx_aborted_errors +
4018 stats->tx_window_errors +
4019 stats->tx_heartbeat_errors,
4020 stats->tx_compressed);
4024 * Called from the PROCfs module. This now uses the new arbitrary sized
4025 * /proc/net interface to create /proc/net/dev
4027 static int dev_seq_show(struct seq_file *seq, void *v)
4029 if (v == SEQ_START_TOKEN)
4030 seq_puts(seq, "Inter-| Receive "
4032 " face |bytes packets errs drop fifo frame "
4033 "compressed multicast|bytes packets errs "
4034 "drop fifo colls carrier compressed\n");
4036 dev_seq_printf_stats(seq, v);
4040 static struct softnet_data *softnet_get_online(loff_t *pos)
4042 struct softnet_data *sd = NULL;
4044 while (*pos < nr_cpu_ids)
4045 if (cpu_online(*pos)) {
4046 sd = &per_cpu(softnet_data, *pos);
4053 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
4055 return softnet_get_online(pos);
4058 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4061 return softnet_get_online(pos);
4064 static void softnet_seq_stop(struct seq_file *seq, void *v)
4068 static int softnet_seq_show(struct seq_file *seq, void *v)
4070 struct softnet_data *sd = v;
4072 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
4073 sd->processed, sd->dropped, sd->time_squeeze, 0,
4074 0, 0, 0, 0, /* was fastroute */
4075 sd->cpu_collision, sd->received_rps);
4079 static const struct seq_operations dev_seq_ops = {
4080 .start = dev_seq_start,
4081 .next = dev_seq_next,
4082 .stop = dev_seq_stop,
4083 .show = dev_seq_show,
4086 static int dev_seq_open(struct inode *inode, struct file *file)
4088 return seq_open_net(inode, file, &dev_seq_ops,
4089 sizeof(struct seq_net_private));
4092 static const struct file_operations dev_seq_fops = {
4093 .owner = THIS_MODULE,
4094 .open = dev_seq_open,
4096 .llseek = seq_lseek,
4097 .release = seq_release_net,
4100 static const struct seq_operations softnet_seq_ops = {
4101 .start = softnet_seq_start,
4102 .next = softnet_seq_next,
4103 .stop = softnet_seq_stop,
4104 .show = softnet_seq_show,
4107 static int softnet_seq_open(struct inode *inode, struct file *file)
4109 return seq_open(file, &softnet_seq_ops);
4112 static const struct file_operations softnet_seq_fops = {
4113 .owner = THIS_MODULE,
4114 .open = softnet_seq_open,
4116 .llseek = seq_lseek,
4117 .release = seq_release,
4120 static void *ptype_get_idx(loff_t pos)
4122 struct packet_type *pt = NULL;
4126 list_for_each_entry_rcu(pt, &ptype_all, list) {
4132 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
4133 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
4142 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
4146 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
4149 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4151 struct packet_type *pt;
4152 struct list_head *nxt;
4156 if (v == SEQ_START_TOKEN)
4157 return ptype_get_idx(0);
4160 nxt = pt->list.next;
4161 if (pt->type == htons(ETH_P_ALL)) {
4162 if (nxt != &ptype_all)
4165 nxt = ptype_base[0].next;
4167 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
4169 while (nxt == &ptype_base[hash]) {
4170 if (++hash >= PTYPE_HASH_SIZE)
4172 nxt = ptype_base[hash].next;
4175 return list_entry(nxt, struct packet_type, list);
4178 static void ptype_seq_stop(struct seq_file *seq, void *v)
4184 static int ptype_seq_show(struct seq_file *seq, void *v)
4186 struct packet_type *pt = v;
4188 if (v == SEQ_START_TOKEN)
4189 seq_puts(seq, "Type Device Function\n");
4190 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
4191 if (pt->type == htons(ETH_P_ALL))
4192 seq_puts(seq, "ALL ");
4194 seq_printf(seq, "%04x", ntohs(pt->type));
4196 seq_printf(seq, " %-8s %pF\n",
4197 pt->dev ? pt->dev->name : "", pt->func);
4203 static const struct seq_operations ptype_seq_ops = {
4204 .start = ptype_seq_start,
4205 .next = ptype_seq_next,
4206 .stop = ptype_seq_stop,
4207 .show = ptype_seq_show,
4210 static int ptype_seq_open(struct inode *inode, struct file *file)
4212 return seq_open_net(inode, file, &ptype_seq_ops,
4213 sizeof(struct seq_net_private));
4216 static const struct file_operations ptype_seq_fops = {
4217 .owner = THIS_MODULE,
4218 .open = ptype_seq_open,
4220 .llseek = seq_lseek,
4221 .release = seq_release_net,
4225 static int __net_init dev_proc_net_init(struct net *net)
4229 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
4231 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
4233 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
4236 if (wext_proc_init(net))
4242 proc_net_remove(net, "ptype");
4244 proc_net_remove(net, "softnet_stat");
4246 proc_net_remove(net, "dev");
4250 static void __net_exit dev_proc_net_exit(struct net *net)
4252 wext_proc_exit(net);
4254 proc_net_remove(net, "ptype");
4255 proc_net_remove(net, "softnet_stat");
4256 proc_net_remove(net, "dev");
4259 static struct pernet_operations __net_initdata dev_proc_ops = {
4260 .init = dev_proc_net_init,
4261 .exit = dev_proc_net_exit,
4264 static int __init dev_proc_init(void)
4266 return register_pernet_subsys(&dev_proc_ops);
4269 #define dev_proc_init() 0
4270 #endif /* CONFIG_PROC_FS */
4274 * netdev_set_master - set up master pointer
4275 * @slave: slave device
4276 * @master: new master device
4278 * Changes the master device of the slave. Pass %NULL to break the
4279 * bonding. The caller must hold the RTNL semaphore. On a failure
4280 * a negative errno code is returned. On success the reference counts
4281 * are adjusted and the function returns zero.
4283 int netdev_set_master(struct net_device *slave, struct net_device *master)
4285 struct net_device *old = slave->master;
4295 slave->master = master;
4303 EXPORT_SYMBOL(netdev_set_master);
4306 * netdev_set_bond_master - set up bonding master/slave pair
4307 * @slave: slave device
4308 * @master: new master device
4310 * Changes the master device of the slave. Pass %NULL to break the
4311 * bonding. The caller must hold the RTNL semaphore. On a failure
4312 * a negative errno code is returned. On success %RTM_NEWLINK is sent
4313 * to the routing socket and the function returns zero.
4315 int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
4321 err = netdev_set_master(slave, master);
4325 slave->flags |= IFF_SLAVE;
4327 slave->flags &= ~IFF_SLAVE;
4329 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
4332 EXPORT_SYMBOL(netdev_set_bond_master);
4334 static void dev_change_rx_flags(struct net_device *dev, int flags)
4336 const struct net_device_ops *ops = dev->netdev_ops;
4338 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4339 ops->ndo_change_rx_flags(dev, flags);
4342 static int __dev_set_promiscuity(struct net_device *dev, int inc)
4344 unsigned short old_flags = dev->flags;
4350 dev->flags |= IFF_PROMISC;
4351 dev->promiscuity += inc;
4352 if (dev->promiscuity == 0) {
4355 * If inc causes overflow, untouch promisc and return error.
4358 dev->flags &= ~IFF_PROMISC;
4360 dev->promiscuity -= inc;
4361 printk(KERN_WARNING "%s: promiscuity counter overflowed, "
4362 "setting promiscuity failed; the promiscuity feature "
4363 "of the device might be broken.\n", dev->name);
4367 if (dev->flags != old_flags) {
4368 printk(KERN_INFO "device %s %s promiscuous mode\n",
4369 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
4371 if (audit_enabled) {
4372 current_uid_gid(&uid, &gid);
4373 audit_log(current->audit_context, GFP_ATOMIC,
4374 AUDIT_ANOM_PROMISCUOUS,
4375 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4376 dev->name, (dev->flags & IFF_PROMISC),
4377 (old_flags & IFF_PROMISC),
4378 audit_get_loginuid(current),
4380 audit_get_sessionid(current));
4383 dev_change_rx_flags(dev, IFF_PROMISC);
4389 * dev_set_promiscuity - update promiscuity count on a device
4393 * Add or remove promiscuity from a device. While the count in the device
4394 * remains above zero the interface remains promiscuous. Once it hits zero
4395 * the device reverts to normal filtering operation. A negative inc
4396 * value is used to drop promiscuity on the device.
4397 * Return 0 if successful or a negative errno code on error.
4399 int dev_set_promiscuity(struct net_device *dev, int inc)
4401 unsigned short old_flags = dev->flags;
4404 err = __dev_set_promiscuity(dev, inc);
4407 if (dev->flags != old_flags)
4408 dev_set_rx_mode(dev);
4411 EXPORT_SYMBOL(dev_set_promiscuity);
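/* Editor's note: a minimal caller sketch. A packet-capture style user takes
 * a promiscuity reference while capturing and drops it afterwards; the
 * RTNL is held around each call (fragment only, error handling
 * abbreviated):
 */
#if 0
	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* take a reference */
	rtnl_unlock();
	/* ... capture traffic ... */
	rtnl_lock();
	dev_set_promiscuity(dev, -1);		/* drop the reference */
	rtnl_unlock();
#endif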
4414 * dev_set_allmulti - update allmulti count on a device
4418 * Add or remove reception of all multicast frames to a device. While the
4419 * count in the device remains above zero the interface remains listening
4420 * for all multicast frames. Once it hits zero the device reverts to normal
4421 * filtering operation. A negative @inc value is used to drop the counter
4422 * when releasing a resource needing all multicasts.
4423 * Return 0 if successful or a negative errno code on error.
4426 int dev_set_allmulti(struct net_device *dev, int inc)
4428 unsigned short old_flags = dev->flags;
4432 dev->flags |= IFF_ALLMULTI;
4433 dev->allmulti += inc;
4434 if (dev->allmulti == 0) {
4437 * If inc causes overflow, untouch allmulti and return error.
4440 dev->flags &= ~IFF_ALLMULTI;
4442 dev->allmulti -= inc;
4443 printk(KERN_WARNING "%s: allmulti counter overflowed, "
4444 "setting allmulti failed; the allmulti feature of "
4445 "the device might be broken.\n", dev->name);
4449 if (dev->flags ^ old_flags) {
4450 dev_change_rx_flags(dev, IFF_ALLMULTI);
4451 dev_set_rx_mode(dev);
4455 EXPORT_SYMBOL(dev_set_allmulti);
4458 * Upload unicast and multicast address lists to device and
4459 * configure RX filtering. When the device doesn't support unicast
4460 * filtering it is put in promiscuous mode while unicast addresses
4461 * are enabled.
4463 void __dev_set_rx_mode(struct net_device *dev)
4465 const struct net_device_ops *ops = dev->netdev_ops;
4467 /* dev_open will call this function so the list will stay sane. */
4468 if (!(dev->flags&IFF_UP))
4471 if (!netif_device_present(dev))
4474 if (ops->ndo_set_rx_mode)
4475 ops->ndo_set_rx_mode(dev);
4477 /* Unicast addresses changes may only happen under the rtnl,
4478 * therefore calling __dev_set_promiscuity here is safe.
4480 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4481 __dev_set_promiscuity(dev, 1);
4482 dev->uc_promisc = 1;
4483 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4484 __dev_set_promiscuity(dev, -1);
4485 dev->uc_promisc = 0;
4488 if (ops->ndo_set_multicast_list)
4489 ops->ndo_set_multicast_list(dev);
4493 void dev_set_rx_mode(struct net_device *dev)
4495 netif_addr_lock_bh(dev);
4496 __dev_set_rx_mode(dev);
4497 netif_addr_unlock_bh(dev);
4501 * dev_ethtool_get_settings - call device's ethtool_ops::get_settings()
4503 * @cmd: memory area for ethtool_ops::get_settings() result
4505 * The cmd arg is initialized properly (cleared and
4506 * ethtool_cmd::cmd field set to ETHTOOL_GSET).
4508 * Return device's ethtool_ops::get_settings() result value or
4509 * -EOPNOTSUPP when device doesn't expose
4510 * ethtool_ops::get_settings() operation.
4512 int dev_ethtool_get_settings(struct net_device *dev,
4513 struct ethtool_cmd *cmd)
4515 if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
4518 memset(cmd, 0, sizeof(struct ethtool_cmd));
4519 cmd->cmd = ETHTOOL_GSET;
4520 return dev->ethtool_ops->get_settings(dev, cmd);
4522 EXPORT_SYMBOL(dev_ethtool_get_settings);
4525 * dev_get_flags - get flags reported to userspace
4528 * Get the combination of flag bits exported through APIs to userspace.
4530 unsigned dev_get_flags(const struct net_device *dev)
4534 flags = (dev->flags & ~(IFF_PROMISC |
4539 (dev->gflags & (IFF_PROMISC |
4542 if (netif_running(dev)) {
4543 if (netif_oper_up(dev))
4544 flags |= IFF_RUNNING;
4545 if (netif_carrier_ok(dev))
4546 flags |= IFF_LOWER_UP;
4547 if (netif_dormant(dev))
4548 flags |= IFF_DORMANT;
4553 EXPORT_SYMBOL(dev_get_flags);
4555 int __dev_change_flags(struct net_device *dev, unsigned int flags)
4557 int old_flags = dev->flags;
4563 * Set the flags on our device.
4566 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4567 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4569 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4573 * Load in the correct multicast list now the flags have changed.
4576 if ((old_flags ^ flags) & IFF_MULTICAST)
4577 dev_change_rx_flags(dev, IFF_MULTICAST);
4579 dev_set_rx_mode(dev);
4582 * Have we downed the interface? We handle IFF_UP ourselves
4583 * according to user attempts to set it, rather than blindly
4584 * setting it.
4588 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4589 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
4592 dev_set_rx_mode(dev);
4595 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4596 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4598 dev->gflags ^= IFF_PROMISC;
4599 dev_set_promiscuity(dev, inc);
4602 /* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4603 is important. Some (broken) drivers set IFF_PROMISC when
4604 IFF_ALLMULTI is requested, without asking us and without reporting it.
4606 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4607 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4609 dev->gflags ^= IFF_ALLMULTI;
4610 dev_set_allmulti(dev, inc);
4616 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4618 unsigned int changes = dev->flags ^ old_flags;
4620 if (changes & IFF_UP) {
4621 if (dev->flags & IFF_UP)
4622 call_netdevice_notifiers(NETDEV_UP, dev);
4624 call_netdevice_notifiers(NETDEV_DOWN, dev);
4627 if (dev->flags & IFF_UP &&
4628 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4629 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4633 * dev_change_flags - change device settings
4635 * @flags: device state flags
4637 * Change settings on device based state flags. The flags are
4638 * in the userspace exported format.
4640 int dev_change_flags(struct net_device *dev, unsigned flags)
4643 int old_flags = dev->flags;
4645 ret = __dev_change_flags(dev, flags);
4649 changes = old_flags ^ dev->flags;
4651 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4653 __dev_notify_flags(dev, old_flags);
4656 EXPORT_SYMBOL(dev_change_flags);
4659 * dev_set_mtu - Change maximum transfer unit
4661 * @new_mtu: new transfer unit
4663 * Change the maximum transfer size of the network device.
4665 int dev_set_mtu(struct net_device *dev, int new_mtu)
4667 const struct net_device_ops *ops = dev->netdev_ops;
4670 if (new_mtu == dev->mtu)
4673 /* MTU must be positive. */
4677 if (!netif_device_present(dev))
4681 if (ops->ndo_change_mtu)
4682 err = ops->ndo_change_mtu(dev, new_mtu);
4686 if (!err && dev->flags & IFF_UP)
4687 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4690 EXPORT_SYMBOL(dev_set_mtu);
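/* Editor's note: a minimal caller sketch; the RTNL is held as for the other
 * dev_* setters, since a successful change fires the NETDEV_CHANGEMTU
 * notifier (fragment only):
 */
#if 0
	rtnl_lock();
	err = dev_set_mtu(dev, 9000);	/* e.g. enable jumbo frames */
	rtnl_unlock();
#endif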
4693 * dev_set_group - Change group this device belongs to
4695 * @new_group: group this device should belong to
4697 void dev_set_group(struct net_device *dev, int new_group)
4699 dev->group = new_group;
4701 EXPORT_SYMBOL(dev_set_group);
4704 * dev_set_mac_address - Change Media Access Control Address
4708 * Change the hardware (MAC) address of the device
4710 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4712 const struct net_device_ops *ops = dev->netdev_ops;
4715 if (!ops->ndo_set_mac_address)
4717 if (sa->sa_family != dev->type)
4719 if (!netif_device_present(dev))
4721 err = ops->ndo_set_mac_address(dev, sa);
4723 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4726 EXPORT_SYMBOL(dev_set_mac_address);
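/* Editor's note: a minimal caller sketch (new_mac is an invented array of
 * dev->addr_len bytes; RTNL held as for the other dev_* setters). The
 * sa_family must match dev->type or -EINVAL comes back:
 */
#if 0
	struct sockaddr sa;

	sa.sa_family = dev->type;		/* e.g. ARPHRD_ETHER */
	memcpy(sa.sa_data, new_mac, dev->addr_len);
	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);
	rtnl_unlock();
#endif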
4729 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
4731 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
4734 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
4740 case SIOCGIFFLAGS: /* Get interface flags */
4741 ifr->ifr_flags = (short) dev_get_flags(dev);
4744 case SIOCGIFMETRIC: /* Get the metric on the interface
4745 (currently unused) */
4746 ifr->ifr_metric = 0;
4749 case SIOCGIFMTU: /* Get the MTU of a device */
4750 ifr->ifr_mtu = dev->mtu;
4755 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4757 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4758 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4759 ifr->ifr_hwaddr.sa_family = dev->type;
4767 ifr->ifr_map.mem_start = dev->mem_start;
4768 ifr->ifr_map.mem_end = dev->mem_end;
4769 ifr->ifr_map.base_addr = dev->base_addr;
4770 ifr->ifr_map.irq = dev->irq;
4771 ifr->ifr_map.dma = dev->dma;
4772 ifr->ifr_map.port = dev->if_port;
4776 ifr->ifr_ifindex = dev->ifindex;
4780 ifr->ifr_qlen = dev->tx_queue_len;
4784 /* dev_ioctl() should ensure this case
4785 * is never reached
4796 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4798 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4801 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4802 const struct net_device_ops *ops;
4807 ops = dev->netdev_ops;
4810 case SIOCSIFFLAGS: /* Set interface flags */
4811 return dev_change_flags(dev, ifr->ifr_flags);
4813 case SIOCSIFMETRIC: /* Set the metric on the interface
4814 (currently unused) */
4817 case SIOCSIFMTU: /* Set the MTU of a device */
4818 return dev_set_mtu(dev, ifr->ifr_mtu);
4821 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4823 case SIOCSIFHWBROADCAST:
4824 if (ifr->ifr_hwaddr.sa_family != dev->type)
4826 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4827 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4828 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4832 if (ops->ndo_set_config) {
4833 if (!netif_device_present(dev))
4835 return ops->ndo_set_config(dev, &ifr->ifr_map);
4840 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4841 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4843 if (!netif_device_present(dev))
4845 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
4848 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4849 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4851 if (!netif_device_present(dev))
4853 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
4856 if (ifr->ifr_qlen < 0)
4858 dev->tx_queue_len = ifr->ifr_qlen;
4862 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4863 return dev_change_name(dev, ifr->ifr_newname);
4866 * Unknown or private ioctl
4869 if ((cmd >= SIOCDEVPRIVATE &&
4870 cmd <= SIOCDEVPRIVATE + 15) ||
4871 cmd == SIOCBONDENSLAVE ||
4872 cmd == SIOCBONDRELEASE ||
4873 cmd == SIOCBONDSETHWADDR ||
4874 cmd == SIOCBONDSLAVEINFOQUERY ||
4875 cmd == SIOCBONDINFOQUERY ||
4876 cmd == SIOCBONDCHANGEACTIVE ||
4877 cmd == SIOCGMIIPHY ||
4878 cmd == SIOCGMIIREG ||
4879 cmd == SIOCSMIIREG ||
4880 cmd == SIOCBRADDIF ||
4881 cmd == SIOCBRDELIF ||
4882 cmd == SIOCSHWTSTAMP ||
4883 cmd == SIOCWANDEV) {
4885 if (ops->ndo_do_ioctl) {
4886 if (netif_device_present(dev))
4887 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4899 * This function handles all "interface"-type I/O control requests. The actual
4900 * 'doing' part of this is dev_ifsioc above.
4904 * dev_ioctl - network device ioctl
4905 * @net: the applicable net namespace
4906 * @cmd: command to issue
4907 * @arg: pointer to a struct ifreq in user space
4909 * Issue ioctl functions to devices. This is normally called by the
4910 * user space syscall interfaces but can sometimes be useful for
4911 * other purposes. The return value is the return from the syscall if
4912 * positive or a negative errno code on error.
4915 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4921 /* One special case: SIOCGIFCONF takes ifconf argument
4922 and requires shared lock, because it sleeps writing
4923 the info.
4926 if (cmd == SIOCGIFCONF) {
4928 ret = dev_ifconf(net, (char __user *) arg);
4932 if (cmd == SIOCGIFNAME)
4933 return dev_ifname(net, (struct ifreq __user *)arg);
4935 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4938 ifr.ifr_name[IFNAMSIZ-1] = 0;
4940 colon = strchr(ifr.ifr_name, ':');
4945 * See which interface the caller is talking about.
4950 * These ioctl calls:
4951 * - can be done by all.
4952 * - are atomic and do not require locking.
4963 dev_load(net, ifr.ifr_name);
4965 ret = dev_ifsioc_locked(net, &ifr, cmd);
4970 if (copy_to_user(arg, &ifr,
4971 sizeof(struct ifreq)))
4977 dev_load(net, ifr.ifr_name);
4979 ret = dev_ethtool(net, &ifr);
4984 if (copy_to_user(arg, &ifr,
4985 sizeof(struct ifreq)))
4991 * These ioctl calls:
4992 * - require superuser power.
4993 * - require strict serialization.
4999 if (!capable(CAP_NET_ADMIN))
5001 dev_load(net, ifr.ifr_name);
5003 ret = dev_ifsioc(net, &ifr, cmd);
5008 if (copy_to_user(arg, &ifr,
5009 sizeof(struct ifreq)))
5015 * These ioctl calls:
5016 * - require superuser power.
5017 * - require strict serialization.
5018 * - do not return a value
5028 case SIOCSIFHWBROADCAST:
5031 case SIOCBONDENSLAVE:
5032 case SIOCBONDRELEASE:
5033 case SIOCBONDSETHWADDR:
5034 case SIOCBONDCHANGEACTIVE:
5038 if (!capable(CAP_NET_ADMIN))
5041 case SIOCBONDSLAVEINFOQUERY:
5042 case SIOCBONDINFOQUERY:
5043 dev_load(net, ifr.ifr_name);
5045 ret = dev_ifsioc(net, &ifr, cmd);
5050 /* Get the per device memory space. We can add this but
5051 * currently do not support it */
5053 /* Set the per device memory buffer space.
5054 * Not applicable in our case */
5059 * Unknown or private ioctl.
5062 if (cmd == SIOCWANDEV ||
5063 (cmd >= SIOCDEVPRIVATE &&
5064 cmd <= SIOCDEVPRIVATE + 15)) {
5065 dev_load(net, ifr.ifr_name);
5067 ret = dev_ifsioc(net, &ifr, cmd);
5069 if (!ret && copy_to_user(arg, &ifr,
5070 sizeof(struct ifreq)))
5074 /* Take care of Wireless Extensions */
5075 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
5076 return wext_handle_ioctl(net, &ifr, cmd, arg);
5083 * dev_new_index - allocate an ifindex
5084 * @net: the applicable net namespace
5086 * Returns a suitable unique value for a new device interface
5087 * number. The caller must hold the rtnl semaphore or the
5088 * dev_base_lock to be sure it remains unique.
5090 static int dev_new_index(struct net *net)
5096 if (!__dev_get_by_index(net, ifindex))
5101 /* Delayed registration/unregistration */
5102 static LIST_HEAD(net_todo_list);
5104 static void net_set_todo(struct net_device *dev)
5106 list_add_tail(&dev->todo_list, &net_todo_list);
5109 static void rollback_registered_many(struct list_head *head)
5111 struct net_device *dev, *tmp;
5113 BUG_ON(dev_boot_phase);
5116 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
5117 /* Some devices call us without ever having been registered,
5118 * when unwinding a failed initialization. Remove those
5119 * devices and proceed with the remaining ones.
5121 if (dev->reg_state == NETREG_UNINITIALIZED) {
5122 pr_debug("unregister_netdevice: device %s/%p never "
5123 "was registered\n", dev->name, dev);
5126 list_del(&dev->unreg_list);
5129 dev->dismantle = true;
5130 BUG_ON(dev->reg_state != NETREG_REGISTERED);
5133 /* If device is running, close it first. */
5134 dev_close_many(head);
5136 list_for_each_entry(dev, head, unreg_list) {
5137 /* And unlink it from device chain. */
5138 unlist_netdevice(dev);
5140 dev->reg_state = NETREG_UNREGISTERING;
5145 list_for_each_entry(dev, head, unreg_list) {
5146 /* Shutdown queueing discipline. */
5150 /* Notify protocols that we are about to destroy
5151 this device. They should clean up all of their state.
5153 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5155 if (!dev->rtnl_link_ops ||
5156 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5157 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5160 * Flush the unicast and multicast chains
5165 if (dev->netdev_ops->ndo_uninit)
5166 dev->netdev_ops->ndo_uninit(dev);
5168 /* Notifier chain MUST detach us from master device. */
5169 WARN_ON(dev->master);
5171 /* Remove entries from kobject tree */
5172 netdev_unregister_kobject(dev);
5175 /* Process any work delayed until the end of the batch */
5176 dev = list_first_entry(head, struct net_device, unreg_list);
5177 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
5181 list_for_each_entry(dev, head, unreg_list)
5185 static void rollback_registered(struct net_device *dev)
5189 list_add(&dev->unreg_list, &single);
5190 rollback_registered_many(&single);
5194 u32 netdev_fix_features(struct net_device *dev, u32 features)
5196 /* Fix illegal checksum combinations */
5197 if ((features & NETIF_F_HW_CSUM) &&
5198 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5199 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
5200 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5203 if ((features & NETIF_F_NO_CSUM) &&
5204 (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5205 netdev_warn(dev, "mixed no checksumming and other settings.\n");
5206 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
5209 /* Fix illegal SG+CSUM combinations. */
5210 if ((features & NETIF_F_SG) &&
5211 !(features & NETIF_F_ALL_CSUM)) {
5213 "Dropping NETIF_F_SG since no checksum feature.\n");
5214 features &= ~NETIF_F_SG;
5217 /* TSO requires that SG is present as well. */
5218 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
5219 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
5220 features &= ~NETIF_F_ALL_TSO;
5223 /* TSO ECN requires that TSO is present as well. */
5224 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5225 features &= ~NETIF_F_TSO_ECN;
5227 /* Software GSO depends on SG. */
5228 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
5229 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
5230 features &= ~NETIF_F_GSO;
5233 /* UFO needs SG and checksumming */
5234 if (features & NETIF_F_UFO) {
5235 /* maybe split UFO into V4 and V6? */
5236 if (!((features & NETIF_F_GEN_CSUM) ||
5237 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5238 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5240 "Dropping NETIF_F_UFO since no checksum offload features.\n");
5241 features &= ~NETIF_F_UFO;
5244 if (!(features & NETIF_F_SG)) {
5246 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
5247 features &= ~NETIF_F_UFO;
5253 EXPORT_SYMBOL(netdev_fix_features);
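/*
 * A minimal sketch of how the fixups above compose, assuming a valid
 * dev pointer: a feature word requesting scatter-gather and TSO with
 * no checksum offload loses both, one dependency at a time.
 *
 *	u32 wanted = NETIF_F_SG | NETIF_F_TSO;
 *	u32 fixed = netdev_fix_features(dev, wanted);
 *
 * With no NETIF_F_ALL_CSUM bit set, NETIF_F_SG is dropped first, and
 * once SG is gone the NETIF_F_ALL_TSO bits go too, so fixed == 0.
 */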
5255 int __netdev_update_features(struct net_device *dev)
5262 features = netdev_get_wanted_features(dev);
5264 if (dev->netdev_ops->ndo_fix_features)
5265 features = dev->netdev_ops->ndo_fix_features(dev, features);
5267 /* driver might be less strict about feature dependencies */
5268 features = netdev_fix_features(dev, features);
5270 if (dev->features == features)
5273 netdev_dbg(dev, "Features changed: 0x%08x -> 0x%08x\n",
5274 dev->features, features);
5276 if (dev->netdev_ops->ndo_set_features)
5277 err = dev->netdev_ops->ndo_set_features(dev, features);
5279 if (unlikely(err < 0)) {
5281 "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
5282 err, features, dev->features);
5287 dev->features = features;
5293 * netdev_update_features - recalculate device features
5294 * @dev: the device to check
5296 * Recalculate the dev->features set and send notifications if it
5297 * has changed. Should be called after driver- or hardware-dependent
5298 * conditions that might influence the feature set have changed.
5300 void netdev_update_features(struct net_device *dev)
5302 if (__netdev_update_features(dev))
5303 netdev_features_change(dev);
5305 EXPORT_SYMBOL(netdev_update_features);
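/*
 * A minimal sketch of a typical caller, assuming a hypothetical driver
 * whose offload capabilities depend on the MTU; ndo_change_mtu is
 * invoked with rtnl held, which the feature-update path relies on:
 *
 *	static int example_change_mtu(struct net_device *dev, int new_mtu)
 *	{
 *		dev->mtu = new_mtu;
 *		netdev_update_features(dev);
 *		return 0;
 *	}
 */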
5308 * netdev_change_features - recalculate device features
5309 * @dev: the device to check
5311 * Recalculate dev->features set and send notifications even
5312 * if they have not changed. Should be called instead of
5313 * netdev_update_features() if also dev->vlan_features might
5314 * have changed, to allow the changes to be propagated to stacked devices.
5317 void netdev_change_features(struct net_device *dev)
5319 __netdev_update_features(dev);
5320 netdev_features_change(dev);
5322 EXPORT_SYMBOL(netdev_change_features);
5325 * netif_stacked_transfer_operstate - transfer operstate
5326 * @rootdev: the root or lower level device to transfer state from
5327 * @dev: the device to transfer operstate to
5329 * Transfer operational state from root to device. This is normally
5330 * called when a stacking relationship exists between the root
5331 * device and the device (a leaf device).
5333 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5334 struct net_device *dev)
5336 if (rootdev->operstate == IF_OPER_DORMANT)
5337 netif_dormant_on(dev);
5339 netif_dormant_off(dev);
5341 if (netif_carrier_ok(rootdev)) {
5342 if (!netif_carrier_ok(dev))
5343 netif_carrier_on(dev);
5345 if (netif_carrier_ok(dev))
5346 netif_carrier_off(dev);
5349 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
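/*
 * A minimal sketch of the intended caller: a stacking driver's netdev
 * notifier mirroring the lower device's state, much as 802.1q does on
 * NETDEV_CHANGE. example_get_upper() is a hypothetical lookup:
 *
 *	static int example_notify(struct notifier_block *nb,
 *				  unsigned long event, void *ptr)
 *	{
 *		struct net_device *lower = ptr;
 *		struct net_device *upper = example_get_upper(lower);
 *
 *		if (upper && event == NETDEV_CHANGE)
 *			netif_stacked_transfer_operstate(lower, upper);
 *		return NOTIFY_DONE;
 *	}
 */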
5352 static int netif_alloc_rx_queues(struct net_device *dev)
5354 unsigned int i, count = dev->num_rx_queues;
5355 struct netdev_rx_queue *rx;
5359 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5361 pr_err("netdev: Unable to allocate %u rx queues.\n", count);
5366 for (i = 0; i < count; i++)
5372 static void netdev_init_one_queue(struct net_device *dev,
5373 struct netdev_queue *queue, void *_unused)
5375 /* Initialize queue lock */
5376 spin_lock_init(&queue->_xmit_lock);
5377 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5378 queue->xmit_lock_owner = -1;
5379 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
5383 static int netif_alloc_netdev_queues(struct net_device *dev)
5385 unsigned int count = dev->num_tx_queues;
5386 struct netdev_queue *tx;
5390 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
5392 pr_err("netdev: Unable to allocate %u tx queues.\n",
5398 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5399 spin_lock_init(&dev->tx_global_lock);
5405 * register_netdevice - register a network device
5406 * @dev: device to register
5408 * Take a completed network device structure and add it to the kernel
5409 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5410 * chain. 0 is returned on success. A negative errno code is returned
5411 * on a failure to set up the device, or if the name is a duplicate.
5413 * Callers must hold the rtnl semaphore. You may want
5414 * register_netdev() instead of this.
5417 * The locking appears insufficient to guarantee two parallel registers
5418 * will not get the same name.
5421 int register_netdevice(struct net_device *dev)
5424 struct net *net = dev_net(dev);
5426 BUG_ON(dev_boot_phase);
5431 /* When net_device's are persistent, this will be fatal. */
5432 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
5435 spin_lock_init(&dev->addr_list_lock);
5436 netdev_set_addr_lockdep_class(dev);
5440 ret = dev_get_valid_name(dev, dev->name);
5444 /* Init, if this function is available */
5445 if (dev->netdev_ops->ndo_init) {
5446 ret = dev->netdev_ops->ndo_init(dev);
5454 dev->ifindex = dev_new_index(net);
5455 if (dev->iflink == -1)
5456 dev->iflink = dev->ifindex;
5458 /* Transfer changeable features to wanted_features and enable
5459 * software offloads (GSO and GRO).
5461 dev->hw_features |= NETIF_F_SOFT_FEATURES;
5462 dev->features |= NETIF_F_SOFT_FEATURES;
5463 dev->wanted_features = dev->features & dev->hw_features;
5465 /* Turn on no cache copy if HW is doing checksum */
5466 dev->hw_features |= NETIF_F_NOCACHE_COPY;
5467 if ((dev->features & NETIF_F_ALL_CSUM) &&
5468 !(dev->features & NETIF_F_NO_CSUM)) {
5469 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5470 dev->features |= NETIF_F_NOCACHE_COPY;
5473 /* Enable GRO and NETIF_F_HIGHDMA for vlans by default;
5474 * vlan_dev_init() will do the dev->features check, so these features
5475 * are enabled only if supported by underlying device.
5477 dev->vlan_features |= (NETIF_F_GRO | NETIF_F_HIGHDMA);
5479 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5480 ret = notifier_to_errno(ret);
5484 ret = netdev_register_kobject(dev);
5487 dev->reg_state = NETREG_REGISTERED;
5489 __netdev_update_features(dev);
5492 * Default initial state at registration is that the
5493 * device is present.
5496 set_bit(__LINK_STATE_PRESENT, &dev->state);
5498 dev_init_scheduler(dev);
5500 list_netdevice(dev);
5502 /* Notify protocols that a new device appeared. */
5503 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
5504 ret = notifier_to_errno(ret);
5506 rollback_registered(dev);
5507 dev->reg_state = NETREG_UNREGISTERED;
5510 * Prevent userspace races by waiting until the network
5511 * device is fully set up before sending notifications.
5513 if (!dev->rtnl_link_ops ||
5514 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5515 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5521 if (dev->netdev_ops->ndo_uninit)
5522 dev->netdev_ops->ndo_uninit(dev);
5525 EXPORT_SYMBOL(register_netdevice);
5528 * init_dummy_netdev - init a dummy network device for NAPI
5529 * @dev: device to init
5531 * This takes a network device structure and initializes the minimum
5532 * number of fields so it can be used to schedule NAPI polls without
5533 * registering a full blown interface. This is to be used by drivers
5534 * that need to tie several hardware interfaces to a single NAPI
5535 * poll scheduler due to HW limitations.
5537 int init_dummy_netdev(struct net_device *dev)
5539 /* Clear everything. Note we don't initialize spinlocks
5540 * as they aren't supposed to be taken by any of the
5541 * NAPI code and this dummy netdev is supposed to be
5542 * only ever used for NAPI polls
5544 memset(dev, 0, sizeof(struct net_device));
5546 /* make sure we BUG if trying to hit standard
5547 * register/unregister code path
5549 dev->reg_state = NETREG_DUMMY;
5551 /* NAPI wants this */
5552 INIT_LIST_HEAD(&dev->napi_list);
5554 /* a dummy interface is started by default */
5555 set_bit(__LINK_STATE_PRESENT, &dev->state);
5556 set_bit(__LINK_STATE_START, &dev->state);
5558 /* Note: we don't allocate pcpu_refcnt for dummy devices,
5559 * because users of this 'device' don't need to change its refcount.
5565 EXPORT_SYMBOL_GPL(init_dummy_netdev);
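/*
 * A minimal sketch of the intended use, assuming a hypothetical driver
 * that funnels several hardware units into one NAPI context;
 * example_poll() is the driver's poll callback and 64 an arbitrary
 * weight:
 *
 *	static struct net_device example_dummy;
 *	static struct napi_struct example_napi;
 *
 *	static void example_setup_napi(void)
 *	{
 *		init_dummy_netdev(&example_dummy);
 *		netif_napi_add(&example_dummy, &example_napi,
 *			       example_poll, 64);
 *		napi_enable(&example_napi);
 *	}
 */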
5569 * register_netdev - register a network device
5570 * @dev: device to register
5572 * Take a completed network device structure and add it to the kernel
5573 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5574 * chain. 0 is returned on success. A negative errno code is returned
5575 * on a failure to set up the device, or if the name is a duplicate.
5577 * This is a wrapper around register_netdevice that takes the rtnl semaphore
5578 * and expands the device name if you passed a format string to alloc_netdev().
5581 int register_netdev(struct net_device *dev)
5586 err = register_netdevice(dev);
5590 EXPORT_SYMBOL(register_netdev);
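/*
 * A minimal sketch of the usual probe-time sequence, with hypothetical
 * example_priv and example_netdev_ops; note the unwind through
 * free_netdev() when registration fails:
 *
 *	static int example_probe(void)
 *	{
 *		struct net_device *dev;
 *		int err;
 *
 *		dev = alloc_etherdev(sizeof(struct example_priv));
 *		if (!dev)
 *			return -ENOMEM;
 *
 *		dev->netdev_ops = &example_netdev_ops;
 *		err = register_netdev(dev);
 *		if (err)
 *			free_netdev(dev);
 *		return err;
 *	}
 */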
5592 int netdev_refcnt_read(const struct net_device *dev)
5596 for_each_possible_cpu(i)
5597 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5600 EXPORT_SYMBOL(netdev_refcnt_read);
5603 * netdev_wait_allrefs - wait until all references are gone.
5605 * This is called when unregistering network devices.
5607 * Any protocol or device that holds a reference should register
5608 * for netdevice notification, and clean up and put back the
5609 * reference if they receive an UNREGISTER event.
5610 * We can get stuck here if buggy protocols don't correctly call dev_put.
5613 static void netdev_wait_allrefs(struct net_device *dev)
5615 unsigned long rebroadcast_time, warning_time;
5618 linkwatch_forget_dev(dev);
5620 rebroadcast_time = warning_time = jiffies;
5621 refcnt = netdev_refcnt_read(dev);
5623 while (refcnt != 0) {
5624 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
5627 /* Rebroadcast unregister notification */
5628 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5629 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
5630 * should have already handled it the first time */
5632 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5634 /* We must not have linkwatch events
5635 * pending on unregister. If this
5636 * happens, we simply run the queue
5637 * unscheduled, resulting in a noop for this device.
5640 linkwatch_run_queue();
5645 rebroadcast_time = jiffies;
5650 refcnt = netdev_refcnt_read(dev);
5652 if (time_after(jiffies, warning_time + 10 * HZ)) {
5653 printk(KERN_EMERG "unregister_netdevice: "
5654 "waiting for %s to become free. Usage "
5657 warning_time = jiffies;
5666 * register_netdevice(x1);
5667 * register_netdevice(x2);
5669 * unregister_netdevice(y1);
5670 * unregister_netdevice(y2);
5676 * We are invoked by rtnl_unlock().
5677 * This allows us to deal with problems:
5678 * 1) We can delete sysfs objects which invoke hotplug
5679 * without deadlocking with linkwatch via keventd.
5680 * 2) Since we run with the RTNL semaphore not held, we can sleep
5681 * safely in order to wait for the netdev refcnt to drop to zero.
5683 * We must not return until all unregister events added during
5684 * the interval the lock was held have been completed.
5686 void netdev_run_todo(void)
5688 struct list_head list;
5690 /* Snapshot list, allow later requests */
5691 list_replace_init(&net_todo_list, &list);
5695 while (!list_empty(&list)) {
5696 struct net_device *dev
5697 = list_first_entry(&list, struct net_device, todo_list);
5698 list_del(&dev->todo_list);
5700 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5701 printk(KERN_ERR "network todo '%s' but state %d\n",
5702 dev->name, dev->reg_state);
5707 dev->reg_state = NETREG_UNREGISTERED;
5709 on_each_cpu(flush_backlog, dev, 1);
5711 netdev_wait_allrefs(dev);
5714 BUG_ON(netdev_refcnt_read(dev));
5715 WARN_ON(rcu_dereference_raw(dev->ip_ptr));
5716 WARN_ON(rcu_dereference_raw(dev->ip6_ptr));
5717 WARN_ON(dev->dn_ptr);
5719 if (dev->destructor)
5720 dev->destructor(dev);
5722 /* Free network device */
5723 kobject_put(&dev->dev.kobj);
5727 /* Convert net_device_stats to rtnl_link_stats64. They have the same
5728 * fields in the same order, with only the type differing.
5730 static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5731 const struct net_device_stats *netdev_stats)
5733 #if BITS_PER_LONG == 64
5734 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
5735 memcpy(stats64, netdev_stats, sizeof(*stats64));
5737 size_t i, n = sizeof(*stats64) / sizeof(u64);
5738 const unsigned long *src = (const unsigned long *)netdev_stats;
5739 u64 *dst = (u64 *)stats64;
5741 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
5742 sizeof(*stats64) / sizeof(u64));
5743 for (i = 0; i < n; i++)
5744 dst[i] = src[i];
5749 * dev_get_stats - get network device statistics
5750 * @dev: device to get statistics from
5751 * @storage: place to store stats
5753 * Get network statistics from device. Return @storage.
5754 * The device driver may provide its own method by setting
5755 * dev->netdev_ops->ndo_get_stats64 or dev->netdev_ops->ndo_get_stats;
5756 * otherwise the internal statistics structure is used.
5758 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5759 struct rtnl_link_stats64 *storage)
5761 const struct net_device_ops *ops = dev->netdev_ops;
5763 if (ops->ndo_get_stats64) {
5764 memset(storage, 0, sizeof(*storage));
5765 ops->ndo_get_stats64(dev, storage);
5766 } else if (ops->ndo_get_stats) {
5767 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
5769 netdev_stats_to_stats64(storage, &dev->stats);
5771 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
5774 EXPORT_SYMBOL(dev_get_stats);
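/*
 * A minimal sketch of a caller; @storage lives on the caller's stack
 * and comes back filled in no matter which of the three methods above
 * served the query:
 *
 *	static void example_log_stats(struct net_device *dev)
 *	{
 *		struct rtnl_link_stats64 stats;
 *
 *		dev_get_stats(dev, &stats);
 *		netdev_info(dev, "rx %llu pkts, tx %llu pkts\n",
 *			    (unsigned long long)stats.rx_packets,
 *			    (unsigned long long)stats.tx_packets);
 *	}
 */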
5776 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
5778 struct netdev_queue *queue = dev_ingress_queue(dev);
5780 #ifdef CONFIG_NET_CLS_ACT
5783 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
5786 netdev_init_one_queue(dev, queue, NULL);
5787 queue->qdisc = &noop_qdisc;
5788 queue->qdisc_sleeping = &noop_qdisc;
5789 rcu_assign_pointer(dev->ingress_queue, queue);
5795 * alloc_netdev_mqs - allocate network device
5796 * @sizeof_priv: size of private data to allocate space for
5797 * @name: device name format string
5798 * @setup: callback to initialize device
5799 * @txqs: the number of TX subqueues to allocate
5800 * @rxqs: the number of RX subqueues to allocate
5802 * Allocates a struct net_device with private data area for driver use
5803 * and performs basic initialization. Also allocates subqueue structs
5804 * for each queue on the device.
5806 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
5807 void (*setup)(struct net_device *),
5808 unsigned int txqs, unsigned int rxqs)
5810 struct net_device *dev;
5812 struct net_device *p;
5814 BUG_ON(strlen(name) >= sizeof(dev->name));
5817 pr_err("alloc_netdev: Unable to allocate device "
5818 "with zero queues.\n");
5824 pr_err("alloc_netdev: Unable to allocate device "
5825 "with zero RX queues.\n");
5830 alloc_size = sizeof(struct net_device);
5832 /* ensure 32-byte alignment of private area */
5833 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
5834 alloc_size += sizeof_priv;
5836 /* ensure 32-byte alignment of whole construct */
5837 alloc_size += NETDEV_ALIGN - 1;
5839 p = kzalloc(alloc_size, GFP_KERNEL);
5841 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
5845 dev = PTR_ALIGN(p, NETDEV_ALIGN);
5846 dev->padded = (char *)dev - (char *)p;
5848 dev->pcpu_refcnt = alloc_percpu(int);
5849 if (!dev->pcpu_refcnt)
5852 if (dev_addr_init(dev))
5858 dev_net_set(dev, &init_net);
5860 dev->gso_max_size = GSO_MAX_SIZE;
5862 INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
5863 dev->ethtool_ntuple_list.count = 0;
5864 INIT_LIST_HEAD(&dev->napi_list);
5865 INIT_LIST_HEAD(&dev->unreg_list);
5866 INIT_LIST_HEAD(&dev->link_watch_list);
5867 dev->priv_flags = IFF_XMIT_DST_RELEASE;
5870 dev->num_tx_queues = txqs;
5871 dev->real_num_tx_queues = txqs;
5872 if (netif_alloc_netdev_queues(dev))
5876 dev->num_rx_queues = rxqs;
5877 dev->real_num_rx_queues = rxqs;
5878 if (netif_alloc_rx_queues(dev))
5882 strcpy(dev->name, name);
5883 dev->group = INIT_NETDEV_GROUP;
5891 free_percpu(dev->pcpu_refcnt);
5901 EXPORT_SYMBOL(alloc_netdev_mqs);
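/*
 * A minimal sketch of a multiqueue allocation, assuming a hypothetical
 * example_setup() initializer and eight TX/RX queue pairs; the "%d" in
 * the name is expanded when the device is registered:
 *
 *	dev = alloc_netdev_mqs(sizeof(struct example_priv), "example%d",
 *			       example_setup, 8, 8);
 *	if (!dev)
 *		return -ENOMEM;
 */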
5904 * free_netdev - free network device
5907 * This function does the last stage of destroying an allocated device
5908 * interface. The reference to the device object is released.
5909 * If this is the last reference then it will be freed.
5911 void free_netdev(struct net_device *dev)
5913 struct napi_struct *p, *n;
5915 release_net(dev_net(dev));
5922 kfree(rcu_dereference_raw(dev->ingress_queue));
5924 /* Flush device addresses */
5925 dev_addr_flush(dev);
5927 /* Clear ethtool n-tuple list */
5928 ethtool_ntuple_flush(dev);
5930 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5933 free_percpu(dev->pcpu_refcnt);
5934 dev->pcpu_refcnt = NULL;
5936 /* Compatibility with error handling in drivers */
5937 if (dev->reg_state == NETREG_UNINITIALIZED) {
5938 kfree((char *)dev - dev->padded);
5942 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5943 dev->reg_state = NETREG_RELEASED;
5945 /* will free via device release */
5946 put_device(&dev->dev);
5948 EXPORT_SYMBOL(free_netdev);
5951 * synchronize_net - Synchronize with packet receive processing
5953 * Wait for packets currently being received to be done.
5954 * Does not block later packets from starting.
5956 void synchronize_net(void)
5961 EXPORT_SYMBOL(synchronize_net);
5964 * unregister_netdevice_queue - remove device from the kernel
5968 * This function shuts down a device interface and removes it
5969 * from the kernel tables.
5970 * If @head is not NULL, the device is queued to be unregistered later.
5972 * Callers must hold the rtnl semaphore. You may want
5973 * unregister_netdev() instead of this.
5976 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
5981 list_move_tail(&dev->unreg_list, head);
5983 rollback_registered(dev);
5984 /* Finish processing unregister after unlock */
5988 EXPORT_SYMBOL(unregister_netdevice_queue);
5991 * unregister_netdevice_many - unregister many devices
5992 * @head: list of devices
5994 void unregister_netdevice_many(struct list_head *head)
5996 struct net_device *dev;
5998 if (!list_empty(head)) {
5999 rollback_registered_many(head);
6000 list_for_each_entry(dev, head, unreg_list)
6004 EXPORT_SYMBOL(unregister_netdevice_many);
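/*
 * A minimal sketch of batched teardown under rtnl, paying for the
 * expensive synchronization points once per batch instead of once per
 * device; dev1 and dev2 are hypothetical:
 *
 *	LIST_HEAD(kill_list);
 *
 *	rtnl_lock();
 *	unregister_netdevice_queue(dev1, &kill_list);
 *	unregister_netdevice_queue(dev2, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 *	rtnl_unlock();
 */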
6007 * unregister_netdev - remove device from the kernel
6010 * This function shuts down a device interface and removes it
6011 * from the kernel tables.
6013 * This is just a wrapper for unregister_netdevice that takes
6014 * the rtnl semaphore. In general you want to use this and not
6015 * unregister_netdevice.
6017 void unregister_netdev(struct net_device *dev)
6020 unregister_netdevice(dev);
6023 EXPORT_SYMBOL(unregister_netdev);
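/*
 * A minimal sketch of the matching remove path for the probe sequence
 * sketched above; unregister_netdev() takes rtnl itself, and only once
 * it returns may the structure be freed:
 *
 *	static void example_remove(struct net_device *dev)
 *	{
 *		unregister_netdev(dev);
 *		free_netdev(dev);
 *	}
 */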
6026 * dev_change_net_namespace - move device to a different network namespace
6028 * @net: network namespace
6029 * @pat: If not NULL name pattern to try if the current device name
6030 * is already taken in the destination network namespace.
6032 * This function shuts down a device interface and moves it
6033 * to a new network namespace. On success 0 is returned, on
6034 * a failure a negative errno code is returned.
6036 * Callers must hold the rtnl semaphore.
6039 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6045 /* Don't allow namespace local devices to be moved. */
6047 if (dev->features & NETIF_F_NETNS_LOCAL)
6050 /* Ensure the device has been registered */
6052 if (dev->reg_state != NETREG_REGISTERED)
6055 /* Get out if there is nothing to do */
6057 if (net_eq(dev_net(dev), net))
6060 /* Pick the destination device name, and ensure
6061 * we can use it in the destination network namespace.
6064 if (__dev_get_by_name(net, dev->name)) {
6065 /* We get here if we can't use the current device name */
6068 if (dev_get_valid_name(dev, pat) < 0)
6073 * And now a mini version of register_netdevice() and unregister_netdevice().
6076 /* If device is running close it first. */
6079 /* And unlink it from device chain */
6081 unlist_netdevice(dev);
6085 /* Shutdown queueing discipline. */
6088 /* Notify protocols that we are about to destroy
6089 this device. They should clean up all of their state.
6091 Note that dev->reg_state stays at NETREG_REGISTERED.
6092 This is wanted because this way 8021q and macvlan know
6093 the device is just moving and can keep their slaves up.
6095 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6096 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
6099 * Flush the unicast and multicast chains
6104 /* Actually switch the network namespace */
6105 dev_net_set(dev, net);
6107 /* If there is an ifindex conflict assign a new one */
6108 if (__dev_get_by_index(net, dev->ifindex)) {
6109 int iflink = (dev->iflink == dev->ifindex);
6110 dev->ifindex = dev_new_index(net);
6112 dev->iflink = dev->ifindex;
6115 /* Fixup kobjects */
6116 err = device_rename(&dev->dev, dev->name);
6119 /* Add the device back in the hashes */
6120 list_netdevice(dev);
6122 /* Notify protocols that a new device appeared. */
6123 call_netdevice_notifiers(NETDEV_REGISTER, dev);
6126 * Prevent userspace races by waiting until the network
6127 * device is fully set up before sending notifications.
6129 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
6136 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
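/*
 * A minimal sketch of a caller holding rtnl; dest_net is a hypothetical
 * destination namespace, and "dev%d" the fallback pattern used if the
 * current name is already taken there:
 *
 *	err = dev_change_net_namespace(dev, dest_net, "dev%d");
 *	if (err)
 *		netdev_warn(dev, "namespace move failed: %d\n", err);
 */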
6138 static int dev_cpu_callback(struct notifier_block *nfb,
6139 unsigned long action,
6142 struct sk_buff **list_skb;
6143 struct sk_buff *skb;
6144 unsigned int cpu, oldcpu = (unsigned long)ocpu;
6145 struct softnet_data *sd, *oldsd;
6147 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
6150 local_irq_disable();
6151 cpu = smp_processor_id();
6152 sd = &per_cpu(softnet_data, cpu);
6153 oldsd = &per_cpu(softnet_data, oldcpu);
6155 /* Find end of our completion_queue. */
6156 list_skb = &sd->completion_queue;
6158 list_skb = &(*list_skb)->next;
6159 /* Append completion queue from offline CPU. */
6160 *list_skb = oldsd->completion_queue;
6161 oldsd->completion_queue = NULL;
6163 /* Append output queue from offline CPU. */
6164 if (oldsd->output_queue) {
6165 *sd->output_queue_tailp = oldsd->output_queue;
6166 sd->output_queue_tailp = oldsd->output_queue_tailp;
6167 oldsd->output_queue = NULL;
6168 oldsd->output_queue_tailp = &oldsd->output_queue;
6171 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6174 /* Process offline CPU's input_pkt_queue */
6175 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
6177 input_queue_head_incr(oldsd);
6179 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
6181 input_queue_head_incr(oldsd);
6189 * netdev_increment_features - increment feature set by one
6190 * @all: current feature set
6191 * @one: new feature set
6192 * @mask: mask feature set
6194 * Computes a new feature set after adding a device with feature set
6195 * @one to the master device with current feature set @all. Will not
6196 * enable anything that is off in @mask. Returns the new feature set.
6198 u32 netdev_increment_features(u32 all, u32 one, u32 mask)
6200 if (mask & NETIF_F_GEN_CSUM)
6201 mask |= NETIF_F_ALL_CSUM;
6202 mask |= NETIF_F_VLAN_CHALLENGED;
6204 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6205 all &= one | ~NETIF_F_ALL_FOR_ALL;
6207 /* If device needs checksumming, downgrade to it. */
6208 if (all & (NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM))
6209 all &= ~NETIF_F_NO_CSUM;
6211 /* If one device supports hw checksumming, set for all. */
6212 if (all & NETIF_F_GEN_CSUM)
6213 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
6217 EXPORT_SYMBOL(netdev_increment_features);
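/*
 * A minimal sketch of the intended aggregation loop, in the style of
 * the bridge's feature recomputation; the master/slave names and the
 * slave list are hypothetical:
 *
 *	u32 features = master->base_features;
 *
 *	list_for_each_entry(slave, &master->slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     NETIF_F_ONE_FOR_ALL);
 *	master->dev->features = features;
 */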
6219 static struct hlist_head *netdev_create_hash(void)
6222 struct hlist_head *hash;
6224 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6226 for (i = 0; i < NETDEV_HASHENTRIES; i++)
6227 INIT_HLIST_HEAD(&hash[i]);
6232 /* Initialize per network namespace state */
6233 static int __net_init netdev_init(struct net *net)
6235 INIT_LIST_HEAD(&net->dev_base_head);
6237 net->dev_name_head = netdev_create_hash();
6238 if (net->dev_name_head == NULL)
6241 net->dev_index_head = netdev_create_hash();
6242 if (net->dev_index_head == NULL)
6248 kfree(net->dev_name_head);
6254 * netdev_drivername - network driver for the device
6255 * @dev: network device
6256 * @buffer: buffer for resulting name
6257 * @len: size of buffer
6259 * Determine network driver for device.
6261 char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
6263 const struct device_driver *driver;
6264 const struct device *parent;
6266 if (len <= 0 || !buffer)
6270 parent = dev->dev.parent;
6275 driver = parent->driver;
6276 if (driver && driver->name)
6277 strlcpy(buffer, driver->name, len);
6281 static int __netdev_printk(const char *level, const struct net_device *dev,
6282 struct va_format *vaf)
6286 if (dev && dev->dev.parent)
6287 r = dev_printk(level, dev->dev.parent, "%s: %pV",
6288 netdev_name(dev), vaf);
6290 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
6292 r = printk("%s(NULL net_device): %pV", level, vaf);
6297 int netdev_printk(const char *level, const struct net_device *dev,
6298 const char *format, ...)
6300 struct va_format vaf;
6304 va_start(args, format);
6309 r = __netdev_printk(level, dev, &vaf);
6314 EXPORT_SYMBOL(netdev_printk);
6316 #define define_netdev_printk_level(func, level) \
6317 int func(const struct net_device *dev, const char *fmt, ...) \
6320 struct va_format vaf; \
6323 va_start(args, fmt); \
6328 r = __netdev_printk(level, dev, &vaf); \
6333 EXPORT_SYMBOL(func);
6335 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
6336 define_netdev_printk_level(netdev_alert, KERN_ALERT);
6337 define_netdev_printk_level(netdev_crit, KERN_CRIT);
6338 define_netdev_printk_level(netdev_err, KERN_ERR);
6339 define_netdev_printk_level(netdev_warn, KERN_WARNING);
6340 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
6341 define_netdev_printk_level(netdev_info, KERN_INFO);
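/*
 * A minimal usage sketch: the level helpers above prefix messages with
 * the driver and device name, so a hypothetical caller need only write
 *
 *	netdev_warn(dev, "link flapped %d times, resetting\n", count);
 *
 * instead of open-coding printk(KERN_WARNING ...) with the names.
 */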
6343 static void __net_exit netdev_exit(struct net *net)
6345 kfree(net->dev_name_head);
6346 kfree(net->dev_index_head);
6349 static struct pernet_operations __net_initdata netdev_net_ops = {
6350 .init = netdev_init,
6351 .exit = netdev_exit,
6354 static void __net_exit default_device_exit(struct net *net)
6356 struct net_device *dev, *aux;
6358 * Push all migratable network devices back to the
6359 * initial network namespace
6362 for_each_netdev_safe(net, dev, aux) {
6364 char fb_name[IFNAMSIZ];
6366 /* Ignore unmoveable devices (i.e. loopback) */
6367 if (dev->features & NETIF_F_NETNS_LOCAL)
6370 /* Leave virtual devices for the generic cleanup */
6371 if (dev->rtnl_link_ops)
6374 /* Push remaining network devices to init_net */
6375 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6376 err = dev_change_net_namespace(dev, &init_net, fb_name);
6378 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
6379 __func__, dev->name, err);
6386 static void __net_exit default_device_exit_batch(struct list_head *net_list)
6388 /* At exit all network devices must be removed from a network
6389 * namespace. Do this in the reverse order of registration.
6390 * Do this across as many network namespaces as possible to
6391 * improve batching efficiency.
6393 struct net_device *dev;
6395 LIST_HEAD(dev_kill_list);
6398 list_for_each_entry(net, net_list, exit_list) {
6399 for_each_netdev_reverse(net, dev) {
6400 if (dev->rtnl_link_ops)
6401 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
6403 unregister_netdevice_queue(dev, &dev_kill_list);
6406 unregister_netdevice_many(&dev_kill_list);
6407 list_del(&dev_kill_list);
6411 static struct pernet_operations __net_initdata default_device_ops = {
6412 .exit = default_device_exit,
6413 .exit_batch = default_device_exit_batch,
6417 * Initialize the DEV module. At boot time this walks the device list and
6418 * unhooks any devices that fail to initialise (normally hardware not
6419 * present) and leaves us with a valid list of present and active devices.
6424 * This is called single threaded during boot, so no need
6425 * to take the rtnl semaphore.
6427 static int __init net_dev_init(void)
6429 int i, rc = -ENOMEM;
6431 BUG_ON(!dev_boot_phase);
6433 if (dev_proc_init())
6436 if (netdev_kobject_init())
6439 INIT_LIST_HEAD(&ptype_all);
6440 for (i = 0; i < PTYPE_HASH_SIZE; i++)
6441 INIT_LIST_HEAD(&ptype_base[i]);
6443 if (register_pernet_subsys(&netdev_net_ops))
6447 * Initialise the packet receive queues.
6450 for_each_possible_cpu(i) {
6451 struct softnet_data *sd = &per_cpu(softnet_data, i);
6453 memset(sd, 0, sizeof(*sd));
6454 skb_queue_head_init(&sd->input_pkt_queue);
6455 skb_queue_head_init(&sd->process_queue);
6456 sd->completion_queue = NULL;
6457 INIT_LIST_HEAD(&sd->poll_list);
6458 sd->output_queue = NULL;
6459 sd->output_queue_tailp = &sd->output_queue;
6461 sd->csd.func = rps_trigger_softirq;
6467 sd->backlog.poll = process_backlog;
6468 sd->backlog.weight = weight_p;
6469 sd->backlog.gro_list = NULL;
6470 sd->backlog.gro_count = 0;
6475 /* The loopback device is special: if any other network device
6476 * is present in a network namespace, the loopback device must
6477 * be present too. Since we now dynamically allocate and free the
6478 * loopback device, ensure this invariant is maintained by
6479 * keeping the loopback device the first device on the
6480 * list of network devices. It is thus the first device that
6481 * appears and the last network device that disappears.
6484 if (register_pernet_device(&loopback_net_ops))
6487 if (register_pernet_device(&default_device_ops))
6490 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6491 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
6493 hotcpu_notifier(dev_cpu_callback, 0);
6501 subsys_initcall(net_dev_init);
6503 static int __init initialize_hashrnd(void)
6505 get_random_bytes(&hashrnd, sizeof(hashrnd));
6509 late_initcall_sync(initialize_hashrnd);