2 * NET3 Protocol independent device support routines.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Derived from the non IP parts of dev.c 1.0.19
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dahinds@users.sourceforge.net>
18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19 * Adam Sulmicki <adam@cfar.umd.edu>
20 * Pekka Riikonen <priikone@poesidon.pspt.fi>
23 * D.J. Barrow : Fixed bug where dev->refcnt gets set
24 * to 2 if register_netdev gets called
25 * before net_dev_init & also removed a
26 * few lines of code in the process.
27 * Alan Cox : device private ioctl copies fields back.
28 * Alan Cox : Transmit queue code does relevant
29 * stunts to keep the queue safe.
30 * Alan Cox : Fixed double lock.
31 * Alan Cox : Fixed promisc NULL pointer trap
32 * ???????? : Support the full private ioctl range
33 * Alan Cox : Moved ioctl permission check into
35 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
36 * Alan Cox : 100 backlog just doesn't cut it when
37 * you start doing multicast video 8)
38 * Alan Cox : Rewrote net_bh and list manager.
39 * Alan Cox : Fix ETH_P_ALL echoback lengths.
40 * Alan Cox : Took out transmit every packet pass
41 * Saved a few bytes in the ioctl handler
42 * Alan Cox : Network driver sets packet type before
43 * calling netif_rx. Saves a function
45 * Alan Cox : Hashed net_bh()
46 * Richard Kooijman: Timestamp fixes.
47 * Alan Cox : Wrong field in SIOCGIFDSTADDR
48 * Alan Cox : Device lock protection.
49 * Alan Cox : Fixed nasty side effect of device close
51 * Rudi Cilibrasi : Pass the right thing to
53 * Dave Miller : 32bit quantity for the device lock to
54 * make it work out on a Sparc.
55 * Bjorn Ekwall : Added KERNELD hack.
56 * Alan Cox : Cleaned up the backlog initialise.
57 * Craig Metz : SIOCGIFCONF fix if space for under
59 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
60 * is no device open function.
61 * Andi Kleen : Fix error reporting for SIOCGIFCONF
62 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
63 * Cyrus Durgin : Cleaned for KMOD
64 * Adam Sulmicki : Bug Fix : Network Device Unload
65 * A network device unload needs to purge
67 * Paul Rusty Russell : SIOCSIFNAME
68 * Pekka Riikonen : Netdev boot-time settings code
69 * Andrew Morton : Make unregister_netdevice wait
70 * indefinitely on dev->refcnt
71 * J Hadi Salim : - Backlog queue sampling
72 * - netif_rx() feedback
75 #include <asm/uaccess.h>
76 #include <asm/system.h>
77 #include <linux/bitops.h>
78 #include <linux/capability.h>
79 #include <linux/cpu.h>
80 #include <linux/types.h>
81 #include <linux/kernel.h>
82 #include <linux/hash.h>
83 #include <linux/slab.h>
84 #include <linux/sched.h>
85 #include <linux/mutex.h>
86 #include <linux/string.h>
88 #include <linux/socket.h>
89 #include <linux/sockios.h>
90 #include <linux/errno.h>
91 #include <linux/interrupt.h>
92 #include <linux/if_ether.h>
93 #include <linux/netdevice.h>
94 #include <linux/etherdevice.h>
95 #include <linux/ethtool.h>
96 #include <linux/notifier.h>
97 #include <linux/skbuff.h>
98 #include <net/net_namespace.h>
100 #include <linux/rtnetlink.h>
101 #include <linux/proc_fs.h>
102 #include <linux/seq_file.h>
103 #include <linux/stat.h>
105 #include <net/pkt_sched.h>
106 #include <net/checksum.h>
107 #include <net/xfrm.h>
108 #include <linux/highmem.h>
109 #include <linux/init.h>
110 #include <linux/kmod.h>
111 #include <linux/module.h>
112 #include <linux/netpoll.h>
113 #include <linux/rcupdate.h>
114 #include <linux/delay.h>
115 #include <net/wext.h>
116 #include <net/iw_handler.h>
117 #include <asm/current.h>
118 #include <linux/audit.h>
119 #include <linux/dmaengine.h>
120 #include <linux/err.h>
121 #include <linux/ctype.h>
122 #include <linux/if_arp.h>
123 #include <linux/if_vlan.h>
124 #include <linux/ip.h>
126 #include <linux/ipv6.h>
127 #include <linux/in.h>
128 #include <linux/jhash.h>
129 #include <linux/random.h>
130 #include <trace/events/napi.h>
131 #include <trace/events/net.h>
132 #include <trace/events/skb.h>
133 #include <linux/pci.h>
134 #include <linux/inetdevice.h>
135 #include <linux/cpu_rmap.h>
136 #include <linux/if_tunnel.h>
137 #include <linux/if_pppox.h>
138 #include <linux/ppp_defs.h>
139 #include <linux/net_tstamp.h>
141 #include "net-sysfs.h"
143 /* Instead of increasing this, you should create a hash table. */
144 #define MAX_GRO_SKBS 8
146 /* This should be increased if a protocol with a bigger head is added. */
147 #define GRO_MAX_HEAD (MAX_HEADER + 128)
150 * The list of packet types we will receive (as opposed to discard)
151 * and the routines to invoke.
153 * Why 16? Because with 16 the only overlap we get on a hash of the
154 * low nibble of the protocol value is RARP/SNAP/X.25.
156 * NOTE: That is no longer true with the addition of VLAN tags. Not
157 * sure which should go first, but I bet it won't make much
158 * difference if we are running VLANs. The good news is that
159 * this protocol won't be in the list unless compiled in, so
160 * the average user (w/out VLANs) will not be adversely affected.
177 #define PTYPE_HASH_SIZE (16)
178 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
180 static DEFINE_SPINLOCK(ptype_lock);
181 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
182 static struct list_head ptype_all __read_mostly; /* Taps */
185 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
188 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
190 * Writers must hold the rtnl semaphore while they loop through the
191 * dev_base_head list, and hold dev_base_lock for writing when they do the
192 * actual updates. This allows pure readers to access the list even
193 * while a writer is preparing to update it.
195 * To put it another way, dev_base_lock is held for writing only to
196 * protect against pure readers; the rtnl semaphore provides the
197 * protection against other writers.
199 * See, for example usages, register_netdevice() and
200 * unregister_netdevice(), which must be called with the rtnl
203 DEFINE_RWLOCK(dev_base_lock);
204 EXPORT_SYMBOL(dev_base_lock);
206 static inline void dev_base_seq_inc(struct net *net)
208 while (++net->dev_base_seq == 0);
211 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
213 unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
214 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
217 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
219 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
222 static inline void rps_lock(struct softnet_data *sd)
225 spin_lock(&sd->input_pkt_queue.lock);
229 static inline void rps_unlock(struct softnet_data *sd)
232 spin_unlock(&sd->input_pkt_queue.lock);
236 /* Device list insertion */
237 static int list_netdevice(struct net_device *dev)
239 struct net *net = dev_net(dev);
243 write_lock_bh(&dev_base_lock);
244 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
245 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
246 hlist_add_head_rcu(&dev->index_hlist,
247 dev_index_hash(net, dev->ifindex));
248 write_unlock_bh(&dev_base_lock);
250 dev_base_seq_inc(net);
255 /* Device list removal
256 * caller must respect an RCU grace period before freeing/reusing dev
258 static void unlist_netdevice(struct net_device *dev)
262 /* Unlink dev from the device chain */
263 write_lock_bh(&dev_base_lock);
264 list_del_rcu(&dev->dev_list);
265 hlist_del_rcu(&dev->name_hlist);
266 hlist_del_rcu(&dev->index_hlist);
267 write_unlock_bh(&dev_base_lock);
269 dev_base_seq_inc(dev_net(dev));
276 static RAW_NOTIFIER_HEAD(netdev_chain);
279 * Device drivers call our routines to queue packets here. We empty the
280 * queue in the local softnet handler.
283 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
284 EXPORT_PER_CPU_SYMBOL(softnet_data);
286 #ifdef CONFIG_LOCKDEP
288 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
289 * according to dev->type
291 static const unsigned short netdev_lock_type[] =
292 {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
293 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
294 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
295 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
296 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
297 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
298 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
299 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
300 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
301 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
302 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
303 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
304 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
305 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
306 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
307 ARPHRD_VOID, ARPHRD_NONE};
309 static const char *const netdev_lock_name[] =
310 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
311 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
312 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
313 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
314 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
315 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
316 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
317 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
318 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
319 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
320 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
321 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
322 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
323 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
324 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
325 "_xmit_VOID", "_xmit_NONE"};
327 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
328 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
330 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
334 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
335 if (netdev_lock_type[i] == dev_type)
337 /* the last key is used by default */
338 return ARRAY_SIZE(netdev_lock_type) - 1;
341 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
342 unsigned short dev_type)
346 i = netdev_lock_pos(dev_type);
347 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
348 netdev_lock_name[i]);
351 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
355 i = netdev_lock_pos(dev->type);
356 lockdep_set_class_and_name(&dev->addr_list_lock,
357 &netdev_addr_lock_key[i],
358 netdev_lock_name[i]);
361 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
362 unsigned short dev_type)
365 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
370 /*******************************************************************************
372 Protocol management and registration routines
374 *******************************************************************************/
377 * Add a protocol ID to the list. Now that the input handler is
378 * smarter we can dispense with all the messy stuff that used to be
381 * BEWARE!!! Protocol handlers, mangling input packets,
382 * MUST BE last in hash buckets and checking protocol handlers
383 * MUST start from promiscuous ptype_all chain in net_bh.
384 * It is true now, do not change it.
385 * Explanation follows: if a protocol handler that mangles packets
386 * were first on the list, it could not sense that the packet is
387 * cloned and should be copied-on-write, so it would change it in
388 * place and subsequent readers would get a broken packet.
392 static inline struct list_head *ptype_head(const struct packet_type *pt)
394 if (pt->type == htons(ETH_P_ALL))
397 return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
401 * dev_add_pack - add packet handler
402 * @pt: packet type declaration
404 * Add a protocol handler to the networking stack. The passed &packet_type
405 * is linked into kernel lists and may not be freed until it has been
406 * removed from the kernel lists.
408 * This call does not sleep, therefore it cannot
409 * guarantee that all CPUs in the middle of receiving packets
410 * will see the new packet type (until the next received packet).
413 void dev_add_pack(struct packet_type *pt)
415 struct list_head *head = ptype_head(pt);
417 spin_lock(&ptype_lock);
418 list_add_rcu(&pt->list, head);
419 spin_unlock(&ptype_lock);
421 EXPORT_SYMBOL(dev_add_pack);
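/*
 * Illustrative sketch of a dev_add_pack() user; the example_* identifiers
 * are hypothetical, not kernel symbols.  A module registers a receive
 * handler for every protocol (ETH_P_ALL) and removes it again on unload.
 */
static int example_pack_rcv(struct sk_buff *skb, struct net_device *dev,
			    struct packet_type *pt, struct net_device *orig_dev)
{
	/* The tap owns this reference; consume the skb without modifying it. */
	kfree_skb(skb);
	return 0;
}

static struct packet_type example_pack __read_mostly = {
	.type	= cpu_to_be16(ETH_P_ALL),	/* tap every protocol */
	.func	= example_pack_rcv,
};

/* dev_add_pack(&example_pack) on init, dev_remove_pack(&example_pack) on exit. */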
424 * __dev_remove_pack - remove packet handler
425 * @pt: packet type declaration
427 * Remove a protocol handler that was previously added to the kernel
428 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
429 * from the kernel lists and can be freed or reused once this function
432 * The packet type might still be in use by receivers
433 * and must not be freed until after all the CPUs have gone
434 * through a quiescent state.
436 void __dev_remove_pack(struct packet_type *pt)
438 struct list_head *head = ptype_head(pt);
439 struct packet_type *pt1;
441 spin_lock(&ptype_lock);
443 list_for_each_entry(pt1, head, list) {
445 list_del_rcu(&pt->list);
450 printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
452 spin_unlock(&ptype_lock);
454 EXPORT_SYMBOL(__dev_remove_pack);
457 * dev_remove_pack - remove packet handler
458 * @pt: packet type declaration
460 * Remove a protocol handler that was previously added to the kernel
461 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
462 * from the kernel lists and can be freed or reused once this function
465 * This call sleeps to guarantee that no CPU is looking at the packet
468 void dev_remove_pack(struct packet_type *pt)
470 __dev_remove_pack(pt);
474 EXPORT_SYMBOL(dev_remove_pack);
476 /******************************************************************************
478 Device Boot-time Settings Routines
480 *******************************************************************************/
482 /* Boot time configuration table */
483 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
486 * netdev_boot_setup_add - add new setup entry
487 * @name: name of the device
488 * @map: configured settings for the device
490 * Adds new setup entry to the dev_boot_setup list. The function
491 * returns 0 on error and 1 on success. This is a generic routine to
494 static int netdev_boot_setup_add(char *name, struct ifmap *map)
496 struct netdev_boot_setup *s;
500 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
501 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
502 memset(s[i].name, 0, sizeof(s[i].name));
503 strlcpy(s[i].name, name, IFNAMSIZ);
504 memcpy(&s[i].map, map, sizeof(s[i].map));
509 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
513 * netdev_boot_setup_check - check boot time settings
514 * @dev: the netdevice
516 * Check boot time settings for the device.
517 * The found settings are set for the device to be used
518 * later in the device probing.
519 * Returns 0 if no settings found, 1 if they are.
521 int netdev_boot_setup_check(struct net_device *dev)
523 struct netdev_boot_setup *s = dev_boot_setup;
526 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
527 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
528 !strcmp(dev->name, s[i].name)) {
529 dev->irq = s[i].map.irq;
530 dev->base_addr = s[i].map.base_addr;
531 dev->mem_start = s[i].map.mem_start;
532 dev->mem_end = s[i].map.mem_end;
538 EXPORT_SYMBOL(netdev_boot_setup_check);
542 * netdev_boot_base - get address from boot time settings
543 * @prefix: prefix for network device
544 * @unit: id for network device
546 * Check boot time settings for the base address of device.
547 * The found settings are set for the device to be used
548 * later in the device probing.
549 * Returns 0 if no settings found.
551 unsigned long netdev_boot_base(const char *prefix, int unit)
553 const struct netdev_boot_setup *s = dev_boot_setup;
557 sprintf(name, "%s%d", prefix, unit);
560 * If device already registered then return base of 1
561 * to indicate not to probe for this interface
563 if (__dev_get_by_name(&init_net, name))
566 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
567 if (!strcmp(name, s[i].name))
568 return s[i].map.base_addr;
573 * Saves the settings configured at boot time for any netdevice.
575 int __init netdev_boot_setup(char *str)
580 str = get_options(str, ARRAY_SIZE(ints), ints);
585 memset(&map, 0, sizeof(map));
589 map.base_addr = ints[2];
591 map.mem_start = ints[3];
593 map.mem_end = ints[4];
595 /* Add new entry to the list */
596 return netdev_boot_setup_add(str, &map);
599 __setup("netdev=", netdev_boot_setup);
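/*
 * Example of the boot-time option parsed above (the values are made up):
 *
 *	netdev=9,0x300,0,0,eth0
 *
 * i.e. IRQ 9, I/O base 0x300, no shared memory, stored under the name
 * "eth0" and applied when a driver later calls netdev_boot_setup_check().
 */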
601 /*******************************************************************************
603 Device Interface Subroutines
605 *******************************************************************************/
608 * __dev_get_by_name - find a device by its name
609 * @net: the applicable net namespace
610 * @name: name to find
612 * Find an interface by name. Must be called under RTNL semaphore
613 * or @dev_base_lock. If the name is found a pointer to the device
614 * is returned. If the name is not found then %NULL is returned. The
615 * reference counters are not incremented so the caller must be
616 * careful with locks.
619 struct net_device *__dev_get_by_name(struct net *net, const char *name)
621 struct hlist_node *p;
622 struct net_device *dev;
623 struct hlist_head *head = dev_name_hash(net, name);
625 hlist_for_each_entry(dev, p, head, name_hlist)
626 if (!strncmp(dev->name, name, IFNAMSIZ))
631 EXPORT_SYMBOL(__dev_get_by_name);
634 * dev_get_by_name_rcu - find a device by its name
635 * @net: the applicable net namespace
636 * @name: name to find
638 * Find an interface by name.
639 * If the name is found a pointer to the device is returned.
640 * If the name is not found then %NULL is returned.
641 * The reference counters are not incremented so the caller must be
642 * careful with locks. The caller must hold RCU lock.
645 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
647 struct hlist_node *p;
648 struct net_device *dev;
649 struct hlist_head *head = dev_name_hash(net, name);
651 hlist_for_each_entry_rcu(dev, p, head, name_hlist)
652 if (!strncmp(dev->name, name, IFNAMSIZ))
657 EXPORT_SYMBOL(dev_get_by_name_rcu);
660 * dev_get_by_name - find a device by its name
661 * @net: the applicable net namespace
662 * @name: name to find
664 * Find an interface by name. This can be called from any
665 * context and does its own locking. The returned handle has
666 * the usage count incremented and the caller must use dev_put() to
667 * release it when it is no longer needed. %NULL is returned if no
668 * matching device is found.
671 struct net_device *dev_get_by_name(struct net *net, const char *name)
673 struct net_device *dev;
676 dev = dev_get_by_name_rcu(net, name);
682 EXPORT_SYMBOL(dev_get_by_name);
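/*
 * Illustrative sketch (example_dump_mtu is a hypothetical helper, not a
 * kernel symbol) of the usual dev_get_by_name()/dev_put() pairing: the
 * lookup takes a reference, so the device cannot go away while it is used,
 * and the reference must be dropped afterwards.
 */
static int example_dump_mtu(struct net *net, const char *name)
{
	struct net_device *dev = dev_get_by_name(net, name);

	if (!dev)
		return -ENODEV;
	printk(KERN_DEBUG "%s: mtu %u\n", dev->name, dev->mtu);
	dev_put(dev);
	return 0;
}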
685 * __dev_get_by_index - find a device by its ifindex
686 * @net: the applicable net namespace
687 * @ifindex: index of device
689 * Search for an interface by index. Returns %NULL if the device
690 * is not found or a pointer to the device. The device has not
691 * had its reference counter increased so the caller must be careful
692 * about locking. The caller must hold either the RTNL semaphore
696 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
698 struct hlist_node *p;
699 struct net_device *dev;
700 struct hlist_head *head = dev_index_hash(net, ifindex);
702 hlist_for_each_entry(dev, p, head, index_hlist)
703 if (dev->ifindex == ifindex)
708 EXPORT_SYMBOL(__dev_get_by_index);
711 * dev_get_by_index_rcu - find a device by its ifindex
712 * @net: the applicable net namespace
713 * @ifindex: index of device
715 * Search for an interface by index. Returns %NULL if the device
716 * is not found or a pointer to the device. The device has not
717 * had its reference counter increased so the caller must be careful
718 * about locking. The caller must hold RCU lock.
721 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
723 struct hlist_node *p;
724 struct net_device *dev;
725 struct hlist_head *head = dev_index_hash(net, ifindex);
727 hlist_for_each_entry_rcu(dev, p, head, index_hlist)
728 if (dev->ifindex == ifindex)
733 EXPORT_SYMBOL(dev_get_by_index_rcu);
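/*
 * Illustrative sketch: the _rcu variant does not take a reference, so the
 * result may only be used inside the rcu_read_lock() section
 * (example_ifindex_is_up is a hypothetical helper).
 */
static bool example_ifindex_is_up(struct net *net, int ifindex)
{
	struct net_device *dev;
	bool up = false;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		up = !!(dev->flags & IFF_UP);
	rcu_read_unlock();
	return up;
}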
737 * dev_get_by_index - find a device by its ifindex
738 * @net: the applicable net namespace
739 * @ifindex: index of device
741 * Search for an interface by index. Returns NULL if the device
742 * is not found or a pointer to the device. The device returned has
743 * had a reference added and the pointer is safe until the user calls
744 * dev_put to indicate they have finished with it.
747 struct net_device *dev_get_by_index(struct net *net, int ifindex)
749 struct net_device *dev;
752 dev = dev_get_by_index_rcu(net, ifindex);
758 EXPORT_SYMBOL(dev_get_by_index);
761 * dev_getbyhwaddr_rcu - find a device by its hardware address
762 * @net: the applicable net namespace
763 * @type: media type of device
764 * @ha: hardware address
766 * Search for an interface by MAC address. Returns NULL if the device
767 * is not found or a pointer to the device.
768 * The caller must hold RCU or RTNL.
769 * The returned device has not had its ref count increased
770 * and the caller must therefore be careful about locking
774 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
777 struct net_device *dev;
779 for_each_netdev_rcu(net, dev)
780 if (dev->type == type &&
781 !memcmp(dev->dev_addr, ha, dev->addr_len))
786 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
788 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
790 struct net_device *dev;
793 for_each_netdev(net, dev)
794 if (dev->type == type)
799 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
801 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
803 struct net_device *dev, *ret = NULL;
806 for_each_netdev_rcu(net, dev)
807 if (dev->type == type) {
815 EXPORT_SYMBOL(dev_getfirstbyhwtype);
818 * dev_get_by_flags_rcu - find any device with given flags
819 * @net: the applicable net namespace
820 * @if_flags: IFF_* values
821 * @mask: bitmask of bits in if_flags to check
823 * Search for any interface with the given flags. Returns NULL if a device
824 * is not found or a pointer to the device. Must be called inside
825 * rcu_read_lock(), and result refcount is unchanged.
828 struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
831 struct net_device *dev, *ret;
834 for_each_netdev_rcu(net, dev) {
835 if (((dev->flags ^ if_flags) & mask) == 0) {
842 EXPORT_SYMBOL(dev_get_by_flags_rcu);
845 * dev_valid_name - check if name is okay for network device
848 * Network device names need to be valid file names
849 * to allow sysfs to work. We also disallow any kind of
852 int dev_valid_name(const char *name)
856 if (strlen(name) >= IFNAMSIZ)
858 if (!strcmp(name, ".") || !strcmp(name, ".."))
862 if (*name == '/' || isspace(*name))
868 EXPORT_SYMBOL(dev_valid_name);
871 * __dev_alloc_name - allocate a name for a device
872 * @net: network namespace to allocate the device name in
873 * @name: name format string
874 * @buf: scratch buffer and result name string
876 * Passed a format string - eg "lt%d" it will try and find a suitable
877 * id. It scans list of devices to build up a free map, then chooses
878 * the first empty slot. The caller must hold the dev_base or rtnl lock
879 * while allocating the name and adding the device in order to avoid
881 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
882 * Returns the number of the unit assigned or a negative errno code.
885 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
889 const int max_netdevices = 8*PAGE_SIZE;
890 unsigned long *inuse;
891 struct net_device *d;
893 p = strnchr(name, IFNAMSIZ-1, '%');
896 * Verify the string as this thing may have come from
897 * the user. There must be either one "%d" and no other "%"
900 if (p[1] != 'd' || strchr(p + 2, '%'))
903 /* Use one page as a bit array of possible slots */
904 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
908 for_each_netdev(net, d) {
909 if (!sscanf(d->name, name, &i))
911 if (i < 0 || i >= max_netdevices)
914 /* avoid cases where sscanf is not exact inverse of printf */
915 snprintf(buf, IFNAMSIZ, name, i);
916 if (!strncmp(buf, d->name, IFNAMSIZ))
920 i = find_first_zero_bit(inuse, max_netdevices);
921 free_page((unsigned long) inuse);
925 snprintf(buf, IFNAMSIZ, name, i);
926 if (!__dev_get_by_name(net, buf))
929 /* It is possible to run out of possible slots
930 * when the name is long and there isn't enough space left
931 * for the digits, or if all bits are used.
937 * dev_alloc_name - allocate a name for a device
939 * @name: name format string
941 * Passed a format string - eg "lt%d" it will try and find a suitable
942 * id. It scans list of devices to build up a free map, then chooses
943 * the first empty slot. The caller must hold the dev_base or rtnl lock
944 * while allocating the name and adding the device in order to avoid
946 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
947 * Returns the number of the unit assigned or a negative errno code.
950 int dev_alloc_name(struct net_device *dev, const char *name)
956 BUG_ON(!dev_net(dev));
958 ret = __dev_alloc_name(net, name, buf);
960 strlcpy(dev->name, buf, IFNAMSIZ);
963 EXPORT_SYMBOL(dev_alloc_name);
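/*
 * Illustrative sketch (example_name_device is hypothetical): a driver
 * typically passes a "%d" format and lets the core pick the unit number
 * while setting up a freshly allocated net_device.
 */
static int example_name_device(struct net_device *dev)
{
	int unit = dev_alloc_name(dev, "eth%d");

	if (unit < 0)
		return unit;	/* negative errno, e.g. -ENFILE */
	/* dev->name now holds e.g. "eth0"; unit is the number chosen. */
	return 0;
}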
965 static int dev_get_valid_name(struct net_device *dev, const char *name)
969 BUG_ON(!dev_net(dev));
972 if (!dev_valid_name(name))
975 if (strchr(name, '%'))
976 return dev_alloc_name(dev, name);
977 else if (__dev_get_by_name(net, name))
979 else if (dev->name != name)
980 strlcpy(dev->name, name, IFNAMSIZ);
986 * dev_change_name - change name of a device
988 * @newname: name (or format string) must be at least IFNAMSIZ
990 * Change name of a device, can pass format strings "eth%d".
993 int dev_change_name(struct net_device *dev, const char *newname)
995 char oldname[IFNAMSIZ];
1001 BUG_ON(!dev_net(dev));
1004 if (dev->flags & IFF_UP)
1007 if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
1010 memcpy(oldname, dev->name, IFNAMSIZ);
1012 err = dev_get_valid_name(dev, newname);
1017 ret = device_rename(&dev->dev, dev->name);
1019 memcpy(dev->name, oldname, IFNAMSIZ);
1023 write_lock_bh(&dev_base_lock);
1024 hlist_del_rcu(&dev->name_hlist);
1025 write_unlock_bh(&dev_base_lock);
1029 write_lock_bh(&dev_base_lock);
1030 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1031 write_unlock_bh(&dev_base_lock);
1033 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1034 ret = notifier_to_errno(ret);
1037 /* err >= 0 after dev_alloc_name() or stores the first errno */
1040 memcpy(dev->name, oldname, IFNAMSIZ);
1044 "%s: name change rollback failed: %d.\n",
1053 * dev_set_alias - change ifalias of a device
1055 * @alias: name up to IFALIASZ
1056 * @len: limit of bytes to copy from info
1058 * Set ifalias for a device,
1060 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1064 if (len >= IFALIASZ)
1069 kfree(dev->ifalias);
1070 dev->ifalias = NULL;
1075 dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1079 strlcpy(dev->ifalias, alias, len+1);
1085 * netdev_features_change - device changes features
1086 * @dev: device to cause notification
1088 * Called to indicate a device has changed features.
1090 void netdev_features_change(struct net_device *dev)
1092 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1094 EXPORT_SYMBOL(netdev_features_change);
1097 * netdev_state_change - device changes state
1098 * @dev: device to cause notification
1100 * Called to indicate a device has changed state. This function calls
1101 * the notifier chains for netdev_chain and sends a NEWLINK message
1102 * to the routing socket.
1104 void netdev_state_change(struct net_device *dev)
1106 if (dev->flags & IFF_UP) {
1107 call_netdevice_notifiers(NETDEV_CHANGE, dev);
1108 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1111 EXPORT_SYMBOL(netdev_state_change);
1113 int netdev_bonding_change(struct net_device *dev, unsigned long event)
1115 return call_netdevice_notifiers(event, dev);
1117 EXPORT_SYMBOL(netdev_bonding_change);
1120 * dev_load - load a network module
1121 * @net: the applicable net namespace
1122 * @name: name of interface
1124 * If a network interface is not present and the process has suitable
1125 * privileges this function loads the module. If module loading is not
1126 * available in this kernel then it becomes a nop.
1129 void dev_load(struct net *net, const char *name)
1131 struct net_device *dev;
1135 dev = dev_get_by_name_rcu(net, name);
1139 if (no_module && capable(CAP_NET_ADMIN))
1140 no_module = request_module("netdev-%s", name);
1141 if (no_module && capable(CAP_SYS_MODULE)) {
1142 if (!request_module("%s", name))
1143 pr_err("Loading kernel module for a network device "
1144 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
1148 EXPORT_SYMBOL(dev_load);
1150 static int __dev_open(struct net_device *dev)
1152 const struct net_device_ops *ops = dev->netdev_ops;
1157 if (!netif_device_present(dev))
1160 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1161 ret = notifier_to_errno(ret);
1165 set_bit(__LINK_STATE_START, &dev->state);
1167 if (ops->ndo_validate_addr)
1168 ret = ops->ndo_validate_addr(dev);
1170 if (!ret && ops->ndo_open)
1171 ret = ops->ndo_open(dev);
1174 clear_bit(__LINK_STATE_START, &dev->state);
1176 dev->flags |= IFF_UP;
1177 net_dmaengine_get();
1178 dev_set_rx_mode(dev);
1186 * dev_open - prepare an interface for use.
1187 * @dev: device to open
1189 * Takes a device from down to up state. The device's private open
1190 * function is invoked and then the multicast lists are loaded. Finally
1191 * the device is moved into the up state and a %NETDEV_UP message is
1192 * sent to the netdev notifier chain.
1194 * Calling this function on an active interface is a nop. On a failure
1195 * a negative errno code is returned.
1197 int dev_open(struct net_device *dev)
1201 if (dev->flags & IFF_UP)
1204 ret = __dev_open(dev);
1208 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1209 call_netdevice_notifiers(NETDEV_UP, dev);
1213 EXPORT_SYMBOL(dev_open);
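/*
 * Illustrative sketch (example_bring_up is hypothetical): dev_open() and
 * dev_close() expect the RTNL to be held, so an in-kernel equivalent of
 * "ip link set DEV up" looks roughly like this.
 */
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev);
	rtnl_unlock();
	return err;
}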
1215 static int __dev_close_many(struct list_head *head)
1217 struct net_device *dev;
1222 list_for_each_entry(dev, head, unreg_list) {
1223 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1225 clear_bit(__LINK_STATE_START, &dev->state);
1227 /* Synchronize to scheduled poll. We cannot touch poll list, it
1228 * can be even on different cpu. So just clear netif_running().
1230 * dev->stop() will invoke napi_disable() on all of its
1231 * napi_struct instances on this device.
1233 smp_mb__after_clear_bit(); /* Commit netif_running(). */
1236 dev_deactivate_many(head);
1238 list_for_each_entry(dev, head, unreg_list) {
1239 const struct net_device_ops *ops = dev->netdev_ops;
1242 * Call the device specific close. This cannot fail.
1243 * Only if device is UP
1245 * We allow it to be called even after a DETACH hot-plug
1251 dev->flags &= ~IFF_UP;
1252 net_dmaengine_put();
1258 static int __dev_close(struct net_device *dev)
1263 list_add(&dev->unreg_list, &single);
1264 retval = __dev_close_many(&single);
1269 static int dev_close_many(struct list_head *head)
1271 struct net_device *dev, *tmp;
1272 LIST_HEAD(tmp_list);
1274 list_for_each_entry_safe(dev, tmp, head, unreg_list)
1275 if (!(dev->flags & IFF_UP))
1276 list_move(&dev->unreg_list, &tmp_list);
1278 __dev_close_many(head);
1280 list_for_each_entry(dev, head, unreg_list) {
1281 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1282 call_netdevice_notifiers(NETDEV_DOWN, dev);
1285 /* rollback_registered_many needs the complete original list */
1286 list_splice(&tmp_list, head);
1291 * dev_close - shutdown an interface.
1292 * @dev: device to shutdown
1294 * This function moves an active device into down state. A
1295 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1296 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1299 int dev_close(struct net_device *dev)
1301 if (dev->flags & IFF_UP) {
1304 list_add(&dev->unreg_list, &single);
1305 dev_close_many(&single);
1310 EXPORT_SYMBOL(dev_close);
1314 * dev_disable_lro - disable Large Receive Offload on a device
1317 * Disable Large Receive Offload (LRO) on a net device. Must be
1318 * called under RTNL. This is needed if received packets may be
1319 * forwarded to another interface.
1321 void dev_disable_lro(struct net_device *dev)
1326 * If we're trying to disable lro on a vlan device
1327 * use the underlying physical device instead
1329 if (is_vlan_dev(dev))
1330 dev = vlan_dev_real_dev(dev);
1332 if (dev->ethtool_ops && dev->ethtool_ops->get_flags)
1333 flags = dev->ethtool_ops->get_flags(dev);
1335 flags = ethtool_op_get_flags(dev);
1337 if (!(flags & ETH_FLAG_LRO))
1340 __ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO);
1341 if (unlikely(dev->features & NETIF_F_LRO))
1342 netdev_WARN(dev, "failed to disable LRO!\n");
1344 EXPORT_SYMBOL(dev_disable_lro);
1347 static int dev_boot_phase = 1;
1350 * register_netdevice_notifier - register a network notifier block
1353 * Register a notifier to be called when network device events occur.
1354 * The notifier passed is linked into the kernel structures and must
1355 * not be reused until it has been unregistered. A negative errno code
1356 * is returned on a failure.
1358 * When registered all registration and up events are replayed
1359 * to the new notifier so that it gets a race-free
1360 * view of the network device list.
1363 int register_netdevice_notifier(struct notifier_block *nb)
1365 struct net_device *dev;
1366 struct net_device *last;
1371 err = raw_notifier_chain_register(&netdev_chain, nb);
1377 for_each_netdev(net, dev) {
1378 err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1379 err = notifier_to_errno(err);
1383 if (!(dev->flags & IFF_UP))
1386 nb->notifier_call(nb, NETDEV_UP, dev);
1397 for_each_netdev(net, dev) {
1401 if (dev->flags & IFF_UP) {
1402 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1403 nb->notifier_call(nb, NETDEV_DOWN, dev);
1405 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1406 nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
1411 raw_notifier_chain_unregister(&netdev_chain, nb);
1414 EXPORT_SYMBOL(register_netdevice_notifier);
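/*
 * Illustrative sketch of a notifier user (the example_* identifiers are
 * hypothetical).  In this kernel the callback receives the net_device
 * directly as @ptr, and REGISTER/UP events for already-present devices are
 * replayed at registration time, as described above.
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_UP:
		printk(KERN_DEBUG "%s is up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
		printk(KERN_DEBUG "%s is going down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_netdev_notifier) on module init. */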
1417 * unregister_netdevice_notifier - unregister a network notifier block
1420 * Unregister a notifier previously registered by
1421 * register_netdevice_notifier(). The notifier is unlinked from the
1422 * kernel structures and may then be reused. A negative errno code
1423 * is returned on a failure.
1426 int unregister_netdevice_notifier(struct notifier_block *nb)
1431 err = raw_notifier_chain_unregister(&netdev_chain, nb);
1435 EXPORT_SYMBOL(unregister_netdevice_notifier);
1438 * call_netdevice_notifiers - call all network notifier blocks
1439 * @val: value passed unmodified to notifier function
1440 * @dev: net_device pointer passed unmodified to notifier function
1442 * Call all network notifier blocks. Parameters and return value
1443 * are as for raw_notifier_call_chain().
1446 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1449 return raw_notifier_call_chain(&netdev_chain, val, dev);
1451 EXPORT_SYMBOL(call_netdevice_notifiers);
1453 /* When > 0 there are consumers of rx skb time stamps */
1454 static atomic_t netstamp_needed = ATOMIC_INIT(0);
1456 void net_enable_timestamp(void)
1458 atomic_inc(&netstamp_needed);
1460 EXPORT_SYMBOL(net_enable_timestamp);
1462 void net_disable_timestamp(void)
1464 atomic_dec(&netstamp_needed);
1466 EXPORT_SYMBOL(net_disable_timestamp);
1468 static inline void net_timestamp_set(struct sk_buff *skb)
1470 if (atomic_read(&netstamp_needed))
1471 __net_timestamp(skb);
1473 skb->tstamp.tv64 = 0;
1476 static inline void net_timestamp_check(struct sk_buff *skb)
1478 if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
1479 __net_timestamp(skb);
1482 static int net_hwtstamp_validate(struct ifreq *ifr)
1484 struct hwtstamp_config cfg;
1485 enum hwtstamp_tx_types tx_type;
1486 enum hwtstamp_rx_filters rx_filter;
1487 int tx_type_valid = 0;
1488 int rx_filter_valid = 0;
1490 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1493 if (cfg.flags) /* reserved for future extensions */
1496 tx_type = cfg.tx_type;
1497 rx_filter = cfg.rx_filter;
1500 case HWTSTAMP_TX_OFF:
1501 case HWTSTAMP_TX_ON:
1502 case HWTSTAMP_TX_ONESTEP_SYNC:
1507 switch (rx_filter) {
1508 case HWTSTAMP_FILTER_NONE:
1509 case HWTSTAMP_FILTER_ALL:
1510 case HWTSTAMP_FILTER_SOME:
1511 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1512 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1513 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1514 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1515 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1516 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1517 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1518 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1519 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1520 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1521 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1522 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1523 rx_filter_valid = 1;
1527 if (!tx_type_valid || !rx_filter_valid)
1533 static inline bool is_skb_forwardable(struct net_device *dev,
1534 struct sk_buff *skb)
1538 if (!(dev->flags & IFF_UP))
1541 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1542 if (skb->len <= len)
1545 /* if TSO is enabled, we don't care about the length as the packet
1546 * could be forwarded without being segmented before
1548 if (skb_is_gso(skb))
1555 * dev_forward_skb - loopback an skb to another netif
1557 * @dev: destination network device
1558 * @skb: buffer to forward
1561 * NET_RX_SUCCESS (no congestion)
1562 * NET_RX_DROP (packet was dropped, but freed)
1564 * dev_forward_skb can be used for injecting an skb from the
1565 * start_xmit function of one device into the receive queue
1566 * of another device.
1568 * The receiving device may be in another namespace, so
1569 * we have to clear all information in the skb that could
1570 * impact namespace isolation.
1572 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1574 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1575 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1576 atomic_long_inc(&dev->rx_dropped);
1585 if (unlikely(!is_skb_forwardable(dev, skb))) {
1586 atomic_long_inc(&dev->rx_dropped);
1590 skb_set_dev(skb, dev);
1591 skb->tstamp.tv64 = 0;
1592 skb->pkt_type = PACKET_HOST;
1593 skb->protocol = eth_type_trans(skb, dev);
1594 return netif_rx(skb);
1596 EXPORT_SYMBOL_GPL(dev_forward_skb);
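/*
 * Illustrative sketch of the intended caller, loosely modelled on a
 * veth-style device pair (struct example_priv and example_pair_xmit are
 * hypothetical): the skb leaves one device's ndo_start_xmit() and is
 * injected into the peer's receive path.
 */
struct example_priv {
	struct net_device *peer;	/* other end of the pair */
};

static netdev_tx_t example_pair_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);
	unsigned int len = skb->len;	/* skb is gone after forwarding */

	if (dev_forward_skb(priv->peer, skb) == NET_RX_SUCCESS) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
	}
	return NETDEV_TX_OK;
}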
1598 static inline int deliver_skb(struct sk_buff *skb,
1599 struct packet_type *pt_prev,
1600 struct net_device *orig_dev)
1602 atomic_inc(&skb->users);
1603 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1607 * Support routine. Sends outgoing frames to any network
1608 * taps currently in use.
1611 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1613 struct packet_type *ptype;
1614 struct sk_buff *skb2 = NULL;
1615 struct packet_type *pt_prev = NULL;
1618 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1619 /* Never send packets back to the socket
1620 * they originated from - MvS (miquels@drinkel.ow.org)
1622 if ((ptype->dev == dev || !ptype->dev) &&
1623 (ptype->af_packet_priv == NULL ||
1624 (struct sock *)ptype->af_packet_priv != skb->sk)) {
1626 deliver_skb(skb2, pt_prev, skb->dev);
1631 skb2 = skb_clone(skb, GFP_ATOMIC);
1635 net_timestamp_set(skb2);
1637 /* skb->nh should be correctly
1638 set by sender, so that the second statement is
1639 just protection against buggy protocols.
1641 skb_reset_mac_header(skb2);
1643 if (skb_network_header(skb2) < skb2->data ||
1644 skb2->network_header > skb2->tail) {
1645 if (net_ratelimit())
1646 printk(KERN_CRIT "protocol %04x is "
1648 ntohs(skb2->protocol),
1650 skb_reset_network_header(skb2);
1653 skb2->transport_header = skb2->network_header;
1654 skb2->pkt_type = PACKET_OUTGOING;
1659 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1663 /* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1664 * @dev: Network device
1665 * @txq: number of queues available
1667 * If real_num_tx_queues is changed the tc mappings may no longer be
1668 * valid. To resolve this, verify that each tc mapping is still in
1669 * range and, if not, NULL the mapping: with no priorities mapping to
1670 * that offset/count pair it will no longer be used. In the worst case,
1671 * if TC0 itself is invalid, nothing can be done, so priority mappings
1672 * are disabled entirely. It is expected that drivers will fix this
1673 * mapping, if they can, before calling netif_set_real_num_tx_queues.
1675 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1678 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1680 /* If TC0 is invalidated disable TC mapping */
1681 if (tc->offset + tc->count > txq) {
1682 pr_warning("Number of in use tx queues changed "
1683 "invalidating tc mappings. Priority "
1684 "traffic classification disabled!\n");
1689 /* Invalidated prio to tc mappings set to TC0 */
1690 for (i = 1; i < TC_BITMASK + 1; i++) {
1691 int q = netdev_get_prio_tc_map(dev, i);
1693 tc = &dev->tc_to_txq[q];
1694 if (tc->offset + tc->count > txq) {
1695 pr_warning("Number of in use tx queues "
1696 "changed. Priority %i to tc "
1697 "mapping %i is no longer valid "
1698 "setting map to 0\n",
1700 netdev_set_prio_tc_map(dev, i, 0);
1706 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
1707 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
1709 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
1713 if (txq < 1 || txq > dev->num_tx_queues)
1716 if (dev->reg_state == NETREG_REGISTERED ||
1717 dev->reg_state == NETREG_UNREGISTERING) {
1720 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
1726 netif_setup_tc(dev, txq);
1728 if (txq < dev->real_num_tx_queues)
1729 qdisc_reset_all_tx_gt(dev, txq);
1732 dev->real_num_tx_queues = txq;
1735 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
1739 * netif_set_real_num_rx_queues - set actual number of RX queues used
1740 * @dev: Network device
1741 * @rxq: Actual number of RX queues
1743 * This must be called either with the rtnl_lock held or before
1744 * registration of the net device. Returns 0 on success, or a
1745 * negative error code. If called before registration, it always
1748 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
1752 if (rxq < 1 || rxq > dev->num_rx_queues)
1755 if (dev->reg_state == NETREG_REGISTERED) {
1758 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
1764 dev->real_num_rx_queues = rxq;
1767 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
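/*
 * Illustrative sketch (example_set_channels is hypothetical): a multiqueue
 * driver that allocated the maximum number of queues up front trims both
 * directions to what the hardware actually enabled.  Called under
 * rtnl_lock() or before register_netdev(), per the rules above.
 */
static int example_set_channels(struct net_device *dev, unsigned int n)
{
	int err;

	err = netif_set_real_num_tx_queues(dev, n);
	if (err)
		return err;
	return netif_set_real_num_rx_queues(dev, n);
}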
1770 static inline void __netif_reschedule(struct Qdisc *q)
1772 struct softnet_data *sd;
1773 unsigned long flags;
1775 local_irq_save(flags);
1776 sd = &__get_cpu_var(softnet_data);
1777 q->next_sched = NULL;
1778 *sd->output_queue_tailp = q;
1779 sd->output_queue_tailp = &q->next_sched;
1780 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1781 local_irq_restore(flags);
1784 void __netif_schedule(struct Qdisc *q)
1786 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1787 __netif_reschedule(q);
1789 EXPORT_SYMBOL(__netif_schedule);
1791 void dev_kfree_skb_irq(struct sk_buff *skb)
1793 if (atomic_dec_and_test(&skb->users)) {
1794 struct softnet_data *sd;
1795 unsigned long flags;
1797 local_irq_save(flags);
1798 sd = &__get_cpu_var(softnet_data);
1799 skb->next = sd->completion_queue;
1800 sd->completion_queue = skb;
1801 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1802 local_irq_restore(flags);
1805 EXPORT_SYMBOL(dev_kfree_skb_irq);
1807 void dev_kfree_skb_any(struct sk_buff *skb)
1809 if (in_irq() || irqs_disabled())
1810 dev_kfree_skb_irq(skb);
1814 EXPORT_SYMBOL(dev_kfree_skb_any);
1818 * netif_device_detach - mark device as removed
1819 * @dev: network device
1821 * Mark device as removed from system and therefore no longer available.
1823 void netif_device_detach(struct net_device *dev)
1825 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1826 netif_running(dev)) {
1827 netif_tx_stop_all_queues(dev);
1830 EXPORT_SYMBOL(netif_device_detach);
1833 * netif_device_attach - mark device as attached
1834 * @dev: network device
1836 * Mark device as attached to the system and restart if needed.
1838 void netif_device_attach(struct net_device *dev)
1840 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1841 netif_running(dev)) {
1842 netif_tx_wake_all_queues(dev);
1843 __netdev_watchdog_up(dev);
1846 EXPORT_SYMBOL(netif_device_attach);
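/*
 * Illustrative sketch: the classic users of the two helpers above are a PCI
 * driver's suspend/resume callbacks (the example_* names are hypothetical,
 * the pattern is the intended one).
 */
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_detach(dev);	/* stop queues, mark not present */
	/* ... stop DMA, save state, power the hardware down ... */
	return 0;
}

static int example_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	/* ... power up and reinitialise the hardware ... */
	netif_device_attach(dev);	/* mark present, wake queues */
	return 0;
}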
1849 * skb_set_dev - assign a new device to a buffer
1850 * @skb: buffer for the new device
1851 * @dev: network device
1853 * If an skb is owned by a device already, we have to reset
1854 * all data private to the namespace a device belongs to
1855 * before assigning it a new device.
1857 #ifdef CONFIG_NET_NS
1858 void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
1861 if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
1864 skb_init_secmark(skb);
1868 skb->ipvs_property = 0;
1869 #ifdef CONFIG_NET_SCHED
1875 EXPORT_SYMBOL(skb_set_dev);
1876 #endif /* CONFIG_NET_NS */
1879 * Invalidate hardware checksum when packet is to be mangled, and
1880 * complete checksum manually on outgoing path.
1882 int skb_checksum_help(struct sk_buff *skb)
1885 int ret = 0, offset;
1887 if (skb->ip_summed == CHECKSUM_COMPLETE)
1888 goto out_set_summed;
1890 if (unlikely(skb_shinfo(skb)->gso_size)) {
1891 /* Let GSO fix up the checksum. */
1892 goto out_set_summed;
1895 offset = skb_checksum_start_offset(skb);
1896 BUG_ON(offset >= skb_headlen(skb));
1897 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1899 offset += skb->csum_offset;
1900 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1902 if (skb_cloned(skb) &&
1903 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1904 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1909 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
1911 skb->ip_summed = CHECKSUM_NONE;
1915 EXPORT_SYMBOL(skb_checksum_help);
1918 * skb_gso_segment - Perform segmentation on skb.
1919 * @skb: buffer to segment
1920 * @features: features for the output path (see dev->features)
1922 * This function segments the given skb and returns a list of segments.
1924 * It may return NULL if the skb requires no segmentation. This is
1925 * only possible when GSO is used for verifying header integrity.
1927 struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
1929 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1930 struct packet_type *ptype;
1931 __be16 type = skb->protocol;
1932 int vlan_depth = ETH_HLEN;
1935 while (type == htons(ETH_P_8021Q)) {
1936 struct vlan_hdr *vh;
1938 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
1939 return ERR_PTR(-EINVAL);
1941 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
1942 type = vh->h_vlan_encapsulated_proto;
1943 vlan_depth += VLAN_HLEN;
1946 skb_reset_mac_header(skb);
1947 skb->mac_len = skb->network_header - skb->mac_header;
1948 __skb_pull(skb, skb->mac_len);
1950 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1951 struct net_device *dev = skb->dev;
1952 struct ethtool_drvinfo info = {};
1954 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1955 dev->ethtool_ops->get_drvinfo(dev, &info);
1957 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
1958 info.driver, dev ? dev->features : 0L,
1959 skb->sk ? skb->sk->sk_route_caps : 0L,
1960 skb->len, skb->data_len, skb->ip_summed);
1962 if (skb_header_cloned(skb) &&
1963 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1964 return ERR_PTR(err);
1968 list_for_each_entry_rcu(ptype,
1969 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
1970 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
1971 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1972 err = ptype->gso_send_check(skb);
1973 segs = ERR_PTR(err);
1974 if (err || skb_gso_ok(skb, features))
1976 __skb_push(skb, (skb->data -
1977 skb_network_header(skb)));
1979 segs = ptype->gso_segment(skb, features);
1985 __skb_push(skb, skb->data - skb_mac_header(skb));
1989 EXPORT_SYMBOL(skb_gso_segment);
1991 /* Take action when hardware reception checksum errors are detected. */
1993 void netdev_rx_csum_fault(struct net_device *dev)
1995 if (net_ratelimit()) {
1996 printk(KERN_ERR "%s: hw csum failure.\n",
1997 dev ? dev->name : "<unknown>");
2001 EXPORT_SYMBOL(netdev_rx_csum_fault);
2004 /* Actually, we should eliminate this check as soon as we know that:
2005 * 1. An IOMMU is present and allows mapping all the memory.
2006 * 2. No high memory really exists on this machine.
2009 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2011 #ifdef CONFIG_HIGHMEM
2013 if (!(dev->features & NETIF_F_HIGHDMA)) {
2014 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2015 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2016 if (PageHighMem(skb_frag_page(frag)))
2021 if (PCI_DMA_BUS_IS_PHYS) {
2022 struct device *pdev = dev->dev.parent;
2026 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2027 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2028 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2029 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2038 void (*destructor)(struct sk_buff *skb);
2041 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2043 static void dev_gso_skb_destructor(struct sk_buff *skb)
2045 struct dev_gso_cb *cb;
2048 struct sk_buff *nskb = skb->next;
2050 skb->next = nskb->next;
2053 } while (skb->next);
2055 cb = DEV_GSO_CB(skb);
2057 cb->destructor(skb);
2061 * dev_gso_segment - Perform emulated hardware segmentation on skb.
2062 * @skb: buffer to segment
2063 * @features: device features as applicable to this skb
2065 * This function segments the given skb and stores the list of segments
2068 static int dev_gso_segment(struct sk_buff *skb, int features)
2070 struct sk_buff *segs;
2072 segs = skb_gso_segment(skb, features);
2074 /* Verifying header integrity only. */
2079 return PTR_ERR(segs);
2082 DEV_GSO_CB(skb)->destructor = skb->destructor;
2083 skb->destructor = dev_gso_skb_destructor;
2089 * Try to orphan skb early, right before transmission by the device.
2090 * We cannot orphan skb if tx timestamp is requested or the sk-reference
2091 * is needed on driver level for other reasons, e.g. see net/can/raw.c
2093 static inline void skb_orphan_try(struct sk_buff *skb)
2095 struct sock *sk = skb->sk;
2097 if (sk && !skb_shinfo(skb)->tx_flags) {
2098 * skb_tx_hash() won't be able to get sk.
2099 * We copy sk_hash into skb->rxhash
2102 skb->rxhash = sk->sk_hash;
2107 static bool can_checksum_protocol(unsigned long features, __be16 protocol)
2109 return ((features & NETIF_F_GEN_CSUM) ||
2110 ((features & NETIF_F_V4_CSUM) &&
2111 protocol == htons(ETH_P_IP)) ||
2112 ((features & NETIF_F_V6_CSUM) &&
2113 protocol == htons(ETH_P_IPV6)) ||
2114 ((features & NETIF_F_FCOE_CRC) &&
2115 protocol == htons(ETH_P_FCOE)));
2118 static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features)
2120 if (!can_checksum_protocol(features, protocol)) {
2121 features &= ~NETIF_F_ALL_CSUM;
2122 features &= ~NETIF_F_SG;
2123 } else if (illegal_highdma(skb->dev, skb)) {
2124 features &= ~NETIF_F_SG;
2130 u32 netif_skb_features(struct sk_buff *skb)
2132 __be16 protocol = skb->protocol;
2133 u32 features = skb->dev->features;
2135 if (protocol == htons(ETH_P_8021Q)) {
2136 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2137 protocol = veh->h_vlan_encapsulated_proto;
2138 } else if (!vlan_tx_tag_present(skb)) {
2139 return harmonize_features(skb, protocol, features);
2142 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
2144 if (protocol != htons(ETH_P_8021Q)) {
2145 return harmonize_features(skb, protocol, features);
2147 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
2148 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
2149 return harmonize_features(skb, protocol, features);
2152 EXPORT_SYMBOL(netif_skb_features);
2155 * Returns true if either:
2156 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
2157 * 2. skb is fragmented and the device does not support SG, or if
2158 * at least one of the fragments is in highmem and the device does not
2159 * support DMA from it.
2161 static inline int skb_needs_linearize(struct sk_buff *skb,
2164 return skb_is_nonlinear(skb) &&
2165 ((skb_has_frag_list(skb) &&
2166 !(features & NETIF_F_FRAGLIST)) ||
2167 (skb_shinfo(skb)->nr_frags &&
2168 !(features & NETIF_F_SG)));
2171 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2172 struct netdev_queue *txq)
2174 const struct net_device_ops *ops = dev->netdev_ops;
2175 int rc = NETDEV_TX_OK;
2176 unsigned int skb_len;
2178 if (likely(!skb->next)) {
2182 * If device doesn't need skb->dst, release it right now while
2183 * it's hot in this cpu cache
2185 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2188 if (!list_empty(&ptype_all))
2189 dev_queue_xmit_nit(skb, dev);
2191 skb_orphan_try(skb);
2193 features = netif_skb_features(skb);
2195 if (vlan_tx_tag_present(skb) &&
2196 !(features & NETIF_F_HW_VLAN_TX)) {
2197 skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
2204 if (netif_needs_gso(skb, features)) {
2205 if (unlikely(dev_gso_segment(skb, features)))
2210 if (skb_needs_linearize(skb, features) &&
2211 __skb_linearize(skb))
2214 /* If packet is not checksummed and device does not
2215 * support checksumming for this protocol, complete
2216 * checksumming here.
2218 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2219 skb_set_transport_header(skb,
2220 skb_checksum_start_offset(skb));
2221 if (!(features & NETIF_F_ALL_CSUM) &&
2222 skb_checksum_help(skb))
2228 rc = ops->ndo_start_xmit(skb, dev);
2229 trace_net_dev_xmit(skb, rc, dev, skb_len);
2230 if (rc == NETDEV_TX_OK)
2231 txq_trans_update(txq);
2237 struct sk_buff *nskb = skb->next;
2239 skb->next = nskb->next;
2243 * If device doesn't need nskb->dst, release it right now while
2244 * it's hot in this cpu cache
2246 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2249 skb_len = nskb->len;
2250 rc = ops->ndo_start_xmit(nskb, dev);
2251 trace_net_dev_xmit(nskb, rc, dev, skb_len);
2252 if (unlikely(rc != NETDEV_TX_OK)) {
2253 if (rc & ~NETDEV_TX_MASK)
2254 goto out_kfree_gso_skb;
2255 nskb->next = skb->next;
2259 txq_trans_update(txq);
2260 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
2261 return NETDEV_TX_BUSY;
2262 } while (skb->next);
2265 if (likely(skb->next == NULL))
2266 skb->destructor = DEV_GSO_CB(skb)->destructor;
2273 static u32 hashrnd __read_mostly;
2276 * Returns a Tx hash based on the given packet descriptor and the number
2277 * of Tx queues to be used as a distribution range.
2279 u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
2280 unsigned int num_tx_queues)
2284 u16 qcount = num_tx_queues;
2286 if (skb_rx_queue_recorded(skb)) {
2287 hash = skb_get_rx_queue(skb);
2288 while (unlikely(hash >= num_tx_queues))
2289 hash -= num_tx_queues;
2294 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2295 qoffset = dev->tc_to_txq[tc].offset;
2296 qcount = dev->tc_to_txq[tc].count;
2299 if (skb->sk && skb->sk->sk_hash)
2300 hash = skb->sk->sk_hash;
2302 hash = (__force u16) skb->protocol ^ skb->rxhash;
2303 hash = jhash_1word(hash, hashrnd);
2305 return (u16) (((u64) hash * qcount) >> 32) + qoffset;
2307 EXPORT_SYMBOL(__skb_tx_hash);
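/*
 * The final line of __skb_tx_hash() scales a 32-bit hash onto
 * [qoffset, qoffset + qcount) without a modulo: ((u64)hash * qcount) >> 32
 * maps the hash proportionally onto the queue range.  For example (made-up
 * numbers), hash = 0xc0000000 (three quarters of the 32-bit space) with
 * qcount = 8 gives (0xc0000000ULL * 8) >> 32 = 6, i.e. queue qoffset + 6.
 */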
2309 static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2311 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2312 if (net_ratelimit()) {
2313 pr_warning("%s selects TX queue %d, but "
2314 "real number of TX queues is %d\n",
2315 dev->name, queue_index, dev->real_num_tx_queues);
2322 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
2325 struct xps_dev_maps *dev_maps;
2326 struct xps_map *map;
2327 int queue_index = -1;
2330 dev_maps = rcu_dereference(dev->xps_maps);
2332 map = rcu_dereference(
2333 dev_maps->cpu_map[raw_smp_processor_id()]);
2336 queue_index = map->queues[0];
2339 if (skb->sk && skb->sk->sk_hash)
2340 hash = skb->sk->sk_hash;
2342 hash = (__force u16) skb->protocol ^
2344 hash = jhash_1word(hash, hashrnd);
2345 queue_index = map->queues[
2346 ((u64)hash * map->len) >> 32];
2348 if (unlikely(queue_index >= dev->real_num_tx_queues))
2360 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
2361 struct sk_buff *skb)
2364 const struct net_device_ops *ops = dev->netdev_ops;
2366 if (dev->real_num_tx_queues == 1)
2368 else if (ops->ndo_select_queue) {
2369 queue_index = ops->ndo_select_queue(dev, skb);
2370 queue_index = dev_cap_txqueue(dev, queue_index);
2372 struct sock *sk = skb->sk;
2373 queue_index = sk_tx_queue_get(sk);
2375 if (queue_index < 0 || skb->ooo_okay ||
2376 queue_index >= dev->real_num_tx_queues) {
2377 int old_index = queue_index;
2379 queue_index = get_xps_queue(dev, skb);
2380 if (queue_index < 0)
2381 queue_index = skb_tx_hash(dev, skb);
2383 if (queue_index != old_index && sk) {
2384 struct dst_entry *dst =
2385 rcu_dereference_check(sk->sk_dst_cache, 1);
2387 if (dst && skb_dst(skb) == dst)
2388 sk_tx_queue_set(sk, queue_index);
2393 skb_set_queue_mapping(skb, queue_index);
2394 return netdev_get_tx_queue(dev, queue_index);
2397 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2398 struct net_device *dev,
2399 struct netdev_queue *txq)
2401 spinlock_t *root_lock = qdisc_lock(q);
2405 qdisc_skb_cb(skb)->pkt_len = skb->len;
2406 qdisc_calculate_pkt_len(skb, q);
2408 * Heuristic to force contended enqueues to serialize on a
2409	 * separate lock before trying to take the qdisc main lock.
2410	 * This permits the __QDISC_STATE_RUNNING owner to get the lock more often
2411 * and dequeue packets faster.
2413 contended = qdisc_is_running(q);
2414 if (unlikely(contended))
2415 spin_lock(&q->busylock);
2417 spin_lock(root_lock);
2418 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2421 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2422 qdisc_run_begin(q)) {
2424 * This is a work-conserving queue; there are no old skbs
2425 * waiting to be sent out; and the qdisc is not running -
2426 * xmit the skb directly.
2428 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2431 qdisc_bstats_update(q, skb);
2433 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2434 if (unlikely(contended)) {
2435 spin_unlock(&q->busylock);
2442 rc = NET_XMIT_SUCCESS;
2445 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2446 if (qdisc_run_begin(q)) {
2447 if (unlikely(contended)) {
2448 spin_unlock(&q->busylock);
2454 spin_unlock(root_lock);
2455 if (unlikely(contended))
2456 spin_unlock(&q->busylock);
2460 static DEFINE_PER_CPU(int, xmit_recursion);
2461 #define RECURSION_LIMIT 10
2464 * dev_queue_xmit - transmit a buffer
2465 * @skb: buffer to transmit
2467 * Queue a buffer for transmission to a network device. The caller must
2468 * have set the device and priority and built the buffer before calling
2469 * this function. The function can be called from an interrupt.
2471 * A negative errno code is returned on a failure. A success does not
2472 * guarantee the frame will be transmitted as it may be dropped due
2473 * to congestion or traffic shaping.
2475 * -----------------------------------------------------------------------------------
2476 * Note that this function can also return errors from the queue disciplines,
2477 * including NET_XMIT_DROP, which is a positive value; so errors can be positive too.
2480 * Regardless of the return value, the skb is consumed, so it is currently
2481 * difficult to retry a send to this method. (You can bump the ref count
2482 * before sending to hold a reference for retry if you are careful.)
2484 * When calling this method, interrupts MUST be enabled. This is because
2485 * the BH enable code must have IRQs enabled so that it will not deadlock.
2488 int dev_queue_xmit(struct sk_buff *skb)
2490 struct net_device *dev = skb->dev;
2491 struct netdev_queue *txq;
2495 /* Disable soft irqs for various locks below. Also
2496 * stops preemption for RCU.
2500 txq = dev_pick_tx(dev, skb);
2501 q = rcu_dereference_bh(txq->qdisc);
2503 #ifdef CONFIG_NET_CLS_ACT
2504 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2506 trace_net_dev_queue(skb);
2508 rc = __dev_xmit_skb(skb, q, dev, txq);
2512 /* The device has no queue. Common case for software devices:
2513 loopback, all the sorts of tunnels...
2515	   Really, it is unlikely that netif_tx_lock protection is necessary
2516	   here (e.g. loopback and IP tunnels are clean, ignoring statistics
2518	   counters). However, it is possible that they rely on the locking
2521	   done here, so check that and take the lock; it is not prone to deadlocks.
2522	   Or shoot the noqueue qdisc; it is even simpler 8)
2524 if (dev->flags & IFF_UP) {
2525 int cpu = smp_processor_id(); /* ok because BHs are off */
2527 if (txq->xmit_lock_owner != cpu) {
2529 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2530 goto recursion_alert;
2532 HARD_TX_LOCK(dev, txq, cpu);
2534 if (!netif_tx_queue_stopped(txq)) {
2535 __this_cpu_inc(xmit_recursion);
2536 rc = dev_hard_start_xmit(skb, dev, txq);
2537 __this_cpu_dec(xmit_recursion);
2538 if (dev_xmit_complete(rc)) {
2539 HARD_TX_UNLOCK(dev, txq);
2543 HARD_TX_UNLOCK(dev, txq);
2544 if (net_ratelimit())
2545 printk(KERN_CRIT "Virtual device %s asks to "
2546 "queue packet!\n", dev->name);
2548 /* Recursion is detected! It is possible,
2552 if (net_ratelimit())
2553 printk(KERN_CRIT "Dead loop on virtual device "
2554 "%s, fix it urgently!\n", dev->name);
2559 rcu_read_unlock_bh();
2564 rcu_read_unlock_bh();
2567 EXPORT_SYMBOL(dev_queue_xmit);
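/*
 * Illustrative sketch (not part of this file): how a kernel module might hand
 * a fully built frame to dev_queue_xmit(). example_send_frame() and the use
 * of ETH_P_IP/broadcast addressing are assumptions made for the example.
 */
#if 0	/* illustrative example only */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/string.h>

static int example_send_frame(struct net_device *dev,
			      const void *payload, unsigned int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));	/* room for the link header */
	memcpy(skb_put(skb, len), payload, len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);		/* assumed payload type */

	/* Build the link-layer header before queueing. */
	if (dev_hard_header(skb, dev, ETH_P_IP, dev->broadcast,
			    dev->dev_addr, skb->len) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Consumes the skb; may also return positive qdisc codes (NET_XMIT_*). */
	return dev_queue_xmit(skb);
}
#endif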
2570 /*=======================================================================
2572 =======================================================================*/
2574 int netdev_max_backlog __read_mostly = 1000;
2575 int netdev_tstamp_prequeue __read_mostly = 1;
2576 int netdev_budget __read_mostly = 300;
2577 int weight_p __read_mostly = 64; /* old backlog weight */
2579 /* Called with irq disabled */
2580 static inline void ____napi_schedule(struct softnet_data *sd,
2581 struct napi_struct *napi)
2583 list_add_tail(&napi->poll_list, &sd->poll_list);
2584 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2588 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
2589 * and src/dst port numbers. Sets rxhash in skb to a non-zero hash value
2590 * on success; zero indicates no valid hash. Also sets l4_rxhash in skb
2591 * if hash is a canonical 4-tuple hash over transport ports.
2593 void __skb_get_rxhash(struct sk_buff *skb)
2595 int nhoff, hash = 0, poff;
2596 const struct ipv6hdr *ip6;
2597 const struct iphdr *ip;
2598 const struct vlan_hdr *vlan;
2607 nhoff = skb_network_offset(skb);
2608 proto = skb->protocol;
2612 case __constant_htons(ETH_P_IP):
2614 if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
2617 ip = (const struct iphdr *) (skb->data + nhoff);
2618 if (ip_is_fragment(ip))
2621 ip_proto = ip->protocol;
2622 addr1 = (__force u32) ip->saddr;
2623 addr2 = (__force u32) ip->daddr;
2624 nhoff += ip->ihl * 4;
2626 case __constant_htons(ETH_P_IPV6):
2628 if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
2631 ip6 = (const struct ipv6hdr *) (skb->data + nhoff);
2632 ip_proto = ip6->nexthdr;
2633 addr1 = (__force u32) ip6->saddr.s6_addr32[3];
2634 addr2 = (__force u32) ip6->daddr.s6_addr32[3];
2637 case __constant_htons(ETH_P_8021Q):
2638 if (!pskb_may_pull(skb, sizeof(*vlan) + nhoff))
2640 vlan = (const struct vlan_hdr *) (skb->data + nhoff);
2641 proto = vlan->h_vlan_encapsulated_proto;
2642 nhoff += sizeof(*vlan);
2644 case __constant_htons(ETH_P_PPP_SES):
2645 if (!pskb_may_pull(skb, PPPOE_SES_HLEN + nhoff))
2647 proto = *((__be16 *) (skb->data + nhoff +
2648 sizeof(struct pppoe_hdr)));
2649 nhoff += PPPOE_SES_HLEN;
2651 case __constant_htons(PPP_IP):
2653 case __constant_htons(PPP_IPV6):
2664 if (pskb_may_pull(skb, nhoff + 16)) {
2665 u8 *h = skb->data + nhoff;
2666 __be16 flags = *(__be16 *)h;
2669 * Only look inside GRE if version zero and no
2672 if (!(flags & (GRE_VERSION|GRE_ROUTING))) {
2673 proto = *(__be16 *)(h + 2);
2675 if (flags & GRE_CSUM)
2677 if (flags & GRE_KEY)
2679 if (flags & GRE_SEQ)
2692 poff = proto_ports_offset(ip_proto);
2695 if (pskb_may_pull(skb, nhoff + 4)) {
2696 ports.v32 = * (__force u32 *) (skb->data + nhoff);
2697 if (ports.v16[1] < ports.v16[0])
2698 swap(ports.v16[0], ports.v16[1]);
2703 /* get a consistent hash (same value on both flow directions) */
2707 hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
2714 EXPORT_SYMBOL(__skb_get_rxhash);
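/*
 * Illustrative sketch (not part of this file): consumers normally go through
 * skb_get_rxhash(), which in kernels of this vintage only calls
 * __skb_get_rxhash() when skb->rxhash has not been computed yet.
 * example_bucket_for_skb() is a hypothetical helper.
 */
#if 0	/* illustrative example only */
#include <linux/skbuff.h>

/* Map a packet's flow hash onto one of @nbuckets buckets. */
static u32 example_bucket_for_skb(struct sk_buff *skb, u32 nbuckets)
{
	u32 hash = skb_get_rxhash(skb);	/* lazily fills skb->rxhash */

	if (!hash)			/* zero means no valid hash */
		return 0;
	return ((u64)hash * nbuckets) >> 32;
}
#endif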
2718 /* One global table that all flow-based protocols share. */
2719 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
2720 EXPORT_SYMBOL(rps_sock_flow_table);
2722 static struct rps_dev_flow *
2723 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2724 struct rps_dev_flow *rflow, u16 next_cpu)
2726 if (next_cpu != RPS_NO_CPU) {
2727 #ifdef CONFIG_RFS_ACCEL
2728 struct netdev_rx_queue *rxqueue;
2729 struct rps_dev_flow_table *flow_table;
2730 struct rps_dev_flow *old_rflow;
2735 /* Should we steer this flow to a different hardware queue? */
2736 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
2737 !(dev->features & NETIF_F_NTUPLE))
2739 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
2740 if (rxq_index == skb_get_rx_queue(skb))
2743 rxqueue = dev->_rx + rxq_index;
2744 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2747 flow_id = skb->rxhash & flow_table->mask;
2748 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
2749 rxq_index, flow_id);
2753 rflow = &flow_table->flows[flow_id];
2755 if (old_rflow->filter == rflow->filter)
2756 old_rflow->filter = RPS_NO_FILTER;
2760 per_cpu(softnet_data, next_cpu).input_queue_head;
2763 rflow->cpu = next_cpu;
2768 * get_rps_cpu is called from netif_receive_skb and returns the target
2769 * CPU from the RPS map of the receiving queue for a given skb.
2770 * rcu_read_lock must be held on entry.
2772 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2773 struct rps_dev_flow **rflowp)
2775 struct netdev_rx_queue *rxqueue;
2776 struct rps_map *map;
2777 struct rps_dev_flow_table *flow_table;
2778 struct rps_sock_flow_table *sock_flow_table;
2782 if (skb_rx_queue_recorded(skb)) {
2783 u16 index = skb_get_rx_queue(skb);
2784 if (unlikely(index >= dev->real_num_rx_queues)) {
2785 WARN_ONCE(dev->real_num_rx_queues > 1,
2786 "%s received packet on queue %u, but number "
2787 "of RX queues is %u\n",
2788 dev->name, index, dev->real_num_rx_queues);
2791 rxqueue = dev->_rx + index;
2795 map = rcu_dereference(rxqueue->rps_map);
2797 if (map->len == 1 &&
2798 !rcu_access_pointer(rxqueue->rps_flow_table)) {
2799 tcpu = map->cpus[0];
2800 if (cpu_online(tcpu))
2804 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
2808 skb_reset_network_header(skb);
2809 if (!skb_get_rxhash(skb))
2812 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2813 sock_flow_table = rcu_dereference(rps_sock_flow_table);
2814 if (flow_table && sock_flow_table) {
2816 struct rps_dev_flow *rflow;
2818 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2821 next_cpu = sock_flow_table->ents[skb->rxhash &
2822 sock_flow_table->mask];
2825 * If the desired CPU (where last recvmsg was done) is
2826 * different from current CPU (one in the rx-queue flow
2827 * table entry), switch if one of the following holds:
2828 * - Current CPU is unset (equal to RPS_NO_CPU).
2829 * - Current CPU is offline.
2830 * - The current CPU's queue tail has advanced beyond the
2831 * last packet that was enqueued using this table entry.
2832 * This guarantees that all previous packets for the flow
2833 * have been dequeued, thus preserving in order delivery.
2835 if (unlikely(tcpu != next_cpu) &&
2836 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2837 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2838 rflow->last_qtail)) >= 0))
2839 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
2841 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2849 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
2851 if (cpu_online(tcpu)) {
2861 #ifdef CONFIG_RFS_ACCEL
2864 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
2865 * @dev: Device on which the filter was set
2866 * @rxq_index: RX queue index
2867 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
2868 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
2870 * Drivers that implement ndo_rx_flow_steer() should periodically call
2871 * this function for each installed filter and remove the filters for
2872 * which it returns %true.
2874 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
2875 u32 flow_id, u16 filter_id)
2877 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
2878 struct rps_dev_flow_table *flow_table;
2879 struct rps_dev_flow *rflow;
2884 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2885 if (flow_table && flow_id <= flow_table->mask) {
2886 rflow = &flow_table->flows[flow_id];
2887 cpu = ACCESS_ONCE(rflow->cpu);
2888 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
2889 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
2890 rflow->last_qtail) <
2891 (int)(10 * flow_table->mask)))
2897 EXPORT_SYMBOL(rps_may_expire_flow);
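/*
 * Illustrative sketch (not part of this file) of the periodic scan described
 * above: a driver implementing ndo_rx_flow_steer() walks its installed
 * filters and tears down those rps_may_expire_flow() reports as expendable.
 * struct example_filter and example_expire_filters() are hypothetical driver
 * internals; the hardware removal itself is elided.
 */
#if 0	/* illustrative example only, CONFIG_RFS_ACCEL assumed */
#include <linux/netdevice.h>

struct example_filter {
	bool	in_use;
	u16	rxq_index;	/* queue passed to ndo_rx_flow_steer() */
	u32	flow_id;	/* flow_id passed to ndo_rx_flow_steer() */
	u16	filter_id;	/* id returned by ndo_rx_flow_steer() */
};

static void example_expire_filters(struct net_device *dev,
				   struct example_filter *filters, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		struct example_filter *f = &filters[i];

		if (!f->in_use)
			continue;
		if (rps_may_expire_flow(dev, f->rxq_index,
					f->flow_id, f->filter_id)) {
			/* program the hardware to drop the filter here */
			f->in_use = false;
		}
	}
}
#endif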
2899 #endif /* CONFIG_RFS_ACCEL */
2901 /* Called from hardirq (IPI) context */
2902 static void rps_trigger_softirq(void *data)
2904 struct softnet_data *sd = data;
2906 ____napi_schedule(sd, &sd->backlog);
2910 #endif /* CONFIG_RPS */
2913 * Check if this softnet_data structure belongs to another CPU.
2914 * If so, queue it on our IPI list and return 1
2917 static int rps_ipi_queued(struct softnet_data *sd)
2920 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
2923 sd->rps_ipi_next = mysd->rps_ipi_list;
2924 mysd->rps_ipi_list = sd;
2926 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2929 #endif /* CONFIG_RPS */
2934 * enqueue_to_backlog is called to queue an skb to a per-CPU backlog
2935 * queue (may be a remote CPU queue).
2937 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
2938 unsigned int *qtail)
2940 struct softnet_data *sd;
2941 unsigned long flags;
2943 sd = &per_cpu(softnet_data, cpu);
2945 local_irq_save(flags);
2948 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
2949 if (skb_queue_len(&sd->input_pkt_queue)) {
2951 __skb_queue_tail(&sd->input_pkt_queue, skb);
2952 input_queue_tail_incr_save(sd, qtail);
2954 local_irq_restore(flags);
2955 return NET_RX_SUCCESS;
2958 /* Schedule NAPI for backlog device
2959	 * We can use a non-atomic operation since we own the queue lock
2961 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
2962 if (!rps_ipi_queued(sd))
2963 ____napi_schedule(sd, &sd->backlog);
2971 local_irq_restore(flags);
2973 atomic_long_inc(&skb->dev->rx_dropped);
2979 * netif_rx - post buffer to the network code
2980 * @skb: buffer to post
2982 * This function receives a packet from a device driver and queues it for
2983 * the upper (protocol) levels to process. It always succeeds. The buffer
2984 * may be dropped during processing for congestion control or by the protocol layers.
2988 * NET_RX_SUCCESS (no congestion)
2989 * NET_RX_DROP (packet was dropped)
2993 int netif_rx(struct sk_buff *skb)
2997 /* if netpoll wants it, pretend we never saw it */
2998 if (netpoll_rx(skb))
3001 if (netdev_tstamp_prequeue)
3002 net_timestamp_check(skb);
3004 trace_netif_rx(skb);
3007 struct rps_dev_flow voidflow, *rflow = &voidflow;
3013 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3015 cpu = smp_processor_id();
3017 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3025 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3031 EXPORT_SYMBOL(netif_rx);
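/*
 * Illustrative sketch (not part of this file): the classic non-NAPI receive
 * path that feeds netif_rx() from a device's interrupt handler. The copy
 * strategy and the example_* names are assumptions.
 */
#if 0	/* illustrative example only */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static void example_rx_packet(struct net_device *dev,
			      const void *data, unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(dev, len);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, dev);	/* also sets skb->dev */

	/* Queues onto the per-CPU backlog; "succeeds" from the caller's view. */
	netif_rx(skb);
}
#endif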
3033 int netif_rx_ni(struct sk_buff *skb)
3038 err = netif_rx(skb);
3039 if (local_softirq_pending())
3045 EXPORT_SYMBOL(netif_rx_ni);
3047 static void net_tx_action(struct softirq_action *h)
3049 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3051 if (sd->completion_queue) {
3052 struct sk_buff *clist;
3054 local_irq_disable();
3055 clist = sd->completion_queue;
3056 sd->completion_queue = NULL;
3060 struct sk_buff *skb = clist;
3061 clist = clist->next;
3063 WARN_ON(atomic_read(&skb->users));
3064 trace_kfree_skb(skb, net_tx_action);
3069 if (sd->output_queue) {
3072 local_irq_disable();
3073 head = sd->output_queue;
3074 sd->output_queue = NULL;
3075 sd->output_queue_tailp = &sd->output_queue;
3079 struct Qdisc *q = head;
3080 spinlock_t *root_lock;
3082 head = head->next_sched;
3084 root_lock = qdisc_lock(q);
3085 if (spin_trylock(root_lock)) {
3086 smp_mb__before_clear_bit();
3087 clear_bit(__QDISC_STATE_SCHED,
3090 spin_unlock(root_lock);
3092 if (!test_bit(__QDISC_STATE_DEACTIVATED,
3094 __netif_reschedule(q);
3096 smp_mb__before_clear_bit();
3097 clear_bit(__QDISC_STATE_SCHED,
3105 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3106 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3107 /* This hook is defined here for ATM LANE */
3108 int (*br_fdb_test_addr_hook)(struct net_device *dev,
3109 unsigned char *addr) __read_mostly;
3110 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3113 #ifdef CONFIG_NET_CLS_ACT
3114 /* TODO: Maybe we should just force sch_ingress to be compiled in
3115 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless instructions
3116 * (a compare and two extra stores) when ingress is not compiled in
3117 * but CONFIG_NET_CLS_ACT is.
3118 * NOTE: This doesn't stop any functionality; if you don't have
3119 * the ingress scheduler, you just can't add policies on ingress.
3122 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
3124 struct net_device *dev = skb->dev;
3125 u32 ttl = G_TC_RTTL(skb->tc_verd);
3126 int result = TC_ACT_OK;
3129 if (unlikely(MAX_RED_LOOP < ttl++)) {
3130 if (net_ratelimit())
3131 pr_warning( "Redir loop detected Dropping packet (%d->%d)\n",
3132 skb->skb_iif, dev->ifindex);
3136 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3137 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3140 if (q != &noop_qdisc) {
3141 spin_lock(qdisc_lock(q));
3142 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3143 result = qdisc_enqueue_root(skb, q);
3144 spin_unlock(qdisc_lock(q));
3150 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3151 struct packet_type **pt_prev,
3152 int *ret, struct net_device *orig_dev)
3154 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3156 if (!rxq || rxq->qdisc == &noop_qdisc)
3160 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3164 switch (ing_filter(skb, rxq)) {
3178 * netdev_rx_handler_register - register receive handler
3179 * @dev: device to register a handler for
3180 * @rx_handler: receive handler to register
3181 * @rx_handler_data: data pointer that is used by rx handler
3183 * Register a receive handler for a device. This handler will then be
3184 * called from __netif_receive_skb. A negative errno code is returned
3187 * The caller must hold the rtnl_mutex.
3189 * For a general description of rx_handler, see enum rx_handler_result.
3191 int netdev_rx_handler_register(struct net_device *dev,
3192 rx_handler_func_t *rx_handler,
3193 void *rx_handler_data)
3197 if (dev->rx_handler)
3200 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3201 rcu_assign_pointer(dev->rx_handler, rx_handler);
3205 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
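/*
 * Illustrative sketch (not part of this file): how an upper device, in the
 * spirit of bridge or macvlan, might claim a lower port with the rx_handler
 * machinery documented above. Everything prefixed example_ is hypothetical.
 */
#if 0	/* illustrative example only */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Per-port state handed in as rx_handler_data. */
struct example_port {
	struct net_device *upper_dev;
};

static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct example_port *port = rcu_dereference(skb->dev->rx_handler_data);

	/* Steer the frame to the upper device and ask __netif_receive_skb()
	 * to run another round of processing for it. */
	skb->dev = port->upper_dev;
	return RX_HANDLER_ANOTHER;
}

static int example_enslave(struct net_device *port_dev,
			   struct example_port *port)
{
	int err;

	rtnl_lock();	/* netdev_rx_handler_register() requires the RTNL */
	err = netdev_rx_handler_register(port_dev, example_handle_frame, port);
	rtnl_unlock();
	return err;
}
#endif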
3208 * netdev_rx_handler_unregister - unregister receive handler
3209 * @dev: device to unregister a handler from
3211 * Unregister a receive handler from a device.
3213 * The caller must hold the rtnl_mutex.
3215 void netdev_rx_handler_unregister(struct net_device *dev)
3219 RCU_INIT_POINTER(dev->rx_handler, NULL);
3220 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3222 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3224 static int __netif_receive_skb(struct sk_buff *skb)
3226 struct packet_type *ptype, *pt_prev;
3227 rx_handler_func_t *rx_handler;
3228 struct net_device *orig_dev;
3229 struct net_device *null_or_dev;
3230 bool deliver_exact = false;
3231 int ret = NET_RX_DROP;
3234 if (!netdev_tstamp_prequeue)
3235 net_timestamp_check(skb);
3237 trace_netif_receive_skb(skb);
3239 /* if we've gotten here through NAPI, check netpoll */
3240 if (netpoll_receive_skb(skb))
3244 skb->skb_iif = skb->dev->ifindex;
3245 orig_dev = skb->dev;
3247 skb_reset_network_header(skb);
3248 skb_reset_transport_header(skb);
3249 skb_reset_mac_len(skb);
3257 __this_cpu_inc(softnet_data.processed);
3259 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3260 skb = vlan_untag(skb);
3265 #ifdef CONFIG_NET_CLS_ACT
3266 if (skb->tc_verd & TC_NCLS) {
3267 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3272 list_for_each_entry_rcu(ptype, &ptype_all, list) {
3273 if (!ptype->dev || ptype->dev == skb->dev) {
3275 ret = deliver_skb(skb, pt_prev, orig_dev);
3280 #ifdef CONFIG_NET_CLS_ACT
3281 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3287 rx_handler = rcu_dereference(skb->dev->rx_handler);
3288 if (vlan_tx_tag_present(skb)) {
3290 ret = deliver_skb(skb, pt_prev, orig_dev);
3293 if (vlan_do_receive(&skb, !rx_handler))
3295 else if (unlikely(!skb))
3301 ret = deliver_skb(skb, pt_prev, orig_dev);
3304 switch (rx_handler(&skb)) {
3305 case RX_HANDLER_CONSUMED:
3307 case RX_HANDLER_ANOTHER:
3309 case RX_HANDLER_EXACT:
3310 deliver_exact = true;
3311 case RX_HANDLER_PASS:
3318 /* deliver only exact match when indicated */
3319 null_or_dev = deliver_exact ? skb->dev : NULL;
3321 type = skb->protocol;
3322 list_for_each_entry_rcu(ptype,
3323 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
3324 if (ptype->type == type &&
3325 (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3326 ptype->dev == orig_dev)) {
3328 ret = deliver_skb(skb, pt_prev, orig_dev);
3334 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3336 atomic_long_inc(&skb->dev->rx_dropped);
3338		/* Jamal, now you will not be able to escape explaining
3339		 * to me how you were going to use this. :-)
3350 * netif_receive_skb - process receive buffer from network
3351 * @skb: buffer to process
3353 * netif_receive_skb() is the main receive data processing function.
3354 * It always succeeds. The buffer may be dropped during processing
3355 * for congestion control or by the protocol layers.
3357 * This function may only be called from softirq context and interrupts
3358 * should be enabled.
3360 * Return values (usually ignored):
3361 * NET_RX_SUCCESS: no congestion
3362 * NET_RX_DROP: packet was dropped
3364 int netif_receive_skb(struct sk_buff *skb)
3366 if (netdev_tstamp_prequeue)
3367 net_timestamp_check(skb);
3369 if (skb_defer_rx_timestamp(skb))
3370 return NET_RX_SUCCESS;
3374 struct rps_dev_flow voidflow, *rflow = &voidflow;
3379 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3382 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3386 ret = __netif_receive_skb(skb);
3392 return __netif_receive_skb(skb);
3395 EXPORT_SYMBOL(netif_receive_skb);
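/*
 * Illustrative sketch (not part of this file): unlike netif_rx(),
 * netif_receive_skb() must run in softirq context, typically from a driver's
 * NAPI ->poll() loop. example_deliver() is a hypothetical fragment of such a
 * loop.
 */
#if 0	/* illustrative example only */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static void example_deliver(struct net_device *dev, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, dev);
	netif_receive_skb(skb);		/* softirq context, IRQs enabled */
}
#endif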
3397 /* Network device is going away, flush any packets still pending
3398 * Called with irqs disabled.
3400 static void flush_backlog(void *arg)
3402 struct net_device *dev = arg;
3403 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3404 struct sk_buff *skb, *tmp;
3407 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3408 if (skb->dev == dev) {
3409 __skb_unlink(skb, &sd->input_pkt_queue);
3411 input_queue_head_incr(sd);
3416 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3417 if (skb->dev == dev) {
3418 __skb_unlink(skb, &sd->process_queue);
3420 input_queue_head_incr(sd);
3425 static int napi_gro_complete(struct sk_buff *skb)
3427 struct packet_type *ptype;
3428 __be16 type = skb->protocol;
3429 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3432 if (NAPI_GRO_CB(skb)->count == 1) {
3433 skb_shinfo(skb)->gso_size = 0;
3438 list_for_each_entry_rcu(ptype, head, list) {
3439 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
3442 err = ptype->gro_complete(skb);
3448 WARN_ON(&ptype->list == head);
3450 return NET_RX_SUCCESS;
3454 return netif_receive_skb(skb);
3457 inline void napi_gro_flush(struct napi_struct *napi)
3459 struct sk_buff *skb, *next;
3461 for (skb = napi->gro_list; skb; skb = next) {
3464 napi_gro_complete(skb);
3467 napi->gro_count = 0;
3468 napi->gro_list = NULL;
3470 EXPORT_SYMBOL(napi_gro_flush);
3472 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3474 struct sk_buff **pp = NULL;
3475 struct packet_type *ptype;
3476 __be16 type = skb->protocol;
3477 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3480 enum gro_result ret;
3482 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
3485 if (skb_is_gso(skb) || skb_has_frag_list(skb))
3489 list_for_each_entry_rcu(ptype, head, list) {
3490 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
3493 skb_set_network_header(skb, skb_gro_offset(skb));
3494 mac_len = skb->network_header - skb->mac_header;
3495 skb->mac_len = mac_len;
3496 NAPI_GRO_CB(skb)->same_flow = 0;
3497 NAPI_GRO_CB(skb)->flush = 0;
3498 NAPI_GRO_CB(skb)->free = 0;
3500 pp = ptype->gro_receive(&napi->gro_list, skb);
3505 if (&ptype->list == head)
3508 same_flow = NAPI_GRO_CB(skb)->same_flow;
3509 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
3512 struct sk_buff *nskb = *pp;
3516 napi_gro_complete(nskb);
3523 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
3527 NAPI_GRO_CB(skb)->count = 1;
3528 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3529 skb->next = napi->gro_list;
3530 napi->gro_list = skb;
3534 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3535 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3537 BUG_ON(skb->end - skb->tail < grow);
3539 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3542 skb->data_len -= grow;
3544 skb_shinfo(skb)->frags[0].page_offset += grow;
3545 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
3547 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
3548 skb_frag_unref(skb, 0);
3549 memmove(skb_shinfo(skb)->frags,
3550 skb_shinfo(skb)->frags + 1,
3551 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
3562 EXPORT_SYMBOL(dev_gro_receive);
3564 static inline gro_result_t
3565 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3568 unsigned int maclen = skb->dev->hard_header_len;
3570 for (p = napi->gro_list; p; p = p->next) {
3571 unsigned long diffs;
3573 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3574 diffs |= p->vlan_tci ^ skb->vlan_tci;
3575 if (maclen == ETH_HLEN)
3576 diffs |= compare_ether_header(skb_mac_header(p),
3577 skb_gro_mac_header(skb));
3579 diffs = memcmp(skb_mac_header(p),
3580 skb_gro_mac_header(skb),
3582 NAPI_GRO_CB(p)->same_flow = !diffs;
3583 NAPI_GRO_CB(p)->flush = 0;
3586 return dev_gro_receive(napi, skb);
3589 gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
3593 if (netif_receive_skb(skb))
3598 case GRO_MERGED_FREE:
3609 EXPORT_SYMBOL(napi_skb_finish);
3611 void skb_gro_reset_offset(struct sk_buff *skb)
3613 NAPI_GRO_CB(skb)->data_offset = 0;
3614 NAPI_GRO_CB(skb)->frag0 = NULL;
3615 NAPI_GRO_CB(skb)->frag0_len = 0;
3617 if (skb->mac_header == skb->tail &&
3618 !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
3619 NAPI_GRO_CB(skb)->frag0 =
3620 skb_frag_address(&skb_shinfo(skb)->frags[0]);
3621 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]);
3624 EXPORT_SYMBOL(skb_gro_reset_offset);
3626 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3628 skb_gro_reset_offset(skb);
3630 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
3632 EXPORT_SYMBOL(napi_gro_receive);
3634 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3636 __skb_pull(skb, skb_headlen(skb));
3637 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
3639 skb->dev = napi->dev;
3645 struct sk_buff *napi_get_frags(struct napi_struct *napi)
3647 struct sk_buff *skb = napi->skb;
3650 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3656 EXPORT_SYMBOL(napi_get_frags);
3658 gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3664 skb->protocol = eth_type_trans(skb, skb->dev);
3666 if (ret == GRO_HELD)
3667 skb_gro_pull(skb, -ETH_HLEN);
3668 else if (netif_receive_skb(skb))
3673 case GRO_MERGED_FREE:
3674 napi_reuse_skb(napi, skb);
3683 EXPORT_SYMBOL(napi_frags_finish);
3685 struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3687 struct sk_buff *skb = napi->skb;
3694 skb_reset_mac_header(skb);
3695 skb_gro_reset_offset(skb);
3697 off = skb_gro_offset(skb);
3698 hlen = off + sizeof(*eth);
3699 eth = skb_gro_header_fast(skb, off);
3700 if (skb_gro_header_hard(skb, hlen)) {
3701 eth = skb_gro_header_slow(skb, hlen, off);
3702 if (unlikely(!eth)) {
3703 napi_reuse_skb(napi, skb);
3709 skb_gro_pull(skb, sizeof(*eth));
3712 * This works because the only protocols we care about don't require
3713 * special handling. We'll fix it up properly at the end.
3715 skb->protocol = eth->h_proto;
3720 EXPORT_SYMBOL(napi_frags_skb);
3722 gro_result_t napi_gro_frags(struct napi_struct *napi)
3724 struct sk_buff *skb = napi_frags_skb(napi);
3729 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
3731 EXPORT_SYMBOL(napi_gro_frags);
3734 * net_rps_action sends any pending IPIs for RPS.
3735 * Note: called with local irq disabled, but exits with local irq enabled.
3737 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3740 struct softnet_data *remsd = sd->rps_ipi_list;
3743 sd->rps_ipi_list = NULL;
3747 /* Send pending IPI's to kick RPS processing on remote cpus. */
3749 struct softnet_data *next = remsd->rps_ipi_next;
3751 if (cpu_online(remsd->cpu))
3752 __smp_call_function_single(remsd->cpu,
3761 static int process_backlog(struct napi_struct *napi, int quota)
3764 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
3767	/* Check if we have pending IPIs; it's better to send them now
3768	 * rather than waiting for net_rx_action() to end.
3770 if (sd->rps_ipi_list) {
3771 local_irq_disable();
3772 net_rps_action_and_irq_enable(sd);
3775 napi->weight = weight_p;
3776 local_irq_disable();
3777 while (work < quota) {
3778 struct sk_buff *skb;
3781 while ((skb = __skb_dequeue(&sd->process_queue))) {
3783 __netif_receive_skb(skb);
3784 local_irq_disable();
3785 input_queue_head_incr(sd);
3786 if (++work >= quota) {
3793 qlen = skb_queue_len(&sd->input_pkt_queue);
3795 skb_queue_splice_tail_init(&sd->input_pkt_queue,
3796 &sd->process_queue);
3798 if (qlen < quota - work) {
3800 * Inline a custom version of __napi_complete().
3801		 * Only the current CPU owns and manipulates this napi,
3802		 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
3803		 * We can use a plain write instead of clear_bit(),
3804		 * and we don't need an smp_mb() memory barrier.
3806 list_del(&napi->poll_list);
3809 quota = work + qlen;
3819 * __napi_schedule - schedule for receive
3820 * @n: entry to schedule
3822 * The entry's receive function will be scheduled to run
3824 void __napi_schedule(struct napi_struct *n)
3826 unsigned long flags;
3828 local_irq_save(flags);
3829 ____napi_schedule(&__get_cpu_var(softnet_data), n);
3830 local_irq_restore(flags);
3832 EXPORT_SYMBOL(__napi_schedule);
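/*
 * Illustrative sketch (not part of this file): a device interrupt handler
 * normally uses napi_schedule_prep()/__napi_schedule() (or the napi_schedule()
 * wrapper) so that NAPI_STATE_SCHED is only taken once. The example_* names
 * and the interrupt masking policy are assumptions.
 */
#if 0	/* illustrative example only */
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct example_adapter {
	struct napi_struct napi;
};

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	struct example_adapter *adapter = dev_id;

	/* Mask further RX interrupts in hardware here, then let NAPI poll. */
	if (napi_schedule_prep(&adapter->napi))
		__napi_schedule(&adapter->napi);

	return IRQ_HANDLED;
}
#endif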
3834 void __napi_complete(struct napi_struct *n)
3836 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3837 BUG_ON(n->gro_list);
3839 list_del(&n->poll_list);
3840 smp_mb__before_clear_bit();
3841 clear_bit(NAPI_STATE_SCHED, &n->state);
3843 EXPORT_SYMBOL(__napi_complete);
3845 void napi_complete(struct napi_struct *n)
3847 unsigned long flags;
3850	 * don't let napi dequeue from the CPU poll list
3851	 * just in case it's running on a different CPU
3853 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3857 local_irq_save(flags);
3859 local_irq_restore(flags);
3861 EXPORT_SYMBOL(napi_complete);
3863 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3864 int (*poll)(struct napi_struct *, int), int weight)
3866 INIT_LIST_HEAD(&napi->poll_list);
3867 napi->gro_count = 0;
3868 napi->gro_list = NULL;
3871 napi->weight = weight;
3872 list_add(&napi->dev_list, &dev->napi_list);
3874 #ifdef CONFIG_NETPOLL
3875 spin_lock_init(&napi->poll_lock);
3876 napi->poll_owner = -1;
3878 set_bit(NAPI_STATE_SCHED, &napi->state);
3880 EXPORT_SYMBOL(netif_napi_add);
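/*
 * Illustrative sketch (not part of this file) of the registration half: a
 * driver adds its NAPI context at probe time with netif_napi_add() and
 * completes each poll with napi_complete() once the budget is not exhausted.
 * All example_* driver internals are hypothetical.
 */
#if 0	/* illustrative example only */
#include <linux/netdevice.h>

struct example_adapter {
	struct net_device	*netdev;
	struct napi_struct	napi;
};

/* A real driver would dequeue RX descriptors here and feed the resulting
 * skbs to napi_gro_receive() or netif_receive_skb(). */
static int example_clean_rx(struct example_adapter *adapter, int budget)
{
	return 0;
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_adapter *adapter =
		container_of(napi, struct example_adapter, napi);
	int work_done = example_clean_rx(adapter, budget);

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable RX interrupts in hardware here */
	}
	return work_done;
}

static void example_setup(struct example_adapter *adapter)
{
	netif_napi_add(adapter->netdev, &adapter->napi, example_poll, 64);
}
#endif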
3882 void netif_napi_del(struct napi_struct *napi)
3884 struct sk_buff *skb, *next;
3886 list_del_init(&napi->dev_list);
3887 napi_free_frags(napi);
3889 for (skb = napi->gro_list; skb; skb = next) {
3895 napi->gro_list = NULL;
3896 napi->gro_count = 0;
3898 EXPORT_SYMBOL(netif_napi_del);
3900 static void net_rx_action(struct softirq_action *h)
3902 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3903 unsigned long time_limit = jiffies + 2;
3904 int budget = netdev_budget;
3907 local_irq_disable();
3909 while (!list_empty(&sd->poll_list)) {
3910 struct napi_struct *n;
3913		/* If the softirq window is exhausted then punt.
3914		 * Allow this to run for 2 jiffies, which allows
3915		 * an average latency of 1.5/HZ.
3917 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
3922 /* Even though interrupts have been re-enabled, this
3923 * access is safe because interrupts can only add new
3924 * entries to the tail of this list, and only ->poll()
3925 * calls can remove this head entry from the list.
3927 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
3929 have = netpoll_poll_lock(n);
3933 /* This NAPI_STATE_SCHED test is for avoiding a race
3934 * with netpoll's poll_napi(). Only the entity which
3935 * obtains the lock and sees NAPI_STATE_SCHED set will
3936 * actually make the ->poll() call. Therefore we avoid
3937 * accidentally calling ->poll() when NAPI is not scheduled.
3940 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
3941 work = n->poll(n, weight);
3945 WARN_ON_ONCE(work > weight);
3949 local_irq_disable();
3951 /* Drivers must not modify the NAPI state if they
3952 * consume the entire weight. In such cases this code
3953 * still "owns" the NAPI instance and therefore can
3954 * move the instance around on the list at-will.
3956 if (unlikely(work == weight)) {
3957 if (unlikely(napi_disable_pending(n))) {
3960 local_irq_disable();
3962 list_move_tail(&n->poll_list, &sd->poll_list);
3965 netpoll_poll_unlock(have);
3968 net_rps_action_and_irq_enable(sd);
3970 #ifdef CONFIG_NET_DMA
3972 * There may not be any more sk_buffs coming right now, so push
3973 * any pending DMA copies to hardware
3975 dma_issue_pending_all();
3982 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3986 static gifconf_func_t *gifconf_list[NPROTO];
3989 * register_gifconf - register a SIOCGIF handler
3990 * @family: Address family
3991 * @gifconf: Function handler
3993 * Register protocol dependent address dumping routines. The handler
3994 * that is passed must not be freed or reused until it has been replaced
3995 * by another handler.
3997 int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
3999 if (family >= NPROTO)
4001 gifconf_list[family] = gifconf;
4004 EXPORT_SYMBOL(register_gifconf);
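/*
 * Illustrative sketch (not part of this file), assuming the gifconf_func_t
 * convention visible in dev_ifconf() below: a NULL buffer is a size probe,
 * otherwise ifreq records are written to user space. The example_* names are
 * hypothetical; the real PF_INET handler lives in the IPv4 code.
 */
#if 0	/* illustrative example only */
#include <linux/netdevice.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/if.h>

static int example_gifconf(struct net_device *dev, char __user *buf, int len)
{
	struct ifreq ifr;

	if (!buf)				/* size probe pass */
		return sizeof(ifr);
	if (len < (int)sizeof(ifr))
		return 0;

	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, dev->name, IFNAMSIZ);
	return copy_to_user(buf, &ifr, sizeof(ifr)) ? -EFAULT : sizeof(ifr);
}

/* A protocol family registers its handler once, e.g. from its init path. */
static int example_register_gifconf(unsigned int family)
{
	return register_gifconf(family, example_gifconf);
}
#endif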
4008 * Map an interface index to its name (SIOCGIFNAME)
4012 * We need this ioctl for efficient implementation of the
4013 * if_indextoname() function required by the IPv6 API. Without
4014 * it, we would have to search all the interfaces to find a match.
4018 static int dev_ifname(struct net *net, struct ifreq __user *arg)
4020 struct net_device *dev;
4024 * Fetch the caller's info block.
4027 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4031 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
4037 strcpy(ifr.ifr_name, dev->name);
4040 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
4046 * Perform a SIOCGIFCONF call. This structure will change
4047 * size eventually, and there is nothing I can do about it.
4048 * Thus we will need a 'compatibility mode'.
4051 static int dev_ifconf(struct net *net, char __user *arg)
4054 struct net_device *dev;
4061 * Fetch the caller's info block.
4064 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
4071 * Loop over the interfaces, and write an info block for each.
4075 for_each_netdev(net, dev) {
4076 for (i = 0; i < NPROTO; i++) {
4077 if (gifconf_list[i]) {
4080 done = gifconf_list[i](dev, NULL, 0);
4082 done = gifconf_list[i](dev, pos + total,
4092 * All done. Write the updated control block back to the caller.
4094 ifc.ifc_len = total;
4097 * Both BSD and Solaris return 0 here, so we do too.
4099 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
4102 #ifdef CONFIG_PROC_FS
4104 #define BUCKET_SPACE (32 - NETDEV_HASHBITS)
4106 struct dev_iter_state {
4107 struct seq_net_private p;
4108 unsigned int pos; /* bucket << BUCKET_SPACE + offset */
4111 #define get_bucket(x) ((x) >> BUCKET_SPACE)
4112 #define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
4113 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
4115 static inline struct net_device *dev_from_same_bucket(struct seq_file *seq)
4117 struct dev_iter_state *state = seq->private;
4118 struct net *net = seq_file_net(seq);
4119 struct net_device *dev;
4120 struct hlist_node *p;
4121 struct hlist_head *h;
4122 unsigned int count, bucket, offset;
4124 bucket = get_bucket(state->pos);
4125 offset = get_offset(state->pos);
4126 h = &net->dev_name_head[bucket];
4128 hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
4129 if (count++ == offset) {
4130 state->pos = set_bucket_offset(bucket, count);
4138 static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
4140 struct dev_iter_state *state = seq->private;
4141 struct net_device *dev;
4142 unsigned int bucket;
4144 bucket = get_bucket(state->pos);
4146 dev = dev_from_same_bucket(seq);
4151 state->pos = set_bucket_offset(bucket, 0);
4152 } while (bucket < NETDEV_HASHENTRIES);
4158 * This is invoked by the /proc filesystem handler to display a device in detail.
4161 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
4164 struct dev_iter_state *state = seq->private;
4168 return SEQ_START_TOKEN;
4170 /* check for end of the hash */
4171 if (state->pos == 0 && *pos > 1)
4174 return dev_from_new_bucket(seq);
4177 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4179 struct net_device *dev;
4183 if (v == SEQ_START_TOKEN)
4184 return dev_from_new_bucket(seq);
4186 dev = dev_from_same_bucket(seq);
4190 return dev_from_new_bucket(seq);
4193 void dev_seq_stop(struct seq_file *seq, void *v)
4199 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
4201 struct rtnl_link_stats64 temp;
4202 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
4204 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
4205 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
4206 dev->name, stats->rx_bytes, stats->rx_packets,
4208 stats->rx_dropped + stats->rx_missed_errors,
4209 stats->rx_fifo_errors,
4210 stats->rx_length_errors + stats->rx_over_errors +
4211 stats->rx_crc_errors + stats->rx_frame_errors,
4212 stats->rx_compressed, stats->multicast,
4213 stats->tx_bytes, stats->tx_packets,
4214 stats->tx_errors, stats->tx_dropped,
4215 stats->tx_fifo_errors, stats->collisions,
4216 stats->tx_carrier_errors +
4217 stats->tx_aborted_errors +
4218 stats->tx_window_errors +
4219 stats->tx_heartbeat_errors,
4220 stats->tx_compressed);
4224 * Called from the PROCfs module. This now uses the new arbitrary-sized
4225 * /proc/net interface to create /proc/net/dev
4227 static int dev_seq_show(struct seq_file *seq, void *v)
4229 if (v == SEQ_START_TOKEN)
4230 seq_puts(seq, "Inter-| Receive "
4232 " face |bytes packets errs drop fifo frame "
4233 "compressed multicast|bytes packets errs "
4234 "drop fifo colls carrier compressed\n");
4236 dev_seq_printf_stats(seq, v);
4240 static struct softnet_data *softnet_get_online(loff_t *pos)
4242 struct softnet_data *sd = NULL;
4244 while (*pos < nr_cpu_ids)
4245 if (cpu_online(*pos)) {
4246 sd = &per_cpu(softnet_data, *pos);
4253 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
4255 return softnet_get_online(pos);
4258 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4261 return softnet_get_online(pos);
4264 static void softnet_seq_stop(struct seq_file *seq, void *v)
4268 static int softnet_seq_show(struct seq_file *seq, void *v)
4270 struct softnet_data *sd = v;
4272 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
4273 sd->processed, sd->dropped, sd->time_squeeze, 0,
4274 0, 0, 0, 0, /* was fastroute */
4275 sd->cpu_collision, sd->received_rps);
4279 static const struct seq_operations dev_seq_ops = {
4280 .start = dev_seq_start,
4281 .next = dev_seq_next,
4282 .stop = dev_seq_stop,
4283 .show = dev_seq_show,
4286 static int dev_seq_open(struct inode *inode, struct file *file)
4288 return seq_open_net(inode, file, &dev_seq_ops,
4289 sizeof(struct dev_iter_state));
4292 int dev_seq_open_ops(struct inode *inode, struct file *file,
4293 const struct seq_operations *ops)
4295 return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
4298 static const struct file_operations dev_seq_fops = {
4299 .owner = THIS_MODULE,
4300 .open = dev_seq_open,
4302 .llseek = seq_lseek,
4303 .release = seq_release_net,
4306 static const struct seq_operations softnet_seq_ops = {
4307 .start = softnet_seq_start,
4308 .next = softnet_seq_next,
4309 .stop = softnet_seq_stop,
4310 .show = softnet_seq_show,
4313 static int softnet_seq_open(struct inode *inode, struct file *file)
4315 return seq_open(file, &softnet_seq_ops);
4318 static const struct file_operations softnet_seq_fops = {
4319 .owner = THIS_MODULE,
4320 .open = softnet_seq_open,
4322 .llseek = seq_lseek,
4323 .release = seq_release,
4326 static void *ptype_get_idx(loff_t pos)
4328 struct packet_type *pt = NULL;
4332 list_for_each_entry_rcu(pt, &ptype_all, list) {
4338 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
4339 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
4348 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
4352 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
4355 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4357 struct packet_type *pt;
4358 struct list_head *nxt;
4362 if (v == SEQ_START_TOKEN)
4363 return ptype_get_idx(0);
4366 nxt = pt->list.next;
4367 if (pt->type == htons(ETH_P_ALL)) {
4368 if (nxt != &ptype_all)
4371 nxt = ptype_base[0].next;
4373 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
4375 while (nxt == &ptype_base[hash]) {
4376 if (++hash >= PTYPE_HASH_SIZE)
4378 nxt = ptype_base[hash].next;
4381 return list_entry(nxt, struct packet_type, list);
4384 static void ptype_seq_stop(struct seq_file *seq, void *v)
4390 static int ptype_seq_show(struct seq_file *seq, void *v)
4392 struct packet_type *pt = v;
4394 if (v == SEQ_START_TOKEN)
4395 seq_puts(seq, "Type Device Function\n");
4396 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
4397 if (pt->type == htons(ETH_P_ALL))
4398 seq_puts(seq, "ALL ");
4400 seq_printf(seq, "%04x", ntohs(pt->type));
4402 seq_printf(seq, " %-8s %pF\n",
4403 pt->dev ? pt->dev->name : "", pt->func);
4409 static const struct seq_operations ptype_seq_ops = {
4410 .start = ptype_seq_start,
4411 .next = ptype_seq_next,
4412 .stop = ptype_seq_stop,
4413 .show = ptype_seq_show,
4416 static int ptype_seq_open(struct inode *inode, struct file *file)
4418 return seq_open_net(inode, file, &ptype_seq_ops,
4419 sizeof(struct seq_net_private));
4422 static const struct file_operations ptype_seq_fops = {
4423 .owner = THIS_MODULE,
4424 .open = ptype_seq_open,
4426 .llseek = seq_lseek,
4427 .release = seq_release_net,
4431 static int __net_init dev_proc_net_init(struct net *net)
4435 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
4437 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
4439 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
4442 if (wext_proc_init(net))
4448 proc_net_remove(net, "ptype");
4450 proc_net_remove(net, "softnet_stat");
4452 proc_net_remove(net, "dev");
4456 static void __net_exit dev_proc_net_exit(struct net *net)
4458 wext_proc_exit(net);
4460 proc_net_remove(net, "ptype");
4461 proc_net_remove(net, "softnet_stat");
4462 proc_net_remove(net, "dev");
4465 static struct pernet_operations __net_initdata dev_proc_ops = {
4466 .init = dev_proc_net_init,
4467 .exit = dev_proc_net_exit,
4470 static int __init dev_proc_init(void)
4472 return register_pernet_subsys(&dev_proc_ops);
4475 #define dev_proc_init() 0
4476 #endif /* CONFIG_PROC_FS */
4480 * netdev_set_master - set up master pointer
4481 * @slave: slave device
4482 * @master: new master device
4484 * Changes the master device of the slave. Pass %NULL to break the
4485 * bonding. The caller must hold the RTNL semaphore. On a failure
4486 * a negative errno code is returned. On success the reference counts
4487 * are adjusted and the function returns zero.
4489 int netdev_set_master(struct net_device *slave, struct net_device *master)
4491 struct net_device *old = slave->master;
4501 slave->master = master;
4507 EXPORT_SYMBOL(netdev_set_master);
4510 * netdev_set_bond_master - set up bonding master/slave pair
4511 * @slave: slave device
4512 * @master: new master device
4514 * Changes the master device of the slave. Pass %NULL to break the
4515 * bonding. The caller must hold the RTNL semaphore. On a failure
4516 * a negative errno code is returned. On success %RTM_NEWLINK is sent
4517 * to the routing socket and the function returns zero.
4519 int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
4525 err = netdev_set_master(slave, master);
4529 slave->flags |= IFF_SLAVE;
4531 slave->flags &= ~IFF_SLAVE;
4533 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
4536 EXPORT_SYMBOL(netdev_set_bond_master);
4538 static void dev_change_rx_flags(struct net_device *dev, int flags)
4540 const struct net_device_ops *ops = dev->netdev_ops;
4542 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4543 ops->ndo_change_rx_flags(dev, flags);
4546 static int __dev_set_promiscuity(struct net_device *dev, int inc)
4548 unsigned short old_flags = dev->flags;
4554 dev->flags |= IFF_PROMISC;
4555 dev->promiscuity += inc;
4556 if (dev->promiscuity == 0) {
4559 * If inc causes overflow, untouch promisc and return error.
4562 dev->flags &= ~IFF_PROMISC;
4564 dev->promiscuity -= inc;
4565 printk(KERN_WARNING "%s: promiscuity touches roof, "
4566 "set promiscuity failed, promiscuity feature "
4567 "of device might be broken.\n", dev->name);
4571 if (dev->flags != old_flags) {
4572 printk(KERN_INFO "device %s %s promiscuous mode\n",
4573 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
4575 if (audit_enabled) {
4576 current_uid_gid(&uid, &gid);
4577 audit_log(current->audit_context, GFP_ATOMIC,
4578 AUDIT_ANOM_PROMISCUOUS,
4579 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4580 dev->name, (dev->flags & IFF_PROMISC),
4581 (old_flags & IFF_PROMISC),
4582 audit_get_loginuid(current),
4584 audit_get_sessionid(current));
4587 dev_change_rx_flags(dev, IFF_PROMISC);
4593 * dev_set_promiscuity - update promiscuity count on a device
4597 * Add or remove promiscuity from a device. While the count in the device
4598 * remains above zero the interface remains promiscuous. Once it hits zero
4599 * the device reverts back to normal filtering operation. A negative inc
4600 * value is used to drop promiscuity on the device.
4601 * Return 0 if successful or a negative errno code on error.
4603 int dev_set_promiscuity(struct net_device *dev, int inc)
4605 unsigned short old_flags = dev->flags;
4608 err = __dev_set_promiscuity(dev, inc);
4611 if (dev->flags != old_flags)
4612 dev_set_rx_mode(dev);
4615 EXPORT_SYMBOL(dev_set_promiscuity);
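/*
 * Illustrative sketch (not part of this file): a packet-capture style user
 * takes one promiscuity reference while active and drops it on teardown,
 * under the RTNL like the existing in-tree callers. The example_* wrappers
 * are hypothetical.
 */
#if 0	/* illustrative example only */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int example_capture_start(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* counter goes up by one */
	rtnl_unlock();
	return err;
}

static void example_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);		/* drop our reference */
	rtnl_unlock();
}
#endif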
4618 * dev_set_allmulti - update allmulti count on a device
4622 * Add or remove reception of all multicast frames to a device. While the
4623 * count in the device remains above zero the interface remains listening
4624 * to all multicast frames. Once it hits zero the device reverts back to normal
4625 * filtering operation. A negative @inc value is used to drop the counter
4626 * when releasing a resource needing all multicasts.
4627 * Return 0 if successful or a negative errno code on error.
4630 int dev_set_allmulti(struct net_device *dev, int inc)
4632 unsigned short old_flags = dev->flags;
4636 dev->flags |= IFF_ALLMULTI;
4637 dev->allmulti += inc;
4638 if (dev->allmulti == 0) {
4641 * If inc causes overflow, untouch allmulti and return error.
4644 dev->flags &= ~IFF_ALLMULTI;
4646 dev->allmulti -= inc;
4647 printk(KERN_WARNING "%s: allmulti touches roof, "
4648 "set allmulti failed, allmulti feature of "
4649 "device might be broken.\n", dev->name);
4653 if (dev->flags ^ old_flags) {
4654 dev_change_rx_flags(dev, IFF_ALLMULTI);
4655 dev_set_rx_mode(dev);
4659 EXPORT_SYMBOL(dev_set_allmulti);
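/*
 * Illustrative sketch (not part of this file): a multicast routing component
 * that needs to see every multicast frame on a port bumps the allmulti
 * counter the same way, again under the RTNL. The example_* names are
 * hypothetical.
 */
#if 0	/* illustrative example only */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int example_mroute_attach(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_allmulti(dev, 1);
	rtnl_unlock();
	return err;
}

static void example_mroute_detach(struct net_device *dev)
{
	rtnl_lock();
	dev_set_allmulti(dev, -1);
	rtnl_unlock();
}
#endif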
4662 * Upload unicast and multicast address lists to device and
4663 * configure RX filtering. When the device doesn't support unicast
4664 *	filtering it is put in promiscuous mode while unicast addresses are present.
4667 void __dev_set_rx_mode(struct net_device *dev)
4669 const struct net_device_ops *ops = dev->netdev_ops;
4671 /* dev_open will call this function so the list will stay sane. */
4672 if (!(dev->flags&IFF_UP))
4675 if (!netif_device_present(dev))
4678 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4679 /* Unicast addresses changes may only happen under the rtnl,
4680 * therefore calling __dev_set_promiscuity here is safe.
4682 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4683 __dev_set_promiscuity(dev, 1);
4684 dev->uc_promisc = true;
4685 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4686 __dev_set_promiscuity(dev, -1);
4687 dev->uc_promisc = false;
4691 if (ops->ndo_set_rx_mode)
4692 ops->ndo_set_rx_mode(dev);
4695 void dev_set_rx_mode(struct net_device *dev)
4697 netif_addr_lock_bh(dev);
4698 __dev_set_rx_mode(dev);
4699 netif_addr_unlock_bh(dev);
4703 * dev_get_flags - get flags reported to userspace
4706 * Get the combination of flag bits exported through APIs to userspace.
4708 unsigned dev_get_flags(const struct net_device *dev)
4712 flags = (dev->flags & ~(IFF_PROMISC |
4717 (dev->gflags & (IFF_PROMISC |
4720 if (netif_running(dev)) {
4721 if (netif_oper_up(dev))
4722 flags |= IFF_RUNNING;
4723 if (netif_carrier_ok(dev))
4724 flags |= IFF_LOWER_UP;
4725 if (netif_dormant(dev))
4726 flags |= IFF_DORMANT;
4731 EXPORT_SYMBOL(dev_get_flags);
4733 int __dev_change_flags(struct net_device *dev, unsigned int flags)
4735 int old_flags = dev->flags;
4741 * Set the flags on our device.
4744 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4745 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4747 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4751 * Load in the correct multicast list now the flags have changed.
4754 if ((old_flags ^ flags) & IFF_MULTICAST)
4755 dev_change_rx_flags(dev, IFF_MULTICAST);
4757 dev_set_rx_mode(dev);
4760	 *	Have we downed the interface? We handle IFF_UP ourselves
4761	 *	according to user attempts to set it, rather than blindly setting it.
4766 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4767 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
4770 dev_set_rx_mode(dev);
4773 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4774 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4776 dev->gflags ^= IFF_PROMISC;
4777 dev_set_promiscuity(dev, inc);
4780 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4781	   is important. Some (broken) drivers set IFF_PROMISC when
4782	   IFF_ALLMULTI is requested, without asking us and without reporting it.
4784 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4785 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4787 dev->gflags ^= IFF_ALLMULTI;
4788 dev_set_allmulti(dev, inc);
4794 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4796 unsigned int changes = dev->flags ^ old_flags;
4798 if (changes & IFF_UP) {
4799 if (dev->flags & IFF_UP)
4800 call_netdevice_notifiers(NETDEV_UP, dev);
4802 call_netdevice_notifiers(NETDEV_DOWN, dev);
4805 if (dev->flags & IFF_UP &&
4806 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4807 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4811 * dev_change_flags - change device settings
4813 * @flags: device state flags
4815 *	Change settings on a device based on state flags. The flags are
4816 *	in the format exported to userspace.
4818 int dev_change_flags(struct net_device *dev, unsigned flags)
4821 int old_flags = dev->flags;
4823 ret = __dev_change_flags(dev, flags);
4827 changes = old_flags ^ dev->flags;
4829 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4831 __dev_notify_flags(dev, old_flags);
4834 EXPORT_SYMBOL(dev_change_flags);
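/*
 * Illustrative sketch (not part of this file): bringing an interface
 * administratively up or down from kernel code is a dev_change_flags() call
 * under the RTNL, which is what the SIOCSIFFLAGS path below does.
 * example_set_up() is a hypothetical helper.
 */
#if 0	/* illustrative example only */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int example_set_up(struct net_device *dev, bool up)
{
	unsigned int flags;
	int err;

	rtnl_lock();
	flags = dev_get_flags(dev);
	err = dev_change_flags(dev, up ? (flags | IFF_UP) : (flags & ~IFF_UP));
	rtnl_unlock();
	return err;
}
#endif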
4837 * dev_set_mtu - Change maximum transfer unit
4839 * @new_mtu: new transfer unit
4841 * Change the maximum transfer size of the network device.
4843 int dev_set_mtu(struct net_device *dev, int new_mtu)
4845 const struct net_device_ops *ops = dev->netdev_ops;
4848 if (new_mtu == dev->mtu)
4851 /* MTU must be positive. */
4855 if (!netif_device_present(dev))
4859 if (ops->ndo_change_mtu)
4860 err = ops->ndo_change_mtu(dev, new_mtu);
4864 if (!err && dev->flags & IFF_UP)
4865 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4868 EXPORT_SYMBOL(dev_set_mtu);
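/*
 * Illustrative sketch (not part of this file): an in-kernel MTU change
 * mirrors the SIOCSIFMTU path, holding the RTNL around dev_set_mtu().
 * example_set_mtu() is hypothetical.
 */
#if 0	/* illustrative example only */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int example_set_mtu(struct net_device *dev, int mtu)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, mtu);	/* NETDEV_CHANGEMTU fires if dev is up */
	rtnl_unlock();
	return err;
}
#endif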
4871 * dev_set_group - Change group this device belongs to
4873 * @new_group: group this device should belong to
4875 void dev_set_group(struct net_device *dev, int new_group)
4877 dev->group = new_group;
4879 EXPORT_SYMBOL(dev_set_group);
4882 * dev_set_mac_address - Change Media Access Control Address
4886 * Change the hardware (MAC) address of the device
4888 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4890 const struct net_device_ops *ops = dev->netdev_ops;
4893 if (!ops->ndo_set_mac_address)
4895 if (sa->sa_family != dev->type)
4897 if (!netif_device_present(dev))
4899 err = ops->ndo_set_mac_address(dev, sa);
4901 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4904 EXPORT_SYMBOL(dev_set_mac_address);
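/*
 * Illustrative sketch (not part of this file): a MAC change goes through a
 * struct sockaddr whose sa_family must match dev->type, as checked above.
 * example_set_mac() is hypothetical and assumes an Ethernet-like device.
 */
#if 0	/* illustrative example only */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>

static int example_set_mac(struct net_device *dev, const u8 *mac)
{
	struct sockaddr sa;
	int err;

	if (!is_valid_ether_addr(mac))
		return -EADDRNOTAVAIL;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, mac, ETH_ALEN);

	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);	/* NETDEV_CHANGEADDR on success */
	rtnl_unlock();
	return err;
}
#endif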
4907 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
4909 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
4912 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
4918 case SIOCGIFFLAGS: /* Get interface flags */
4919 ifr->ifr_flags = (short) dev_get_flags(dev);
4922 case SIOCGIFMETRIC: /* Get the metric on the interface
4923 (currently unused) */
4924 ifr->ifr_metric = 0;
4927 case SIOCGIFMTU: /* Get the MTU of a device */
4928 ifr->ifr_mtu = dev->mtu;
4933 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4935 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4936 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4937 ifr->ifr_hwaddr.sa_family = dev->type;
4945 ifr->ifr_map.mem_start = dev->mem_start;
4946 ifr->ifr_map.mem_end = dev->mem_end;
4947 ifr->ifr_map.base_addr = dev->base_addr;
4948 ifr->ifr_map.irq = dev->irq;
4949 ifr->ifr_map.dma = dev->dma;
4950 ifr->ifr_map.port = dev->if_port;
4954 ifr->ifr_ifindex = dev->ifindex;
4958 ifr->ifr_qlen = dev->tx_queue_len;
4962 /* dev_ioctl() should ensure this case
4974 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4976 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4979 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4980 const struct net_device_ops *ops;
4985 ops = dev->netdev_ops;
4988 case SIOCSIFFLAGS: /* Set interface flags */
4989 return dev_change_flags(dev, ifr->ifr_flags);
4991 case SIOCSIFMETRIC: /* Set the metric on the interface
4992 (currently unused) */
4995 case SIOCSIFMTU: /* Set the MTU of a device */
4996 return dev_set_mtu(dev, ifr->ifr_mtu);
4999 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
5001 case SIOCSIFHWBROADCAST:
5002 if (ifr->ifr_hwaddr.sa_family != dev->type)
5004 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
5005 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
5006 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
5010 if (ops->ndo_set_config) {
5011 if (!netif_device_present(dev))
5013 return ops->ndo_set_config(dev, &ifr->ifr_map);
5018 if (!ops->ndo_set_rx_mode ||
5019 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5021 if (!netif_device_present(dev))
5023 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
5026 if (!ops->ndo_set_rx_mode ||
5027 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5029 if (!netif_device_present(dev))
5031 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
5034 if (ifr->ifr_qlen < 0)
5036 dev->tx_queue_len = ifr->ifr_qlen;
5040 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
5041 return dev_change_name(dev, ifr->ifr_newname);
5044 err = net_hwtstamp_validate(ifr);
5050 * Unknown or private ioctl
5053 if ((cmd >= SIOCDEVPRIVATE &&
5054 cmd <= SIOCDEVPRIVATE + 15) ||
5055 cmd == SIOCBONDENSLAVE ||
5056 cmd == SIOCBONDRELEASE ||
5057 cmd == SIOCBONDSETHWADDR ||
5058 cmd == SIOCBONDSLAVEINFOQUERY ||
5059 cmd == SIOCBONDINFOQUERY ||
5060 cmd == SIOCBONDCHANGEACTIVE ||
5061 cmd == SIOCGMIIPHY ||
5062 cmd == SIOCGMIIREG ||
5063 cmd == SIOCSMIIREG ||
5064 cmd == SIOCBRADDIF ||
5065 cmd == SIOCBRDELIF ||
5066 cmd == SIOCSHWTSTAMP ||
5067 cmd == SIOCWANDEV) {
5069 if (ops->ndo_do_ioctl) {
5070 if (netif_device_present(dev))
5071 err = ops->ndo_do_ioctl(dev, ifr, cmd);
5083 * This function handles all "interface"-type I/O control requests. The actual
5084 * 'doing' part of this is dev_ifsioc above.
5088 * dev_ioctl - network device ioctl
5089 * @net: the applicable net namespace
5090 * @cmd: command to issue
5091 * @arg: pointer to a struct ifreq in user space
5093 * Issue ioctl functions to devices. This is normally called by the
5094 * user space syscall interfaces but can sometimes be useful for
5095 * other purposes. The return value is the return from the syscall if
5096 * positive or a negative errno code on error.
5099 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
5105 /* One special case: SIOCGIFCONF takes ifconf argument
5106 and requires shared lock, because it sleeps writing to user space. */
5110 if (cmd == SIOCGIFCONF) {
5112 ret = dev_ifconf(net, (char __user *) arg);
5116 if (cmd == SIOCGIFNAME)
5117 return dev_ifname(net, (struct ifreq __user *)arg);
5119 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
5122 ifr.ifr_name[IFNAMSIZ-1] = 0;
5124 colon = strchr(ifr.ifr_name, ':');
5129 * See which interface the caller is talking about.
5134 * These ioctl calls:
5135 * - can be done by all.
5136 * - atomic and do not require locking.
5147 dev_load(net, ifr.ifr_name);
5149 ret = dev_ifsioc_locked(net, &ifr, cmd);
5154 if (copy_to_user(arg, &ifr,
5155 sizeof(struct ifreq)))
5161 dev_load(net, ifr.ifr_name);
5163 ret = dev_ethtool(net, &ifr);
5168 if (copy_to_user(arg, &ifr,
5169 sizeof(struct ifreq)))
5175 * These ioctl calls:
5176 * - require superuser power.
5177 * - require strict serialization.
5183 if (!capable(CAP_NET_ADMIN))
5185 dev_load(net, ifr.ifr_name);
5187 ret = dev_ifsioc(net, &ifr, cmd);
5192 if (copy_to_user(arg, &ifr,
5193 sizeof(struct ifreq)))
5199 * These ioctl calls:
5200 * - require superuser power.
5201 * - require strict serialization.
5202 * - do not return a value
5212 case SIOCSIFHWBROADCAST:
5215 case SIOCBONDENSLAVE:
5216 case SIOCBONDRELEASE:
5217 case SIOCBONDSETHWADDR:
5218 case SIOCBONDCHANGEACTIVE:
5222 if (!capable(CAP_NET_ADMIN))
5225 case SIOCBONDSLAVEINFOQUERY:
5226 case SIOCBONDINFOQUERY:
5227 dev_load(net, ifr.ifr_name);
5229 ret = dev_ifsioc(net, &ifr, cmd);
5234 /* Get the per device memory space. We can add this but
5235 * currently do not support it */
5237 /* Set the per device memory buffer space.
5238 * Not applicable in our case */
5243 * Unknown or private ioctl.
5246 if (cmd == SIOCWANDEV ||
5247 (cmd >= SIOCDEVPRIVATE &&
5248 cmd <= SIOCDEVPRIVATE + 15)) {
5249 dev_load(net, ifr.ifr_name);
5251 ret = dev_ifsioc(net, &ifr, cmd);
5253 if (!ret && copy_to_user(arg, &ifr,
5254 sizeof(struct ifreq)))
5258 /* Take care of Wireless Extensions */
5259 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
5260 return wext_handle_ioctl(net, &ifr, cmd, arg);
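/*
 * Illustrative sketch (not part of this file): how one of these requests
 * typically originates in user space. A SIOCGIFMTU query only fills in
 * ifr_name and lets dev_ioctl()/dev_ifsioc_locked() copy the MTU back.
 * The interface name "eth0" is just an example; the block is guarded so
 * it is clearly reference-only user-space code.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
                printf("%s MTU is %d\n", ifr.ifr_name, ifr.ifr_mtu);
        close(fd);
        return 0;
}
#endif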
5267 * dev_new_index - allocate an ifindex
5268 * @net: the applicable net namespace
5270 * Returns a suitable unique value for a new device interface
5271 * number. The caller must hold the rtnl semaphore or the
5272 * dev_base_lock to be sure it remains unique.
5274 static int dev_new_index(struct net *net)
5280 if (!__dev_get_by_index(net, ifindex))
5285 /* Delayed registration/unregistration */
5286 static LIST_HEAD(net_todo_list);
5288 static void net_set_todo(struct net_device *dev)
5290 list_add_tail(&dev->todo_list, &net_todo_list);
5293 static void rollback_registered_many(struct list_head *head)
5295 struct net_device *dev, *tmp;
5297 BUG_ON(dev_boot_phase);
5300 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
5301 /* Some devices call without registering
5302 * for initialization unwind. Remove those
5303 * devices and proceed with the remaining.
5305 if (dev->reg_state == NETREG_UNINITIALIZED) {
5306 pr_debug("unregister_netdevice: device %s/%p never "
5307 "was registered\n", dev->name, dev);
5310 list_del(&dev->unreg_list);
5313 dev->dismantle = true;
5314 BUG_ON(dev->reg_state != NETREG_REGISTERED);
5317 /* If device is running, close it first. */
5318 dev_close_many(head);
5320 list_for_each_entry(dev, head, unreg_list) {
5321 /* And unlink it from device chain. */
5322 unlist_netdevice(dev);
5324 dev->reg_state = NETREG_UNREGISTERING;
5329 list_for_each_entry(dev, head, unreg_list) {
5330 /* Shutdown queueing discipline. */
5334 /* Notify protocols that we are about to destroy
5335 this device. They should clean all the things.
5337 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5339 if (!dev->rtnl_link_ops ||
5340 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5341 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5344 * Flush the unicast and multicast chains
5349 if (dev->netdev_ops->ndo_uninit)
5350 dev->netdev_ops->ndo_uninit(dev);
5352 /* Notifier chain MUST detach us from master device. */
5353 WARN_ON(dev->master);
5355 /* Remove entries from kobject tree */
5356 netdev_unregister_kobject(dev);
5359 /* Process any work delayed until the end of the batch */
5360 dev = list_first_entry(head, struct net_device, unreg_list);
5361 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
5365 list_for_each_entry(dev, head, unreg_list)
5369 static void rollback_registered(struct net_device *dev)
5373 list_add(&dev->unreg_list, &single);
5374 rollback_registered_many(&single);
5378 static u32 netdev_fix_features(struct net_device *dev, u32 features)
5380 /* Fix illegal checksum combinations */
5381 if ((features & NETIF_F_HW_CSUM) &&
5382 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5383 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
5384 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5387 if ((features & NETIF_F_NO_CSUM) &&
5388 (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5389 netdev_warn(dev, "mixed no checksumming and other settings.\n");
5390 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
5393 /* Fix illegal SG+CSUM combinations. */
5394 if ((features & NETIF_F_SG) &&
5395 !(features & NETIF_F_ALL_CSUM)) {
5397 "Dropping NETIF_F_SG since no checksum feature.\n");
5398 features &= ~NETIF_F_SG;
5401 /* TSO requires that SG is present as well. */
5402 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
5403 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
5404 features &= ~NETIF_F_ALL_TSO;
5407 /* TSO ECN requires that TSO is present as well. */
5408 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5409 features &= ~NETIF_F_TSO_ECN;
5411 /* Software GSO depends on SG. */
5412 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
5413 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
5414 features &= ~NETIF_F_GSO;
5417 /* UFO needs SG and checksumming */
5418 if (features & NETIF_F_UFO) {
5419 /* maybe split UFO into V4 and V6? */
5420 if (!((features & NETIF_F_GEN_CSUM) ||
5421 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5422 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5424 "Dropping NETIF_F_UFO since no checksum offload features.\n");
5425 features &= ~NETIF_F_UFO;
5428 if (!(features & NETIF_F_SG)) {
5430 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
5431 features &= ~NETIF_F_UFO;
5438 int __netdev_update_features(struct net_device *dev)
5445 features = netdev_get_wanted_features(dev);
5447 if (dev->netdev_ops->ndo_fix_features)
5448 features = dev->netdev_ops->ndo_fix_features(dev, features);
5450 /* driver might be less strict about feature dependencies */
5451 features = netdev_fix_features(dev, features);
5453 if (dev->features == features)
5456 netdev_dbg(dev, "Features changed: 0x%08x -> 0x%08x\n",
5457 dev->features, features);
5459 if (dev->netdev_ops->ndo_set_features)
5460 err = dev->netdev_ops->ndo_set_features(dev, features);
5462 if (unlikely(err < 0)) {
5464 "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
5465 err, features, dev->features);
5470 dev->features = features;
5476 * netdev_update_features - recalculate device features
5477 * @dev: the device to check
5479 * Recalculate dev->features set and send notifications if it
5480 * has changed. Should be called after driver- or hardware-dependent
5481 * conditions that influence the feature set might have changed.
5483 void netdev_update_features(struct net_device *dev)
5485 if (__netdev_update_features(dev))
5486 netdev_features_change(dev);
5488 EXPORT_SYMBOL(netdev_update_features);
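/*
 * Illustrative sketch (not part of this file): a typical caller. Drivers
 * whose offload capabilities depend on runtime state (here, a made-up
 * MTU dependency) call netdev_update_features() after that state
 * changes; ndo_change_mtu() runs under rtnl via dev_set_mtu(), which is
 * what netdev_update_features() requires.
 */
static int example_change_mtu(struct net_device *dev, int new_mtu)
{
        dev->mtu = new_mtu;
        /* Re-run ndo_fix_features()/netdev_fix_features() and notify
         * user space if dev->features actually changed.
         */
        netdev_update_features(dev);
        return 0;
}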
5491 * netdev_change_features - recalculate device features
5492 * @dev: the device to check
5494 * Recalculate dev->features set and send notifications even
5495 * if they have not changed. Should be called instead of
5496 * netdev_update_features() if dev->vlan_features might also have
5497 * changed, so that the changes can be propagated to stacked devices.
5500 void netdev_change_features(struct net_device *dev)
5502 __netdev_update_features(dev);
5503 netdev_features_change(dev);
5505 EXPORT_SYMBOL(netdev_change_features);
5508 * netif_stacked_transfer_operstate - transfer operstate
5509 * @rootdev: the root or lower level device to transfer state from
5510 * @dev: the device to transfer operstate to
5512 * Transfer operational state from root to device. This is normally
5513 * called when a stacking relationship exists between the root
5514 * device and the device (a leaf device).
5516 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5517 struct net_device *dev)
5519 if (rootdev->operstate == IF_OPER_DORMANT)
5520 netif_dormant_on(dev);
5522 netif_dormant_off(dev);
5524 if (netif_carrier_ok(rootdev)) {
5525 if (!netif_carrier_ok(dev))
5526 netif_carrier_on(dev);
5528 if (netif_carrier_ok(dev))
5529 netif_carrier_off(dev);
5532 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
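/*
 * Illustrative sketch (not part of this file): how a stacked driver
 * (vlan/macvlan style) might mirror carrier and dormant state from its
 * lower device out of a netdevice notifier. The private structure and
 * names are hypothetical; in this kernel the notifier's data pointer is
 * the lower net_device itself.
 */
struct example_stack_priv {
        struct net_device *upper_dev;   /* the stacked device we manage */
        struct net_device *lower_dev;   /* the real device underneath */
        struct notifier_block nb;
};

static int example_stack_event(struct notifier_block *nb,
                               unsigned long event, void *ptr)
{
        struct example_stack_priv *priv =
                container_of(nb, struct example_stack_priv, nb);
        struct net_device *lower = ptr;

        if (lower != priv->lower_dev)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UP:
        case NETDEV_DOWN:
        case NETDEV_CHANGE:
                /* Copy carrier/dormant state onto the stacked device. */
                netif_stacked_transfer_operstate(lower, priv->upper_dev);
                break;
        }
        return NOTIFY_DONE;
}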
5535 static int netif_alloc_rx_queues(struct net_device *dev)
5537 unsigned int i, count = dev->num_rx_queues;
5538 struct netdev_rx_queue *rx;
5542 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5544 pr_err("netdev: Unable to allocate %u rx queues.\n", count);
5549 for (i = 0; i < count; i++)
5555 static void netdev_init_one_queue(struct net_device *dev,
5556 struct netdev_queue *queue, void *_unused)
5558 /* Initialize queue lock */
5559 spin_lock_init(&queue->_xmit_lock);
5560 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5561 queue->xmit_lock_owner = -1;
5562 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
5566 static int netif_alloc_netdev_queues(struct net_device *dev)
5568 unsigned int count = dev->num_tx_queues;
5569 struct netdev_queue *tx;
5573 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
5575 pr_err("netdev: Unable to allocate %u tx queues.\n",
5581 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5582 spin_lock_init(&dev->tx_global_lock);
5588 * register_netdevice - register a network device
5589 * @dev: device to register
5591 * Take a completed network device structure and add it to the kernel
5592 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5593 * chain. 0 is returned on success. A negative errno code is returned
5594 * on a failure to set up the device, or if the name is a duplicate.
5596 * Callers must hold the rtnl semaphore. You may want
5597 * register_netdev() instead of this.
5600 * The locking appears insufficient to guarantee two parallel registers
5601 * will not get the same name.
5604 int register_netdevice(struct net_device *dev)
5607 struct net *net = dev_net(dev);
5609 BUG_ON(dev_boot_phase);
5614 /* When net_device's are persistent, this will be fatal. */
5615 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
5618 spin_lock_init(&dev->addr_list_lock);
5619 netdev_set_addr_lockdep_class(dev);
5623 ret = dev_get_valid_name(dev, dev->name);
5627 /* Init, if this function is available */
5628 if (dev->netdev_ops->ndo_init) {
5629 ret = dev->netdev_ops->ndo_init(dev);
5637 dev->ifindex = dev_new_index(net);
5638 if (dev->iflink == -1)
5639 dev->iflink = dev->ifindex;
5641 /* Transfer changeable features to wanted_features and enable
5642 * software offloads (GSO and GRO).
5644 dev->hw_features |= NETIF_F_SOFT_FEATURES;
5645 dev->features |= NETIF_F_SOFT_FEATURES;
5646 dev->wanted_features = dev->features & dev->hw_features;
5648 /* Turn on no cache copy if HW is doing checksum */
5649 dev->hw_features |= NETIF_F_NOCACHE_COPY;
5650 if ((dev->features & NETIF_F_ALL_CSUM) &&
5651 !(dev->features & NETIF_F_NO_CSUM)) {
5652 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5653 dev->features |= NETIF_F_NOCACHE_COPY;
5656 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
5658 dev->vlan_features |= NETIF_F_HIGHDMA;
5660 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5661 ret = notifier_to_errno(ret);
5665 ret = netdev_register_kobject(dev);
5668 dev->reg_state = NETREG_REGISTERED;
5670 __netdev_update_features(dev);
5673 * Default initial state at registry is that the
5674 * device is present.
5677 set_bit(__LINK_STATE_PRESENT, &dev->state);
5679 dev_init_scheduler(dev);
5681 list_netdevice(dev);
5683 /* Notify protocols, that a new device appeared. */
5684 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
5685 ret = notifier_to_errno(ret);
5687 rollback_registered(dev);
5688 dev->reg_state = NETREG_UNREGISTERED;
5691 * Prevent userspace races by waiting until the network
5692 * device is fully setup before sending notifications.
5694 if (!dev->rtnl_link_ops ||
5695 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5696 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5702 if (dev->netdev_ops->ndo_uninit)
5703 dev->netdev_ops->ndo_uninit(dev);
5706 EXPORT_SYMBOL(register_netdevice);
5709 * init_dummy_netdev - init a dummy network device for NAPI
5710 * @dev: device to init
5712 * This takes a network device structure and initializes the minimum
5713 * amount of fields so it can be used to schedule NAPI polls without
5714 * registering a full blown interface. This is to be used by drivers
5715 * that need to tie several hardware interfaces to a single NAPI
5716 * poll scheduler due to HW limitations.
5718 int init_dummy_netdev(struct net_device *dev)
5720 /* Clear everything. Note we don't initialize spinlocks
5721 * as they aren't supposed to be taken by any of the
5722 * NAPI code and this dummy netdev is supposed to be
5723 * only ever used for NAPI polls
5725 memset(dev, 0, sizeof(struct net_device));
5727 /* make sure we BUG if trying to hit standard
5728 * register/unregister code path
5730 dev->reg_state = NETREG_DUMMY;
5732 /* NAPI wants this */
5733 INIT_LIST_HEAD(&dev->napi_list);
5735 /* a dummy interface is started by default */
5736 set_bit(__LINK_STATE_PRESENT, &dev->state);
5737 set_bit(__LINK_STATE_START, &dev->state);
5739 /* Note: We don't allocate pcpu_refcnt for dummy devices,
5740 * because users of this 'device' don't need to change
5746 EXPORT_SYMBOL_GPL(init_dummy_netdev);
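/*
 * Illustrative sketch (not part of this file): a driver that has no
 * registered net_device of its own but still wants NAPI uses a dummy
 * netdev purely as a polling anchor. All names here are hypothetical.
 */
struct example_chan {
        struct net_device dummy_dev;    /* never registered */
        struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget)
{
        /* Hypothetical RX processing would go here. */
        napi_complete(napi);
        return 0;
}

static void example_chan_init(struct example_chan *chan)
{
        init_dummy_netdev(&chan->dummy_dev);
        netif_napi_add(&chan->dummy_dev, &chan->napi, example_poll, 64);
        napi_enable(&chan->napi);
        /* An interrupt handler may now call napi_schedule(&chan->napi). */
}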
5750 * register_netdev - register a network device
5751 * @dev: device to register
5753 * Take a completed network device structure and add it to the kernel
5754 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5755 * chain. 0 is returned on success. A negative errno code is returned
5756 * on a failure to set up the device, or if the name is a duplicate.
5758 * This is a wrapper around register_netdevice that takes the rtnl semaphore
5759 * and expands the device name if you passed a format string to alloc_netdev().
5762 int register_netdev(struct net_device *dev)
5767 err = register_netdevice(dev);
5771 EXPORT_SYMBOL(register_netdev);
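/*
 * Illustrative sketch (not part of this file): the usual probe-time
 * pairing of alloc_etherdev()/register_netdev() with its error path.
 * The private structure, ops table and random MAC are hypothetical
 * placeholders.
 */
struct example_priv {
        void __iomem *regs;             /* hypothetical MMIO mapping */
};

static int example_probe(struct device *parent,
                         const struct net_device_ops *ops)
{
        struct net_device *dev;
        int err;

        dev = alloc_etherdev(sizeof(struct example_priv));
        if (!dev)
                return -ENOMEM;

        dev->netdev_ops = ops;
        SET_NETDEV_DEV(dev, parent);            /* sysfs parent link */
        random_ether_addr(dev->dev_addr);       /* placeholder MAC */

        err = register_netdev(dev);             /* takes rtnl internally */
        if (err) {
                free_netdev(dev);
                return err;
        }
        return 0;
}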
5773 int netdev_refcnt_read(const struct net_device *dev)
5777 for_each_possible_cpu(i)
5778 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5781 EXPORT_SYMBOL(netdev_refcnt_read);
5784 * netdev_wait_allrefs - wait until all references are gone.
5786 * This is called when unregistering network devices.
5788 * Any protocol or device that holds a reference should register
5789 * for netdevice notification, and cleanup and put back the
5790 * reference if they receive an UNREGISTER event.
5791 * We can get stuck here if buggy protocols don't correctly call dev_put().
5794 static void netdev_wait_allrefs(struct net_device *dev)
5796 unsigned long rebroadcast_time, warning_time;
5799 linkwatch_forget_dev(dev);
5801 rebroadcast_time = warning_time = jiffies;
5802 refcnt = netdev_refcnt_read(dev);
5804 while (refcnt != 0) {
5805 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
5808 /* Rebroadcast unregister notification */
5809 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5810 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
5811 * should have already handled it the first time */
5813 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5815 /* We must not have linkwatch events
5816 * pending on unregister. If this
5817 * happens, we simply run the queue
5818 * unscheduled, resulting in a noop
5821 linkwatch_run_queue();
5826 rebroadcast_time = jiffies;
5831 refcnt = netdev_refcnt_read(dev);
5833 if (time_after(jiffies, warning_time + 10 * HZ)) {
5834 printk(KERN_EMERG "unregister_netdevice: "
5835 "waiting for %s to become free. Usage "
5838 warning_time = jiffies;
5847 * register_netdevice(x1);
5848 * register_netdevice(x2);
5850 * unregister_netdevice(y1);
5851 * unregister_netdevice(y2);
5857 * We are invoked by rtnl_unlock().
5858 * This allows us to deal with problems:
5859 * 1) We can delete sysfs objects which invoke hotplug
5860 * without deadlocking with linkwatch via keventd.
5861 * 2) Since we run with the RTNL semaphore not held, we can sleep
5862 * safely in order to wait for the netdev refcnt to drop to zero.
5864 * We must not return until all unregister events added during
5865 * the interval the lock was held have been completed.
5867 void netdev_run_todo(void)
5869 struct list_head list;
5871 /* Snapshot list, allow later requests */
5872 list_replace_init(&net_todo_list, &list);
5876 /* Wait for rcu callbacks to finish before attempting to drain
5877 * the device list. This usually avoids a 250ms wait.
5879 if (!list_empty(&list))
5882 while (!list_empty(&list)) {
5883 struct net_device *dev
5884 = list_first_entry(&list, struct net_device, todo_list);
5885 list_del(&dev->todo_list);
5887 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5888 printk(KERN_ERR "network todo '%s' but state %d\n",
5889 dev->name, dev->reg_state);
5894 dev->reg_state = NETREG_UNREGISTERED;
5896 on_each_cpu(flush_backlog, dev, 1);
5898 netdev_wait_allrefs(dev);
5901 BUG_ON(netdev_refcnt_read(dev));
5902 WARN_ON(rcu_access_pointer(dev->ip_ptr));
5903 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
5904 WARN_ON(dev->dn_ptr);
5906 if (dev->destructor)
5907 dev->destructor(dev);
5909 /* Free network device */
5910 kobject_put(&dev->dev.kobj);
5914 /* Convert net_device_stats to rtnl_link_stats64. They have the same
5915 * fields in the same order, with only the type differing.
5917 static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5918 const struct net_device_stats *netdev_stats)
5920 #if BITS_PER_LONG == 64
5921 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
5922 memcpy(stats64, netdev_stats, sizeof(*stats64));
5924 size_t i, n = sizeof(*stats64) / sizeof(u64);
5925 const unsigned long *src = (const unsigned long *)netdev_stats;
5926 u64 *dst = (u64 *)stats64;
5928 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
5929 sizeof(*stats64) / sizeof(u64));
5930 for (i = 0; i < n; i++)
5936 * dev_get_stats - get network device statistics
5937 * @dev: device to get statistics from
5938 * @storage: place to store stats
5940 * Get network statistics from device. Return @storage.
5941 * The device driver may provide its own method by setting
5942 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
5943 * otherwise the internal statistics structure is used.
5945 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5946 struct rtnl_link_stats64 *storage)
5948 const struct net_device_ops *ops = dev->netdev_ops;
5950 if (ops->ndo_get_stats64) {
5951 memset(storage, 0, sizeof(*storage));
5952 ops->ndo_get_stats64(dev, storage);
5953 } else if (ops->ndo_get_stats) {
5954 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
5956 netdev_stats_to_stats64(storage, &dev->stats);
5958 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
5961 EXPORT_SYMBOL(dev_get_stats);
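/*
 * Illustrative sketch (not part of this file): reading a device's 64-bit
 * counters. dev_get_stats() fills (and returns) the caller-provided
 * buffer, so the result can live on the stack.
 */
static void example_log_stats(struct net_device *dev)
{
        struct rtnl_link_stats64 temp;
        const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

        netdev_info(dev, "rx_packets=%llu rx_dropped=%llu\n",
                    (unsigned long long)stats->rx_packets,
                    (unsigned long long)stats->rx_dropped);
}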
5963 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
5965 struct netdev_queue *queue = dev_ingress_queue(dev);
5967 #ifdef CONFIG_NET_CLS_ACT
5970 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
5973 netdev_init_one_queue(dev, queue, NULL);
5974 queue->qdisc = &noop_qdisc;
5975 queue->qdisc_sleeping = &noop_qdisc;
5976 rcu_assign_pointer(dev->ingress_queue, queue);
5982 * alloc_netdev_mqs - allocate network device
5983 * @sizeof_priv: size of private data to allocate space for
5984 * @name: device name format string
5985 * @setup: callback to initialize device
5986 * @txqs: the number of TX subqueues to allocate
5987 * @rxqs: the number of RX subqueues to allocate
5989 * Allocates a struct net_device with private data area for driver use
5990 * and performs basic initialization. Also allocates subqueue structs
5991 * for each queue on the device.
5993 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
5994 void (*setup)(struct net_device *),
5995 unsigned int txqs, unsigned int rxqs)
5997 struct net_device *dev;
5999 struct net_device *p;
6001 BUG_ON(strlen(name) >= sizeof(dev->name));
6004 pr_err("alloc_netdev: Unable to allocate device "
6005 "with zero queues.\n");
6011 pr_err("alloc_netdev: Unable to allocate device "
6012 "with zero RX queues.\n");
6017 alloc_size = sizeof(struct net_device);
6019 /* ensure 32-byte alignment of private area */
6020 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
6021 alloc_size += sizeof_priv;
6023 /* ensure 32-byte alignment of whole construct */
6024 alloc_size += NETDEV_ALIGN - 1;
6026 p = kzalloc(alloc_size, GFP_KERNEL);
6028 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
6032 dev = PTR_ALIGN(p, NETDEV_ALIGN);
6033 dev->padded = (char *)dev - (char *)p;
6035 dev->pcpu_refcnt = alloc_percpu(int);
6036 if (!dev->pcpu_refcnt)
6039 if (dev_addr_init(dev))
6045 dev_net_set(dev, &init_net);
6047 dev->gso_max_size = GSO_MAX_SIZE;
6049 INIT_LIST_HEAD(&dev->napi_list);
6050 INIT_LIST_HEAD(&dev->unreg_list);
6051 INIT_LIST_HEAD(&dev->link_watch_list);
6052 dev->priv_flags = IFF_XMIT_DST_RELEASE;
6055 dev->num_tx_queues = txqs;
6056 dev->real_num_tx_queues = txqs;
6057 if (netif_alloc_netdev_queues(dev))
6061 dev->num_rx_queues = rxqs;
6062 dev->real_num_rx_queues = rxqs;
6063 if (netif_alloc_rx_queues(dev))
6067 strcpy(dev->name, name);
6068 dev->group = INIT_NETDEV_GROUP;
6076 free_percpu(dev->pcpu_refcnt);
6086 EXPORT_SYMBOL(alloc_netdev_mqs);
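/*
 * Illustrative sketch (not part of this file): allocating a multiqueue
 * Ethernet device directly with alloc_netdev_mqs(); alloc_etherdev_mq()
 * and friends are thin wrappers around this call. The queue counts are
 * arbitrary and no private area is requested.
 */
static struct net_device *example_alloc_8q(void)
{
        /* "eth%d" lets the core pick the next free name at register time. */
        return alloc_netdev_mqs(0, "eth%d", ether_setup, 8, 8);
}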
6089 * free_netdev - free network device
6092 * This function does the last stage of destroying an allocated device
6093 * interface. The reference to the device object is released.
6094 * If this is the last reference then it will be freed.
6096 void free_netdev(struct net_device *dev)
6098 struct napi_struct *p, *n;
6100 release_net(dev_net(dev));
6107 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
6109 /* Flush device addresses */
6110 dev_addr_flush(dev);
6112 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6115 free_percpu(dev->pcpu_refcnt);
6116 dev->pcpu_refcnt = NULL;
6118 /* Compatibility with error handling in drivers */
6119 if (dev->reg_state == NETREG_UNINITIALIZED) {
6120 kfree((char *)dev - dev->padded);
6124 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6125 dev->reg_state = NETREG_RELEASED;
6127 /* will free via device release */
6128 put_device(&dev->dev);
6130 EXPORT_SYMBOL(free_netdev);
6133 * synchronize_net - Synchronize with packet receive processing
6135 * Wait for packets currently being received to be done.
6136 * Does not block later packets from starting.
6138 void synchronize_net(void)
6141 if (rtnl_is_locked())
6142 synchronize_rcu_expedited();
6146 EXPORT_SYMBOL(synchronize_net);
6149 * unregister_netdevice_queue - remove device from the kernel
6153 * This function shuts down a device interface and removes it
6154 * from the kernel tables.
6155 * If head is not NULL, the device is queued to be unregistered later.
6157 * Callers must hold the rtnl semaphore. You may want
6158 * unregister_netdev() instead of this.
6161 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
6166 list_move_tail(&dev->unreg_list, head);
6168 rollback_registered(dev);
6169 /* Finish processing unregister after unlock */
6173 EXPORT_SYMBOL(unregister_netdevice_queue);
6176 * unregister_netdevice_many - unregister many devices
6177 * @head: list of devices
6179 void unregister_netdevice_many(struct list_head *head)
6181 struct net_device *dev;
6183 if (!list_empty(head)) {
6184 rollback_registered_many(head);
6185 list_for_each_entry(dev, head, unreg_list)
6189 EXPORT_SYMBOL(unregister_netdevice_many);
6192 * unregister_netdev - remove device from the kernel
6195 * This function shuts down a device interface and removes it
6196 * from the kernel tables.
6198 * This is just a wrapper for unregister_netdevice that takes
6199 * the rtnl semaphore. In general you want to use this and not
6200 * unregister_netdevice.
6202 void unregister_netdev(struct net_device *dev)
6205 unregister_netdevice(dev);
6208 EXPORT_SYMBOL(unregister_netdev);
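/*
 * Illustrative sketch (not part of this file): the teardown that matches
 * the register_netdev() probe sketch earlier -- unregister first (this
 * takes rtnl and, via netdev_run_todo(), waits for all references to
 * drop), then free the structure.
 */
static void example_remove(struct net_device *dev)
{
        unregister_netdev(dev);
        free_netdev(dev);
}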
6211 * dev_change_net_namespace - move device to a different network namespace
6213 * @net: network namespace
6214 * @pat: If not NULL name pattern to try if the current device name
6215 * is already taken in the destination network namespace.
6217 * This function shuts down a device interface and moves it
6218 * to a new network namespace. On success 0 is returned, on
6219 * a failure a negative errno code is returned.
6221 * Callers must hold the rtnl semaphore.
6224 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6230 /* Don't allow namespace local devices to be moved. */
6232 if (dev->features & NETIF_F_NETNS_LOCAL)
6235 /* Ensure the device has been registered */
6237 if (dev->reg_state != NETREG_REGISTERED)
6240 /* Get out if there is nothing to do */
6242 if (net_eq(dev_net(dev), net))
6245 /* Pick the destination device name, and ensure
6246 * we can use it in the destination network namespace.
6249 if (__dev_get_by_name(net, dev->name)) {
6250 /* We get here if we can't use the current device name */
6253 if (dev_get_valid_name(dev, pat) < 0)
6258 * And now a mini version of register_netdevice and unregister_netdevice.
6261 /* If device is running close it first. */
6264 /* And unlink it from device chain */
6266 unlist_netdevice(dev);
6270 /* Shutdown queueing discipline. */
6273 /* Notify protocols that we are about to destroy
6274 this device. They should clean all the things.
6276 Note that dev->reg_state stays at NETREG_REGISTERED.
6277 This is wanted because this way 8021q and macvlan know
6278 the device is just moving and can keep their slaves up.
6280 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6281 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
6282 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
6285 * Flush the unicast and multicast chains
6290 /* Actually switch the network namespace */
6291 dev_net_set(dev, net);
6293 /* If there is an ifindex conflict assign a new one */
6294 if (__dev_get_by_index(net, dev->ifindex)) {
6295 int iflink = (dev->iflink == dev->ifindex);
6296 dev->ifindex = dev_new_index(net);
6298 dev->iflink = dev->ifindex;
6301 /* Fixup kobjects */
6302 err = device_rename(&dev->dev, dev->name);
6305 /* Add the device back in the hashes */
6306 list_netdevice(dev);
6308 /* Notify protocols, that a new device appeared. */
6309 call_netdevice_notifiers(NETDEV_REGISTER, dev);
6312 * Prevent userspace races by waiting until the network
6313 * device is fully setup before sending notifications.
6315 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
6322 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
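/*
 * For reference: this is the operation behind "ip link set DEVICE netns
 * NETNS" (rtnetlink passes the requested namespace down to this
 * function); devices marked NETIF_F_NETNS_LOCAL, such as loopback,
 * refuse the move, as the check above shows.
 */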
6324 static int dev_cpu_callback(struct notifier_block *nfb,
6325 unsigned long action,
6328 struct sk_buff **list_skb;
6329 struct sk_buff *skb;
6330 unsigned int cpu, oldcpu = (unsigned long)ocpu;
6331 struct softnet_data *sd, *oldsd;
6333 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
6336 local_irq_disable();
6337 cpu = smp_processor_id();
6338 sd = &per_cpu(softnet_data, cpu);
6339 oldsd = &per_cpu(softnet_data, oldcpu);
6341 /* Find end of our completion_queue. */
6342 list_skb = &sd->completion_queue;
6344 list_skb = &(*list_skb)->next;
6345 /* Append completion queue from offline CPU. */
6346 *list_skb = oldsd->completion_queue;
6347 oldsd->completion_queue = NULL;
6349 /* Append output queue from offline CPU. */
6350 if (oldsd->output_queue) {
6351 *sd->output_queue_tailp = oldsd->output_queue;
6352 sd->output_queue_tailp = oldsd->output_queue_tailp;
6353 oldsd->output_queue = NULL;
6354 oldsd->output_queue_tailp = &oldsd->output_queue;
6356 /* Append NAPI poll list from offline CPU. */
6357 if (!list_empty(&oldsd->poll_list)) {
6358 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6359 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6362 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6365 /* Process offline CPU's input_pkt_queue */
6366 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
6368 input_queue_head_incr(oldsd);
6370 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
6372 input_queue_head_incr(oldsd);
6380 * netdev_increment_features - increment feature set by one
6381 * @all: current feature set
6382 * @one: new feature set
6383 * @mask: mask feature set
6385 * Computes a new feature set after adding a device with feature set
6386 * @one to the master device with current feature set @all. Will not
6387 * enable anything that is off in @mask. Returns the new feature set.
6389 u32 netdev_increment_features(u32 all, u32 one, u32 mask)
6391 if (mask & NETIF_F_GEN_CSUM)
6392 mask |= NETIF_F_ALL_CSUM;
6393 mask |= NETIF_F_VLAN_CHALLENGED;
6395 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6396 all &= one | ~NETIF_F_ALL_FOR_ALL;
6398 /* If device needs checksumming, downgrade to it. */
6399 if (all & (NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM))
6400 all &= ~NETIF_F_NO_CSUM;
6402 /* If one device supports hw checksumming, set for all. */
6403 if (all & NETIF_F_GEN_CSUM)
6404 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
6408 EXPORT_SYMBOL(netdev_increment_features);
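/*
 * Illustrative sketch (not part of this file): how an aggregating driver
 * (bonding style) might fold the feature sets of all of its slaves into
 * one set. The slave list structure and the starting/mask sets are
 * hypothetical.
 */
struct example_slave {
        struct list_head list;
        struct net_device *dev;
};

static u32 example_master_features(struct net_device *master,
                                   struct list_head *slaves)
{
        struct example_slave *slave;
        u32 features = NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_ALL_TSO;

        list_for_each_entry(slave, slaves, list)
                features = netdev_increment_features(features,
                                                     slave->dev->features,
                                                     master->features);
        return features;
}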
6410 static struct hlist_head *netdev_create_hash(void)
6413 struct hlist_head *hash;
6415 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6417 for (i = 0; i < NETDEV_HASHENTRIES; i++)
6418 INIT_HLIST_HEAD(&hash[i]);
6423 /* Initialize per network namespace state */
6424 static int __net_init netdev_init(struct net *net)
6426 INIT_LIST_HEAD(&net->dev_base_head);
6428 net->dev_name_head = netdev_create_hash();
6429 if (net->dev_name_head == NULL)
6432 net->dev_index_head = netdev_create_hash();
6433 if (net->dev_index_head == NULL)
6439 kfree(net->dev_name_head);
6445 * netdev_drivername - network driver for the device
6446 * @dev: network device
6448 * Determine network driver for device.
6450 const char *netdev_drivername(const struct net_device *dev)
6452 const struct device_driver *driver;
6453 const struct device *parent;
6454 const char *empty = "";
6456 parent = dev->dev.parent;
6460 driver = parent->driver;
6461 if (driver && driver->name)
6462 return driver->name;
6466 int __netdev_printk(const char *level, const struct net_device *dev,
6467 struct va_format *vaf)
6471 if (dev && dev->dev.parent)
6472 r = dev_printk(level, dev->dev.parent, "%s: %pV",
6473 netdev_name(dev), vaf);
6475 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
6477 r = printk("%s(NULL net_device): %pV", level, vaf);
6481 EXPORT_SYMBOL(__netdev_printk);
6483 int netdev_printk(const char *level, const struct net_device *dev,
6484 const char *format, ...)
6486 struct va_format vaf;
6490 va_start(args, format);
6495 r = __netdev_printk(level, dev, &vaf);
6500 EXPORT_SYMBOL(netdev_printk);
6502 #define define_netdev_printk_level(func, level) \
6503 int func(const struct net_device *dev, const char *fmt, ...) \
6506 struct va_format vaf; \
6509 va_start(args, fmt); \
6514 r = __netdev_printk(level, dev, &vaf); \
6519 EXPORT_SYMBOL(func);
6521 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
6522 define_netdev_printk_level(netdev_alert, KERN_ALERT);
6523 define_netdev_printk_level(netdev_crit, KERN_CRIT);
6524 define_netdev_printk_level(netdev_err, KERN_ERR);
6525 define_netdev_printk_level(netdev_warn, KERN_WARNING);
6526 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
6527 define_netdev_printk_level(netdev_info, KERN_INFO);
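/*
 * Illustrative sketch (not part of this file): typical driver use of the
 * per-level helpers generated above; the messages are made up.
 */
static void example_report_link(struct net_device *dev, bool link_up)
{
        if (link_up)
                netdev_info(dev, "link is up\n");
        else
                netdev_warn(dev, "link is down\n");
}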
6529 static void __net_exit netdev_exit(struct net *net)
6531 kfree(net->dev_name_head);
6532 kfree(net->dev_index_head);
6535 static struct pernet_operations __net_initdata netdev_net_ops = {
6536 .init = netdev_init,
6537 .exit = netdev_exit,
6540 static void __net_exit default_device_exit(struct net *net)
6542 struct net_device *dev, *aux;
6544 * Push all migratable network devices back to the
6545 * initial network namespace
6548 for_each_netdev_safe(net, dev, aux) {
6550 char fb_name[IFNAMSIZ];
6552 /* Ignore unmoveable devices (i.e. loopback) */
6553 if (dev->features & NETIF_F_NETNS_LOCAL)
6556 /* Leave virtual devices for the generic cleanup */
6557 if (dev->rtnl_link_ops)
6560 /* Push remaining network devices to init_net */
6561 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6562 err = dev_change_net_namespace(dev, &init_net, fb_name);
6564 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
6565 __func__, dev->name, err);
6572 static void __net_exit default_device_exit_batch(struct list_head *net_list)
6574 /* At exit all network devices must be removed from a network
6575 * namespace. Do this in the reverse order of registration.
6576 * Do this across as many network namespaces as possible to
6577 * improve batching efficiency.
6579 struct net_device *dev;
6581 LIST_HEAD(dev_kill_list);
6584 list_for_each_entry(net, net_list, exit_list) {
6585 for_each_netdev_reverse(net, dev) {
6586 if (dev->rtnl_link_ops)
6587 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
6589 unregister_netdevice_queue(dev, &dev_kill_list);
6592 unregister_netdevice_many(&dev_kill_list);
6593 list_del(&dev_kill_list);
6597 static struct pernet_operations __net_initdata default_device_ops = {
6598 .exit = default_device_exit,
6599 .exit_batch = default_device_exit_batch,
6603 * Initialize the DEV module. At boot time this walks the device list and
6604 * unhooks any devices that fail to initialise (normally hardware not
6605 * present) and leaves us with a valid list of present and active devices.
6610 * This is called single threaded during boot, so no need
6611 * to take the rtnl semaphore.
6613 static int __init net_dev_init(void)
6615 int i, rc = -ENOMEM;
6617 BUG_ON(!dev_boot_phase);
6619 if (dev_proc_init())
6622 if (netdev_kobject_init())
6625 INIT_LIST_HEAD(&ptype_all);
6626 for (i = 0; i < PTYPE_HASH_SIZE; i++)
6627 INIT_LIST_HEAD(&ptype_base[i]);
6629 if (register_pernet_subsys(&netdev_net_ops))
6633 * Initialise the packet receive queues.
6636 for_each_possible_cpu(i) {
6637 struct softnet_data *sd = &per_cpu(softnet_data, i);
6639 memset(sd, 0, sizeof(*sd));
6640 skb_queue_head_init(&sd->input_pkt_queue);
6641 skb_queue_head_init(&sd->process_queue);
6642 sd->completion_queue = NULL;
6643 INIT_LIST_HEAD(&sd->poll_list);
6644 sd->output_queue = NULL;
6645 sd->output_queue_tailp = &sd->output_queue;
6647 sd->csd.func = rps_trigger_softirq;
6653 sd->backlog.poll = process_backlog;
6654 sd->backlog.weight = weight_p;
6655 sd->backlog.gro_list = NULL;
6656 sd->backlog.gro_count = 0;
6661 /* The loopback device is special: if any other network device
6662 * is present in a network namespace, the loopback device must
6663 * be present too. Since we now dynamically allocate and free the
6664 * loopback device, ensure this invariant is maintained by
6665 * keeping the loopback device as the first device on the
6666 * list of network devices, so that the loopback device is the
6667 * first device that appears and the last network device that disappears.
6670 if (register_pernet_device(&loopback_net_ops))
6673 if (register_pernet_device(&default_device_ops))
6676 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6677 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
6679 hotcpu_notifier(dev_cpu_callback, 0);
6687 subsys_initcall(net_dev_init);
6689 static int __init initialize_hashrnd(void)
6691 get_random_bytes(&hashrnd, sizeof(hashrnd));
6695 late_initcall_sync(initialize_hashrnd);