/*
 * 	NET3	Protocol independent device support routines.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki   :	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *              Pekka Riikonen  :	Netdev boot-time settings code
 *              Andrew Morton   :       Make unregister_netdevice wait
 *              			indefinitely on dev->refcnt
 * 		J Hadi Salim	:	- Backlog queue sampling
 *				        - netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>

#include "net-sysfs.h"
/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16? Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE: That is no longer true with the addition of VLAN tags.  Not
 *	      sure which should go first, but I bet it won't make much
 *	      difference if we are running VLANs.  The good news is that
 *	      this protocol won't be in the list unless compiled in, so
 *	      the average user (w/out VLANs) will not be adversely affected.
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */
#ifdef CONFIG_NET_DMA
struct net_dma {
	struct dma_client	client;
	spinlock_t		lock;
	cpumask_t		channel_mask;
	struct dma_chan		**channels;
};

static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
	enum dma_state state);

static struct net_dma net_dma = {
	.client = {
		.event_callback = netdev_dma_event,
	},
};
#endif
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example, register_netdevice() and unregister_netdevice(),
 * which must be called with the rtnl semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);

#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}
/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}
static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
	 ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
	 "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	were first on the list, it would have no way to sense that the
 *	packet is cloned and should be copied-on-write, so it would
 *	change it and subsequent readers would get a broken packet.
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
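
/*
 * Usage sketch (illustrative names, not part of this file): a protocol
 * module keeps a static packet_type and registers it once at init time:
 *
 *	static struct packet_type my_ptype __read_mostly = {
 *		.type = htons(ETH_P_IP),
 *		.func = my_rcv,		(my_rcv is a hypothetical handler)
 *	};
 *	dev_add_pack(&my_ptype);
 *
 * The structure must stay allocated until dev_remove_pack() returns.
 */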
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}

/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
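
/*
 * Example (sketch, based on the parsing above): a kernel command line of
 * the form
 *
 *	netdev=9,0x300,0,0,eth0
 *
 * would be parsed by get_options() into irq=9 and base_addr=0x300 for the
 * name "eth0", stored in dev_boot_setup, and picked up later by
 * netdev_boot_setup_check() during device probing.
 */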
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
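
/*
 * Usage sketch: the reference taken by dev_get_by_name() must be dropped
 * with dev_put() when the caller is finished with the device, e.g.
 *
 *	struct net_device *dev = dev_get_by_name(net, "eth0");
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */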
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
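
/*
 * Usage sketch: a driver that does not care which unit number it gets can
 * pass a format string and let the core pick the first free slot:
 *
 *	err = dev_alloc_name(dev, "dummy%d");
 *
 * On success err is the assigned unit number and dev->name holds the
 * resulting name (e.g. "dummy0").
 */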
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device. A format string such as "eth%d" may
 *	be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	err = device_rename(&dev->dev, dev->name);
	if (err) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return err;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from @alias
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);
/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}
/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret = 0;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (dev->validate_addr)
		ret = dev->validate_addr(dev);

	if (!ret && dev->open)
		ret = dev->open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */
	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}
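
/*
 * Usage sketch: both dev_open() and dev_close() must run under the RTNL
 * lock (see the ASSERT_RTNL() calls above); out-of-line callers do e.g.
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 */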
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);
static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 * 	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
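
/*
 * Usage sketch (illustrative names): a subsystem watching for device
 * events registers a notifier_block once at init time:
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			...;
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *	register_netdevice_notifier(&my_nb);
 */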
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	net_timestamp(skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}
static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = sd->output_queue;
	sd->output_queue = q;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
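
/*
 * Usage sketch: drivers commonly pair these in their power management
 * hooks, calling netif_device_detach() from the suspend callback before
 * the hardware is powered down, and netif_device_attach() from the
 * resume callback once the hardware is usable again.
 */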
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	BUG_ON(skb_shinfo(skb)->frag_list);

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}

EXPORT_SYMBOL(skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. IOMMU is present and allows mapping all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
			return 1;

#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		}

		return dev->hard_start_xmit(skb, dev);
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;
		int rc;

		skb->next = nskb->next;
		nskb->next = NULL;
		rc = dev->hard_start_xmit(nskb, dev);
		if (unlikely(rc)) {
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

	skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
	kfree_skb(skb);
	return 0;
}
static u32 simple_tx_hashrnd;
static int simple_tx_hashrnd_initialized = 0;

static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
{
	u32 addr1, addr2, ports;
	u32 hash, ihl;
	u8 ip_proto = 0;

	if (unlikely(!simple_tx_hashrnd_initialized)) {
		get_random_bytes(&simple_tx_hashrnd, 4);
		simple_tx_hashrnd_initialized = 1;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip_proto = ip_hdr(skb)->protocol;
		addr1 = ip_hdr(skb)->saddr;
		addr2 = ip_hdr(skb)->daddr;
		ihl = ip_hdr(skb)->ihl;
		break;
	case htons(ETH_P_IPV6):
		ip_proto = ipv6_hdr(skb)->nexthdr;
		addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
		addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
		ihl = (40 >> 2);
		break;
	default:
		return 0;
	}

	switch (ip_proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_DCCP:
	case IPPROTO_ESP:
	case IPPROTO_AH:
	case IPPROTO_SCTP:
	case IPPROTO_UDPLITE:
		ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
		break;

	default:
		ports = 0;
		break;
	}

	hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd);

	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}

static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	u16 queue_index = 0;

	if (dev->select_queue)
		queue_index = dev->select_queue(dev, skb);
	else if (dev->real_num_tx_queues > 1)
		queue_index = simple_tx_hash(dev, skb);

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* GSO will handle the following emulations directly. */
	if (netif_needs_gso(dev, skb))
		goto gso;

	if (skb_shinfo(skb)->frag_list &&
	    !(dev->features & NETIF_F_FRAGLIST) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* Fragmented skb is linearized if device does not support SG,
	 * or if at least one of fragments is in highmem and device
	 * does not support DMA from it.
	 */
	if (skb_shinfo(skb)->nr_frags &&
	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* If packet is not checksummed and device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));
		if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
			goto out_kfree_skb;
	}

gso:
	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	txq = dev_pick_tx(dev, skb);
	q = rcu_dereference(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	if (q->enqueue) {
		spinlock_t *root_lock = qdisc_lock(q);

		spin_lock(root_lock);

		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
			kfree_skb(skb);
			rc = NET_XMIT_DROP;
		} else {
			rc = qdisc_enqueue_root(skb, q);
			qdisc_run(q);
		}
		spin_unlock(root_lock);

		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and shoot the lock. It is not prone to deadlocks.
	   Either shoot the noqueue qdisc; it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_tx_queue_stopped(txq)) {
				rc = 0;
				if (!dev_hard_start_xmit(skb, dev, txq)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

out_kfree_skb:
	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
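
/*
 * Caller sketch: code injecting a fully built frame sets skb->dev and the
 * link-layer header before handing the skb off, e.g.
 *
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_IP);
 *	dev_queue_xmit(skb);
 *
 * The skb is consumed regardless of the return value, per the comment
 * above.
 */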
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };

/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	struct softnet_data *queue;
	unsigned long flags;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	/*
	 * The code is rearranged so that the path is the shortest when
	 * the CPU is congested, but it is still operating.
	 */
	local_irq_save(flags);
	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
enqueue:
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		napi_schedule(&queue->backlog);
		goto enqueue;
	}

	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}
int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}

EXPORT_SYMBOL(netif_rx_ni);
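
/*
 * Usage sketch: netif_rx() is the classic entry point for non-NAPI
 * drivers; a receive interrupt handler typically does
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 *
 * From process context, use netif_rx_ni() instead, so that the softirq
 * raised above gets a chance to run.
 */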
static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
/* These hooks are defined here for ATM */
struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
						unsigned char *addr);
void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;

/*
 * If bridge module is loaded call bridging hook.
 *  returns NULL if packet was consumed.
 */
struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
					struct sk_buff *skb) __read_mostly;
static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
					    struct packet_type **pt_prev, int *ret,
					    struct net_device *orig_dev)
{
	struct net_bridge_port *port;

	if (skb->pkt_type == PACKET_LOOPBACK ||
	    (port = rcu_dereference(skb->dev->br_port)) == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	return br_handle_frame_hook(port, skb);
}
#else
#define handle_bridge(skb, pt_prev, ret, orig_dev)	(skb)
#endif

#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);

static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
					     struct packet_type **pt_prev,
					     int *ret,
					     struct net_device *orig_dev)
{
	if (skb->dev->macvlan_port == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}
	return macvlan_handle_frame_hook(skb);
}
#else
#define handle_macvlan(skb, pt_prev, ret, orig_dev)	(skb)
#endif
#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? Otherwise we pay some useless instructions
 * (a compare and two stores) right now if we don't have it on
 * but do have CONFIG_NET_CLS_ACT.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	struct netdev_queue *rxq;
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (MAX_RED_LOOP < ttl++) {
		printk(KERN_WARNING
		       "Redir loop detected Dropping packet (%d->%d)\n",
		       skb->iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	rxq = &dev->rx_queue;

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	if (skb->dev->rx_queue.qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	} else {
		/* Huh? Why does turning on AF_PACKET affect this? */
		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
	}

	switch (ing_filter(skb)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif
/**
 *	netif_nit_deliver - deliver received packets to network taps
 *	@skb: buffer
 *
 *	This function is used to deliver incoming packets to network
 *	taps. It should be used when the normal netif_receive_skb path
 *	is bypassed, for example because of VLAN acceleration.
 */
void netif_nit_deliver(struct sk_buff *skb)
{
	struct packet_type *ptype;

	if (list_empty(&ptype_all))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev)
			deliver_skb(skb, ptype, skb->dev);
	}
	rcu_read_unlock();
}
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	struct net_device *orig_dev;
	struct net_device *null_or_orig;
	int ret = NET_RX_DROP;
	__be16 type;

	/* if we've gotten here through NAPI, check netpoll */
	if (netpoll_receive_skb(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	if (!skb->iif)
		skb->iif = skb->dev->ifindex;

	null_or_orig = NULL;
	orig_dev = skb->dev;
	if (orig_dev->master) {
		if (skb_bond_should_drop(skb))
			null_or_orig = orig_dev; /* deliver only exact match */
		else
			skb->dev = orig_dev->master;
	}

	__get_cpu_var(netdev_rx_stat).total++;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	pt_prev = NULL;

	rcu_read_lock();

	/* Don't receive packets in an exiting network namespace */
	if (!net_alive(dev_net(skb->dev)))
		goto out;

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
		    ptype->dev == orig_dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
ncls:
#endif

	skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
	skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type &&
		    (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
		     ptype->dev == orig_dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	rcu_read_unlock();
	return ret;
}
/* Network device is going away, flush any packets still pending  */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
		if (skb->dev == dev) {
			__skb_unlink(skb, &queue->input_pkt_queue);
			kfree_skb(skb);
		}
}

static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	unsigned long start_time = jiffies;

	napi->weight = weight_p;
	do {
		struct sk_buff *skb;

		local_irq_disable();
		skb = __skb_dequeue(&queue->input_pkt_queue);
		if (!skb) {
			__napi_complete(napi);
			local_irq_enable();
			break;
		}
		local_irq_enable();

		netif_receive_skb(skb);
	} while (++work < quota && jiffies == start_time);

	return work;
}
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
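
/*
 * Driver sketch: NAPI drivers rarely call __napi_schedule() directly;
 * the usual interrupt-handler pattern is the guarded form, e.g.
 *
 *	if (napi_schedule_prep(&priv->napi)) {
 *		...mask device rx interrupts...
 *		__napi_schedule(&priv->napi);
 *	}
 *
 * (priv is a hypothetical driver-private structure embedding a
 * napi_struct.)
 */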
static void net_rx_action(struct softirq_action *h)
{
	struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
	unsigned long start_time = jiffies;
	int budget = netdev_budget;
	void *have;

	local_irq_disable();

	while (!list_empty(list)) {
		struct napi_struct *n;
		int work, weight;

		/* If softirq window is exhausted then punt.
		 *
		 * Note that this is a slight policy change from the
		 * previous NAPI code, which would allow up to 2
		 * jiffies to pass before breaking out.  The test
		 * used to be "jiffies - start_time > 1".
		 */
		if (unlikely(budget <= 0 || jiffies != start_time))
			goto softnet_break;

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		n = list_entry(list->next, struct napi_struct, poll_list);

		have = netpoll_poll_lock(n);

		weight = n->weight;

		/* This NAPI_STATE_SCHED test is for avoiding a race
		 * with netpoll's poll_napi().  Only the entity which
		 * obtains the lock and sees NAPI_STATE_SCHED set will
		 * actually make the ->poll() call.  Therefore we avoid
		 * accidentally calling ->poll() when NAPI is not scheduled.
		 */
		work = 0;
		if (test_bit(NAPI_STATE_SCHED, &n->state))
			work = n->poll(n, weight);

		WARN_ON_ONCE(work > weight);

		budget -= work;

		local_irq_disable();

		/* Drivers must not modify the NAPI state if they
		 * consume the entire weight.  In such cases this code
		 * still "owns" the NAPI instance and therefore can
		 * move the instance around on the list at-will.
		 */
		if (unlikely(work == weight)) {
			if (unlikely(napi_disable_pending(n)))
				__napi_complete(n);
			else
				list_move_tail(&n->poll_list, list);
		}

		netpoll_poll_unlock(have);
	}
out:
	local_irq_enable();

#ifdef CONFIG_NET_DMA
	/*
	 * There may not be any more sk_buffs coming right now, so push
	 * any pending DMA copies to hardware
	 */
	if (!cpus_empty(net_dma.channel_mask)) {
		int chan_idx;
		for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
			struct dma_chan *chan = net_dma.channels[chan_idx];
			if (chan)
				dma_async_memcpy_issue_pending(chan);
		}
	}
#endif

	return;

softnet_break:
	__get_cpu_var(netdev_rx_stat).time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	goto out;
}
2457 static gifconf_func_t * gifconf_list [NPROTO];
2460 * register_gifconf - register a SIOCGIF handler
2461 * @family: Address family
2462 * @gifconf: Function handler
2464 * Register protocol dependent address dumping routines. The handler
2465 * that is passed must not be freed or reused until it has been replaced
2466 * by another handler.
2468 int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
2470 if (family >= NPROTO)
2472 gifconf_list[family] = gifconf;
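/*
 * Example (illustration only): address families install their handler
 * once at init time; IPv4 does effectively this from devinet_init().
 */
#if 0
	register_gifconf(PF_INET, inet_gifconf);
#endif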
2478 * Map an interface index to its name (SIOCGIFNAME)
2482 * We need this ioctl for efficient implementation of the
2483 * if_indextoname() function required by the IPv6 API. Without
2484 * it, we would have to search all the interfaces to find a match.
2488 static int dev_ifname(struct net *net, struct ifreq __user *arg)
2490 struct net_device *dev;
2494 * Fetch the caller's info block.
2497 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2500 read_lock(&dev_base_lock);
2501 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
2503 read_unlock(&dev_base_lock);
2507 strcpy(ifr.ifr_name, dev->name);
2508 read_unlock(&dev_base_lock);
2510 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
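/*
 * Example (user space, illustration only): how SIOCGIFNAME is consumed;
 * glibc's if_indextoname() boils down to this ioctl.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

static int print_ifname(int ifindex)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_ifindex = ifindex;
	if (ioctl(fd, SIOCGIFNAME, &ifr) == 0)
		printf("ifindex %d is %s\n", ifindex, ifr.ifr_name);
	close(fd);
	return 0;
}
#endif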
2516 * Perform a SIOCGIFCONF call. This structure will change
2517 * size eventually, and there is nothing I can do about it.
2518 * Thus we will need a 'compatibility mode'.
2521 static int dev_ifconf(struct net *net, char __user *arg)
2524 struct net_device *dev;
2531 * Fetch the caller's info block.
2534 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2541 * Loop over the interfaces, and write an info block for each.
2545 for_each_netdev(net, dev) {
2546 for (i = 0; i < NPROTO; i++) {
2547 if (gifconf_list[i]) {
2550 done = gifconf_list[i](dev, NULL, 0);
2552 done = gifconf_list[i](dev, pos + total,
2562 * All done. Write the updated control block back to the caller.
2564 ifc.ifc_len = total;
2567 * Both BSD and Solaris return 0 here, so we do too.
2569 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
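/*
 * Example (user space, illustration only): the usual two-pass use of
 * SIOCGIFCONF; a NULL ifc_buf makes the kernel report only the length
 * needed, matching the NULL-position branch above.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

static void list_interfaces(void)
{
	struct ifconf ifc;
	struct ifreq *ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int i, n;

	if (fd < 0)
		return;
	ifc.ifc_buf = NULL;		/* first pass: length only */
	ifc.ifc_len = 0;
	ioctl(fd, SIOCGIFCONF, &ifc);

	ifc.ifc_buf = malloc(ifc.ifc_len);
	ioctl(fd, SIOCGIFCONF, &ifc);	/* second pass fills the buffer */

	ifr = ifc.ifc_req;
	n = ifc.ifc_len / sizeof(struct ifreq);
	for (i = 0; i < n; i++)
		printf("%s\n", ifr[i].ifr_name);

	free(ifc.ifc_buf);
	close(fd);
}
#endif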
2572 #ifdef CONFIG_PROC_FS
2574 * This is invoked by the /proc filesystem handler to display a device in detail.
2577 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
2578 __acquires(dev_base_lock)
2580 struct net *net = seq_file_net(seq);
2582 struct net_device *dev;
2584 read_lock(&dev_base_lock);
2586 return SEQ_START_TOKEN;
2589 for_each_netdev(net, dev)
2596 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2598 struct net *net = seq_file_net(seq);
2600 return v == SEQ_START_TOKEN ?
2601 first_net_device(net) : next_net_device((struct net_device *)v);
2604 void dev_seq_stop(struct seq_file *seq, void *v)
2605 __releases(dev_base_lock)
2607 read_unlock(&dev_base_lock);
2610 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2612 struct net_device_stats *stats = dev->get_stats(dev);
2614 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2615 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2616 dev->name, stats->rx_bytes, stats->rx_packets,
2618 stats->rx_dropped + stats->rx_missed_errors,
2619 stats->rx_fifo_errors,
2620 stats->rx_length_errors + stats->rx_over_errors +
2621 stats->rx_crc_errors + stats->rx_frame_errors,
2622 stats->rx_compressed, stats->multicast,
2623 stats->tx_bytes, stats->tx_packets,
2624 stats->tx_errors, stats->tx_dropped,
2625 stats->tx_fifo_errors, stats->collisions,
2626 stats->tx_carrier_errors +
2627 stats->tx_aborted_errors +
2628 stats->tx_window_errors +
2629 stats->tx_heartbeat_errors,
2630 stats->tx_compressed);
2634 * Called from the PROCfs module. This now uses the new arbitrary sized
2635 * /proc/net interface to create /proc/net/dev
2637 static int dev_seq_show(struct seq_file *seq, void *v)
2639 if (v == SEQ_START_TOKEN)
2640 seq_puts(seq, "Inter-| Receive "
2642 " face |bytes packets errs drop fifo frame "
2643 "compressed multicast|bytes packets errs "
2644 "drop fifo colls carrier compressed\n");
2646 dev_seq_printf_stats(seq, v);
2650 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2652 struct netif_rx_stats *rc = NULL;
2654 while (*pos < nr_cpu_ids)
2655 if (cpu_online(*pos)) {
2656 rc = &per_cpu(netdev_rx_stat, *pos);
2663 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2665 return softnet_get_online(pos);
2668 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2671 return softnet_get_online(pos);
2674 static void softnet_seq_stop(struct seq_file *seq, void *v)
2678 static int softnet_seq_show(struct seq_file *seq, void *v)
2680 struct netif_rx_stats *s = v;
2682 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
2683 s->total, s->dropped, s->time_squeeze, 0,
2684 0, 0, 0, 0, /* was fastroute */
2689 static const struct seq_operations dev_seq_ops = {
2690 .start = dev_seq_start,
2691 .next = dev_seq_next,
2692 .stop = dev_seq_stop,
2693 .show = dev_seq_show,
2696 static int dev_seq_open(struct inode *inode, struct file *file)
2698 return seq_open_net(inode, file, &dev_seq_ops,
2699 sizeof(struct seq_net_private));
2702 static const struct file_operations dev_seq_fops = {
2703 .owner = THIS_MODULE,
2704 .open = dev_seq_open,
2706 .llseek = seq_lseek,
2707 .release = seq_release_net,
2710 static const struct seq_operations softnet_seq_ops = {
2711 .start = softnet_seq_start,
2712 .next = softnet_seq_next,
2713 .stop = softnet_seq_stop,
2714 .show = softnet_seq_show,
2717 static int softnet_seq_open(struct inode *inode, struct file *file)
2719 return seq_open(file, &softnet_seq_ops);
2722 static const struct file_operations softnet_seq_fops = {
2723 .owner = THIS_MODULE,
2724 .open = softnet_seq_open,
2726 .llseek = seq_lseek,
2727 .release = seq_release,
2730 static void *ptype_get_idx(loff_t pos)
2732 struct packet_type *pt = NULL;
2736 list_for_each_entry_rcu(pt, &ptype_all, list) {
2742 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
2743 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
2752 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
2756 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
2759 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2761 struct packet_type *pt;
2762 struct list_head *nxt;
2766 if (v == SEQ_START_TOKEN)
2767 return ptype_get_idx(0);
2770 nxt = pt->list.next;
2771 if (pt->type == htons(ETH_P_ALL)) {
2772 if (nxt != &ptype_all)
2775 nxt = ptype_base[0].next;
2777 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
2779 while (nxt == &ptype_base[hash]) {
2780 if (++hash >= PTYPE_HASH_SIZE)
2782 nxt = ptype_base[hash].next;
2785 return list_entry(nxt, struct packet_type, list);
2788 static void ptype_seq_stop(struct seq_file *seq, void *v)
2794 static void ptype_seq_decode(struct seq_file *seq, void *sym)
2796 #ifdef CONFIG_KALLSYMS
2797 unsigned long offset = 0, symsize;
2798 const char *symname;
2802 symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
2809 modname = delim = "";
2810 seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
2816 seq_printf(seq, "[%p]", sym);
2819 static int ptype_seq_show(struct seq_file *seq, void *v)
2821 struct packet_type *pt = v;
2823 if (v == SEQ_START_TOKEN)
2824 seq_puts(seq, "Type Device Function\n");
2825 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
2826 if (pt->type == htons(ETH_P_ALL))
2827 seq_puts(seq, "ALL ");
2829 seq_printf(seq, "%04x", ntohs(pt->type));
2831 seq_printf(seq, " %-8s ",
2832 pt->dev ? pt->dev->name : "");
2833 ptype_seq_decode(seq, pt->func);
2834 seq_putc(seq, '\n');
2840 static const struct seq_operations ptype_seq_ops = {
2841 .start = ptype_seq_start,
2842 .next = ptype_seq_next,
2843 .stop = ptype_seq_stop,
2844 .show = ptype_seq_show,
2847 static int ptype_seq_open(struct inode *inode, struct file *file)
2849 return seq_open_net(inode, file, &ptype_seq_ops,
2850 sizeof(struct seq_net_private));
2853 static const struct file_operations ptype_seq_fops = {
2854 .owner = THIS_MODULE,
2855 .open = ptype_seq_open,
2857 .llseek = seq_lseek,
2858 .release = seq_release_net,
2862 static int __net_init dev_proc_net_init(struct net *net)
2866 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
2868 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
2870 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
2873 if (wext_proc_init(net))
2879 proc_net_remove(net, "ptype");
2881 proc_net_remove(net, "softnet_stat");
2883 proc_net_remove(net, "dev");
2887 static void __net_exit dev_proc_net_exit(struct net *net)
2889 wext_proc_exit(net);
2891 proc_net_remove(net, "ptype");
2892 proc_net_remove(net, "softnet_stat");
2893 proc_net_remove(net, "dev");
2896 static struct pernet_operations __net_initdata dev_proc_ops = {
2897 .init = dev_proc_net_init,
2898 .exit = dev_proc_net_exit,
2901 static int __init dev_proc_init(void)
2903 return register_pernet_subsys(&dev_proc_ops);
2906 #define dev_proc_init() 0
2907 #endif /* CONFIG_PROC_FS */
2911 * netdev_set_master - set up master/slave pair
2912 * @slave: slave device
2913 * @master: new master device
2915 * Changes the master device of the slave. Pass %NULL to break the
2916 * bonding. The caller must hold the RTNL semaphore. On a failure
2917 * a negative errno code is returned. On success the reference counts
2918 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2919 * function returns zero.
2921 int netdev_set_master(struct net_device *slave, struct net_device *master)
2923 struct net_device *old = slave->master;
2933 slave->master = master;
2941 slave->flags |= IFF_SLAVE;
2943 slave->flags &= ~IFF_SLAVE;
2945 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
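/*
 * Example (illustration only; bond_dev and slave_dev are hypothetical):
 * a bonding-style enslave path using the helper above.
 */
#if 0
static int example_enslave(struct net_device *bond_dev,
			   struct net_device *slave_dev)
{
	int err;

	ASSERT_RTNL();		/* netdev_set_master() requires the RTNL */
	err = netdev_set_master(slave_dev, bond_dev);
	if (err)
		return err;
	/* Later, still under the RTNL, pass NULL to break the pair. */
	return 0;
}
#endif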
2949 static int __dev_set_promiscuity(struct net_device *dev, int inc)
2951 unsigned short old_flags = dev->flags;
2955 dev->flags |= IFF_PROMISC;
2956 dev->promiscuity += inc;
2957 if (dev->promiscuity == 0) {
2960 * If inc causes overflow, untouch promisc and return error.
2963 dev->flags &= ~IFF_PROMISC;
2965 dev->promiscuity -= inc;
2966 printk(KERN_WARNING "%s: promiscuity touches roof, "
2967 "set promiscuity failed, promiscuity feature "
2968 "of device might be broken.\n", dev->name);
2972 if (dev->flags != old_flags) {
2973 printk(KERN_INFO "device %s %s promiscuous mode\n",
2974 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2977 audit_log(current->audit_context, GFP_ATOMIC,
2978 AUDIT_ANOM_PROMISCUOUS,
2979 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
2980 dev->name, (dev->flags & IFF_PROMISC),
2981 (old_flags & IFF_PROMISC),
2982 audit_get_loginuid(current),
2983 current->uid, current->gid,
2984 audit_get_sessionid(current));
2986 if (dev->change_rx_flags)
2987 dev->change_rx_flags(dev, IFF_PROMISC);
2993 * dev_set_promiscuity - update promiscuity count on a device
2997 * Add or remove promiscuity from a device. While the count in the device
2998 * remains above zero the interface remains promiscuous. Once it hits zero
2999 * the device reverts back to normal filtering operation. A negative @inc
3000 * value is used to drop promiscuity on the device.
3001 * Return 0 if successful or a negative errno code on error.
3003 int dev_set_promiscuity(struct net_device *dev, int inc)
3005 unsigned short old_flags = dev->flags;
3008 err = __dev_set_promiscuity(dev, inc);
3011 if (dev->flags != old_flags)
3012 dev_set_rx_mode(dev);
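/*
 * Example (illustration only): a capture-style user takes one
 * promiscuity reference while active and drops it symmetrically; the
 * device leaves promiscuous mode only when every reference is gone.
 */
#if 0
static void example_capture_start(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, 1);	/* enter (or stay in) promisc */
	rtnl_unlock();
}

static void example_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);	/* drop our reference */
	rtnl_unlock();
}
#endif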
3017 * dev_set_allmulti - update allmulti count on a device
3021 * Add or remove reception of all multicast frames to a device. While the
3022 * count in the device remains above zero the interface remains listening
3023 * to all multicast frames. Once it hits zero the device reverts back to normal
3024 * filtering operation. A negative @inc value is used to drop the counter
3025 * when releasing a resource needing all multicasts.
3026 * Return 0 if successful or a negative errno code on error.
3029 int dev_set_allmulti(struct net_device *dev, int inc)
3031 unsigned short old_flags = dev->flags;
3035 dev->flags |= IFF_ALLMULTI;
3036 dev->allmulti += inc;
3037 if (dev->allmulti == 0) {
3040 * If inc causes overflow, untouch allmulti and return error.
3043 dev->flags &= ~IFF_ALLMULTI;
3045 dev->allmulti -= inc;
3046 printk(KERN_WARNING "%s: allmulti touches roof, "
3047 "set allmulti failed, allmulti feature of "
3048 "device might be broken.\n", dev->name);
3052 if (dev->flags ^ old_flags) {
3053 if (dev->change_rx_flags)
3054 dev->change_rx_flags(dev, IFF_ALLMULTI);
3055 dev_set_rx_mode(dev);
3061 * Upload unicast and multicast address lists to device and
3062 * configure RX filtering. When the device doesn't support unicast
3063 * filtering it is put in promiscuous mode while unicast addresses are present.
3066 void __dev_set_rx_mode(struct net_device *dev)
3068 /* dev_open will call this function so the list will stay sane. */
3069 if (!(dev->flags&IFF_UP))
3072 if (!netif_device_present(dev))
3075 if (dev->set_rx_mode)
3076 dev->set_rx_mode(dev);
3078 /* Unicast address changes may only happen under the rtnl,
3079 * therefore calling __dev_set_promiscuity here is safe.
3081 if (dev->uc_count > 0 && !dev->uc_promisc) {
3082 __dev_set_promiscuity(dev, 1);
3083 dev->uc_promisc = 1;
3084 } else if (dev->uc_count == 0 && dev->uc_promisc) {
3085 __dev_set_promiscuity(dev, -1);
3086 dev->uc_promisc = 0;
3089 if (dev->set_multicast_list)
3090 dev->set_multicast_list(dev);
3094 void dev_set_rx_mode(struct net_device *dev)
3096 netif_addr_lock_bh(dev);
3097 __dev_set_rx_mode(dev);
3098 netif_addr_unlock_bh(dev);
3101 int __dev_addr_delete(struct dev_addr_list **list, int *count,
3102 void *addr, int alen, int glbl)
3104 struct dev_addr_list *da;
3106 for (; (da = *list) != NULL; list = &da->next) {
3107 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3108 alen == da->da_addrlen) {
3110 int old_glbl = da->da_gusers;
3127 int __dev_addr_add(struct dev_addr_list **list, int *count,
3128 void *addr, int alen, int glbl)
3130 struct dev_addr_list *da;
3132 for (da = *list; da != NULL; da = da->next) {
3133 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3134 da->da_addrlen == alen) {
3136 int old_glbl = da->da_gusers;
3146 da = kzalloc(sizeof(*da), GFP_ATOMIC);
3149 memcpy(da->da_addr, addr, alen);
3150 da->da_addrlen = alen;
3152 da->da_gusers = glbl ? 1 : 0;
3160 * dev_unicast_delete - Release secondary unicast address.
3162 * @addr: address to delete
3163 * @alen: length of @addr
3165 * Release reference to a secondary unicast address and remove it
3166 * from the device if the reference count drops to zero.
3168 * The caller must hold the rtnl_mutex.
3170 int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
3176 netif_addr_lock_bh(dev);
3177 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3179 __dev_set_rx_mode(dev);
3180 netif_addr_unlock_bh(dev);
3183 EXPORT_SYMBOL(dev_unicast_delete);
3186 * dev_unicast_add - add a secondary unicast address
3188 * @addr: address to add
3189 * @alen: length of @addr
3191 * Add a secondary unicast address to the device or increase
3192 * the reference count if it already exists.
3194 * The caller must hold the rtnl_mutex.
3196 int dev_unicast_add(struct net_device *dev, void *addr, int alen)
3202 netif_addr_lock_bh(dev);
3203 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3205 __dev_set_rx_mode(dev);
3206 netif_addr_unlock_bh(dev);
3209 EXPORT_SYMBOL(dev_unicast_add);
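/*
 * Example (illustration only; the MAC address is made up): taking and
 * releasing a secondary unicast address under the rtnl, as the
 * comments above require.
 */
#if 0
	static u8 sec_addr[ETH_ALEN] = { 0x00, 0x16, 0x3e, 0x00, 0x00, 0x01 };

	rtnl_lock();
	dev_unicast_add(dev, sec_addr, ETH_ALEN);
	/* use the address, then drop the reference: */
	dev_unicast_delete(dev, sec_addr, ETH_ALEN);
	rtnl_unlock();
#endif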
3211 int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3212 struct dev_addr_list **from, int *from_count)
3214 struct dev_addr_list *da, *next;
3218 while (da != NULL) {
3220 if (!da->da_synced) {
3221 err = __dev_addr_add(to, to_count,
3222 da->da_addr, da->da_addrlen, 0);
3227 } else if (da->da_users == 1) {
3228 __dev_addr_delete(to, to_count,
3229 da->da_addr, da->da_addrlen, 0);
3230 __dev_addr_delete(from, from_count,
3231 da->da_addr, da->da_addrlen, 0);
3238 void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3239 struct dev_addr_list **from, int *from_count)
3241 struct dev_addr_list *da, *next;
3244 while (da != NULL) {
3246 if (da->da_synced) {
3247 __dev_addr_delete(to, to_count,
3248 da->da_addr, da->da_addrlen, 0);
3250 __dev_addr_delete(from, from_count,
3251 da->da_addr, da->da_addrlen, 0);
3258 * dev_unicast_sync - Synchronize device's unicast list to another device
3259 * @to: destination device
3260 * @from: source device
3262 * Add newly added addresses to the destination device and release
3263 * addresses that have no users left. The source device must be
3264 * locked by netif_addr_lock_bh.
3266 * This function is intended to be called from the dev->set_rx_mode
3267 * function of layered software devices.
3269 int dev_unicast_sync(struct net_device *to, struct net_device *from)
3273 netif_addr_lock_bh(to);
3274 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3275 &from->uc_list, &from->uc_count);
3277 __dev_set_rx_mode(to);
3278 netif_addr_unlock_bh(to);
3281 EXPORT_SYMBOL(dev_unicast_sync);
3284 * dev_unicast_unsync - Remove synchronized addresses from the destination device
3285 * @to: destination device
3286 * @from: source device
3288 * Remove all addresses that were added to the destination device by
3289 * dev_unicast_sync(). This function is intended to be called from the
3290 * dev->stop function of layered software devices.
3292 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3294 netif_addr_lock_bh(from);
3295 netif_addr_lock(to);
3297 __dev_addr_unsync(&to->uc_list, &to->uc_count,
3298 &from->uc_list, &from->uc_count);
3299 __dev_set_rx_mode(to);
3301 netif_addr_unlock(to);
3302 netif_addr_unlock_bh(from);
3304 EXPORT_SYMBOL(dev_unicast_unsync);
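/*
 * Example (illustration only): how a layered "upper" device might use
 * the two helpers above; struct upper_priv and its real_dev field are
 * hypothetical.
 */
#if 0
struct upper_priv {
	struct net_device *real_dev;	/* the underlying device */
};

static void upper_set_rx_mode(struct net_device *dev)
{
	struct upper_priv *p = netdev_priv(dev);

	dev_unicast_sync(p->real_dev, dev);	/* push new addresses down */
}

static int upper_stop(struct net_device *dev)
{
	struct upper_priv *p = netdev_priv(dev);

	dev_unicast_unsync(p->real_dev, dev);	/* drop what sync added */
	return 0;
}
#endif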
3306 static void __dev_addr_discard(struct dev_addr_list **list)
3308 struct dev_addr_list *tmp;
3310 while (*list != NULL) {
3313 if (tmp->da_users > tmp->da_gusers)
3314 printk("__dev_addr_discard: address leakage! "
3315 "da_users=%d\n", tmp->da_users);
3320 static void dev_addr_discard(struct net_device *dev)
3322 netif_addr_lock_bh(dev);
3324 __dev_addr_discard(&dev->uc_list);
3327 __dev_addr_discard(&dev->mc_list);
3330 netif_addr_unlock_bh(dev);
3333 unsigned dev_get_flags(const struct net_device *dev)
3337 flags = (dev->flags & ~(IFF_PROMISC |
3342 (dev->gflags & (IFF_PROMISC |
3345 if (netif_running(dev)) {
3346 if (netif_oper_up(dev))
3347 flags |= IFF_RUNNING;
3348 if (netif_carrier_ok(dev))
3349 flags |= IFF_LOWER_UP;
3350 if (netif_dormant(dev))
3351 flags |= IFF_DORMANT;
3357 int dev_change_flags(struct net_device *dev, unsigned flags)
3360 int old_flags = dev->flags;
3365 * Set the flags on our device.
3368 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
3369 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
3371 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
3375 * Load in the correct multicast list now that the flags have changed.
3378 if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
3379 dev->change_rx_flags(dev, IFF_MULTICAST);
3381 dev_set_rx_mode(dev);
3384 * Have we downed the interface? We handle IFF_UP ourselves
3385 * according to user attempts to set it, rather than blindly setting it.
3390 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
3391 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
3394 dev_set_rx_mode(dev);
3397 if (dev->flags & IFF_UP &&
3398 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
3400 call_netdevice_notifiers(NETDEV_CHANGE, dev);
3402 if ((flags ^ dev->gflags) & IFF_PROMISC) {
3403 int inc = (flags & IFF_PROMISC) ? +1 : -1;
3404 dev->gflags ^= IFF_PROMISC;
3405 dev_set_promiscuity(dev, inc);
3408 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
3409 is important. Some (broken) drivers set IFF_PROMISC when
3410 IFF_ALLMULTI is requested, without asking us and without reporting.
3412 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
3413 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
3414 dev->gflags ^= IFF_ALLMULTI;
3415 dev_set_allmulti(dev, inc);
3418 /* Exclude state transition flags, already notified */
3419 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
3421 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
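/*
 * Example (illustration only): bringing an interface up while
 * preserving its other flags; SIOCSIFFLAGS reaches this function via
 * dev_ifsioc() below.
 */
#if 0
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();
#endif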
3426 int dev_set_mtu(struct net_device *dev, int new_mtu)
3430 if (new_mtu == dev->mtu)
3433 /* MTU must be positive. */
3437 if (!netif_device_present(dev))
3441 if (dev->change_mtu)
3442 err = dev->change_mtu(dev, new_mtu);
3445 if (!err && dev->flags & IFF_UP)
3446 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
3450 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3454 if (!dev->set_mac_address)
3456 if (sa->sa_family != dev->type)
3458 if (!netif_device_present(dev))
3460 err = dev->set_mac_address(dev, sa);
3462 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
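/*
 * Example (illustration only; the new MAC is made up): changing the
 * MTU and hardware address under the rtnl; note that sa_family must
 * match dev->type, as checked above.
 */
#if 0
	static const u8 new_mac[ETH_ALEN] = { 0x00, 0x16, 0x3e, 0xaa, 0xbb, 0xcc };
	struct sockaddr sa;
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 1500);

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, new_mac, dev->addr_len);
	err = dev_set_mac_address(dev, &sa);
	rtnl_unlock();
#endif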
3467 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
3469 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
3472 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3478 case SIOCGIFFLAGS: /* Get interface flags */
3479 ifr->ifr_flags = dev_get_flags(dev);
3482 case SIOCGIFMETRIC: /* Get the metric on the interface
3483 (currently unused) */
3484 ifr->ifr_metric = 0;
3487 case SIOCGIFMTU: /* Get the MTU of a device */
3488 ifr->ifr_mtu = dev->mtu;
3493 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
3495 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
3496 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3497 ifr->ifr_hwaddr.sa_family = dev->type;
3505 ifr->ifr_map.mem_start = dev->mem_start;
3506 ifr->ifr_map.mem_end = dev->mem_end;
3507 ifr->ifr_map.base_addr = dev->base_addr;
3508 ifr->ifr_map.irq = dev->irq;
3509 ifr->ifr_map.dma = dev->dma;
3510 ifr->ifr_map.port = dev->if_port;
3514 ifr->ifr_ifindex = dev->ifindex;
3518 ifr->ifr_qlen = dev->tx_queue_len;
3522 /* dev_ioctl() should ensure this case
3534 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
3536 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
3539 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3545 case SIOCSIFFLAGS: /* Set interface flags */
3546 return dev_change_flags(dev, ifr->ifr_flags);
3548 case SIOCSIFMETRIC: /* Set the metric on the interface
3549 (currently unused) */
3552 case SIOCSIFMTU: /* Set the MTU of a device */
3553 return dev_set_mtu(dev, ifr->ifr_mtu);
3556 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
3558 case SIOCSIFHWBROADCAST:
3559 if (ifr->ifr_hwaddr.sa_family != dev->type)
3561 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
3562 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3563 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3567 if (dev->set_config) {
3568 if (!netif_device_present(dev))
3570 return dev->set_config(dev, &ifr->ifr_map);
3575 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
3576 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3578 if (!netif_device_present(dev))
3580 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
3584 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
3585 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3587 if (!netif_device_present(dev))
3589 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
3593 if (ifr->ifr_qlen < 0)
3595 dev->tx_queue_len = ifr->ifr_qlen;
3599 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
3600 return dev_change_name(dev, ifr->ifr_newname);
3603 * Unknown or private ioctl
3607 if ((cmd >= SIOCDEVPRIVATE &&
3608 cmd <= SIOCDEVPRIVATE + 15) ||
3609 cmd == SIOCBONDENSLAVE ||
3610 cmd == SIOCBONDRELEASE ||
3611 cmd == SIOCBONDSETHWADDR ||
3612 cmd == SIOCBONDSLAVEINFOQUERY ||
3613 cmd == SIOCBONDINFOQUERY ||
3614 cmd == SIOCBONDCHANGEACTIVE ||
3615 cmd == SIOCGMIIPHY ||
3616 cmd == SIOCGMIIREG ||
3617 cmd == SIOCSMIIREG ||
3618 cmd == SIOCBRADDIF ||
3619 cmd == SIOCBRDELIF ||
3620 cmd == SIOCWANDEV) {
3622 if (dev->do_ioctl) {
3623 if (netif_device_present(dev))
3624 err = dev->do_ioctl(dev, ifr,
3637 * This function handles all "interface"-type I/O control requests. The actual
3638 * 'doing' part of this is dev_ifsioc above.
3642 * dev_ioctl - network device ioctl
3643 * @net: the applicable net namespace
3644 * @cmd: command to issue
3645 * @arg: pointer to a struct ifreq in user space
3647 * Issue ioctl functions to devices. This is normally called by the
3648 * user space syscall interfaces but can sometimes be useful for
3649 * other purposes. The return value is the return from the syscall if
3650 * positive or a negative errno code on error.
3653 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
3659 /* One special case: SIOCGIFCONF takes ifconf argument
3660 and requires shared lock, because it sleeps writing the answer.
3664 if (cmd == SIOCGIFCONF) {
3666 ret = dev_ifconf(net, (char __user *) arg);
3670 if (cmd == SIOCGIFNAME)
3671 return dev_ifname(net, (struct ifreq __user *)arg);
3673 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3676 ifr.ifr_name[IFNAMSIZ-1] = 0;
3678 colon = strchr(ifr.ifr_name, ':');
3683 * See which interface the caller is talking about.
3688 * These ioctl calls:
3689 * - can be done by all.
3690 * - atomic and do not require locking.
3701 dev_load(net, ifr.ifr_name);
3702 read_lock(&dev_base_lock);
3703 ret = dev_ifsioc_locked(net, &ifr, cmd);
3704 read_unlock(&dev_base_lock);
3708 if (copy_to_user(arg, &ifr,
3709 sizeof(struct ifreq)))
3715 dev_load(net, ifr.ifr_name);
3717 ret = dev_ethtool(net, &ifr);
3722 if (copy_to_user(arg, &ifr,
3723 sizeof(struct ifreq)))
3729 * These ioctl calls:
3730 * - require superuser power.
3731 * - require strict serialization.
3737 if (!capable(CAP_NET_ADMIN))
3739 dev_load(net, ifr.ifr_name);
3741 ret = dev_ifsioc(net, &ifr, cmd);
3746 if (copy_to_user(arg, &ifr,
3747 sizeof(struct ifreq)))
3753 * These ioctl calls:
3754 * - require superuser power.
3755 * - require strict serialization.
3756 * - do not return a value
3766 case SIOCSIFHWBROADCAST:
3769 case SIOCBONDENSLAVE:
3770 case SIOCBONDRELEASE:
3771 case SIOCBONDSETHWADDR:
3772 case SIOCBONDCHANGEACTIVE:
3775 if (!capable(CAP_NET_ADMIN))
3778 case SIOCBONDSLAVEINFOQUERY:
3779 case SIOCBONDINFOQUERY:
3780 dev_load(net, ifr.ifr_name);
3782 ret = dev_ifsioc(net, &ifr, cmd);
3787 /* Get the per device memory space. We can add this but
3788 * currently do not support it */
3790 /* Set the per device memory buffer space.
3791 * Not applicable in our case */
3796 * Unknown or private ioctl.
3799 if (cmd == SIOCWANDEV ||
3800 (cmd >= SIOCDEVPRIVATE &&
3801 cmd <= SIOCDEVPRIVATE + 15)) {
3802 dev_load(net, ifr.ifr_name);
3804 ret = dev_ifsioc(net, &ifr, cmd);
3806 if (!ret && copy_to_user(arg, &ifr,
3807 sizeof(struct ifreq)))
3811 /* Take care of Wireless Extensions */
3812 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
3813 return wext_handle_ioctl(net, &ifr, cmd, arg);
3820 * dev_new_index - allocate an ifindex
3821 * @net: the applicable net namespace
3823 * Returns a suitable unique value for a new device interface
3824 * number. The caller must hold the rtnl semaphore or the
3825 * dev_base_lock to be sure it remains unique.
3827 static int dev_new_index(struct net *net)
3833 if (!__dev_get_by_index(net, ifindex))
3838 /* Delayed registration/unregistration */
3839 static DEFINE_SPINLOCK(net_todo_list_lock);
3840 static LIST_HEAD(net_todo_list);
3842 static void net_set_todo(struct net_device *dev)
3844 spin_lock(&net_todo_list_lock);
3845 list_add_tail(&dev->todo_list, &net_todo_list);
3846 spin_unlock(&net_todo_list_lock);
3849 static void rollback_registered(struct net_device *dev)
3851 BUG_ON(dev_boot_phase);
3854 /* Some devices call without registering for initialization unwind. */
3855 if (dev->reg_state == NETREG_UNINITIALIZED) {
3856 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3857 "was registered\n", dev->name, dev);
3863 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3865 /* If device is running, close it first. */
3868 /* And unlink it from device chain. */
3869 unlist_netdevice(dev);
3871 dev->reg_state = NETREG_UNREGISTERING;
3875 /* Shutdown queueing discipline. */
3879 /* Notify protocols, that we are about to destroy
3880 this device. They should clean all the things.
3882 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
3885 * Flush the unicast and multicast chains
3887 dev_addr_discard(dev);
3892 /* Notifier chain MUST detach us from master device. */
3893 WARN_ON(dev->master);
3895 /* Remove entries from kobject tree */
3896 netdev_unregister_kobject(dev);
3903 static void __netdev_init_queue_locks_one(struct net_device *dev,
3904 struct netdev_queue *dev_queue,
3907 spin_lock_init(&dev_queue->_xmit_lock);
3908 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
3909 dev_queue->xmit_lock_owner = -1;
3912 static void netdev_init_queue_locks(struct net_device *dev)
3914 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
3915 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
3919 * register_netdevice - register a network device
3920 * @dev: device to register
3922 * Take a completed network device structure and add it to the kernel
3923 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3924 * chain. 0 is returned on success. A negative errno code is returned
3925 * on a failure to set up the device, or if the name is a duplicate.
3927 * Callers must hold the rtnl semaphore. You may want
3928 * register_netdev() instead of this.
3931 * The locking appears insufficient to guarantee two parallel registers
3932 * will not get the same name.
3935 int register_netdevice(struct net_device *dev)
3937 struct hlist_head *head;
3938 struct hlist_node *p;
3942 BUG_ON(dev_boot_phase);
3947 /* When net_devices are persistent, this will be fatal. */
3948 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
3949 BUG_ON(!dev_net(dev));
3952 spin_lock_init(&dev->addr_list_lock);
3953 netdev_set_addr_lockdep_class(dev);
3954 netdev_init_queue_locks(dev);
3958 /* Init, if this function is available */
3960 ret = dev->init(dev);
3968 if (!dev_valid_name(dev->name)) {
3973 dev->ifindex = dev_new_index(net);
3974 if (dev->iflink == -1)
3975 dev->iflink = dev->ifindex;
3977 /* Check for existence of name */
3978 head = dev_name_hash(net, dev->name);
3979 hlist_for_each(p, head) {
3980 struct net_device *d
3981 = hlist_entry(p, struct net_device, name_hlist);
3982 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
3988 /* Fix illegal checksum combinations */
3989 if ((dev->features & NETIF_F_HW_CSUM) &&
3990 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3991 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
3993 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
3996 if ((dev->features & NETIF_F_NO_CSUM) &&
3997 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3998 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4000 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4004 /* Fix illegal SG+CSUM combinations. */
4005 if ((dev->features & NETIF_F_SG) &&
4006 !(dev->features & NETIF_F_ALL_CSUM)) {
4007 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
4009 dev->features &= ~NETIF_F_SG;
4012 /* TSO requires that SG is present as well. */
4013 if ((dev->features & NETIF_F_TSO) &&
4014 !(dev->features & NETIF_F_SG)) {
4015 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
4017 dev->features &= ~NETIF_F_TSO;
4019 if (dev->features & NETIF_F_UFO) {
4020 if (!(dev->features & NETIF_F_HW_CSUM)) {
4021 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
4022 "NETIF_F_HW_CSUM feature.\n",
4024 dev->features &= ~NETIF_F_UFO;
4026 if (!(dev->features & NETIF_F_SG)) {
4027 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
4028 "NETIF_F_SG feature.\n",
4030 dev->features &= ~NETIF_F_UFO;
4034 /* Enable software GSO if SG is supported. */
4035 if (dev->features & NETIF_F_SG)
4036 dev->features |= NETIF_F_GSO;
4038 netdev_initialize_kobject(dev);
4039 ret = netdev_register_kobject(dev);
4042 dev->reg_state = NETREG_REGISTERED;
4045 * Default initial state at registry is that the
4046 * device is present.
4049 set_bit(__LINK_STATE_PRESENT, &dev->state);
4051 dev_init_scheduler(dev);
4053 list_netdevice(dev);
4055 /* Notify protocols, that a new device appeared. */
4056 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
4057 ret = notifier_to_errno(ret);
4059 rollback_registered(dev);
4060 dev->reg_state = NETREG_UNREGISTERED;
4073 * register_netdev - register a network device
4074 * @dev: device to register
4076 * Take a completed network device structure and add it to the kernel
4077 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4078 * chain. 0 is returned on success. A negative errno code is returned
4079 * on a failure to set up the device, or if the name is a duplicate.
4081 * This is a wrapper around register_netdevice that takes the rtnl semaphore
4082 * and expands the device name if you passed a format string to
4085 int register_netdev(struct net_device *dev)
4092 * If the name is a format string the caller wants us to do a name allocation.
4095 if (strchr(dev->name, '%')) {
4096 err = dev_alloc_name(dev, dev->name);
4101 err = register_netdevice(dev);
4106 EXPORT_SYMBOL(register_netdev);
4109 * netdev_wait_allrefs - wait until all references are gone.
4111 * This is called when unregistering network devices.
4113 * Any protocol or device that holds a reference should register
4114 * for netdevice notification, and cleanup and put back the
4115 * reference if they receive an UNREGISTER event.
4116 * We can get stuck here if buggy protocols don't correctly call dev_put.
4119 static void netdev_wait_allrefs(struct net_device *dev)
4121 unsigned long rebroadcast_time, warning_time;
4123 rebroadcast_time = warning_time = jiffies;
4124 while (atomic_read(&dev->refcnt) != 0) {
4125 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
4128 /* Rebroadcast unregister notification */
4129 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4131 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4133 /* We must not have linkwatch events
4134 * pending on unregister. If this
4135 * happens, we simply run the queue
4136 * unscheduled, resulting in a noop for this device.
4139 linkwatch_run_queue();
4144 rebroadcast_time = jiffies;
4149 if (time_after(jiffies, warning_time + 10 * HZ)) {
4150 printk(KERN_EMERG "unregister_netdevice: "
4151 "waiting for %s to become free. Usage "
4153 dev->name, atomic_read(&dev->refcnt));
4154 warning_time = jiffies;
4163 * register_netdevice(x1);
4164 * register_netdevice(x2);
4166 * unregister_netdevice(y1);
4167 * unregister_netdevice(y2);
4173 * We are invoked by rtnl_unlock() after it drops the semaphore.
4174 * This allows us to deal with problems:
4175 * 1) We can delete sysfs objects which invoke hotplug
4176 * without deadlocking with linkwatch via keventd.
4177 * 2) Since we run with the RTNL semaphore not held, we can sleep
4178 * safely in order to wait for the netdev refcnt to drop to zero.
4180 static DEFINE_MUTEX(net_todo_run_mutex);
4181 void netdev_run_todo(void)
4183 struct list_head list;
4185 /* Need to guard against multiple CPUs getting out of order. */
4186 mutex_lock(&net_todo_run_mutex);
4188 /* Not safe to do outside the semaphore. We must not return
4189 * until all unregister events invoked by the local processor
4190 * have been completed (either by this todo run, or one on another cpu).
4193 if (list_empty(&net_todo_list))
4196 /* Snapshot list, allow later requests */
4197 spin_lock(&net_todo_list_lock);
4198 list_replace_init(&net_todo_list, &list);
4199 spin_unlock(&net_todo_list_lock);
4201 while (!list_empty(&list)) {
4202 struct net_device *dev
4203 = list_entry(list.next, struct net_device, todo_list);
4204 list_del(&dev->todo_list);
4206 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
4207 printk(KERN_ERR "network todo '%s' but state %d\n",
4208 dev->name, dev->reg_state);
4213 dev->reg_state = NETREG_UNREGISTERED;
4215 on_each_cpu(flush_backlog, dev, 1);
4217 netdev_wait_allrefs(dev);
4220 BUG_ON(atomic_read(&dev->refcnt));
4221 WARN_ON(dev->ip_ptr);
4222 WARN_ON(dev->ip6_ptr);
4223 WARN_ON(dev->dn_ptr);
4225 if (dev->destructor)
4226 dev->destructor(dev);
4228 /* Free network device */
4229 kobject_put(&dev->dev.kobj);
4233 mutex_unlock(&net_todo_run_mutex);
4236 static struct net_device_stats *internal_stats(struct net_device *dev)
4241 static void netdev_init_one_queue(struct net_device *dev,
4242 struct netdev_queue *queue,
4248 static void netdev_init_queues(struct net_device *dev)
4250 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
4251 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
4252 spin_lock_init(&dev->tx_global_lock);
4256 * alloc_netdev_mq - allocate network device
4257 * @sizeof_priv: size of private data to allocate space for
4258 * @name: device name format string
4259 * @setup: callback to initialize device
4260 * @queue_count: the number of subqueues to allocate
4262 * Allocates a struct net_device with private data area for driver use
4263 * and performs basic initialization. Also allocates subqueue structs
4264 * for each queue on the device at the end of the netdevice.
4266 struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4267 void (*setup)(struct net_device *), unsigned int queue_count)
4269 struct netdev_queue *tx;
4270 struct net_device *dev;
4274 BUG_ON(strlen(name) >= sizeof(dev->name));
4276 alloc_size = sizeof(struct net_device);
4278 /* ensure 32-byte alignment of private area */
4279 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
4280 alloc_size += sizeof_priv;
4282 /* ensure 32-byte alignment of whole construct */
4283 alloc_size += NETDEV_ALIGN_CONST;
4285 p = kzalloc(alloc_size, GFP_KERNEL);
4287 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
4291 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
4293 printk(KERN_ERR "alloc_netdev: Unable to allocate "
4299 dev = (struct net_device *)
4300 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4301 dev->padded = (char *)dev - (char *)p;
4302 dev_net_set(dev, &init_net);
4305 dev->num_tx_queues = queue_count;
4306 dev->real_num_tx_queues = queue_count;
4309 dev->priv = ((char *)dev +
4310 ((sizeof(struct net_device) + NETDEV_ALIGN_CONST)
4311 & ~NETDEV_ALIGN_CONST));
4314 dev->gso_max_size = GSO_MAX_SIZE;
4316 netdev_init_queues(dev);
4318 dev->get_stats = internal_stats;
4319 netpoll_netdev_init(dev);
4321 strcpy(dev->name, name);
4324 EXPORT_SYMBOL(alloc_netdev_mq);
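/*
 * Example (illustration only; struct mydrv_priv is hypothetical):
 * allocating an Ethernet-style device with four TX subqueues and
 * registering it.
 */
#if 0
static int mydrv_probe(void)
{
	struct net_device *dev;

	dev = alloc_netdev_mq(sizeof(struct mydrv_priv), "mydrv%d",
			      ether_setup, 4);
	if (!dev)
		return -ENOMEM;
	return register_netdev(dev);
}
#endif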
4327 * free_netdev - free network device
4330 * This function does the last stage of destroying an allocated device
4331 * interface. The reference to the device object is released.
4332 * If this is the last reference then it will be freed.
4334 void free_netdev(struct net_device *dev)
4336 release_net(dev_net(dev));
4340 /* Compatibility with error handling in drivers */
4341 if (dev->reg_state == NETREG_UNINITIALIZED) {
4342 kfree((char *)dev - dev->padded);
4346 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
4347 dev->reg_state = NETREG_RELEASED;
4349 /* will free via device release */
4350 put_device(&dev->dev);
4353 /* Synchronize with packet receive processing. */
4354 void synchronize_net(void)
4361 * unregister_netdevice - remove device from the kernel
4364 * This function shuts down a device interface and removes it
4365 * from the kernel tables.
4367 * Callers must hold the rtnl semaphore. You may want
4368 * unregister_netdev() instead of this.
4371 void unregister_netdevice(struct net_device *dev)
4375 rollback_registered(dev);
4376 /* Finish processing unregister after unlock */
4381 * unregister_netdev - remove device from the kernel
4384 * This function shuts down a device interface and removes it
4385 * from the kernel tables.
4387 * This is just a wrapper for unregister_netdevice that takes
4388 * the rtnl semaphore. In general you want to use this and not
4389 * unregister_netdevice.
4391 void unregister_netdev(struct net_device *dev)
4394 unregister_netdevice(dev);
4398 EXPORT_SYMBOL(unregister_netdev);
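/*
 * Example (illustration only): the minimal register/unregister
 * lifecycle of a driver module using the wrappers above.
 */
#if 0
static struct net_device *example_dev;

static int __init example_init(void)
{
	int err;

	example_dev = alloc_netdev(0, "example%d", ether_setup);
	if (!example_dev)
		return -ENOMEM;
	err = register_netdev(example_dev);	/* takes the rtnl itself */
	if (err)
		free_netdev(example_dev);
	return err;
}

static void __exit example_exit(void)
{
	unregister_netdev(example_dev);		/* takes the rtnl itself */
	free_netdev(example_dev);
}
#endif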
4401 * dev_change_net_namespace - move device to different network namespace
4403 * @net: network namespace
4404 * @pat: If not NULL name pattern to try if the current device name
4405 * is already taken in the destination network namespace.
4407 * This function shuts down a device interface and moves it
4408 * to a new network namespace. On success 0 is returned, on
4409 * a failure a negative errno code is returned.
4411 * Callers must hold the rtnl semaphore.
4414 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
4417 const char *destname;
4422 /* Don't allow namespace local devices to be moved. */
4424 if (dev->features & NETIF_F_NETNS_LOCAL)
4427 /* Ensure the device has been registered */
4429 if (dev->reg_state != NETREG_REGISTERED)
4432 /* Get out if there is nothing to do */
4434 if (net_eq(dev_net(dev), net))
4437 /* Pick the destination device name, and ensure
4438 * we can use it in the destination network namespace.
4441 destname = dev->name;
4442 if (__dev_get_by_name(net, destname)) {
4443 /* We get here if we can't use the current device name */
4446 if (!dev_valid_name(pat))
4448 if (strchr(pat, '%')) {
4449 if (__dev_alloc_name(net, pat, buf) < 0)
4454 if (__dev_get_by_name(net, destname))
4459 * And now a mini version of register_netdevice and unregister_netdevice.
4462 /* If device is running close it first. */
4465 /* And unlink it from device chain */
4467 unlist_netdevice(dev);
4471 /* Shutdown queueing discipline. */
4474 /* Notify protocols, that we are about to destroy
4475 this device. They should clean all the things.
4477 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4480 * Flush the unicast and multicast chains
4482 dev_addr_discard(dev);
4484 /* Actually switch the network namespace */
4485 dev_net_set(dev, net);
4487 /* Assign the new device name */
4488 if (destname != dev->name)
4489 strcpy(dev->name, destname);
4491 /* If there is an ifindex conflict assign a new one */
4492 if (__dev_get_by_index(net, dev->ifindex)) {
4493 int iflink = (dev->iflink == dev->ifindex);
4494 dev->ifindex = dev_new_index(net);
4496 dev->iflink = dev->ifindex;
4499 /* Fixup kobjects */
4500 netdev_unregister_kobject(dev);
4501 err = netdev_register_kobject(dev);
4504 /* Add the device back in the hashes */
4505 list_netdevice(dev);
4507 /* Notify protocols, that a new device appeared. */
4508 call_netdevice_notifiers(NETDEV_REGISTER, dev);
4516 static int dev_cpu_callback(struct notifier_block *nfb,
4517 unsigned long action,
4520 struct sk_buff **list_skb;
4521 struct Qdisc **list_net;
4522 struct sk_buff *skb;
4523 unsigned int cpu, oldcpu = (unsigned long)ocpu;
4524 struct softnet_data *sd, *oldsd;
4526 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
4529 local_irq_disable();
4530 cpu = smp_processor_id();
4531 sd = &per_cpu(softnet_data, cpu);
4532 oldsd = &per_cpu(softnet_data, oldcpu);
4534 /* Find end of our completion_queue. */
4535 list_skb = &sd->completion_queue;
4537 list_skb = &(*list_skb)->next;
4538 /* Append completion queue from offline CPU. */
4539 *list_skb = oldsd->completion_queue;
4540 oldsd->completion_queue = NULL;
4542 /* Find end of our output_queue. */
4543 list_net = &sd->output_queue;
4545 list_net = &(*list_net)->next_sched;
4546 /* Append output queue from offline CPU. */
4547 *list_net = oldsd->output_queue;
4548 oldsd->output_queue = NULL;
4550 raise_softirq_irqoff(NET_TX_SOFTIRQ);
4553 /* Process offline CPU's input_pkt_queue */
4554 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
4560 #ifdef CONFIG_NET_DMA
4562 * net_dma_rebalance - try to maintain one DMA channel per CPU
4563 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
4565 * This is called when the number of channels allocated to the net_dma client
4566 * changes. The net_dma client tries to have one DMA channel per CPU.
4569 static void net_dma_rebalance(struct net_dma *net_dma)
4571 unsigned int cpu, i, n, chan_idx;
4572 struct dma_chan *chan;
4574 if (cpus_empty(net_dma->channel_mask)) {
4575 for_each_online_cpu(cpu)
4576 rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
4581 cpu = first_cpu(cpu_online_map);
4583 for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
4584 chan = net_dma->channels[chan_idx];
4586 n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
4587 + (i < (num_online_cpus() %
4588 cpus_weight(net_dma->channel_mask)) ? 1 : 0));
4591 per_cpu(softnet_data, cpu).net_dma = chan;
4592 cpu = next_cpu(cpu, cpu_online_map);
4600 * netdev_dma_event - event callback for the net_dma_client
4601 * @client: should always be net_dma_client
4602 * @chan: DMA channel for the event
4603 * @state: DMA state to be handled
4605 static enum dma_state_client
4606 netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
4607 enum dma_state state)
4609 int i, found = 0, pos = -1;
4610 struct net_dma *net_dma =
4611 container_of(client, struct net_dma, client);
4612 enum dma_state_client ack = DMA_DUP; /* default: take no action */
4614 spin_lock(&net_dma->lock);
4616 case DMA_RESOURCE_AVAILABLE:
4617 for (i = 0; i < nr_cpu_ids; i++)
4618 if (net_dma->channels[i] == chan) {
4621 } else if (net_dma->channels[i] == NULL && pos < 0)
4624 if (!found && pos >= 0) {
4626 net_dma->channels[pos] = chan;
4627 cpu_set(pos, net_dma->channel_mask);
4628 net_dma_rebalance(net_dma);
4631 case DMA_RESOURCE_REMOVED:
4632 for (i = 0; i < nr_cpu_ids; i++)
4633 if (net_dma->channels[i] == chan) {
4641 cpu_clear(pos, net_dma->channel_mask);
4642 net_dma->channels[i] = NULL;
4643 net_dma_rebalance(net_dma);
4649 spin_unlock(&net_dma->lock);
4655 * netdev_dma_register - register the networking subsystem as a DMA client
4657 static int __init netdev_dma_register(void)
4659 net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct dma_chan *),
4661 if (unlikely(!net_dma.channels)) {
4663 "netdev_dma: no memory for net_dma.channels\n");
4666 spin_lock_init(&net_dma.lock);
4667 dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
4668 dma_async_client_register(&net_dma.client);
4669 dma_async_client_chan_request(&net_dma.client);
4674 static int __init netdev_dma_register(void) { return -ENODEV; }
4675 #endif /* CONFIG_NET_DMA */
4678 * netdev_compute_features - compute conjunction of two feature sets
4679 * @all: first feature set
4680 * @one: second feature set
4682 * Computes a new feature set after adding a device with feature set
4683 * @one to the master device with current feature set @all. Returns
4684 * the new feature set.
4686 int netdev_compute_features(unsigned long all, unsigned long one)
4688 /* if device needs checksumming, downgrade to hw checksumming */
4689 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
4690 all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
4692 /* if device can't do all checksum, downgrade to ipv4/ipv6 */
4693 if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
4694 all ^= NETIF_F_HW_CSUM
4695 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4697 if (one & NETIF_F_GSO)
4698 one |= NETIF_F_GSO_SOFTWARE;
4702 * If even one device supports a GSO protocol with software fallback,
4703 * enable it for all.
4705 all |= one & NETIF_F_GSO_SOFTWARE;
4707 /* If even one device supports robust GSO, enable it for all. */
4708 if (one & NETIF_F_GSO_ROBUST)
4709 all |= NETIF_F_GSO_ROBUST;
4711 all &= one | NETIF_F_LLTX;
4713 if (!(all & NETIF_F_ALL_CSUM))
4715 if (!(all & NETIF_F_SG))
4716 all &= ~NETIF_F_GSO_MASK;
4720 EXPORT_SYMBOL(netdev_compute_features);
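/*
 * Example (illustration only; the slave structures are simplified
 * stand-ins for the real bonding driver): a master folding its slaves'
 * feature sets together with the helper above.
 */
#if 0
static unsigned long example_master_features(struct list_head *slave_devs)
{
	unsigned long features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO;
	struct example_slave {		/* hypothetical slave entry */
		struct list_head list;
		struct net_device *dev;
	} *slave;

	list_for_each_entry(slave, slave_devs, list)
		features = netdev_compute_features(features,
						   slave->dev->features);
	return features;
}
#endif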
4722 static struct hlist_head *netdev_create_hash(void)
4725 struct hlist_head *hash;
4727 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
4729 for (i = 0; i < NETDEV_HASHENTRIES; i++)
4730 INIT_HLIST_HEAD(&hash[i]);
4735 /* Initialize per network namespace state */
4736 static int __net_init netdev_init(struct net *net)
4738 INIT_LIST_HEAD(&net->dev_base_head);
4740 net->dev_name_head = netdev_create_hash();
4741 if (net->dev_name_head == NULL)
4744 net->dev_index_head = netdev_create_hash();
4745 if (net->dev_index_head == NULL)
4751 kfree(net->dev_name_head);
4756 char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
4758 const struct device_driver *driver;
4759 const struct device *parent;
4761 if (len <= 0 || !buffer)
4765 parent = dev->dev.parent;
4770 driver = parent->driver;
4771 if (driver && driver->name)
4772 strlcpy(buffer, driver->name, len);
4776 static void __net_exit netdev_exit(struct net *net)
4778 kfree(net->dev_name_head);
4779 kfree(net->dev_index_head);
4782 static struct pernet_operations __net_initdata netdev_net_ops = {
4783 .init = netdev_init,
4784 .exit = netdev_exit,
4787 static void __net_exit default_device_exit(struct net *net)
4789 struct net_device *dev, *next;
4791 * Push all migratable network devices back to the
4792 * initial network namespace
4795 for_each_netdev_safe(net, dev, next) {
4797 char fb_name[IFNAMSIZ];
4799 /* Ignore unmovable devices (e.g. loopback) */
4800 if (dev->features & NETIF_F_NETNS_LOCAL)
4803 /* Push remaining network devices to init_net */
4804 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
4805 err = dev_change_net_namespace(dev, &init_net, fb_name);
4807 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
4808 __func__, dev->name, err);
4815 static struct pernet_operations __net_initdata default_device_ops = {
4816 .exit = default_device_exit,
4820 * Initialize the DEV module. At boot time this walks the device list and
4821 * unhooks any devices that fail to initialise (normally hardware not
4822 * present) and leaves us with a valid list of present and active devices.
4827 * This is called single threaded during boot, so no need
4828 * to take the rtnl semaphore.
4830 static int __init net_dev_init(void)
4832 int i, rc = -ENOMEM;
4834 BUG_ON(!dev_boot_phase);
4836 if (dev_proc_init())
4839 if (netdev_kobject_init())
4842 INIT_LIST_HEAD(&ptype_all);
4843 for (i = 0; i < PTYPE_HASH_SIZE; i++)
4844 INIT_LIST_HEAD(&ptype_base[i]);
4846 if (register_pernet_subsys(&netdev_net_ops))
4849 if (register_pernet_device(&default_device_ops))
4853 * Initialise the packet receive queues.
4856 for_each_possible_cpu(i) {
4857 struct softnet_data *queue;
4859 queue = &per_cpu(softnet_data, i);
4860 skb_queue_head_init(&queue->input_pkt_queue);
4861 queue->completion_queue = NULL;
4862 INIT_LIST_HEAD(&queue->poll_list);
4864 queue->backlog.poll = process_backlog;
4865 queue->backlog.weight = weight_p;
4868 netdev_dma_register();
4872 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
4873 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
4875 hotcpu_notifier(dev_cpu_callback, 0);
4883 subsys_initcall(net_dev_init);
4885 EXPORT_SYMBOL(__dev_get_by_index);
4886 EXPORT_SYMBOL(__dev_get_by_name);
4887 EXPORT_SYMBOL(__dev_remove_pack);
4888 EXPORT_SYMBOL(dev_valid_name);
4889 EXPORT_SYMBOL(dev_add_pack);
4890 EXPORT_SYMBOL(dev_alloc_name);
4891 EXPORT_SYMBOL(dev_close);
4892 EXPORT_SYMBOL(dev_get_by_flags);
4893 EXPORT_SYMBOL(dev_get_by_index);
4894 EXPORT_SYMBOL(dev_get_by_name);
4895 EXPORT_SYMBOL(dev_open);
4896 EXPORT_SYMBOL(dev_queue_xmit);
4897 EXPORT_SYMBOL(dev_remove_pack);
4898 EXPORT_SYMBOL(dev_set_allmulti);
4899 EXPORT_SYMBOL(dev_set_promiscuity);
4900 EXPORT_SYMBOL(dev_change_flags);
4901 EXPORT_SYMBOL(dev_set_mtu);
4902 EXPORT_SYMBOL(dev_set_mac_address);
4903 EXPORT_SYMBOL(free_netdev);
4904 EXPORT_SYMBOL(netdev_boot_setup_check);
4905 EXPORT_SYMBOL(netdev_set_master);
4906 EXPORT_SYMBOL(netdev_state_change);
4907 EXPORT_SYMBOL(netif_receive_skb);
4908 EXPORT_SYMBOL(netif_rx);
4909 EXPORT_SYMBOL(register_gifconf);
4910 EXPORT_SYMBOL(register_netdevice);
4911 EXPORT_SYMBOL(register_netdevice_notifier);
4912 EXPORT_SYMBOL(skb_checksum_help);
4913 EXPORT_SYMBOL(synchronize_net);
4914 EXPORT_SYMBOL(unregister_netdevice);
4915 EXPORT_SYMBOL(unregister_netdevice_notifier);
4916 EXPORT_SYMBOL(net_enable_timestamp);
4917 EXPORT_SYMBOL(net_disable_timestamp);
4918 EXPORT_SYMBOL(dev_get_flags);
4920 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
4921 EXPORT_SYMBOL(br_handle_frame_hook);
4922 EXPORT_SYMBOL(br_fdb_get_hook);
4923 EXPORT_SYMBOL(br_fdb_put_hook);
4927 EXPORT_SYMBOL(dev_load);
4930 EXPORT_PER_CPU_SYMBOL(softnet_data);