net: Fix stacked vlan offload features computation
[pandora-kernel.git] net/core/dev.c
1 /*
2  *      NET3    Protocol independent device support routines.
3  *
4  *              This program is free software; you can redistribute it and/or
5  *              modify it under the terms of the GNU General Public License
6  *              as published by the Free Software Foundation; either version
7  *              2 of the License, or (at your option) any later version.
8  *
9  *      Derived from the non IP parts of dev.c 1.0.19
10  *              Authors:        Ross Biro
11  *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
13  *
14  *      Additional Authors:
15  *              Florian la Roche <rzsfl@rz.uni-sb.de>
16  *              Alan Cox <gw4pts@gw4pts.ampr.org>
17  *              David Hinds <dahinds@users.sourceforge.net>
18  *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19  *              Adam Sulmicki <adam@cfar.umd.edu>
20  *              Pekka Riikonen <priikone@poesidon.pspt.fi>
21  *
22  *      Changes:
23  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
24  *                                      to 2 if register_netdev gets called
25  *                                      before net_dev_init & also removed a
26  *                                      few lines of code in the process.
27  *              Alan Cox        :       device private ioctl copies fields back.
28  *              Alan Cox        :       Transmit queue code does relevant
29  *                                      stunts to keep the queue safe.
30  *              Alan Cox        :       Fixed double lock.
31  *              Alan Cox        :       Fixed promisc NULL pointer trap
32  *              ????????        :       Support the full private ioctl range
33  *              Alan Cox        :       Moved ioctl permission check into
34  *                                      drivers
35  *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
36  *              Alan Cox        :       100 backlog just doesn't cut it when
37  *                                      you start doing multicast video 8)
38  *              Alan Cox        :       Rewrote net_bh and list manager.
39  *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
40  *              Alan Cox        :       Took out transmit every packet pass
41  *                                      Saved a few bytes in the ioctl handler
42  *              Alan Cox        :       Network driver sets packet type before
43  *                                      calling netif_rx. Saves a function
44  *                                      call a packet.
45  *              Alan Cox        :       Hashed net_bh()
46  *              Richard Kooijman:       Timestamp fixes.
47  *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
48  *              Alan Cox        :       Device lock protection.
49  *              Alan Cox        :       Fixed nasty side effect of device close
50  *                                      changes.
51  *              Rudi Cilibrasi  :       Pass the right thing to
52  *                                      set_mac_address()
53  *              Dave Miller     :       32bit quantity for the device lock to
54  *                                      make it work out on a Sparc.
55  *              Bjorn Ekwall    :       Added KERNELD hack.
56  *              Alan Cox        :       Cleaned up the backlog initialise.
57  *              Craig Metz      :       SIOCGIFCONF fix if space for under
58  *                                      1 device.
59  *          Thomas Bogendoerfer :       Return ENODEV for dev_open, if there
60  *                                      is no device open function.
61  *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
62  *          Michael Chastain    :       Fix signed/unsigned for SIOCGIFCONF
63  *              Cyrus Durgin    :       Cleaned for KMOD
64  *              Adam Sulmicki   :       Bug Fix : Network Device Unload
65  *                                      A network device unload needs to purge
66  *                                      the backlog queue.
67  *      Paul Rusty Russell      :       SIOCSIFNAME
68  *              Pekka Riikonen  :       Netdev boot-time settings code
69  *              Andrew Morton   :       Make unregister_netdevice wait
70  *                                      indefinitely on dev->refcnt
71  *              J Hadi Salim    :       - Backlog queue sampling
72  *                                      - netif_rx() feedback
73  */
74
75 #include <asm/uaccess.h>
76 #include <linux/bitops.h>
77 #include <linux/capability.h>
78 #include <linux/cpu.h>
79 #include <linux/types.h>
80 #include <linux/kernel.h>
81 #include <linux/hash.h>
82 #include <linux/slab.h>
83 #include <linux/sched.h>
84 #include <linux/mutex.h>
85 #include <linux/string.h>
86 #include <linux/mm.h>
87 #include <linux/socket.h>
88 #include <linux/sockios.h>
89 #include <linux/errno.h>
90 #include <linux/interrupt.h>
91 #include <linux/if_ether.h>
92 #include <linux/netdevice.h>
93 #include <linux/etherdevice.h>
94 #include <linux/ethtool.h>
95 #include <linux/notifier.h>
96 #include <linux/skbuff.h>
97 #include <net/net_namespace.h>
98 #include <net/sock.h>
99 #include <linux/rtnetlink.h>
100 #include <linux/stat.h>
101 #include <net/dst.h>
102 #include <net/pkt_sched.h>
103 #include <net/checksum.h>
104 #include <net/xfrm.h>
105 #include <linux/highmem.h>
106 #include <linux/init.h>
107 #include <linux/module.h>
108 #include <linux/netpoll.h>
109 #include <linux/rcupdate.h>
110 #include <linux/delay.h>
111 #include <net/iw_handler.h>
112 #include <asm/current.h>
113 #include <linux/audit.h>
114 #include <linux/dmaengine.h>
115 #include <linux/err.h>
116 #include <linux/ctype.h>
117 #include <linux/if_arp.h>
118 #include <linux/if_vlan.h>
119 #include <linux/ip.h>
120 #include <net/ip.h>
121 #include <net/mpls.h>
122 #include <linux/ipv6.h>
123 #include <linux/in.h>
124 #include <linux/jhash.h>
125 #include <linux/random.h>
126 #include <trace/events/napi.h>
127 #include <trace/events/net.h>
128 #include <trace/events/skb.h>
129 #include <linux/pci.h>
130 #include <linux/inetdevice.h>
131 #include <linux/cpu_rmap.h>
132 #include <linux/static_key.h>
133 #include <linux/hashtable.h>
134 #include <linux/vmalloc.h>
135 #include <linux/if_macvlan.h>
136 #include <linux/errqueue.h>
137 #include <linux/hrtimer.h>
138
139 #include "net-sysfs.h"
140
141 /* Instead of increasing this, you should create a hash table. */
142 #define MAX_GRO_SKBS 8
143
144 /* This should be increased if a protocol with a bigger head is added. */
145 #define GRO_MAX_HEAD (MAX_HEADER + 128)
146
147 static DEFINE_SPINLOCK(ptype_lock);
148 static DEFINE_SPINLOCK(offload_lock);
149 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
150 struct list_head ptype_all __read_mostly;       /* Taps */
151 static struct list_head offload_base __read_mostly;
152
153 static int netif_rx_internal(struct sk_buff *skb);
154 static int call_netdevice_notifiers_info(unsigned long val,
155                                          struct net_device *dev,
156                                          struct netdev_notifier_info *info);
157
158 /*
159  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
160  * semaphore.
161  *
162  * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
163  *
164  * Writers must hold the rtnl semaphore while they loop through the
165  * dev_base_head list, and hold dev_base_lock for writing when they do the
166  * actual updates.  This allows pure readers to access the list even
167  * while a writer is preparing to update it.
168  *
169  * To put it another way, dev_base_lock is held for writing only to
170  * protect against pure readers; the rtnl semaphore provides the
171  * protection against other writers.
172  *
173  * For example usages, see register_netdevice() and
174  * unregister_netdevice(), which must be called with the rtnl
175  * semaphore held.
176  */
177 DEFINE_RWLOCK(dev_base_lock);
178 EXPORT_SYMBOL(dev_base_lock);
179
180 /* protects napi_hash addition/deletion and napi_gen_id */
181 static DEFINE_SPINLOCK(napi_hash_lock);
182
183 static unsigned int napi_gen_id;
184 static DEFINE_HASHTABLE(napi_hash, 8);
185
186 static seqcount_t devnet_rename_seq;
187
188 static inline void dev_base_seq_inc(struct net *net)
189 {
190         while (++net->dev_base_seq == 0);
191 }
192
193 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
194 {
195         unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
196
197         return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
198 }
199
200 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
201 {
202         return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
203 }
204
205 static inline void rps_lock(struct softnet_data *sd)
206 {
207 #ifdef CONFIG_RPS
208         spin_lock(&sd->input_pkt_queue.lock);
209 #endif
210 }
211
212 static inline void rps_unlock(struct softnet_data *sd)
213 {
214 #ifdef CONFIG_RPS
215         spin_unlock(&sd->input_pkt_queue.lock);
216 #endif
217 }
218
219 /* Device list insertion */
220 static void list_netdevice(struct net_device *dev)
221 {
222         struct net *net = dev_net(dev);
223
224         ASSERT_RTNL();
225
226         write_lock_bh(&dev_base_lock);
227         list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
228         hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
229         hlist_add_head_rcu(&dev->index_hlist,
230                            dev_index_hash(net, dev->ifindex));
231         write_unlock_bh(&dev_base_lock);
232
233         dev_base_seq_inc(net);
234 }
235
236 /* Device list removal
237  * caller must respect a RCU grace period before freeing/reusing dev
238  */
239 static void unlist_netdevice(struct net_device *dev)
240 {
241         ASSERT_RTNL();
242
243         /* Unlink dev from the device chain */
244         write_lock_bh(&dev_base_lock);
245         list_del_rcu(&dev->dev_list);
246         hlist_del_rcu(&dev->name_hlist);
247         hlist_del_rcu(&dev->index_hlist);
248         write_unlock_bh(&dev_base_lock);
249
250         dev_base_seq_inc(dev_net(dev));
251 }
252
253 /*
254  *      Our notifier list
255  */
256
257 static RAW_NOTIFIER_HEAD(netdev_chain);
258
259 /*
260  *      Device drivers call our routines to queue packets here. We empty the
261  *      queue in the local softnet handler.
262  */
263
264 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
265 EXPORT_PER_CPU_SYMBOL(softnet_data);
266
267 #ifdef CONFIG_LOCKDEP
268 /*
269  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
270  * according to dev->type
271  */
272 static const unsigned short netdev_lock_type[] =
273         {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
274          ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
275          ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
276          ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
277          ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
278          ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
279          ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
280          ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
281          ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
282          ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
283          ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
284          ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
285          ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
286          ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
287          ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
288
289 static const char *const netdev_lock_name[] =
290         {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
291          "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
292          "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
293          "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
294          "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
295          "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
296          "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
297          "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
298          "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
299          "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
300          "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
301          "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
302          "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
303          "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
304          "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
305
306 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
307 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
308
309 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
310 {
311         int i;
312
313         for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
314                 if (netdev_lock_type[i] == dev_type)
315                         return i;
316         /* the last key is used by default */
317         return ARRAY_SIZE(netdev_lock_type) - 1;
318 }
319
320 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
321                                                  unsigned short dev_type)
322 {
323         int i;
324
325         i = netdev_lock_pos(dev_type);
326         lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
327                                    netdev_lock_name[i]);
328 }
329
330 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
331 {
332         int i;
333
334         i = netdev_lock_pos(dev->type);
335         lockdep_set_class_and_name(&dev->addr_list_lock,
336                                    &netdev_addr_lock_key[i],
337                                    netdev_lock_name[i]);
338 }
339 #else
340 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
341                                                  unsigned short dev_type)
342 {
343 }
344 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
345 {
346 }
347 #endif
348
349 /*******************************************************************************
350
351                 Protocol management and registration routines
352
353 *******************************************************************************/
354
355 /*
356  *      Add a protocol ID to the list. Now that the input handler is
357  *      smarter we can dispense with all the messy stuff that used to be
358  *      here.
359  *
360  *      BEWARE!!! Protocol handlers, mangling input packets,
361  *      MUST BE last in hash buckets and checking protocol handlers
362  *      MUST start from promiscuous ptype_all chain in net_bh.
363  *      It is true now, do not change it.
364  *      Explanation follows: if protocol handler, mangling packet, will
365  *      be the first on list, it is not able to sense, that packet
366  *      is cloned and should be copied-on-write, so that it will
367  *      change it and subsequent readers will get broken packet.
368  *                                                      --ANK (980803)
369  */
370
371 static inline struct list_head *ptype_head(const struct packet_type *pt)
372 {
373         if (pt->type == htons(ETH_P_ALL))
374                 return &ptype_all;
375         else
376                 return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
377 }
378
379 /**
380  *      dev_add_pack - add packet handler
381  *      @pt: packet type declaration
382  *
383  *      Add a protocol handler to the networking stack. The passed &packet_type
384  *      is linked into kernel lists and may not be freed until it has been
385  *      removed from the kernel lists.
386  *
387  *      This call does not sleep, therefore it cannot
388  *      guarantee that all CPUs that are in the middle of receiving packets
389  *      will see the new packet type (until the next received packet).
390  */
391
392 void dev_add_pack(struct packet_type *pt)
393 {
394         struct list_head *head = ptype_head(pt);
395
396         spin_lock(&ptype_lock);
397         list_add_rcu(&pt->list, head);
398         spin_unlock(&ptype_lock);
399 }
400 EXPORT_SYMBOL(dev_add_pack);
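/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * A module that wants to see every received frame could register a tap
 * such as the hypothetical one below, and later remove it again with
 * dev_remove_pack(), which also waits for in-flight readers.
 *
 *      static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *                            struct packet_type *pt,
 *                            struct net_device *orig_dev)
 *      {
 *              kfree_skb(skb);         consume the packet after inspecting it
 *              return NET_RX_SUCCESS;
 *      }
 *
 *      static struct packet_type my_tap __read_mostly = {
 *              .type = cpu_to_be16(ETH_P_ALL),
 *              .func = my_tap_rcv,
 *      };
 *
 *      dev_add_pack(&my_tap);          typically from module init
 *      dev_remove_pack(&my_tap);       and from module exit
 */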
401
402 /**
403  *      __dev_remove_pack        - remove packet handler
404  *      @pt: packet type declaration
405  *
406  *      Remove a protocol handler that was previously added to the kernel
407  *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
408  *      from the kernel lists and can be freed or reused once this function
409  *      returns.
410  *
411  *      The packet type might still be in use by receivers
412  *      and must not be freed until after all the CPUs have gone
413  *      through a quiescent state.
414  */
415 void __dev_remove_pack(struct packet_type *pt)
416 {
417         struct list_head *head = ptype_head(pt);
418         struct packet_type *pt1;
419
420         spin_lock(&ptype_lock);
421
422         list_for_each_entry(pt1, head, list) {
423                 if (pt == pt1) {
424                         list_del_rcu(&pt->list);
425                         goto out;
426                 }
427         }
428
429         pr_warn("dev_remove_pack: %p not found\n", pt);
430 out:
431         spin_unlock(&ptype_lock);
432 }
433 EXPORT_SYMBOL(__dev_remove_pack);
434
435 /**
436  *      dev_remove_pack  - remove packet handler
437  *      @pt: packet type declaration
438  *
439  *      Remove a protocol handler that was previously added to the kernel
440  *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
441  *      from the kernel lists and can be freed or reused once this function
442  *      returns.
443  *
444  *      This call sleeps to guarantee that no CPU is looking at the packet
445  *      type after return.
446  */
447 void dev_remove_pack(struct packet_type *pt)
448 {
449         __dev_remove_pack(pt);
450
451         synchronize_net();
452 }
453 EXPORT_SYMBOL(dev_remove_pack);
454
455
456 /**
457  *      dev_add_offload - register offload handlers
458  *      @po: protocol offload declaration
459  *
460  *      Add protocol offload handlers to the networking stack. The passed
461  *      &packet_offload is linked into kernel lists and may not be freed until
462  *      it has been removed from the kernel lists.
463  *
464  *      This call does not sleep, therefore it cannot
465  *      guarantee that all CPUs that are in the middle of receiving packets
466  *      will see the new offload handlers (until the next received packet).
467  */
468 void dev_add_offload(struct packet_offload *po)
469 {
470         struct list_head *head = &offload_base;
471
472         spin_lock(&offload_lock);
473         list_add_rcu(&po->list, head);
474         spin_unlock(&offload_lock);
475 }
476 EXPORT_SYMBOL(dev_add_offload);
477
478 /**
479  *      __dev_remove_offload     - remove offload handler
480  *      @po: packet offload declaration
481  *
482  *      Remove a protocol offload handler that was previously added to the
483  *      kernel offload handlers by dev_add_offload(). The passed &packet_offload
484  *      is removed from the kernel lists and can be freed or reused once this
485  *      function returns.
486  *
487  *      The packet offload might still be in use by receivers
488  *      and must not be freed until after all the CPUs have gone
489  *      through a quiescent state.
490  */
491 static void __dev_remove_offload(struct packet_offload *po)
492 {
493         struct list_head *head = &offload_base;
494         struct packet_offload *po1;
495
496         spin_lock(&offload_lock);
497
498         list_for_each_entry(po1, head, list) {
499                 if (po == po1) {
500                         list_del_rcu(&po->list);
501                         goto out;
502                 }
503         }
504
505         pr_warn("dev_remove_offload: %p not found\n", po);
506 out:
507         spin_unlock(&offload_lock);
508 }
509
510 /**
511  *      dev_remove_offload       - remove packet offload handler
512  *      @po: packet offload declaration
513  *
514  *      Remove a packet offload handler that was previously added to the kernel
515  *      offload handlers by dev_add_offload(). The passed &packet_offload is
516  *      removed from the kernel lists and can be freed or reused once this
517  *      function returns.
518  *
519  *      This call sleeps to guarantee that no CPU is looking at the packet
520  *      type after return.
521  */
522 void dev_remove_offload(struct packet_offload *po)
523 {
524         __dev_remove_offload(po);
525
526         synchronize_net();
527 }
528 EXPORT_SYMBOL(dev_remove_offload);
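/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * A protocol that provides GRO/GSO support registers a &packet_offload,
 * much as the IPv4 code does for ETH_P_IP; the callback names below are
 * hypothetical.
 *
 *      static struct packet_offload my_offload __read_mostly = {
 *              .type = cpu_to_be16(ETH_P_IP),
 *              .callbacks = {
 *                      .gso_segment  = my_gso_segment,
 *                      .gro_receive  = my_gro_receive,
 *                      .gro_complete = my_gro_complete,
 *              },
 *      };
 *
 *      dev_add_offload(&my_offload);
 *      ...
 *      dev_remove_offload(&my_offload);
 */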
529
530 /******************************************************************************
531
532                       Device Boot-time Settings Routines
533
534 *******************************************************************************/
535
536 /* Boot time configuration table */
537 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
538
539 /**
540  *      netdev_boot_setup_add   - add new setup entry
541  *      @name: name of the device
542  *      @map: configured settings for the device
543  *
544  *      Adds a new setup entry to the dev_boot_setup list.  The function
545  *      returns 0 on error and 1 on success.  This is a generic routine for
546  *      all netdevices.
547  */
548 static int netdev_boot_setup_add(char *name, struct ifmap *map)
549 {
550         struct netdev_boot_setup *s;
551         int i;
552
553         s = dev_boot_setup;
554         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
555                 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
556                         memset(s[i].name, 0, sizeof(s[i].name));
557                         strlcpy(s[i].name, name, IFNAMSIZ);
558                         memcpy(&s[i].map, map, sizeof(s[i].map));
559                         break;
560                 }
561         }
562
563         return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
564 }
565
566 /**
567  *      netdev_boot_setup_check - check boot time settings
568  *      @dev: the netdevice
569  *
570  *      Check boot time settings for the device.
571  *      Any settings found are applied to the device so that they can be
572  *      used later during device probing.
573  *      Returns 0 if no settings are found, 1 if they are.
574  */
575 int netdev_boot_setup_check(struct net_device *dev)
576 {
577         struct netdev_boot_setup *s = dev_boot_setup;
578         int i;
579
580         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
581                 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
582                     !strcmp(dev->name, s[i].name)) {
583                         dev->irq        = s[i].map.irq;
584                         dev->base_addr  = s[i].map.base_addr;
585                         dev->mem_start  = s[i].map.mem_start;
586                         dev->mem_end    = s[i].map.mem_end;
587                         return 1;
588                 }
589         }
590         return 0;
591 }
592 EXPORT_SYMBOL(netdev_boot_setup_check);
593
594
595 /**
596  *      netdev_boot_base        - get address from boot time settings
597  *      @prefix: prefix for network device
598  *      @unit: id for network device
599  *
600  *      Check boot time settings for the base address of a device.
601  *      The found setting is returned so that it can be used
602  *      later during device probing.
603  *      Returns 0 if no settings are found.
604  */
605 unsigned long netdev_boot_base(const char *prefix, int unit)
606 {
607         const struct netdev_boot_setup *s = dev_boot_setup;
608         char name[IFNAMSIZ];
609         int i;
610
611         sprintf(name, "%s%d", prefix, unit);
612
613         /*
614          * If device already registered then return base of 1
615          * to indicate not to probe for this interface
616          */
617         if (__dev_get_by_name(&init_net, name))
618                 return 1;
619
620         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
621                 if (!strcmp(name, s[i].name))
622                         return s[i].map.base_addr;
623         return 0;
624 }
625
626 /*
627  * Saves the boot-time configured settings for any netdevice.
628  */
629 int __init netdev_boot_setup(char *str)
630 {
631         int ints[5];
632         struct ifmap map;
633
634         str = get_options(str, ARRAY_SIZE(ints), ints);
635         if (!str || !*str)
636                 return 0;
637
638         /* Save settings */
639         memset(&map, 0, sizeof(map));
640         if (ints[0] > 0)
641                 map.irq = ints[1];
642         if (ints[0] > 1)
643                 map.base_addr = ints[2];
644         if (ints[0] > 2)
645                 map.mem_start = ints[3];
646         if (ints[0] > 3)
647                 map.mem_end = ints[4];
648
649         /* Add new entry to the list */
650         return netdev_boot_setup_add(str, &map);
651 }
652
653 __setup("netdev=", netdev_boot_setup);
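/*
 * Editor's note: a usage sketch, not part of the original file. Given the
 * parsing above, a kernel command line option such as (values made up)
 *
 *      netdev=9,0x300,0xd0000,0xd8000,eth0
 *
 * records irq 9, I/O base 0x300 and the given memory window under the
 * name "eth0"; a driver probing "eth0" later retrieves them through
 * netdev_boot_setup_check().
 */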
654
655 /*******************************************************************************
656
657                             Device Interface Subroutines
658
659 *******************************************************************************/
660
661 /**
662  *      __dev_get_by_name       - find a device by its name
663  *      @net: the applicable net namespace
664  *      @name: name to find
665  *
666  *      Find an interface by name. Must be called under RTNL semaphore
667  *      or @dev_base_lock. If the name is found a pointer to the device
668  *      is returned. If the name is not found then %NULL is returned. The
669  *      reference counters are not incremented so the caller must be
670  *      careful with locks.
671  */
672
673 struct net_device *__dev_get_by_name(struct net *net, const char *name)
674 {
675         struct net_device *dev;
676         struct hlist_head *head = dev_name_hash(net, name);
677
678         hlist_for_each_entry(dev, head, name_hlist)
679                 if (!strncmp(dev->name, name, IFNAMSIZ))
680                         return dev;
681
682         return NULL;
683 }
684 EXPORT_SYMBOL(__dev_get_by_name);
685
686 /**
687  *      dev_get_by_name_rcu     - find a device by its name
688  *      @net: the applicable net namespace
689  *      @name: name to find
690  *
691  *      Find an interface by name.
692  *      If the name is found a pointer to the device is returned.
693  *      If the name is not found then %NULL is returned.
694  *      The reference counters are not incremented so the caller must be
695  *      careful with locks. The caller must hold RCU lock.
696  */
697
698 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
699 {
700         struct net_device *dev;
701         struct hlist_head *head = dev_name_hash(net, name);
702
703         hlist_for_each_entry_rcu(dev, head, name_hlist)
704                 if (!strncmp(dev->name, name, IFNAMSIZ))
705                         return dev;
706
707         return NULL;
708 }
709 EXPORT_SYMBOL(dev_get_by_name_rcu);
710
711 /**
712  *      dev_get_by_name         - find a device by its name
713  *      @net: the applicable net namespace
714  *      @name: name to find
715  *
716  *      Find an interface by name. This can be called from any
717  *      context and does its own locking. The returned handle has
718  *      the usage count incremented and the caller must use dev_put() to
719  *      release it when it is no longer needed. %NULL is returned if no
720  *      matching device is found.
721  */
722
723 struct net_device *dev_get_by_name(struct net *net, const char *name)
724 {
725         struct net_device *dev;
726
727         rcu_read_lock();
728         dev = dev_get_by_name_rcu(net, name);
729         if (dev)
730                 dev_hold(dev);
731         rcu_read_unlock();
732         return dev;
733 }
734 EXPORT_SYMBOL(dev_get_by_name);
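/*
 * Editor's note: an illustrative sketch, not part of the original file,
 * contrasting the lookup variants above ("eth0" and net are placeholders).
 *
 *      dev = dev_get_by_name(net, "eth0");     reference taken, any context
 *      if (dev) {
 *              ...
 *              dev_put(dev);
 *      }
 *
 *      rcu_read_lock();
 *      dev = dev_get_by_name_rcu(net, "eth0"); no reference taken
 *      ...
 *      rcu_read_unlock();
 *
 * __dev_get_by_name() behaves like the RCU variant but expects the caller
 * to hold the RTNL semaphore or dev_base_lock instead.
 */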
735
736 /**
737  *      __dev_get_by_index - find a device by its ifindex
738  *      @net: the applicable net namespace
739  *      @ifindex: index of device
740  *
741  *      Search for an interface by index. Returns %NULL if the device
742  *      is not found or a pointer to the device. The device has not
743  *      had its reference counter increased so the caller must be careful
744  *      about locking. The caller must hold either the RTNL semaphore
745  *      or @dev_base_lock.
746  */
747
748 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
749 {
750         struct net_device *dev;
751         struct hlist_head *head = dev_index_hash(net, ifindex);
752
753         hlist_for_each_entry(dev, head, index_hlist)
754                 if (dev->ifindex == ifindex)
755                         return dev;
756
757         return NULL;
758 }
759 EXPORT_SYMBOL(__dev_get_by_index);
760
761 /**
762  *      dev_get_by_index_rcu - find a device by its ifindex
763  *      @net: the applicable net namespace
764  *      @ifindex: index of device
765  *
766  *      Search for an interface by index. Returns %NULL if the device
767  *      is not found or a pointer to the device. The device has not
768  *      had its reference counter increased so the caller must be careful
769  *      about locking. The caller must hold RCU lock.
770  */
771
772 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
773 {
774         struct net_device *dev;
775         struct hlist_head *head = dev_index_hash(net, ifindex);
776
777         hlist_for_each_entry_rcu(dev, head, index_hlist)
778                 if (dev->ifindex == ifindex)
779                         return dev;
780
781         return NULL;
782 }
783 EXPORT_SYMBOL(dev_get_by_index_rcu);
784
785
786 /**
787  *      dev_get_by_index - find a device by its ifindex
788  *      @net: the applicable net namespace
789  *      @ifindex: index of device
790  *
791  *      Search for an interface by index. Returns NULL if the device
792  *      is not found or a pointer to the device. The device returned has
793  *      had a reference added and the pointer is safe until the user calls
794  *      dev_put to indicate they have finished with it.
795  */
796
797 struct net_device *dev_get_by_index(struct net *net, int ifindex)
798 {
799         struct net_device *dev;
800
801         rcu_read_lock();
802         dev = dev_get_by_index_rcu(net, ifindex);
803         if (dev)
804                 dev_hold(dev);
805         rcu_read_unlock();
806         return dev;
807 }
808 EXPORT_SYMBOL(dev_get_by_index);
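/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * Resolving an ifindex while only briefly inside an RCU section follows
 * the same pattern dev_get_by_index() uses internally:
 *
 *      rcu_read_lock();
 *      dev = dev_get_by_index_rcu(net, ifindex);
 *      if (dev)
 *              dev_hold(dev);          keep it alive past the RCU section
 *      rcu_read_unlock();
 *      ...
 *      if (dev)
 *              dev_put(dev);
 */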
809
810 /**
811  *      netdev_get_name - get a netdevice name, knowing its ifindex.
812  *      @net: network namespace
813  *      @name: a pointer to the buffer where the name will be stored.
814  *      @ifindex: the ifindex of the interface to get the name from.
815  *
816  *      The use of raw_seqcount_begin() and cond_resched() before
817  *      retrying is required as we want to give the writers a chance
818  *      to complete when CONFIG_PREEMPT is not set.
819  */
820 int netdev_get_name(struct net *net, char *name, int ifindex)
821 {
822         struct net_device *dev;
823         unsigned int seq;
824
825 retry:
826         seq = raw_seqcount_begin(&devnet_rename_seq);
827         rcu_read_lock();
828         dev = dev_get_by_index_rcu(net, ifindex);
829         if (!dev) {
830                 rcu_read_unlock();
831                 return -ENODEV;
832         }
833
834         strcpy(name, dev->name);
835         rcu_read_unlock();
836         if (read_seqcount_retry(&devnet_rename_seq, seq)) {
837                 cond_resched();
838                 goto retry;
839         }
840
841         return 0;
842 }
843
844 /**
845  *      dev_getbyhwaddr_rcu - find a device by its hardware address
846  *      @net: the applicable net namespace
847  *      @type: media type of device
848  *      @ha: hardware address
849  *
850  *      Search for an interface by MAC address. Returns NULL if the device
851  *      is not found or a pointer to the device.
852  *      The caller must hold RCU or RTNL.
853  *      The returned device has not had its ref count increased
854  *      and the caller must therefore be careful about locking
855  *
856  */
857
858 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
859                                        const char *ha)
860 {
861         struct net_device *dev;
862
863         for_each_netdev_rcu(net, dev)
864                 if (dev->type == type &&
865                     !memcmp(dev->dev_addr, ha, dev->addr_len))
866                         return dev;
867
868         return NULL;
869 }
870 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
871
872 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
873 {
874         struct net_device *dev;
875
876         ASSERT_RTNL();
877         for_each_netdev(net, dev)
878                 if (dev->type == type)
879                         return dev;
880
881         return NULL;
882 }
883 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
884
885 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
886 {
887         struct net_device *dev, *ret = NULL;
888
889         rcu_read_lock();
890         for_each_netdev_rcu(net, dev)
891                 if (dev->type == type) {
892                         dev_hold(dev);
893                         ret = dev;
894                         break;
895                 }
896         rcu_read_unlock();
897         return ret;
898 }
899 EXPORT_SYMBOL(dev_getfirstbyhwtype);
900
901 /**
902  *      __dev_get_by_flags - find any device with given flags
903  *      @net: the applicable net namespace
904  *      @if_flags: IFF_* values
905  *      @mask: bitmask of bits in if_flags to check
906  *
907  *      Search for any interface with the given flags. Returns NULL if a device
908  *      is not found or a pointer to the device. Must be called inside
909  *      rtnl_lock(), and result refcount is unchanged.
910  */
911
912 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
913                                       unsigned short mask)
914 {
915         struct net_device *dev, *ret;
916
917         ASSERT_RTNL();
918
919         ret = NULL;
920         for_each_netdev(net, dev) {
921                 if (((dev->flags ^ if_flags) & mask) == 0) {
922                         ret = dev;
923                         break;
924                 }
925         }
926         return ret;
927 }
928 EXPORT_SYMBOL(__dev_get_by_flags);
929
930 /**
931  *      dev_valid_name - check if name is okay for network device
932  *      @name: name string
933  *
934  *      Network device names need to be valid file names to
935  *      allow sysfs to work.  We also disallow any kind of
936  *      whitespace.
937  */
938 bool dev_valid_name(const char *name)
939 {
940         if (*name == '\0')
941                 return false;
942         if (strlen(name) >= IFNAMSIZ)
943                 return false;
944         if (!strcmp(name, ".") || !strcmp(name, ".."))
945                 return false;
946
947         while (*name) {
948                 if (*name == '/' || isspace(*name))
949                         return false;
950                 name++;
951         }
952         return true;
953 }
954 EXPORT_SYMBOL(dev_valid_name);
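/*
 * Editor's note, not part of the original file: with the checks above,
 * names such as "eth0" or "wlan-guest" are accepted, while "", ".", "..",
 * "a/b", names containing whitespace, and names of IFNAMSIZ (16) bytes
 * or more are rejected.
 */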
955
956 /**
957  *      __dev_alloc_name - allocate a name for a device
958  *      @net: network namespace to allocate the device name in
959  *      @name: name format string
960  *      @buf:  scratch buffer and result name string
961  *
962  *      Passed a format string - eg "lt%d" it will try and find a suitable
963  *      id. It scans list of devices to build up a free map, then chooses
964  *      the first empty slot. The caller must hold the dev_base or rtnl lock
965  *      while allocating the name and adding the device in order to avoid
966  *      duplicates.
967  *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
968  *      Returns the number of the unit assigned or a negative errno code.
969  */
970
971 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
972 {
973         int i = 0;
974         const char *p;
975         const int max_netdevices = 8*PAGE_SIZE;
976         unsigned long *inuse;
977         struct net_device *d;
978
979         p = strnchr(name, IFNAMSIZ-1, '%');
980         if (p) {
981                 /*
982                  * Verify the string as this thing may have come from
983                  * the user.  There must be either one "%d" and no other "%"
984                  * characters.
985                  */
986                 if (p[1] != 'd' || strchr(p + 2, '%'))
987                         return -EINVAL;
988
989                 /* Use one page as a bit array of possible slots */
990                 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
991                 if (!inuse)
992                         return -ENOMEM;
993
994                 for_each_netdev(net, d) {
995                         if (!sscanf(d->name, name, &i))
996                                 continue;
997                         if (i < 0 || i >= max_netdevices)
998                                 continue;
999
1000                         /*  avoid cases where sscanf is not exact inverse of printf */
1001                         snprintf(buf, IFNAMSIZ, name, i);
1002                         if (!strncmp(buf, d->name, IFNAMSIZ))
1003                                 set_bit(i, inuse);
1004                 }
1005
1006                 i = find_first_zero_bit(inuse, max_netdevices);
1007                 free_page((unsigned long) inuse);
1008         }
1009
1010         if (buf != name)
1011                 snprintf(buf, IFNAMSIZ, name, i);
1012         if (!__dev_get_by_name(net, buf))
1013                 return i;
1014
1015         /* It is possible to run out of possible slots
1016          * when the name is long and there isn't enough space left
1017          * for the digits, or if all bits are used.
1018          */
1019         return -ENFILE;
1020 }
1021
1022 /**
1023  *      dev_alloc_name - allocate a name for a device
1024  *      @dev: device
1025  *      @name: name format string
1026  *
1027  *      Passed a format string - eg "lt%d" it will try and find a suitable
1028  *      id. It scans list of devices to build up a free map, then chooses
1029  *      the first empty slot. The caller must hold the dev_base or rtnl lock
1030  *      while allocating the name and adding the device in order to avoid
1031  *      duplicates.
1032  *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1033  *      Returns the number of the unit assigned or a negative errno code.
1034  */
1035
1036 int dev_alloc_name(struct net_device *dev, const char *name)
1037 {
1038         char buf[IFNAMSIZ];
1039         struct net *net;
1040         int ret;
1041
1042         BUG_ON(!dev_net(dev));
1043         net = dev_net(dev);
1044         ret = __dev_alloc_name(net, name, buf);
1045         if (ret >= 0)
1046                 strlcpy(dev->name, buf, IFNAMSIZ);
1047         return ret;
1048 }
1049 EXPORT_SYMBOL(dev_alloc_name);
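/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * With devices dummy0 and dummy2 already present (made-up example), the
 * in-use bitmap built above has bit 1 as the first free slot, so
 *
 *      err = dev_alloc_name(dev, "dummy%d");
 *
 * sets dev->name to "dummy1" and returns 1.
 */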
1050
1051 static int dev_alloc_name_ns(struct net *net,
1052                              struct net_device *dev,
1053                              const char *name)
1054 {
1055         char buf[IFNAMSIZ];
1056         int ret;
1057
1058         ret = __dev_alloc_name(net, name, buf);
1059         if (ret >= 0)
1060                 strlcpy(dev->name, buf, IFNAMSIZ);
1061         return ret;
1062 }
1063
1064 static int dev_get_valid_name(struct net *net,
1065                               struct net_device *dev,
1066                               const char *name)
1067 {
1068         BUG_ON(!net);
1069
1070         if (!dev_valid_name(name))
1071                 return -EINVAL;
1072
1073         if (strchr(name, '%'))
1074                 return dev_alloc_name_ns(net, dev, name);
1075         else if (__dev_get_by_name(net, name))
1076                 return -EEXIST;
1077         else if (dev->name != name)
1078                 strlcpy(dev->name, name, IFNAMSIZ);
1079
1080         return 0;
1081 }
1082
1083 /**
1084  *      dev_change_name - change name of a device
1085  *      @dev: device
1086  *      @newname: name (or format string) must be at least IFNAMSIZ
1087  *
1088  *      Change the name of a device. A format string such as "eth%d"
1089  *      can be passed for wildcarding.
1090  */
1091 int dev_change_name(struct net_device *dev, const char *newname)
1092 {
1093         unsigned char old_assign_type;
1094         char oldname[IFNAMSIZ];
1095         int err = 0;
1096         int ret;
1097         struct net *net;
1098
1099         ASSERT_RTNL();
1100         BUG_ON(!dev_net(dev));
1101
1102         net = dev_net(dev);
1103         if (dev->flags & IFF_UP)
1104                 return -EBUSY;
1105
1106         write_seqcount_begin(&devnet_rename_seq);
1107
1108         if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1109                 write_seqcount_end(&devnet_rename_seq);
1110                 return 0;
1111         }
1112
1113         memcpy(oldname, dev->name, IFNAMSIZ);
1114
1115         err = dev_get_valid_name(net, dev, newname);
1116         if (err < 0) {
1117                 write_seqcount_end(&devnet_rename_seq);
1118                 return err;
1119         }
1120
1121         if (oldname[0] && !strchr(oldname, '%'))
1122                 netdev_info(dev, "renamed from %s\n", oldname);
1123
1124         old_assign_type = dev->name_assign_type;
1125         dev->name_assign_type = NET_NAME_RENAMED;
1126
1127 rollback:
1128         ret = device_rename(&dev->dev, dev->name);
1129         if (ret) {
1130                 memcpy(dev->name, oldname, IFNAMSIZ);
1131                 dev->name_assign_type = old_assign_type;
1132                 write_seqcount_end(&devnet_rename_seq);
1133                 return ret;
1134         }
1135
1136         write_seqcount_end(&devnet_rename_seq);
1137
1138         netdev_adjacent_rename_links(dev, oldname);
1139
1140         write_lock_bh(&dev_base_lock);
1141         hlist_del_rcu(&dev->name_hlist);
1142         write_unlock_bh(&dev_base_lock);
1143
1144         synchronize_rcu();
1145
1146         write_lock_bh(&dev_base_lock);
1147         hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1148         write_unlock_bh(&dev_base_lock);
1149
1150         ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1151         ret = notifier_to_errno(ret);
1152
1153         if (ret) {
1154                 /* err >= 0 after dev_alloc_name() or stores the first errno */
1155                 if (err >= 0) {
1156                         err = ret;
1157                         write_seqcount_begin(&devnet_rename_seq);
1158                         memcpy(dev->name, oldname, IFNAMSIZ);
1159                         memcpy(oldname, newname, IFNAMSIZ);
1160                         dev->name_assign_type = old_assign_type;
1161                         old_assign_type = NET_NAME_RENAMED;
1162                         goto rollback;
1163                 } else {
1164                         pr_err("%s: name change rollback failed: %d\n",
1165                                dev->name, ret);
1166                 }
1167         }
1168
1169         return err;
1170 }
1171
1172 /**
1173  *      dev_set_alias - change ifalias of a device
1174  *      @dev: device
1175  *      @alias: name up to IFALIASZ
1176  *      @len: limit of bytes to copy from info
1177  *
1178  *      Set the ifalias for a device.
1179  */
1180 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1181 {
1182         char *new_ifalias;
1183
1184         ASSERT_RTNL();
1185
1186         if (len >= IFALIASZ)
1187                 return -EINVAL;
1188
1189         if (!len) {
1190                 kfree(dev->ifalias);
1191                 dev->ifalias = NULL;
1192                 return 0;
1193         }
1194
1195         new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1196         if (!new_ifalias)
1197                 return -ENOMEM;
1198         dev->ifalias = new_ifalias;
1199
1200         strlcpy(dev->ifalias, alias, len+1);
1201         return len;
1202 }
1203
1204
1205 /**
1206  *      netdev_features_change - device changes features
1207  *      @dev: device to cause notification
1208  *
1209  *      Called to indicate a device has changed features.
1210  */
1211 void netdev_features_change(struct net_device *dev)
1212 {
1213         call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1214 }
1215 EXPORT_SYMBOL(netdev_features_change);
1216
1217 /**
1218  *      netdev_state_change - device changes state
1219  *      @dev: device to cause notification
1220  *
1221  *      Called to indicate a device has changed state. This function calls
1222  *      the notifier chains for netdev_chain and sends a NEWLINK message
1223  *      to the routing socket.
1224  */
1225 void netdev_state_change(struct net_device *dev)
1226 {
1227         if (dev->flags & IFF_UP) {
1228                 struct netdev_notifier_change_info change_info;
1229
1230                 change_info.flags_changed = 0;
1231                 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
1232                                               &change_info.info);
1233                 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1234         }
1235 }
1236 EXPORT_SYMBOL(netdev_state_change);
1237
1238 /**
1239  *      netdev_notify_peers - notify network peers about existence of @dev
1240  *      @dev: network device
1241  *
1242  * Generate traffic such that interested network peers are aware of
1243  * @dev, such as by generating a gratuitous ARP. This may be used when
1244  * a device wants to inform the rest of the network about some sort of
1245  * reconfiguration such as a failover event or virtual machine
1246  * migration.
1247  */
1248 void netdev_notify_peers(struct net_device *dev)
1249 {
1250         rtnl_lock();
1251         call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1252         rtnl_unlock();
1253 }
1254 EXPORT_SYMBOL(netdev_notify_peers);
1255
1256 static int __dev_open(struct net_device *dev)
1257 {
1258         const struct net_device_ops *ops = dev->netdev_ops;
1259         int ret;
1260
1261         ASSERT_RTNL();
1262
1263         if (!netif_device_present(dev))
1264                 return -ENODEV;
1265
1266         /* Block netpoll from trying to do any rx path servicing.
1267          * If we don't do this there is a chance ndo_poll_controller
1268          * or ndo_poll may be running while we open the device
1269          */
1270         netpoll_poll_disable(dev);
1271
1272         ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1273         ret = notifier_to_errno(ret);
1274         if (ret)
1275                 return ret;
1276
1277         set_bit(__LINK_STATE_START, &dev->state);
1278
1279         if (ops->ndo_validate_addr)
1280                 ret = ops->ndo_validate_addr(dev);
1281
1282         if (!ret && ops->ndo_open)
1283                 ret = ops->ndo_open(dev);
1284
1285         netpoll_poll_enable(dev);
1286
1287         if (ret)
1288                 clear_bit(__LINK_STATE_START, &dev->state);
1289         else {
1290                 dev->flags |= IFF_UP;
1291                 dev_set_rx_mode(dev);
1292                 dev_activate(dev);
1293                 add_device_randomness(dev->dev_addr, dev->addr_len);
1294         }
1295
1296         return ret;
1297 }
1298
1299 /**
1300  *      dev_open        - prepare an interface for use.
1301  *      @dev:   device to open
1302  *
1303  *      Takes a device from down to up state. The device's private open
1304  *      function is invoked and then the multicast lists are loaded. Finally
1305  *      the device is moved into the up state and a %NETDEV_UP message is
1306  *      sent to the netdev notifier chain.
1307  *
1308  *      Calling this function on an active interface is a nop. On a failure
1309  *      a negative errno code is returned.
1310  */
1311 int dev_open(struct net_device *dev)
1312 {
1313         int ret;
1314
1315         if (dev->flags & IFF_UP)
1316                 return 0;
1317
1318         ret = __dev_open(dev);
1319         if (ret < 0)
1320                 return ret;
1321
1322         rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1323         call_netdevice_notifiers(NETDEV_UP, dev);
1324
1325         return ret;
1326 }
1327 EXPORT_SYMBOL(dev_open);
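/*
 * Editor's note: a usage sketch, not part of the original file. Kernel
 * callers bring an interface up under the RTNL semaphore, which
 * __dev_open() asserts:
 *
 *      rtnl_lock();
 *      err = dev_open(dev);
 *      rtnl_unlock();
 */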
1328
1329 static int __dev_close_many(struct list_head *head)
1330 {
1331         struct net_device *dev;
1332
1333         ASSERT_RTNL();
1334         might_sleep();
1335
1336         list_for_each_entry(dev, head, close_list) {
1337                 /* Temporarily disable netpoll until the interface is down */
1338                 netpoll_poll_disable(dev);
1339
1340                 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1341
1342                 clear_bit(__LINK_STATE_START, &dev->state);
1343
1344                 /* Synchronize to the scheduled poll. We cannot touch the poll list;
1345                  * it may even be on a different cpu. So just clear netif_running().
1346                  *
1347                  * dev->stop() will invoke napi_disable() on all of its
1348                  * napi_struct instances on this device.
1349                  */
1350                 smp_mb__after_atomic(); /* Commit netif_running(). */
1351         }
1352
1353         dev_deactivate_many(head);
1354
1355         list_for_each_entry(dev, head, close_list) {
1356                 const struct net_device_ops *ops = dev->netdev_ops;
1357
1358                 /*
1359                  *      Call the device specific close. This cannot fail.
1360                  *      It is only done if the device is UP.
1361                  *
1362                  *      We allow it to be called even after a DETACH hot-plug
1363                  *      event.
1364                  */
1365                 if (ops->ndo_stop)
1366                         ops->ndo_stop(dev);
1367
1368                 dev->flags &= ~IFF_UP;
1369                 netpoll_poll_enable(dev);
1370         }
1371
1372         return 0;
1373 }
1374
1375 static int __dev_close(struct net_device *dev)
1376 {
1377         int retval;
1378         LIST_HEAD(single);
1379
1380         list_add(&dev->close_list, &single);
1381         retval = __dev_close_many(&single);
1382         list_del(&single);
1383
1384         return retval;
1385 }
1386
1387 static int dev_close_many(struct list_head *head)
1388 {
1389         struct net_device *dev, *tmp;
1390
1391         /* Remove the devices that don't need to be closed */
1392         list_for_each_entry_safe(dev, tmp, head, close_list)
1393                 if (!(dev->flags & IFF_UP))
1394                         list_del_init(&dev->close_list);
1395
1396         __dev_close_many(head);
1397
1398         list_for_each_entry_safe(dev, tmp, head, close_list) {
1399                 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1400                 call_netdevice_notifiers(NETDEV_DOWN, dev);
1401                 list_del_init(&dev->close_list);
1402         }
1403
1404         return 0;
1405 }
1406
1407 /**
1408  *      dev_close - shutdown an interface.
1409  *      @dev: device to shutdown
1410  *
1411  *      This function moves an active device into down state. A
1412  *      %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1413  *      is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1414  *      chain.
1415  */
1416 int dev_close(struct net_device *dev)
1417 {
1418         if (dev->flags & IFF_UP) {
1419                 LIST_HEAD(single);
1420
1421                 list_add(&dev->close_list, &single);
1422                 dev_close_many(&single);
1423                 list_del(&single);
1424         }
1425         return 0;
1426 }
1427 EXPORT_SYMBOL(dev_close);
1428
1429
1430 /**
1431  *      dev_disable_lro - disable Large Receive Offload on a device
1432  *      @dev: device
1433  *
1434  *      Disable Large Receive Offload (LRO) on a net device.  Must be
1435  *      called under RTNL.  This is needed if received packets may be
1436  *      forwarded to another interface.
1437  */
1438 void dev_disable_lro(struct net_device *dev)
1439 {
1440         struct net_device *lower_dev;
1441         struct list_head *iter;
1442
1443         dev->wanted_features &= ~NETIF_F_LRO;
1444         netdev_update_features(dev);
1445
1446         if (unlikely(dev->features & NETIF_F_LRO))
1447                 netdev_WARN(dev, "failed to disable LRO!\n");
1448
1449         netdev_for_each_lower_dev(dev, lower_dev, iter)
1450                 dev_disable_lro(lower_dev);
1451 }
1452 EXPORT_SYMBOL(dev_disable_lro);
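/*
 * Editor's note, not part of the original file: this is typically called
 * from paths that already hold RTNL, for example when a device is about
 * to start forwarding packets:
 *
 *      ASSERT_RTNL();
 *      dev_disable_lro(dev);
 *
 * and, as above, the request is propagated to all lower (stacked) devices.
 */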
1453
1454 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1455                                    struct net_device *dev)
1456 {
1457         struct netdev_notifier_info info;
1458
1459         netdev_notifier_info_init(&info, dev);
1460         return nb->notifier_call(nb, val, &info);
1461 }
1462
1463 static int dev_boot_phase = 1;
1464
1465 /**
1466  *      register_netdevice_notifier - register a network notifier block
1467  *      @nb: notifier
1468  *
1469  *      Register a notifier to be called when network device events occur.
1470  *      The notifier passed is linked into the kernel structures and must
1471  *      not be reused until it has been unregistered. A negative errno code
1472  *      is returned on a failure.
1473  *
1474  *      When registered, all registration and up events are replayed
1475  *      to the new notifier to allow the caller to have a race-free
1476  *      view of the network device list.
1477  */
1478
1479 int register_netdevice_notifier(struct notifier_block *nb)
1480 {
1481         struct net_device *dev;
1482         struct net_device *last;
1483         struct net *net;
1484         int err;
1485
1486         rtnl_lock();
1487         err = raw_notifier_chain_register(&netdev_chain, nb);
1488         if (err)
1489                 goto unlock;
1490         if (dev_boot_phase)
1491                 goto unlock;
1492         for_each_net(net) {
1493                 for_each_netdev(net, dev) {
1494                         err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1495                         err = notifier_to_errno(err);
1496                         if (err)
1497                                 goto rollback;
1498
1499                         if (!(dev->flags & IFF_UP))
1500                                 continue;
1501
1502                         call_netdevice_notifier(nb, NETDEV_UP, dev);
1503                 }
1504         }
1505
1506 unlock:
1507         rtnl_unlock();
1508         return err;
1509
1510 rollback:
1511         last = dev;
1512         for_each_net(net) {
1513                 for_each_netdev(net, dev) {
1514                         if (dev == last)
1515                                 goto outroll;
1516
1517                         if (dev->flags & IFF_UP) {
1518                                 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1519                                                         dev);
1520                                 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1521                         }
1522                         call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1523                 }
1524         }
1525
1526 outroll:
1527         raw_notifier_chain_unregister(&netdev_chain, nb);
1528         goto unlock;
1529 }
1530 EXPORT_SYMBOL(register_netdevice_notifier);
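/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * A typical notifier (names hypothetical) recovers the device from the
 * notifier info and reacts to the events it cares about:
 *
 *      static int my_netdev_event(struct notifier_block *nb,
 *                                 unsigned long event, void *ptr)
 *      {
 *              struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *              switch (event) {
 *              case NETDEV_UP:
 *                      ...
 *                      break;
 *              case NETDEV_GOING_DOWN:
 *                      ...
 *                      break;
 *              }
 *              return NOTIFY_DONE;
 *      }
 *
 *      static struct notifier_block my_netdev_nb = {
 *              .notifier_call = my_netdev_event,
 *      };
 *
 *      register_netdevice_notifier(&my_netdev_nb);
 */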
1531
1532 /**
1533  *      unregister_netdevice_notifier - unregister a network notifier block
1534  *      @nb: notifier
1535  *
1536  *      Unregister a notifier previously registered by
1537  *      register_netdevice_notifier(). The notifier is unlinked from the
1538  *      kernel structures and may then be reused. A negative errno code
1539  *      is returned on a failure.
1540  *
1541  *      After unregistering, unregister and down device events are synthesized
1542  *      for all devices on the device list to the removed notifier to remove
1543  *      the need for special case cleanup code.
1544  */
1545
1546 int unregister_netdevice_notifier(struct notifier_block *nb)
1547 {
1548         struct net_device *dev;
1549         struct net *net;
1550         int err;
1551
1552         rtnl_lock();
1553         err = raw_notifier_chain_unregister(&netdev_chain, nb);
1554         if (err)
1555                 goto unlock;
1556
1557         for_each_net(net) {
1558                 for_each_netdev(net, dev) {
1559                         if (dev->flags & IFF_UP) {
1560                                 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1561                                                         dev);
1562                                 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1563                         }
1564                         call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1565                 }
1566         }
1567 unlock:
1568         rtnl_unlock();
1569         return err;
1570 }
1571 EXPORT_SYMBOL(unregister_netdevice_notifier);
1572
1573 /**
1574  *      call_netdevice_notifiers_info - call all network notifier blocks
1575  *      @val: value passed unmodified to notifier function
1576  *      @dev: net_device pointer passed unmodified to notifier function
1577  *      @info: notifier information data
1578  *
1579  *      Call all network notifier blocks.  Parameters and return value
1580  *      are as for raw_notifier_call_chain().
1581  */
1582
1583 static int call_netdevice_notifiers_info(unsigned long val,
1584                                          struct net_device *dev,
1585                                          struct netdev_notifier_info *info)
1586 {
1587         ASSERT_RTNL();
1588         netdev_notifier_info_init(info, dev);
1589         return raw_notifier_call_chain(&netdev_chain, val, info);
1590 }
1591
1592 /**
1593  *      call_netdevice_notifiers - call all network notifier blocks
1594  *      @val: value passed unmodified to notifier function
1595  *      @dev: net_device pointer passed unmodified to notifier function
1596  *
1597  *      Call all network notifier blocks.  Parameters and return value
1598  *      are as for raw_notifier_call_chain().
1599  */
1600
1601 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1602 {
1603         struct netdev_notifier_info info;
1604
1605         return call_netdevice_notifiers_info(val, dev, &info);
1606 }
1607 EXPORT_SYMBOL(call_netdevice_notifiers);
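
/*
 * Sketch of how a core path raises an event through the helper above; the
 * wrapper name is hypothetical, while the pattern (RTNL held, then notify)
 * is the one used throughout this file.
 */
#if 0
static void example_notify_mtu_changed(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
}
#endif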
1608
1609 static struct static_key netstamp_needed __read_mostly;
1610 #ifdef HAVE_JUMP_LABEL
1611 /* We are not allowed to call static_key_slow_dec() from irq context.
1612  * If net_disable_timestamp() is called from irq context, defer the
1613  * static_key_slow_dec() calls.
1614  */
1615 static atomic_t netstamp_needed_deferred;
1616 #endif
1617
1618 void net_enable_timestamp(void)
1619 {
1620 #ifdef HAVE_JUMP_LABEL
1621         int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1622
1623         if (deferred) {
1624                 while (--deferred)
1625                         static_key_slow_dec(&netstamp_needed);
1626                 return;
1627         }
1628 #endif
1629         static_key_slow_inc(&netstamp_needed);
1630 }
1631 EXPORT_SYMBOL(net_enable_timestamp);
1632
1633 void net_disable_timestamp(void)
1634 {
1635 #ifdef HAVE_JUMP_LABEL
1636         if (in_interrupt()) {
1637                 atomic_inc(&netstamp_needed_deferred);
1638                 return;
1639         }
1640 #endif
1641         static_key_slow_dec(&netstamp_needed);
1642 }
1643 EXPORT_SYMBOL(net_disable_timestamp);
1644
1645 static inline void net_timestamp_set(struct sk_buff *skb)
1646 {
1647         skb->tstamp.tv64 = 0;
1648         if (static_key_false(&netstamp_needed))
1649                 __net_timestamp(skb);
1650 }
1651
1652 #define net_timestamp_check(COND, SKB)                  \
1653         if (static_key_false(&netstamp_needed)) {               \
1654                 if ((COND) && !(SKB)->tstamp.tv64)      \
1655                         __net_timestamp(SKB);           \
1656         }
1657
1658 bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
1659 {
1660         unsigned int len;
1661
1662         if (!(dev->flags & IFF_UP))
1663                 return false;
1664
1665         len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1666         if (skb->len <= len)
1667                 return true;
1668
1669         /* if the skb is GSO, we don't care about the length, as the packet
1670          * will be segmented to fit before it actually hits the wire
1671          */
1672         if (skb_is_gso(skb))
1673                 return true;
1674
1675         return false;
1676 }
1677 EXPORT_SYMBOL_GPL(is_skb_forwardable);
1678
1679 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1680 {
1681         if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1682                 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1683                         atomic_long_inc(&dev->rx_dropped);
1684                         kfree_skb(skb);
1685                         return NET_RX_DROP;
1686                 }
1687         }
1688
1689         if (unlikely(!is_skb_forwardable(dev, skb))) {
1690                 atomic_long_inc(&dev->rx_dropped);
1691                 kfree_skb(skb);
1692                 return NET_RX_DROP;
1693         }
1694
1695         skb_scrub_packet(skb, true);
1696         skb->protocol = eth_type_trans(skb, dev);
1697
1698         return 0;
1699 }
1700 EXPORT_SYMBOL_GPL(__dev_forward_skb);
1701
1702 /**
1703  * dev_forward_skb - loopback an skb to another netif
1704  *
1705  * @dev: destination network device
1706  * @skb: buffer to forward
1707  *
1708  * return values:
1709  *      NET_RX_SUCCESS  (no congestion)
1710  *      NET_RX_DROP     (packet was dropped, but freed)
1711  *
1712  * dev_forward_skb can be used for injecting an skb from the
1713  * start_xmit function of one device into the receive queue
1714  * of another device.
1715  *
1716  * The receiving device may be in another namespace, so
1717  * we have to clear all information in the skb that could
1718  * impact namespace isolation.
1719  */
1720 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1721 {
1722         return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
1723 }
1724 EXPORT_SYMBOL_GPL(dev_forward_skb);
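
/*
 * Sketch of a paired virtual device using dev_forward_skb() from its
 * transmit routine, in the spirit of drivers such as veth; example_get_peer()
 * is a hypothetical helper standing in for the driver's peer lookup.
 */
#if 0
static netdev_tx_t example_pair_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct net_device *peer = example_get_peer(dev);	/* hypothetical */

	if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
		dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
#endif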
1725
1726 static inline int deliver_skb(struct sk_buff *skb,
1727                               struct packet_type *pt_prev,
1728                               struct net_device *orig_dev)
1729 {
1730         if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1731                 return -ENOMEM;
1732         atomic_inc(&skb->users);
1733         return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1734 }
1735
1736 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1737 {
1738         if (!ptype->af_packet_priv || !skb->sk)
1739                 return false;
1740
1741         if (ptype->id_match)
1742                 return ptype->id_match(ptype, skb->sk);
1743         else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1744                 return true;
1745
1746         return false;
1747 }
1748
1749 /*
1750  *      Support routine. Sends outgoing frames to any network
1751  *      taps currently in use.
1752  */
1753
1754 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1755 {
1756         struct packet_type *ptype;
1757         struct sk_buff *skb2 = NULL;
1758         struct packet_type *pt_prev = NULL;
1759
1760         rcu_read_lock();
1761         list_for_each_entry_rcu(ptype, &ptype_all, list) {
1762                 /* Never send packets back to the socket
1763                  * they originated from - MvS (miquels@drinkel.ow.org)
1764                  */
1765                 if ((ptype->dev == dev || !ptype->dev) &&
1766                     (!skb_loop_sk(ptype, skb))) {
1767                         if (pt_prev) {
1768                                 deliver_skb(skb2, pt_prev, skb->dev);
1769                                 pt_prev = ptype;
1770                                 continue;
1771                         }
1772
1773                         skb2 = skb_clone(skb, GFP_ATOMIC);
1774                         if (!skb2)
1775                                 break;
1776
1777                         net_timestamp_set(skb2);
1778
1779                         /* skb->nh should be correctly set by the sender,
1780                            so the check below is just protection against
1781                            buggy protocols.
1782                          */
1783                         skb_reset_mac_header(skb2);
1784
1785                         if (skb_network_header(skb2) < skb2->data ||
1786                             skb_network_header(skb2) > skb_tail_pointer(skb2)) {
1787                                 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1788                                                      ntohs(skb2->protocol),
1789                                                      dev->name);
1790                                 skb_reset_network_header(skb2);
1791                         }
1792
1793                         skb2->transport_header = skb2->network_header;
1794                         skb2->pkt_type = PACKET_OUTGOING;
1795                         pt_prev = ptype;
1796                 }
1797         }
1798         if (pt_prev)
1799                 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1800         rcu_read_unlock();
1801 }
1802
1803 /**
1804  * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1805  * @dev: Network device
1806  * @txq: number of queues available
1807  *
1808  * If real_num_tx_queues is changed the tc mappings may no longer be
1809  * valid. To resolve this verify the tc mapping remains valid and if
1810  * not, zero the affected mapping. With no priorities mapping to an
1811  * offset/count pair it will no longer be used. In the worst case, when
1812  * TC0 is invalid, nothing can be done, so priority mappings are disabled
1813  * entirely. It is expected that drivers will fix this mapping if they
1814  * can before calling netif_set_real_num_tx_queues.
1815  */
1816 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1817 {
1818         int i;
1819         struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1820
1821         /* If TC0 is invalidated disable TC mapping */
1822         if (tc->offset + tc->count > txq) {
1823                 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1824                 dev->num_tc = 0;
1825                 return;
1826         }
1827
1828         /* Invalidated prio-to-tc mappings are reset to TC0 */
1829         for (i = 1; i < TC_BITMASK + 1; i++) {
1830                 int q = netdev_get_prio_tc_map(dev, i);
1831
1832                 tc = &dev->tc_to_txq[q];
1833                 if (tc->offset + tc->count > txq) {
1834                         pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1835                                 i, q);
1836                         netdev_set_prio_tc_map(dev, i, 0);
1837                 }
1838         }
1839 }
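
/*
 * Sketch of the driver-side configuration these checks protect, assuming a
 * hypothetical driver that splits eight TX queues into two traffic classes
 * and maps the lower priorities to TC0 and the rest to TC1.
 */
#if 0
static void example_setup_tc(struct net_device *dev)
{
	int prio;

	netdev_set_num_tc(dev, 2);
	netdev_set_tc_queue(dev, 0, 4, 0);	/* TC0: queues 0-3 */
	netdev_set_tc_queue(dev, 1, 4, 4);	/* TC1: queues 4-7 */

	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, prio < 4 ? 0 : 1);
}
#endif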
1840
1841 #ifdef CONFIG_XPS
1842 static DEFINE_MUTEX(xps_map_mutex);
1843 #define xmap_dereference(P)             \
1844         rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1845
1846 static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1847                                         int cpu, u16 index)
1848 {
1849         struct xps_map *map = NULL;
1850         int pos;
1851
1852         if (dev_maps)
1853                 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1854
1855         for (pos = 0; map && pos < map->len; pos++) {
1856                 if (map->queues[pos] == index) {
1857                         if (map->len > 1) {
1858                                 map->queues[pos] = map->queues[--map->len];
1859                         } else {
1860                                 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1861                                 kfree_rcu(map, rcu);
1862                                 map = NULL;
1863                         }
1864                         break;
1865                 }
1866         }
1867
1868         return map;
1869 }
1870
1871 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
1872 {
1873         struct xps_dev_maps *dev_maps;
1874         int cpu, i;
1875         bool active = false;
1876
1877         mutex_lock(&xps_map_mutex);
1878         dev_maps = xmap_dereference(dev->xps_maps);
1879
1880         if (!dev_maps)
1881                 goto out_no_maps;
1882
1883         for_each_possible_cpu(cpu) {
1884                 for (i = index; i < dev->num_tx_queues; i++) {
1885                         if (!remove_xps_queue(dev_maps, cpu, i))
1886                                 break;
1887                 }
1888                 if (i == dev->num_tx_queues)
1889                         active = true;
1890         }
1891
1892         if (!active) {
1893                 RCU_INIT_POINTER(dev->xps_maps, NULL);
1894                 kfree_rcu(dev_maps, rcu);
1895         }
1896
1897         for (i = index; i < dev->num_tx_queues; i++)
1898                 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
1899                                              NUMA_NO_NODE);
1900
1901 out_no_maps:
1902         mutex_unlock(&xps_map_mutex);
1903 }
1904
1905 static struct xps_map *expand_xps_map(struct xps_map *map,
1906                                       int cpu, u16 index)
1907 {
1908         struct xps_map *new_map;
1909         int alloc_len = XPS_MIN_MAP_ALLOC;
1910         int i, pos;
1911
1912         for (pos = 0; map && pos < map->len; pos++) {
1913                 if (map->queues[pos] != index)
1914                         continue;
1915                 return map;
1916         }
1917
1918         /* Need to add queue to this CPU's existing map */
1919         if (map) {
1920                 if (pos < map->alloc_len)
1921                         return map;
1922
1923                 alloc_len = map->alloc_len * 2;
1924         }
1925
1926         /* Need to allocate a new map to store the queue on this CPU */
1927         new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
1928                                cpu_to_node(cpu));
1929         if (!new_map)
1930                 return NULL;
1931
1932         for (i = 0; i < pos; i++)
1933                 new_map->queues[i] = map->queues[i];
1934         new_map->alloc_len = alloc_len;
1935         new_map->len = pos;
1936
1937         return new_map;
1938 }
1939
1940 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
1941                         u16 index)
1942 {
1943         struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
1944         struct xps_map *map, *new_map;
1945         int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
1946         int cpu, numa_node_id = -2;
1947         bool active = false;
1948
1949         mutex_lock(&xps_map_mutex);
1950
1951         dev_maps = xmap_dereference(dev->xps_maps);
1952
1953         /* allocate memory for queue storage */
1954         for_each_online_cpu(cpu) {
1955                 if (!cpumask_test_cpu(cpu, mask))
1956                         continue;
1957
1958                 if (!new_dev_maps)
1959                         new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
1960                 if (!new_dev_maps) {
1961                         mutex_unlock(&xps_map_mutex);
1962                         return -ENOMEM;
1963                 }
1964
1965                 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
1966                                  NULL;
1967
1968                 map = expand_xps_map(map, cpu, index);
1969                 if (!map)
1970                         goto error;
1971
1972                 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
1973         }
1974
1975         if (!new_dev_maps)
1976                 goto out_no_new_maps;
1977
1978         for_each_possible_cpu(cpu) {
1979                 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
1980                         /* add queue to CPU maps */
1981                         int pos = 0;
1982
1983                         map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1984                         while ((pos < map->len) && (map->queues[pos] != index))
1985                                 pos++;
1986
1987                         if (pos == map->len)
1988                                 map->queues[map->len++] = index;
1989 #ifdef CONFIG_NUMA
1990                         if (numa_node_id == -2)
1991                                 numa_node_id = cpu_to_node(cpu);
1992                         else if (numa_node_id != cpu_to_node(cpu))
1993                                 numa_node_id = -1;
1994 #endif
1995                 } else if (dev_maps) {
1996                         /* fill in the new device map from the old device map */
1997                         map = xmap_dereference(dev_maps->cpu_map[cpu]);
1998                         RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
1999                 }
2000
2001         }
2002
2003         rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2004
2005         /* Cleanup old maps */
2006         if (dev_maps) {
2007                 for_each_possible_cpu(cpu) {
2008                         new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2009                         map = xmap_dereference(dev_maps->cpu_map[cpu]);
2010                         if (map && map != new_map)
2011                                 kfree_rcu(map, rcu);
2012                 }
2013
2014                 kfree_rcu(dev_maps, rcu);
2015         }
2016
2017         dev_maps = new_dev_maps;
2018         active = true;
2019
2020 out_no_new_maps:
2021         /* update Tx queue numa node */
2022         netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2023                                      (numa_node_id >= 0) ? numa_node_id :
2024                                      NUMA_NO_NODE);
2025
2026         if (!dev_maps)
2027                 goto out_no_maps;
2028
2029         /* remove the queue from CPUs that are no longer in the mask */
2030         for_each_possible_cpu(cpu) {
2031                 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2032                         continue;
2033
2034                 if (remove_xps_queue(dev_maps, cpu, index))
2035                         active = true;
2036         }
2037
2038         /* free map if not active */
2039         if (!active) {
2040                 RCU_INIT_POINTER(dev->xps_maps, NULL);
2041                 kfree_rcu(dev_maps, rcu);
2042         }
2043
2044 out_no_maps:
2045         mutex_unlock(&xps_map_mutex);
2046
2047         return 0;
2048 error:
2049         /* remove any maps that we added */
2050         for_each_possible_cpu(cpu) {
2051                 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2052                 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2053                                  NULL;
2054                 if (new_map && new_map != map)
2055                         kfree(new_map);
2056         }
2057
2058         mutex_unlock(&xps_map_mutex);
2059
2060         kfree(new_dev_maps);
2061         return -ENOMEM;
2062 }
2063 EXPORT_SYMBOL(netif_set_xps_queue);
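
/*
 * Sketch of a multiqueue driver pinning each TX queue to a single CPU with
 * netif_set_xps_queue(), e.g. from its open path; the round-robin choice of
 * CPU is only illustrative.
 */
#if 0
static void example_setup_xps(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->real_num_tx_queues; i++)
		netif_set_xps_queue(dev, cpumask_of(i % num_online_cpus()), i);
}
#endif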
2064
2065 #endif
2066 /*
2067  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2068  * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2069  */
2070 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2071 {
2072         int rc;
2073
2074         if (txq < 1 || txq > dev->num_tx_queues)
2075                 return -EINVAL;
2076
2077         if (dev->reg_state == NETREG_REGISTERED ||
2078             dev->reg_state == NETREG_UNREGISTERING) {
2079                 ASSERT_RTNL();
2080
2081                 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2082                                                   txq);
2083                 if (rc)
2084                         return rc;
2085
2086                 if (dev->num_tc)
2087                         netif_setup_tc(dev, txq);
2088
2089                 if (txq < dev->real_num_tx_queues) {
2090                         qdisc_reset_all_tx_gt(dev, txq);
2091 #ifdef CONFIG_XPS
2092                         netif_reset_xps_queues_gt(dev, txq);
2093 #endif
2094                 }
2095         }
2096
2097         dev->real_num_tx_queues = txq;
2098         return 0;
2099 }
2100 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
2101
2102 #ifdef CONFIG_SYSFS
2103 /**
2104  *      netif_set_real_num_rx_queues - set actual number of RX queues used
2105  *      @dev: Network device
2106  *      @rxq: Actual number of RX queues
2107  *
2108  *      This must be called either with the rtnl_lock held or before
2109  *      registration of the net device.  Returns 0 on success, or a
2110  *      negative error code.  If called before registration, it always
2111  *      succeeds.
2112  */
2113 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2114 {
2115         int rc;
2116
2117         if (rxq < 1 || rxq > dev->num_rx_queues)
2118                 return -EINVAL;
2119
2120         if (dev->reg_state == NETREG_REGISTERED) {
2121                 ASSERT_RTNL();
2122
2123                 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2124                                                   rxq);
2125                 if (rc)
2126                         return rc;
2127         }
2128
2129         dev->real_num_rx_queues = rxq;
2130         return 0;
2131 }
2132 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2133 #endif
2134
2135 /**
2136  * netif_get_num_default_rss_queues - default number of RSS queues
2137  *
2138  * This routine should set an upper limit on the number of RSS queues
2139  * used by default by multiqueue devices.
2140  */
2141 int netif_get_num_default_rss_queues(void)
2142 {
2143         return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2144 }
2145 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
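
/*
 * Sketch of how a driver might size its queues with the helpers above, for
 * instance from its probe path after alloc_etherdev_mq(); the hardware limit
 * argument is hypothetical and error handling is kept minimal.
 */
#if 0
static int example_setup_queues(struct net_device *dev, unsigned int hw_max)
{
	unsigned int n = min_t(unsigned int, hw_max,
			       netif_get_num_default_rss_queues());
	int err;

	err = netif_set_real_num_tx_queues(dev, n);
	if (err)
		return err;
	return netif_set_real_num_rx_queues(dev, n);
}
#endif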
2146
2147 static inline void __netif_reschedule(struct Qdisc *q)
2148 {
2149         struct softnet_data *sd;
2150         unsigned long flags;
2151
2152         local_irq_save(flags);
2153         sd = this_cpu_ptr(&softnet_data);
2154         q->next_sched = NULL;
2155         *sd->output_queue_tailp = q;
2156         sd->output_queue_tailp = &q->next_sched;
2157         raise_softirq_irqoff(NET_TX_SOFTIRQ);
2158         local_irq_restore(flags);
2159 }
2160
2161 void __netif_schedule(struct Qdisc *q)
2162 {
2163         if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2164                 __netif_reschedule(q);
2165 }
2166 EXPORT_SYMBOL(__netif_schedule);
2167
2168 struct dev_kfree_skb_cb {
2169         enum skb_free_reason reason;
2170 };
2171
2172 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
2173 {
2174         return (struct dev_kfree_skb_cb *)skb->cb;
2175 }
2176
2177 void netif_schedule_queue(struct netdev_queue *txq)
2178 {
2179         rcu_read_lock();
2180         if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2181                 struct Qdisc *q = rcu_dereference(txq->qdisc);
2182
2183                 __netif_schedule(q);
2184         }
2185         rcu_read_unlock();
2186 }
2187 EXPORT_SYMBOL(netif_schedule_queue);
2188
2189 /**
2190  *      netif_wake_subqueue - allow sending packets on subqueue
2191  *      @dev: network device
2192  *      @queue_index: sub queue index
2193  *
2194  * Resume individual transmit queue of a device with multiple transmit queues.
2195  */
2196 void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2197 {
2198         struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2199
2200         if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
2201                 struct Qdisc *q;
2202
2203                 rcu_read_lock();
2204                 q = rcu_dereference(txq->qdisc);
2205                 __netif_schedule(q);
2206                 rcu_read_unlock();
2207         }
2208 }
2209 EXPORT_SYMBOL(netif_wake_subqueue);
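
/*
 * Sketch of the usual pairing for the helper above: a hypothetical driver
 * calls netif_stop_subqueue() from its xmit path when a ring fills up, and
 * wakes the ring from its TX completion handler once enough descriptors
 * have been reclaimed.
 */
#if 0
static void example_tx_clean(struct net_device *dev, u16 ring,
			     unsigned int free_desc)
{
	if (__netif_subqueue_stopped(dev, ring) &&
	    free_desc > MAX_SKB_FRAGS + 1)
		netif_wake_subqueue(dev, ring);
}
#endif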
2210
2211 void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2212 {
2213         if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2214                 struct Qdisc *q;
2215
2216                 rcu_read_lock();
2217                 q = rcu_dereference(dev_queue->qdisc);
2218                 __netif_schedule(q);
2219                 rcu_read_unlock();
2220         }
2221 }
2222 EXPORT_SYMBOL(netif_tx_wake_queue);
2223
2224 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
2225 {
2226         unsigned long flags;
2227
2228         if (likely(atomic_read(&skb->users) == 1)) {
2229                 smp_rmb();
2230                 atomic_set(&skb->users, 0);
2231         } else if (likely(!atomic_dec_and_test(&skb->users))) {
2232                 return;
2233         }
2234         get_kfree_skb_cb(skb)->reason = reason;
2235         local_irq_save(flags);
2236         skb->next = __this_cpu_read(softnet_data.completion_queue);
2237         __this_cpu_write(softnet_data.completion_queue, skb);
2238         raise_softirq_irqoff(NET_TX_SOFTIRQ);
2239         local_irq_restore(flags);
2240 }
2241 EXPORT_SYMBOL(__dev_kfree_skb_irq);
2242
2243 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
2244 {
2245         if (in_irq() || irqs_disabled())
2246                 __dev_kfree_skb_irq(skb, reason);
2247         else
2248                 dev_kfree_skb(skb);
2249 }
2250 EXPORT_SYMBOL(__dev_kfree_skb_any);
2251
2252
2253 /**
2254  * netif_device_detach - mark device as removed
2255  * @dev: network device
2256  *
2257  * Mark the device as removed from the system and therefore no longer available.
2258  */
2259 void netif_device_detach(struct net_device *dev)
2260 {
2261         if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2262             netif_running(dev)) {
2263                 netif_tx_stop_all_queues(dev);
2264         }
2265 }
2266 EXPORT_SYMBOL(netif_device_detach);
2267
2268 /**
2269  * netif_device_attach - mark device as attached
2270  * @dev: network device
2271  *
2272  * Mark the device as attached to the system and restart it if needed.
2273  */
2274 void netif_device_attach(struct net_device *dev)
2275 {
2276         if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2277             netif_running(dev)) {
2278                 netif_tx_wake_all_queues(dev);
2279                 __netdev_watchdog_up(dev);
2280         }
2281 }
2282 EXPORT_SYMBOL(netif_device_attach);
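
/*
 * Sketch of the common suspend/resume pairing for the two helpers above;
 * the example_hw_*() calls are hypothetical stand-ins for driver-specific
 * hardware handling.
 */
#if 0
static int example_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	netif_device_detach(dev);
	example_hw_power_down(dev);		/* hypothetical */
	return 0;
}

static int example_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	example_hw_power_up(dev);		/* hypothetical */
	netif_device_attach(dev);
	return 0;
}
#endif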
2283
2284 static void skb_warn_bad_offload(const struct sk_buff *skb)
2285 {
2286         static const netdev_features_t null_features = 0;
2287         struct net_device *dev = skb->dev;
2288         const char *driver = "";
2289
2290         if (!net_ratelimit())
2291                 return;
2292
2293         if (dev && dev->dev.parent)
2294                 driver = dev_driver_string(dev->dev.parent);
2295
2296         WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2297              "gso_type=%d ip_summed=%d\n",
2298              driver, dev ? &dev->features : &null_features,
2299              skb->sk ? &skb->sk->sk_route_caps : &null_features,
2300              skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2301              skb_shinfo(skb)->gso_type, skb->ip_summed);
2302 }
2303
2304 /*
2305  * Invalidate hardware checksum when packet is to be mangled, and
2306  * complete checksum manually on outgoing path.
2307  */
2308 int skb_checksum_help(struct sk_buff *skb)
2309 {
2310         __wsum csum;
2311         int ret = 0, offset;
2312
2313         if (skb->ip_summed == CHECKSUM_COMPLETE)
2314                 goto out_set_summed;
2315
2316         if (unlikely(skb_shinfo(skb)->gso_size)) {
2317                 skb_warn_bad_offload(skb);
2318                 return -EINVAL;
2319         }
2320
2321         /* Before computing a checksum, we should make sure no frag could
2322          * be modified by an external entity: the checksum could be wrong.
2323          */
2324         if (skb_has_shared_frag(skb)) {
2325                 ret = __skb_linearize(skb);
2326                 if (ret)
2327                         goto out;
2328         }
2329
2330         offset = skb_checksum_start_offset(skb);
2331         BUG_ON(offset >= skb_headlen(skb));
2332         csum = skb_checksum(skb, offset, skb->len - offset, 0);
2333
2334         offset += skb->csum_offset;
2335         BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2336
2337         if (skb_cloned(skb) &&
2338             !skb_clone_writable(skb, offset + sizeof(__sum16))) {
2339                 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2340                 if (ret)
2341                         goto out;
2342         }
2343
2344         *(__sum16 *)(skb->data + offset) = csum_fold(csum);
2345 out_set_summed:
2346         skb->ip_summed = CHECKSUM_NONE;
2347 out:
2348         return ret;
2349 }
2350 EXPORT_SYMBOL(skb_checksum_help);
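
/*
 * Sketch of the typical fallback in a driver transmit path when the hardware
 * cannot checksum a given protocol; validate_xmit_skb() further down performs
 * the equivalent step for the core transmit path.
 */
#if 0
static int example_tx_csum(struct sk_buff *skb, netdev_features_t features)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !(features & NETIF_F_ALL_CSUM))
		return skb_checksum_help(skb);
	return 0;
}
#endif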
2351
2352 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2353 {
2354         unsigned int vlan_depth = skb->mac_len;
2355         __be16 type = skb->protocol;
2356
2357         /* Tunnel GSO handlers can set the protocol to Ethernet. */
2358         if (type == htons(ETH_P_TEB)) {
2359                 struct ethhdr *eth;
2360
2361                 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2362                         return 0;
2363
2364                 eth = (struct ethhdr *)skb_mac_header(skb);
2365                 type = eth->h_proto;
2366         }
2367
2368         /* if skb->protocol is 802.1Q/AD then the header should already be
2369          * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
2370          * ETH_HLEN otherwise
2371          */
2372         if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
2373                 if (vlan_depth) {
2374                         if (WARN_ON(vlan_depth < VLAN_HLEN))
2375                                 return 0;
2376                         vlan_depth -= VLAN_HLEN;
2377                 } else {
2378                         vlan_depth = ETH_HLEN;
2379                 }
2380                 do {
2381                         struct vlan_hdr *vh;
2382
2383                         if (unlikely(!pskb_may_pull(skb,
2384                                                     vlan_depth + VLAN_HLEN)))
2385                                 return 0;
2386
2387                         vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2388                         type = vh->h_vlan_encapsulated_proto;
2389                         vlan_depth += VLAN_HLEN;
2390                 } while (type == htons(ETH_P_8021Q) ||
2391                          type == htons(ETH_P_8021AD));
2392         }
2393
2394         *depth = vlan_depth;
2395
2396         return type;
2397 }
2398
2399 /**
2400  *      skb_mac_gso_segment - mac layer segmentation handler.
2401  *      @skb: buffer to segment
2402  *      @features: features for the output path (see dev->features)
2403  */
2404 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2405                                     netdev_features_t features)
2406 {
2407         struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2408         struct packet_offload *ptype;
2409         int vlan_depth = skb->mac_len;
2410         __be16 type = skb_network_protocol(skb, &vlan_depth);
2411
2412         if (unlikely(!type))
2413                 return ERR_PTR(-EINVAL);
2414
2415         __skb_pull(skb, vlan_depth);
2416
2417         rcu_read_lock();
2418         list_for_each_entry_rcu(ptype, &offload_base, list) {
2419                 if (ptype->type == type && ptype->callbacks.gso_segment) {
2420                         segs = ptype->callbacks.gso_segment(skb, features);
2421                         break;
2422                 }
2423         }
2424         rcu_read_unlock();
2425
2426         __skb_push(skb, skb->data - skb_mac_header(skb));
2427
2428         return segs;
2429 }
2430 EXPORT_SYMBOL(skb_mac_gso_segment);
2431
2432
2433 /* Open vSwitch calls this on the RX path, so we need a different check.
2434  */
2435 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2436 {
2437         if (tx_path)
2438                 return skb->ip_summed != CHECKSUM_PARTIAL;
2439         else
2440                 return skb->ip_summed == CHECKSUM_NONE;
2441 }
2442
2443 /**
2444  *      __skb_gso_segment - Perform segmentation on skb.
2445  *      @skb: buffer to segment
2446  *      @features: features for the output path (see dev->features)
2447  *      @tx_path: whether it is called in TX path
2448  *
2449  *      This function segments the given skb and returns a list of segments.
2450  *
2451  *      It may return NULL if the skb requires no segmentation.  This is
2452  *      only possible when GSO is used for verifying header integrity.
2453  */
2454 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2455                                   netdev_features_t features, bool tx_path)
2456 {
2457         if (unlikely(skb_needs_check(skb, tx_path))) {
2458                 int err;
2459
2460                 skb_warn_bad_offload(skb);
2461
2462                 err = skb_cow_head(skb, 0);
2463                 if (err < 0)
2464                         return ERR_PTR(err);
2465         }
2466
2467         SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
2468         SKB_GSO_CB(skb)->encap_level = 0;
2469
2470         skb_reset_mac_header(skb);
2471         skb_reset_mac_len(skb);
2472
2473         return skb_mac_gso_segment(skb, features);
2474 }
2475 EXPORT_SYMBOL(__skb_gso_segment);
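
/*
 * Sketch of a software GSO fallback built on the entry point above, roughly
 * what dev_hard_start_xmit()/validate_xmit_skb() arrange for the stack;
 * example_xmit_one() is a hypothetical per-packet transmit helper.
 */
#if 0
static int example_gso_fallback(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs, *nskb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)
		return example_xmit_one(skb);		/* hypothetical */

	consume_skb(skb);
	while (segs) {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		example_xmit_one(nskb);			/* hypothetical */
	}
	return 0;
}
#endif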
2476
2477 /* Take action when hardware reception checksum errors are detected. */
2478 #ifdef CONFIG_BUG
2479 void netdev_rx_csum_fault(struct net_device *dev)
2480 {
2481         if (net_ratelimit()) {
2482                 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2483                 dump_stack();
2484         }
2485 }
2486 EXPORT_SYMBOL(netdev_rx_csum_fault);
2487 #endif
2488
2489 /* Actually, we should eliminate this check as soon as we know that:
2490  * 1. An IOMMU is present and can map all of the memory.
2491  * 2. No high memory really exists on this machine.
2492  */
2493
2494 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2495 {
2496 #ifdef CONFIG_HIGHMEM
2497         int i;
2498         if (!(dev->features & NETIF_F_HIGHDMA)) {
2499                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2500                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2501                         if (PageHighMem(skb_frag_page(frag)))
2502                                 return 1;
2503                 }
2504         }
2505
2506         if (PCI_DMA_BUS_IS_PHYS) {
2507                 struct device *pdev = dev->dev.parent;
2508
2509                 if (!pdev)
2510                         return 0;
2511                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2512                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2513                         dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2514                         if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2515                                 return 1;
2516                 }
2517         }
2518 #endif
2519         return 0;
2520 }
2521
2522 /* If MPLS offload request, verify we are testing hardware MPLS features
2523  * instead of standard features for the netdev.
2524  */
2525 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
2526 static netdev_features_t net_mpls_features(struct sk_buff *skb,
2527                                            netdev_features_t features,
2528                                            __be16 type)
2529 {
2530         if (eth_p_mpls(type))
2531                 features &= skb->dev->mpls_features;
2532
2533         return features;
2534 }
2535 #else
2536 static netdev_features_t net_mpls_features(struct sk_buff *skb,
2537                                            netdev_features_t features,
2538                                            __be16 type)
2539 {
2540         return features;
2541 }
2542 #endif
2543
2544 static netdev_features_t harmonize_features(struct sk_buff *skb,
2545         netdev_features_t features)
2546 {
2547         int tmp;
2548         __be16 type;
2549
2550         type = skb_network_protocol(skb, &tmp);
2551         features = net_mpls_features(skb, features, type);
2552
2553         if (skb->ip_summed != CHECKSUM_NONE &&
2554             !can_checksum_protocol(features, type)) {
2555                 features &= ~NETIF_F_ALL_CSUM;
2556         } else if (illegal_highdma(skb->dev, skb)) {
2557                 features &= ~NETIF_F_SG;
2558         }
2559
2560         return features;
2561 }
2562
2563 netdev_features_t netif_skb_features(struct sk_buff *skb)
2564 {
2565         const struct net_device *dev = skb->dev;
2566         netdev_features_t features = dev->features;
2567         u16 gso_segs = skb_shinfo(skb)->gso_segs;
2568         __be16 protocol = skb->protocol;
2569
2570         if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
2571                 features &= ~NETIF_F_GSO_MASK;
2572
2573         if (!vlan_tx_tag_present(skb)) {
2574                 if (unlikely(protocol == htons(ETH_P_8021Q) ||
2575                              protocol == htons(ETH_P_8021AD))) {
2576                         struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2577                         protocol = veh->h_vlan_encapsulated_proto;
2578                 } else {
2579                         return harmonize_features(skb, features);
2580                 }
2581         }
2582
2583         features = netdev_intersect_features(features,
2584                                              dev->vlan_features |
2585                                              NETIF_F_HW_VLAN_CTAG_TX |
2586                                              NETIF_F_HW_VLAN_STAG_TX);
2587
2588         if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
2589                 features = netdev_intersect_features(features,
2590                                                      NETIF_F_SG |
2591                                                      NETIF_F_HIGHDMA |
2592                                                      NETIF_F_FRAGLIST |
2593                                                      NETIF_F_GEN_CSUM |
2594                                                      NETIF_F_HW_VLAN_CTAG_TX |
2595                                                      NETIF_F_HW_VLAN_STAG_TX);
2596
2597         return harmonize_features(skb, features);
2598 }
2599 EXPORT_SYMBOL(netif_skb_features);
2600
2601 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
2602                     struct netdev_queue *txq, bool more)
2603 {
2604         unsigned int len;
2605         int rc;
2606
2607         if (!list_empty(&ptype_all))
2608                 dev_queue_xmit_nit(skb, dev);
2609
2610         len = skb->len;
2611         trace_net_dev_start_xmit(skb, dev);
2612         rc = netdev_start_xmit(skb, dev, txq, more);
2613         trace_net_dev_xmit(skb, rc, dev, len);
2614
2615         return rc;
2616 }
2617
2618 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
2619                                     struct netdev_queue *txq, int *ret)
2620 {
2621         struct sk_buff *skb = first;
2622         int rc = NETDEV_TX_OK;
2623
2624         while (skb) {
2625                 struct sk_buff *next = skb->next;
2626
2627                 skb->next = NULL;
2628                 rc = xmit_one(skb, dev, txq, next != NULL);
2629                 if (unlikely(!dev_xmit_complete(rc))) {
2630                         skb->next = next;
2631                         goto out;
2632                 }
2633
2634                 skb = next;
2635                 if (netif_xmit_stopped(txq) && skb) {
2636                         rc = NETDEV_TX_BUSY;
2637                         break;
2638                 }
2639         }
2640
2641 out:
2642         *ret = rc;
2643         return skb;
2644 }
2645
2646 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
2647                                           netdev_features_t features)
2648 {
2649         if (vlan_tx_tag_present(skb) &&
2650             !vlan_hw_offload_capable(features, skb->vlan_proto))
2651                 skb = __vlan_hwaccel_push_inside(skb);
2652         return skb;
2653 }
2654
2655 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
2656 {
2657         netdev_features_t features;
2658
2659         if (skb->next)
2660                 return skb;
2661
2662         features = netif_skb_features(skb);
2663         skb = validate_xmit_vlan(skb, features);
2664         if (unlikely(!skb))
2665                 goto out_null;
2666
2667         /* If encapsulation offload request, verify we are testing
2668          * hardware encapsulation features instead of standard
2669          * features for the netdev
2670          */
2671         if (skb->encapsulation)
2672                 features &= dev->hw_enc_features;
2673
2674         if (netif_needs_gso(dev, skb, features)) {
2675                 struct sk_buff *segs;
2676
2677                 segs = skb_gso_segment(skb, features);
2678                 if (IS_ERR(segs)) {
2679                         goto out_kfree_skb;
2680                 } else if (segs) {
2681                         consume_skb(skb);
2682                         skb = segs;
2683                 }
2684         } else {
2685                 if (skb_needs_linearize(skb, features) &&
2686                     __skb_linearize(skb))
2687                         goto out_kfree_skb;
2688
2689                 /* If packet is not checksummed and device does not
2690                  * support checksumming for this protocol, complete
2691                  * checksumming here.
2692                  */
2693                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2694                         if (skb->encapsulation)
2695                                 skb_set_inner_transport_header(skb,
2696                                                                skb_checksum_start_offset(skb));
2697                         else
2698                                 skb_set_transport_header(skb,
2699                                                          skb_checksum_start_offset(skb));
2700                         if (!(features & NETIF_F_ALL_CSUM) &&
2701                             skb_checksum_help(skb))
2702                                 goto out_kfree_skb;
2703                 }
2704         }
2705
2706         return skb;
2707
2708 out_kfree_skb:
2709         kfree_skb(skb);
2710 out_null:
2711         return NULL;
2712 }
2713
2714 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
2715 {
2716         struct sk_buff *next, *head = NULL, *tail;
2717
2718         for (; skb != NULL; skb = next) {
2719                 next = skb->next;
2720                 skb->next = NULL;
2721
2722                 /* in case the skb won't be segmented, point it to itself */
2723                 skb->prev = skb;
2724
2725                 skb = validate_xmit_skb(skb, dev);
2726                 if (!skb)
2727                         continue;
2728
2729                 if (!head)
2730                         head = skb;
2731                 else
2732                         tail->next = skb;
2733                 /* If skb was segmented, skb->prev points to
2734                  * the last segment. If not, it still contains skb.
2735                  */
2736                 tail = skb->prev;
2737         }
2738         return head;
2739 }
2740
2741 static void qdisc_pkt_len_init(struct sk_buff *skb)
2742 {
2743         const struct skb_shared_info *shinfo = skb_shinfo(skb);
2744
2745         qdisc_skb_cb(skb)->pkt_len = skb->len;
2746
2747         /* To get a more precise estimate of the bytes sent on the wire,
2748          * we add the header size of every segment to pkt_len
2749          */
2750         if (shinfo->gso_size)  {
2751                 unsigned int hdr_len;
2752                 u16 gso_segs = shinfo->gso_segs;
2753
2754                 /* mac layer + network layer */
2755                 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2756
2757                 /* + transport layer */
2758                 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2759                         hdr_len += tcp_hdrlen(skb);
2760                 else
2761                         hdr_len += sizeof(struct udphdr);
2762
2763                 if (shinfo->gso_type & SKB_GSO_DODGY)
2764                         gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2765                                                 shinfo->gso_size);
2766
2767                 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
2768         }
2769 }
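
/*
 * Worked example of the estimate above, with illustrative numbers: a TSO skb
 * with skb->len = 7300, gso_size = 1448 and 66 bytes of Ethernet + IPv4 + TCP
 * headers carries 7234 bytes of payload, so gso_segs = 5 and
 * pkt_len = 7300 + (5 - 1) * 66 = 7564 bytes expected on the wire.
 */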
2770
2771 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2772                                  struct net_device *dev,
2773                                  struct netdev_queue *txq)
2774 {
2775         spinlock_t *root_lock = qdisc_lock(q);
2776         bool contended;
2777         int rc;
2778
2779         qdisc_pkt_len_init(skb);
2780         qdisc_calculate_pkt_len(skb, q);
2781         /*
2782          * Heuristic to force contended enqueues to serialize on a
2783          * separate lock before trying to get qdisc main lock.
2784          * This permits __QDISC___STATE_RUNNING owner to get the lock more
2785          * often and dequeue packets faster.
2786          */
2787         contended = qdisc_is_running(q);
2788         if (unlikely(contended))
2789                 spin_lock(&q->busylock);
2790
2791         spin_lock(root_lock);
2792         if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2793                 kfree_skb(skb);
2794                 rc = NET_XMIT_DROP;
2795         } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2796                    qdisc_run_begin(q)) {
2797                 /*
2798                  * This is a work-conserving queue; there are no old skbs
2799                  * waiting to be sent out; and the qdisc is not running -
2800                  * xmit the skb directly.
2801                  */
2802
2803                 qdisc_bstats_update(q, skb);
2804
2805                 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
2806                         if (unlikely(contended)) {
2807                                 spin_unlock(&q->busylock);
2808                                 contended = false;
2809                         }
2810                         __qdisc_run(q);
2811                 } else
2812                         qdisc_run_end(q);
2813
2814                 rc = NET_XMIT_SUCCESS;
2815         } else {
2816                 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2817                 if (qdisc_run_begin(q)) {
2818                         if (unlikely(contended)) {
2819                                 spin_unlock(&q->busylock);
2820                                 contended = false;
2821                         }
2822                         __qdisc_run(q);
2823                 }
2824         }
2825         spin_unlock(root_lock);
2826         if (unlikely(contended))
2827                 spin_unlock(&q->busylock);
2828         return rc;
2829 }
2830
2831 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
2832 static void skb_update_prio(struct sk_buff *skb)
2833 {
2834         struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2835
2836         if (!skb->priority && skb->sk && map) {
2837                 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2838
2839                 if (prioidx < map->priomap_len)
2840                         skb->priority = map->priomap[prioidx];
2841         }
2842 }
2843 #else
2844 #define skb_update_prio(skb)
2845 #endif
2846
2847 static DEFINE_PER_CPU(int, xmit_recursion);
2848 #define RECURSION_LIMIT 10
2849
2850 /**
2851  *      dev_loopback_xmit - loop back @skb
2852  *      @skb: buffer to transmit
2853  */
2854 int dev_loopback_xmit(struct sk_buff *skb)
2855 {
2856         skb_reset_mac_header(skb);
2857         __skb_pull(skb, skb_network_offset(skb));
2858         skb->pkt_type = PACKET_LOOPBACK;
2859         skb->ip_summed = CHECKSUM_UNNECESSARY;
2860         WARN_ON(!skb_dst(skb));
2861         skb_dst_force(skb);
2862         netif_rx_ni(skb);
2863         return 0;
2864 }
2865 EXPORT_SYMBOL(dev_loopback_xmit);
2866
2867 /**
2868  *      __dev_queue_xmit - transmit a buffer
2869  *      @skb: buffer to transmit
2870  *      @accel_priv: private data used for L2 forwarding offload
2871  *
2872  *      Queue a buffer for transmission to a network device. The caller must
2873  *      have set the device and priority and built the buffer before calling
2874  *      this function. The function can be called from an interrupt.
2875  *
2876  *      A negative errno code is returned on a failure. A success does not
2877  *      guarantee the frame will be transmitted as it may be dropped due
2878  *      to congestion or traffic shaping.
2879  *
2880  * -----------------------------------------------------------------------------------
2881  *      I notice this method can also return errors from the queue disciplines,
2882  *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
2883  *      be positive.
2884  *
2885  *      Regardless of the return value, the skb is consumed, so it is currently
2886  *      difficult to retry a send to this method.  (You can bump the ref count
2887  *      before sending to hold a reference for retry if you are careful.)
2888  *
2889  *      When calling this method, interrupts MUST be enabled.  This is because
2890  *      the BH enable code must have IRQs enabled so that it will not deadlock.
2891  *          --BLG
2892  */
2893 static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
2894 {
2895         struct net_device *dev = skb->dev;
2896         struct netdev_queue *txq;
2897         struct Qdisc *q;
2898         int rc = -ENOMEM;
2899
2900         skb_reset_mac_header(skb);
2901
2902         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
2903                 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
2904
2905         /* Disable soft irqs for various locks below. Also
2906          * stops preemption for RCU.
2907          */
2908         rcu_read_lock_bh();
2909
2910         skb_update_prio(skb);
2911
2912         /* If the device/qdisc doesn't need skb->dst, release it right now
2913          * while it's hot in this CPU's cache.
2914          */
2915         if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2916                 skb_dst_drop(skb);
2917         else
2918                 skb_dst_force(skb);
2919
2920         txq = netdev_pick_tx(dev, skb, accel_priv);
2921         q = rcu_dereference_bh(txq->qdisc);
2922
2923 #ifdef CONFIG_NET_CLS_ACT
2924         skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2925 #endif
2926         trace_net_dev_queue(skb);
2927         if (q->enqueue) {
2928                 rc = __dev_xmit_skb(skb, q, dev, txq);
2929                 goto out;
2930         }
2931
2932         /* The device has no queue. Common case for software devices:
2933            loopback, all sorts of tunnels...
2934
2935            Really, it is unlikely that netif_tx_lock protection is necessary
2936            here.  (f.e. loopback and IP tunnels are clean ignoring statistics
2937            counters.)
2938            However, it is possible that they rely on the protection
2939            provided by us here.
2940
2941            Check this and take the lock anyway; it is not prone to deadlocks.
2942            Alternatively, shoot the noqueue qdisc, which is even simpler 8)
2943          */
2944         if (dev->flags & IFF_UP) {
2945                 int cpu = smp_processor_id(); /* ok because BHs are off */
2946
2947                 if (txq->xmit_lock_owner != cpu) {
2948
2949                         if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2950                                 goto recursion_alert;
2951
2952                         skb = validate_xmit_skb(skb, dev);
2953                         if (!skb)
2954                                 goto drop;
2955
2956                         HARD_TX_LOCK(dev, txq, cpu);
2957
2958                         if (!netif_xmit_stopped(txq)) {
2959                                 __this_cpu_inc(xmit_recursion);
2960                                 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
2961                                 __this_cpu_dec(xmit_recursion);
2962                                 if (dev_xmit_complete(rc)) {
2963                                         HARD_TX_UNLOCK(dev, txq);
2964                                         goto out;
2965                                 }
2966                         }
2967                         HARD_TX_UNLOCK(dev, txq);
2968                         net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2969                                              dev->name);
2970                 } else {
2971                         /* Recursion is detected! It is possible,
2972                          * unfortunately
2973                          */
2974 recursion_alert:
2975                         net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2976                                              dev->name);
2977                 }
2978         }
2979
2980         rc = -ENETDOWN;
2981 drop:
2982         rcu_read_unlock_bh();
2983
2984         atomic_long_inc(&dev->tx_dropped);
2985         kfree_skb_list(skb);
2986         return rc;
2987 out:
2988         rcu_read_unlock_bh();
2989         return rc;
2990 }
2991
2992 int dev_queue_xmit(struct sk_buff *skb)
2993 {
2994         return __dev_queue_xmit(skb, NULL);
2995 }
2996 EXPORT_SYMBOL(dev_queue_xmit);
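
/*
 * Sketch of a minimal in-kernel sender built on dev_queue_xmit(); building
 * the link-layer header (e.g. via dev_hard_header()) is omitted for brevity
 * and the function name is hypothetical.
 */
#if 0
static int example_send_raw(struct net_device *dev, const void *payload,
			    unsigned int len, __be16 proto)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	memcpy(skb_put(skb, len), payload, len);
	skb->dev = dev;
	skb->protocol = proto;

	return dev_queue_xmit(skb);
}
#endif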
2997
2998 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
2999 {
3000         return __dev_queue_xmit(skb, accel_priv);
3001 }
3002 EXPORT_SYMBOL(dev_queue_xmit_accel);
3003
3004
3005 /*=======================================================================
3006                         Receiver routines
3007   =======================================================================*/
3008
3009 int netdev_max_backlog __read_mostly = 1000;
3010 EXPORT_SYMBOL(netdev_max_backlog);
3011
3012 int netdev_tstamp_prequeue __read_mostly = 1;
3013 int netdev_budget __read_mostly = 300;
3014 int weight_p __read_mostly = 64;            /* old backlog weight */
3015
3016 /* Called with irq disabled */
3017 static inline void ____napi_schedule(struct softnet_data *sd,
3018                                      struct napi_struct *napi)
3019 {
3020         list_add_tail(&napi->poll_list, &sd->poll_list);
3021         __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3022 }
3023
3024 #ifdef CONFIG_RPS
3025
3026 /* One global table that all flow-based protocols share. */
3027 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
3028 EXPORT_SYMBOL(rps_sock_flow_table);
3029
3030 struct static_key rps_needed __read_mostly;
3031
3032 static struct rps_dev_flow *
3033 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3034             struct rps_dev_flow *rflow, u16 next_cpu)
3035 {
3036         if (next_cpu != RPS_NO_CPU) {
3037 #ifdef CONFIG_RFS_ACCEL
3038                 struct netdev_rx_queue *rxqueue;
3039                 struct rps_dev_flow_table *flow_table;
3040                 struct rps_dev_flow *old_rflow;
3041                 u32 flow_id;
3042                 u16 rxq_index;
3043                 int rc;
3044
3045                 /* Should we steer this flow to a different hardware queue? */
3046                 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3047                     !(dev->features & NETIF_F_NTUPLE))
3048                         goto out;
3049                 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3050                 if (rxq_index == skb_get_rx_queue(skb))
3051                         goto out;
3052
3053                 rxqueue = dev->_rx + rxq_index;
3054                 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3055                 if (!flow_table)
3056                         goto out;
3057                 flow_id = skb_get_hash(skb) & flow_table->mask;
3058                 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3059                                                         rxq_index, flow_id);
3060                 if (rc < 0)
3061                         goto out;
3062                 old_rflow = rflow;
3063                 rflow = &flow_table->flows[flow_id];
3064                 rflow->filter = rc;
3065                 if (old_rflow->filter == rflow->filter)
3066                         old_rflow->filter = RPS_NO_FILTER;
3067         out:
3068 #endif
3069                 rflow->last_qtail =
3070                         per_cpu(softnet_data, next_cpu).input_queue_head;
3071         }
3072
3073         rflow->cpu = next_cpu;
3074         return rflow;
3075 }
3076
3077 /*
3078  * get_rps_cpu is called from netif_receive_skb and returns the target
3079  * CPU from the RPS map of the receiving queue for a given skb.
3080  * rcu_read_lock must be held on entry.
3081  */
3082 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3083                        struct rps_dev_flow **rflowp)
3084 {
3085         struct netdev_rx_queue *rxqueue;
3086         struct rps_map *map;
3087         struct rps_dev_flow_table *flow_table;
3088         struct rps_sock_flow_table *sock_flow_table;
3089         int cpu = -1;
3090         u16 tcpu;
3091         u32 hash;
3092
3093         if (skb_rx_queue_recorded(skb)) {
3094                 u16 index = skb_get_rx_queue(skb);
3095                 if (unlikely(index >= dev->real_num_rx_queues)) {
3096                         WARN_ONCE(dev->real_num_rx_queues > 1,
3097                                   "%s received packet on queue %u, but number "
3098                                   "of RX queues is %u\n",
3099                                   dev->name, index, dev->real_num_rx_queues);
3100                         goto done;
3101                 }
3102                 rxqueue = dev->_rx + index;
3103         } else
3104                 rxqueue = dev->_rx;
3105
3106         map = rcu_dereference(rxqueue->rps_map);
3107         if (map) {
3108                 if (map->len == 1 &&
3109                     !rcu_access_pointer(rxqueue->rps_flow_table)) {
3110                         tcpu = map->cpus[0];
3111                         if (cpu_online(tcpu))
3112                                 cpu = tcpu;
3113                         goto done;
3114                 }
3115         } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
3116                 goto done;
3117         }
3118
3119         skb_reset_network_header(skb);
3120         hash = skb_get_hash(skb);
3121         if (!hash)
3122                 goto done;
3123
3124         flow_table = rcu_dereference(rxqueue->rps_flow_table);
3125         sock_flow_table = rcu_dereference(rps_sock_flow_table);
3126         if (flow_table && sock_flow_table) {
3127                 u16 next_cpu;
3128                 struct rps_dev_flow *rflow;
3129
3130                 rflow = &flow_table->flows[hash & flow_table->mask];
3131                 tcpu = rflow->cpu;
3132
3133                 next_cpu = sock_flow_table->ents[hash & sock_flow_table->mask];
3134
3135                 /*
3136                  * If the desired CPU (where last recvmsg was done) is
3137                  * different from current CPU (one in the rx-queue flow
3138                  * table entry), switch if one of the following holds:
3139                  *   - Current CPU is unset (equal to RPS_NO_CPU).
3140                  *   - Current CPU is offline.
3141                  *   - The current CPU's queue tail has advanced beyond the
3142                  *     last packet that was enqueued using this table entry.
3143                  *     This guarantees that all previous packets for the flow
3144                  *     have been dequeued, thus preserving in order delivery.
3145                  */
3146                 if (unlikely(tcpu != next_cpu) &&
3147                     (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
3148                      ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
3149                       rflow->last_qtail)) >= 0)) {
3150                         tcpu = next_cpu;
3151                         rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
3152                 }
3153
3154                 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
3155                         *rflowp = rflow;
3156                         cpu = tcpu;
3157                         goto done;
3158                 }
3159         }
3160
3161         if (map) {
3162                 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
3163                 if (cpu_online(tcpu)) {
3164                         cpu = tcpu;
3165                         goto done;
3166                 }
3167         }
3168
3169 done:
3170         return cpu;
3171 }
3172
3173 #ifdef CONFIG_RFS_ACCEL
3174
3175 /**
3176  * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3177  * @dev: Device on which the filter was set
3178  * @rxq_index: RX queue index
3179  * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3180  * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3181  *
3182  * Drivers that implement ndo_rx_flow_steer() should periodically call
3183  * this function for each installed filter and remove the filters for
3184  * which it returns %true.
3185  */
3186 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3187                          u32 flow_id, u16 filter_id)
3188 {
3189         struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3190         struct rps_dev_flow_table *flow_table;
3191         struct rps_dev_flow *rflow;
3192         bool expire = true;
3193         int cpu;
3194
3195         rcu_read_lock();
3196         flow_table = rcu_dereference(rxqueue->rps_flow_table);
3197         if (flow_table && flow_id <= flow_table->mask) {
3198                 rflow = &flow_table->flows[flow_id];
3199                 cpu = ACCESS_ONCE(rflow->cpu);
3200                 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
3201                     ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3202                            rflow->last_qtail) <
3203                      (int)(10 * flow_table->mask)))
3204                         expire = false;
3205         }
3206         rcu_read_unlock();
3207         return expire;
3208 }
3209 EXPORT_SYMBOL(rps_may_expire_flow);
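
/*
 * Illustrative sketch, not part of this file: one way a driver that
 * implements ndo_rx_flow_steer() could periodically expire its hardware
 * filters using rps_may_expire_flow() above.  struct foo_filter and the
 * filter table are hypothetical driver-private bookkeeping; only
 * rps_may_expire_flow() itself is the real API.
 */
struct foo_filter {
        u32 flow_id;            /* flow_id passed to ndo_rx_flow_steer() */
        u16 rxq_index;          /* RX queue the flow was steered to */
        bool installed;
};

static void foo_expire_filters(struct net_device *dev,
                               struct foo_filter *filters, u16 n_filters)
{
        u16 i;

        for (i = 0; i < n_filters; i++) {
                struct foo_filter *f = &filters[i];

                if (!f->installed)
                        continue;
                /* In this sketch the filter id reported back from
                 * ndo_rx_flow_steer() is assumed to be the table index i.
                 */
                if (rps_may_expire_flow(dev, f->rxq_index, f->flow_id, i)) {
                        /* ... tell the hardware to remove filter i ... */
                        f->installed = false;
                }
        }
}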
3210
3211 #endif /* CONFIG_RFS_ACCEL */
3212
3213 /* Called from hardirq (IPI) context */
3214 static void rps_trigger_softirq(void *data)
3215 {
3216         struct softnet_data *sd = data;
3217
3218         ____napi_schedule(sd, &sd->backlog);
3219         sd->received_rps++;
3220 }
3221
3222 #endif /* CONFIG_RPS */
3223
3224 /*
3225  * Check whether this softnet_data structure belongs to another CPU.
3226  * If so, queue it on our IPI list and return 1.
3227  * If not, return 0.
3228  */
3229 static int rps_ipi_queued(struct softnet_data *sd)
3230 {
3231 #ifdef CONFIG_RPS
3232         struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
3233
3234         if (sd != mysd) {
3235                 sd->rps_ipi_next = mysd->rps_ipi_list;
3236                 mysd->rps_ipi_list = sd;
3237
3238                 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3239                 return 1;
3240         }
3241 #endif /* CONFIG_RPS */
3242         return 0;
3243 }
3244
3245 #ifdef CONFIG_NET_FLOW_LIMIT
3246 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3247 #endif
3248
3249 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3250 {
3251 #ifdef CONFIG_NET_FLOW_LIMIT
3252         struct sd_flow_limit *fl;
3253         struct softnet_data *sd;
3254         unsigned int old_flow, new_flow;
3255
3256         if (qlen < (netdev_max_backlog >> 1))
3257                 return false;
3258
3259         sd = this_cpu_ptr(&softnet_data);
3260
3261         rcu_read_lock();
3262         fl = rcu_dereference(sd->flow_limit);
3263         if (fl) {
3264                 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
3265                 old_flow = fl->history[fl->history_head];
3266                 fl->history[fl->history_head] = new_flow;
3267
3268                 fl->history_head++;
3269                 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3270
3271                 if (likely(fl->buckets[old_flow]))
3272                         fl->buckets[old_flow]--;
3273
3274                 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3275                         fl->count++;
3276                         rcu_read_unlock();
3277                         return true;
3278                 }
3279         }
3280         rcu_read_unlock();
3281 #endif
3282         return false;
3283 }
3284
3285 /*
3286  * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3287  * queue (may be a remote CPU queue).
3288  */
3289 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3290                               unsigned int *qtail)
3291 {
3292         struct softnet_data *sd;
3293         unsigned long flags;
3294         unsigned int qlen;
3295
3296         sd = &per_cpu(softnet_data, cpu);
3297
3298         local_irq_save(flags);
3299
3300         rps_lock(sd);
3301         qlen = skb_queue_len(&sd->input_pkt_queue);
3302         if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
3303                 if (qlen) {
3304 enqueue:
3305                         __skb_queue_tail(&sd->input_pkt_queue, skb);
3306                         input_queue_tail_incr_save(sd, qtail);
3307                         rps_unlock(sd);
3308                         local_irq_restore(flags);
3309                         return NET_RX_SUCCESS;
3310                 }
3311
3312                 /* Schedule NAPI for the backlog device.
3313                  * We can use a non-atomic operation since we own the queue lock.
3314                  */
3315                 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
3316                         if (!rps_ipi_queued(sd))
3317                                 ____napi_schedule(sd, &sd->backlog);
3318                 }
3319                 goto enqueue;
3320         }
3321
3322         sd->dropped++;
3323         rps_unlock(sd);
3324
3325         local_irq_restore(flags);
3326
3327         atomic_long_inc(&skb->dev->rx_dropped);
3328         kfree_skb(skb);
3329         return NET_RX_DROP;
3330 }
3331
3332 static int netif_rx_internal(struct sk_buff *skb)
3333 {
3334         int ret;
3335
3336         net_timestamp_check(netdev_tstamp_prequeue, skb);
3337
3338         trace_netif_rx(skb);
3339 #ifdef CONFIG_RPS
3340         if (static_key_false(&rps_needed)) {
3341                 struct rps_dev_flow voidflow, *rflow = &voidflow;
3342                 int cpu;
3343
3344                 preempt_disable();
3345                 rcu_read_lock();
3346
3347                 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3348                 if (cpu < 0)
3349                         cpu = smp_processor_id();
3350
3351                 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3352
3353                 rcu_read_unlock();
3354                 preempt_enable();
3355         } else
3356 #endif
3357         {
3358                 unsigned int qtail;
3359                 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3360                 put_cpu();
3361         }
3362         return ret;
3363 }
3364
3365 /**
3366  *      netif_rx        -       post buffer to the network code
3367  *      @skb: buffer to post
3368  *
3369  *      This function receives a packet from a device driver and queues it for
3370  *      the upper (protocol) levels to process.  It always succeeds. The buffer
3371  *      may be dropped during processing for congestion control or by the
3372  *      protocol layers.
3373  *
3374  *      return values:
3375  *      NET_RX_SUCCESS  (no congestion)
3376  *      NET_RX_DROP     (packet was dropped)
3377  *
3378  */
3379
3380 int netif_rx(struct sk_buff *skb)
3381 {
3382         trace_netif_rx_entry(skb);
3383
3384         return netif_rx_internal(skb);
3385 }
3386 EXPORT_SYMBOL(netif_rx);
3387
3388 int netif_rx_ni(struct sk_buff *skb)
3389 {
3390         int err;
3391
3392         trace_netif_rx_ni_entry(skb);
3393
3394         preempt_disable();
3395         err = netif_rx_internal(skb);
3396         if (local_softirq_pending())
3397                 do_softirq();
3398         preempt_enable();
3399
3400         return err;
3401 }
3402 EXPORT_SYMBOL(netif_rx_ni);
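
/*
 * Illustrative sketch, not part of this file: the classic non-NAPI receive
 * path of a hypothetical driver.  The frame data, its length and the calling
 * context are assumed to come from device-specific code; netif_rx(),
 * netif_rx_ni() and the skb helpers are the real APIs.
 */
static void foo_rx_frame(struct net_device *dev, const void *data,
                         unsigned int len, bool in_irq_context)
{
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(dev, len);
        if (!skb) {
                dev->stats.rx_dropped++;
                return;
        }

        memcpy(skb_put(skb, len), data, len);
        skb->protocol = eth_type_trans(skb, dev);

        if (in_irq_context)
                netif_rx(skb);          /* hard irq or softirq context */
        else
                netif_rx_ni(skb);       /* process context */
}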
3403
3404 static void net_tx_action(struct softirq_action *h)
3405 {
3406         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
3407
3408         if (sd->completion_queue) {
3409                 struct sk_buff *clist;
3410
3411                 local_irq_disable();
3412                 clist = sd->completion_queue;
3413                 sd->completion_queue = NULL;
3414                 local_irq_enable();
3415
3416                 while (clist) {
3417                         struct sk_buff *skb = clist;
3418                         clist = clist->next;
3419
3420                         WARN_ON(atomic_read(&skb->users));
3421                         if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3422                                 trace_consume_skb(skb);
3423                         else
3424                                 trace_kfree_skb(skb, net_tx_action);
3425                         __kfree_skb(skb);
3426                 }
3427         }
3428
3429         if (sd->output_queue) {
3430                 struct Qdisc *head;
3431
3432                 local_irq_disable();
3433                 head = sd->output_queue;
3434                 sd->output_queue = NULL;
3435                 sd->output_queue_tailp = &sd->output_queue;
3436                 local_irq_enable();
3437
3438                 while (head) {
3439                         struct Qdisc *q = head;
3440                         spinlock_t *root_lock;
3441
3442                         head = head->next_sched;
3443
3444                         root_lock = qdisc_lock(q);
3445                         if (spin_trylock(root_lock)) {
3446                                 smp_mb__before_atomic();
3447                                 clear_bit(__QDISC_STATE_SCHED,
3448                                           &q->state);
3449                                 qdisc_run(q);
3450                                 spin_unlock(root_lock);
3451                         } else {
3452                                 if (!test_bit(__QDISC_STATE_DEACTIVATED,
3453                                               &q->state)) {
3454                                         __netif_reschedule(q);
3455                                 } else {
3456                                         smp_mb__before_atomic();
3457                                         clear_bit(__QDISC_STATE_SCHED,
3458                                                   &q->state);
3459                                 }
3460                         }
3461                 }
3462         }
3463 }
3464
3465 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3466     (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3467 /* This hook is defined here for ATM LANE */
3468 int (*br_fdb_test_addr_hook)(struct net_device *dev,
3469                              unsigned char *addr) __read_mostly;
3470 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3471 #endif
3472
3473 #ifdef CONFIG_NET_CLS_ACT
3474 /* TODO: Maybe we should just force sch_ingress to be compiled in
3475  * whenever CONFIG_NET_CLS_ACT is?  Otherwise we currently pay for a few
3476  * useless instructions (a compare and two extra stores) when sch_ingress
3477  * is not built but CONFIG_NET_CLS_ACT is.
3478  * NOTE: This doesn't stop any functionality; if you don't have
3479  * the ingress scheduler, you just can't add policies on ingress.
3480  *
3481  */
3482 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
3483 {
3484         struct net_device *dev = skb->dev;
3485         u32 ttl = G_TC_RTTL(skb->tc_verd);
3486         int result = TC_ACT_OK;
3487         struct Qdisc *q;
3488
3489         if (unlikely(MAX_RED_LOOP < ttl++)) {
3490                 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3491                                      skb->skb_iif, dev->ifindex);
3492                 return TC_ACT_SHOT;
3493         }
3494
3495         skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3496         skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3497
3498         q = rcu_dereference(rxq->qdisc);
3499         if (q != &noop_qdisc) {
3500                 spin_lock(qdisc_lock(q));
3501                 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3502                         result = qdisc_enqueue_root(skb, q);
3503                 spin_unlock(qdisc_lock(q));
3504         }
3505
3506         return result;
3507 }
3508
3509 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3510                                          struct packet_type **pt_prev,
3511                                          int *ret, struct net_device *orig_dev)
3512 {
3513         struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3514
3515         if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc)
3516                 goto out;
3517
3518         if (*pt_prev) {
3519                 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3520                 *pt_prev = NULL;
3521         }
3522
3523         switch (ing_filter(skb, rxq)) {
3524         case TC_ACT_SHOT:
3525         case TC_ACT_STOLEN:
3526                 kfree_skb(skb);
3527                 return NULL;
3528         }
3529
3530 out:
3531         skb->tc_verd = 0;
3532         return skb;
3533 }
3534 #endif
3535
3536 /**
3537  *      netdev_rx_handler_register - register receive handler
3538  *      @dev: device to register a handler for
3539  *      @rx_handler: receive handler to register
3540  *      @rx_handler_data: data pointer that is used by rx handler
3541  *
3542  *      Register a receive handler for a device. This handler will then be
3543  *      called from __netif_receive_skb. A negative errno code is returned
3544  *      on a failure.
3545  *
3546  *      The caller must hold the rtnl_mutex.
3547  *
3548  *      For a general description of rx_handler, see enum rx_handler_result.
3549  */
3550 int netdev_rx_handler_register(struct net_device *dev,
3551                                rx_handler_func_t *rx_handler,
3552                                void *rx_handler_data)
3553 {
3554         ASSERT_RTNL();
3555
3556         if (dev->rx_handler)
3557                 return -EBUSY;
3558
3559         /* Note: rx_handler_data must be set before rx_handler */
3560         rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3561         rcu_assign_pointer(dev->rx_handler, rx_handler);
3562
3563         return 0;
3564 }
3565 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
3566
3567 /**
3568  *      netdev_rx_handler_unregister - unregister receive handler
3569  *      @dev: device to unregister a handler from
3570  *
3571  *      Unregister a receive handler from a device.
3572  *
3573  *      The caller must hold the rtnl_mutex.
3574  */
3575 void netdev_rx_handler_unregister(struct net_device *dev)
3576 {
3577
3578         ASSERT_RTNL();
3579         RCU_INIT_POINTER(dev->rx_handler, NULL);
3580         /* A reader seeing a non-NULL rx_handler in an rcu_read_lock()
3581          * section is guaranteed to see a non-NULL rx_handler_data
3582          * as well.
3583          */
3584         synchronize_net();
3585         RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3586 }
3587 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
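
/*
 * Illustrative sketch, not part of this file: minimal use of the rx_handler
 * hooks above, in the style of bridge/bonding/team.  struct foo_port and the
 * enslave/release wrappers are hypothetical; the registration calls, the
 * handler signature and the RX_HANDLER_* return values are the real API.
 */
struct foo_port {
        struct net_device *dev;
};

static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct foo_port *port = rcu_dereference(skb->dev->rx_handler_data);

        /* Inspect or redirect skb here.  A handler that changes skb->dev
         * would return RX_HANDLER_ANOTHER to run another round above.
         */
        (void)port;
        return RX_HANDLER_PASS;         /* let the normal stack see the frame */
}

static int foo_enslave(struct net_device *port_dev, struct foo_port *port)
{
        ASSERT_RTNL();
        /* Fails with -EBUSY if another handler is already registered. */
        return netdev_rx_handler_register(port_dev, foo_handle_frame, port);
}

static void foo_release(struct net_device *port_dev)
{
        ASSERT_RTNL();
        netdev_rx_handler_unregister(port_dev);
}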
3588
3589 /*
3590  * Limit the use of PFMEMALLOC reserves to those protocols that implement
3591  * the special handling of PFMEMALLOC skbs.
3592  */
3593 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3594 {
3595         switch (skb->protocol) {
3596         case htons(ETH_P_ARP):
3597         case htons(ETH_P_IP):
3598         case htons(ETH_P_IPV6):
3599         case htons(ETH_P_8021Q):
3600         case htons(ETH_P_8021AD):
3601                 return true;
3602         default:
3603                 return false;
3604         }
3605 }
3606
3607 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
3608 {
3609         struct packet_type *ptype, *pt_prev;
3610         rx_handler_func_t *rx_handler;
3611         struct net_device *orig_dev;
3612         struct net_device *null_or_dev;
3613         bool deliver_exact = false;
3614         int ret = NET_RX_DROP;
3615         __be16 type;
3616
3617         net_timestamp_check(!netdev_tstamp_prequeue, skb);
3618
3619         trace_netif_receive_skb(skb);
3620
3621         orig_dev = skb->dev;
3622
3623         skb_reset_network_header(skb);
3624         if (!skb_transport_header_was_set(skb))
3625                 skb_reset_transport_header(skb);
3626         skb_reset_mac_len(skb);
3627
3628         pt_prev = NULL;
3629
3630         rcu_read_lock();
3631
3632 another_round:
3633         skb->skb_iif = skb->dev->ifindex;
3634
3635         __this_cpu_inc(softnet_data.processed);
3636
3637         if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3638             skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
3639                 skb = skb_vlan_untag(skb);
3640                 if (unlikely(!skb))
3641                         goto unlock;
3642         }
3643
3644 #ifdef CONFIG_NET_CLS_ACT
3645         if (skb->tc_verd & TC_NCLS) {
3646                 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3647                 goto ncls;
3648         }
3649 #endif
3650
3651         if (pfmemalloc)
3652                 goto skip_taps;
3653
3654         list_for_each_entry_rcu(ptype, &ptype_all, list) {
3655                 if (!ptype->dev || ptype->dev == skb->dev) {
3656                         if (pt_prev)
3657                                 ret = deliver_skb(skb, pt_prev, orig_dev);
3658                         pt_prev = ptype;
3659                 }
3660         }
3661
3662 skip_taps:
3663 #ifdef CONFIG_NET_CLS_ACT
3664         skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3665         if (!skb)
3666                 goto unlock;
3667 ncls:
3668 #endif
3669
3670         if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
3671                 goto drop;
3672
3673         if (vlan_tx_tag_present(skb)) {
3674                 if (pt_prev) {
3675                         ret = deliver_skb(skb, pt_prev, orig_dev);
3676                         pt_prev = NULL;
3677                 }
3678                 if (vlan_do_receive(&skb))
3679                         goto another_round;
3680                 else if (unlikely(!skb))
3681                         goto unlock;
3682         }
3683
3684         rx_handler = rcu_dereference(skb->dev->rx_handler);
3685         if (rx_handler) {
3686                 if (pt_prev) {
3687                         ret = deliver_skb(skb, pt_prev, orig_dev);
3688                         pt_prev = NULL;
3689                 }
3690                 switch (rx_handler(&skb)) {
3691                 case RX_HANDLER_CONSUMED:
3692                         ret = NET_RX_SUCCESS;
3693                         goto unlock;
3694                 case RX_HANDLER_ANOTHER:
3695                         goto another_round;
3696                 case RX_HANDLER_EXACT:
3697                         deliver_exact = true;
3698                 case RX_HANDLER_PASS:
3699                         break;
3700                 default:
3701                         BUG();
3702                 }
3703         }
3704
3705         if (unlikely(vlan_tx_tag_present(skb))) {
3706                 if (vlan_tx_tag_get_id(skb))
3707                         skb->pkt_type = PACKET_OTHERHOST;
3708                 /* Note: we might in the future use prio bits
3709                  * and set skb->priority like in vlan_do_receive().
3710                  * For the time being, just ignore the Priority Code Point.
3711                  */
3712                 skb->vlan_tci = 0;
3713         }
3714
3715         /* deliver only exact match when indicated */
3716         null_or_dev = deliver_exact ? skb->dev : NULL;
3717
3718         type = skb->protocol;
3719         list_for_each_entry_rcu(ptype,
3720                         &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
3721                 if (ptype->type == type &&
3722                     (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3723                      ptype->dev == orig_dev)) {
3724                         if (pt_prev)
3725                                 ret = deliver_skb(skb, pt_prev, orig_dev);
3726                         pt_prev = ptype;
3727                 }
3728         }
3729
3730         if (pt_prev) {
3731                 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
3732                         goto drop;
3733                 else
3734                         ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3735         } else {
3736 drop:
3737                 atomic_long_inc(&skb->dev->rx_dropped);
3738                 kfree_skb(skb);
3739                 /* Jamal, now you will not be able to escape explaining
3740                  * to me how you were going to use this. :-)
3741                  */
3742                 ret = NET_RX_DROP;
3743         }
3744
3745 unlock:
3746         rcu_read_unlock();
3747         return ret;
3748 }
3749
3750 static int __netif_receive_skb(struct sk_buff *skb)
3751 {
3752         int ret;
3753
3754         if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3755                 unsigned long pflags = current->flags;
3756
3757                 /*
3758                  * PFMEMALLOC skbs are special, they should
3759                  * - be delivered to SOCK_MEMALLOC sockets only
3760                  * - stay away from userspace
3761                  * - have bounded memory usage
3762                  *
3763                  * Use PF_MEMALLOC as this saves us from propagating the allocation
3764                  * context down to all allocation sites.
3765                  */
3766                 current->flags |= PF_MEMALLOC;
3767                 ret = __netif_receive_skb_core(skb, true);
3768                 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3769         } else
3770                 ret = __netif_receive_skb_core(skb, false);
3771
3772         return ret;
3773 }
3774
3775 static int netif_receive_skb_internal(struct sk_buff *skb)
3776 {
3777         net_timestamp_check(netdev_tstamp_prequeue, skb);
3778
3779         if (skb_defer_rx_timestamp(skb))
3780                 return NET_RX_SUCCESS;
3781
3782 #ifdef CONFIG_RPS
3783         if (static_key_false(&rps_needed)) {
3784                 struct rps_dev_flow voidflow, *rflow = &voidflow;
3785                 int cpu, ret;
3786
3787                 rcu_read_lock();
3788
3789                 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3790
3791                 if (cpu >= 0) {
3792                         ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3793                         rcu_read_unlock();
3794                         return ret;
3795                 }
3796                 rcu_read_unlock();
3797         }
3798 #endif
3799         return __netif_receive_skb(skb);
3800 }
3801
3802 /**
3803  *      netif_receive_skb - process receive buffer from network
3804  *      @skb: buffer to process
3805  *
3806  *      netif_receive_skb() is the main receive data processing function.
3807  *      It always succeeds. The buffer may be dropped during processing
3808  *      for congestion control or by the protocol layers.
3809  *
3810  *      This function may only be called from softirq context and interrupts
3811  *      should be enabled.
3812  *
3813  *      Return values (usually ignored):
3814  *      NET_RX_SUCCESS: no congestion
3815  *      NET_RX_DROP: packet was dropped
3816  */
3817 int netif_receive_skb(struct sk_buff *skb)
3818 {
3819         trace_netif_receive_skb_entry(skb);
3820
3821         return netif_receive_skb_internal(skb);
3822 }
3823 EXPORT_SYMBOL(netif_receive_skb);
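
/*
 * Illustrative sketch, not part of this file: netif_receive_skb() may only be
 * called from softirq context, which for a driver normally means its NAPI
 * ->poll() callback, whereas netif_rx() merely queues to a backlog.
 * foo_build_rx_skb() is a hypothetical helper that builds one completed
 * frame from the device's RX ring.
 */
static struct sk_buff *foo_build_rx_skb(struct net_device *dev); /* hypothetical */

static void foo_deliver_one(struct net_device *dev)
{
        struct sk_buff *skb = foo_build_rx_skb(dev);

        if (skb)
                netif_receive_skb(skb); /* runs the full RX path right away */
}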
3824
3825 /* The network device is going away; flush any packets still pending.
3826  * Called with irqs disabled.
3827  */
3828 static void flush_backlog(void *arg)
3829 {
3830         struct net_device *dev = arg;
3831         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
3832         struct sk_buff *skb, *tmp;
3833
3834         rps_lock(sd);
3835         skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3836                 if (skb->dev == dev) {
3837                         __skb_unlink(skb, &sd->input_pkt_queue);
3838                         kfree_skb(skb);
3839                         input_queue_head_incr(sd);
3840                 }
3841         }
3842         rps_unlock(sd);
3843
3844         skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3845                 if (skb->dev == dev) {
3846                         __skb_unlink(skb, &sd->process_queue);
3847                         kfree_skb(skb);
3848                         input_queue_head_incr(sd);
3849                 }
3850         }
3851 }
3852
3853 static int napi_gro_complete(struct sk_buff *skb)
3854 {
3855         struct packet_offload *ptype;
3856         __be16 type = skb->protocol;
3857         struct list_head *head = &offload_base;
3858         int err = -ENOENT;
3859
3860         BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3861
3862         if (NAPI_GRO_CB(skb)->count == 1) {
3863                 skb_shinfo(skb)->gso_size = 0;
3864                 goto out;
3865         }
3866
3867         rcu_read_lock();
3868         list_for_each_entry_rcu(ptype, head, list) {
3869                 if (ptype->type != type || !ptype->callbacks.gro_complete)
3870                         continue;
3871
3872                 err = ptype->callbacks.gro_complete(skb, 0);
3873                 break;
3874         }
3875         rcu_read_unlock();
3876
3877         if (err) {
3878                 WARN_ON(&ptype->list == head);
3879                 kfree_skb(skb);
3880                 return NET_RX_SUCCESS;
3881         }
3882
3883 out:
3884         return netif_receive_skb_internal(skb);
3885 }
3886
3887 /* napi->gro_list contains packets ordered by age, with the
3888  * youngest packets at the head of the list.
3889  * Complete skbs in reverse order to reduce latencies.
3890  */
3891 void napi_gro_flush(struct napi_struct *napi, bool flush_old)
3892 {
3893         struct sk_buff *skb, *prev = NULL;
3894
3895         /* scan list and build reverse chain */
3896         for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3897                 skb->prev = prev;
3898                 prev = skb;
3899         }
3900
3901         for (skb = prev; skb; skb = prev) {
3902                 skb->next = NULL;
3903
3904                 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3905                         return;
3906
3907                 prev = skb->prev;
3908                 napi_gro_complete(skb);
3909                 napi->gro_count--;
3910         }
3911
3912         napi->gro_list = NULL;
3913 }
3914 EXPORT_SYMBOL(napi_gro_flush);
3915
3916 static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3917 {
3918         struct sk_buff *p;
3919         unsigned int maclen = skb->dev->hard_header_len;
3920         u32 hash = skb_get_hash_raw(skb);
3921
3922         for (p = napi->gro_list; p; p = p->next) {
3923                 unsigned long diffs;
3924
3925                 NAPI_GRO_CB(p)->flush = 0;
3926
3927                 if (hash != skb_get_hash_raw(p)) {
3928                         NAPI_GRO_CB(p)->same_flow = 0;
3929                         continue;
3930                 }
3931
3932                 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3933                 diffs |= p->vlan_tci ^ skb->vlan_tci;
3934                 if (maclen == ETH_HLEN)
3935                         diffs |= compare_ether_header(skb_mac_header(p),
3936                                                       skb_mac_header(skb));
3937                 else if (!diffs)
3938                         diffs = memcmp(skb_mac_header(p),
3939                                        skb_mac_header(skb),
3940                                        maclen);
3941                 NAPI_GRO_CB(p)->same_flow = !diffs;
3942         }
3943 }
3944
3945 static void skb_gro_reset_offset(struct sk_buff *skb)
3946 {
3947         const struct skb_shared_info *pinfo = skb_shinfo(skb);
3948         const skb_frag_t *frag0 = &pinfo->frags[0];
3949
3950         NAPI_GRO_CB(skb)->data_offset = 0;
3951         NAPI_GRO_CB(skb)->frag0 = NULL;
3952         NAPI_GRO_CB(skb)->frag0_len = 0;
3953
3954         if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
3955             pinfo->nr_frags &&
3956             !PageHighMem(skb_frag_page(frag0))) {
3957                 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
3958                 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
3959         }
3960 }
3961
3962 static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
3963 {
3964         struct skb_shared_info *pinfo = skb_shinfo(skb);
3965
3966         BUG_ON(skb->end - skb->tail < grow);
3967
3968         memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3969
3970         skb->data_len -= grow;
3971         skb->tail += grow;
3972
3973         pinfo->frags[0].page_offset += grow;
3974         skb_frag_size_sub(&pinfo->frags[0], grow);
3975
3976         if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
3977                 skb_frag_unref(skb, 0);
3978                 memmove(pinfo->frags, pinfo->frags + 1,
3979                         --pinfo->nr_frags * sizeof(pinfo->frags[0]));
3980         }
3981 }
3982
3983 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3984 {
3985         struct sk_buff **pp = NULL;
3986         struct packet_offload *ptype;
3987         __be16 type = skb->protocol;
3988         struct list_head *head = &offload_base;
3989         int same_flow;
3990         enum gro_result ret;
3991         int grow;
3992
3993         if (!(skb->dev->features & NETIF_F_GRO))
3994                 goto normal;
3995
3996         if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
3997                 goto normal;
3998
3999         gro_list_prepare(napi, skb);
4000
4001         rcu_read_lock();
4002         list_for_each_entry_rcu(ptype, head, list) {
4003                 if (ptype->type != type || !ptype->callbacks.gro_receive)
4004                         continue;
4005
4006                 skb_set_network_header(skb, skb_gro_offset(skb));
4007                 skb_reset_mac_len(skb);
4008                 NAPI_GRO_CB(skb)->same_flow = 0;
4009                 NAPI_GRO_CB(skb)->flush = 0;
4010                 NAPI_GRO_CB(skb)->free = 0;
4011                 NAPI_GRO_CB(skb)->udp_mark = 0;
4012
4013                 /* Setup for GRO checksum validation */
4014                 switch (skb->ip_summed) {
4015                 case CHECKSUM_COMPLETE:
4016                         NAPI_GRO_CB(skb)->csum = skb->csum;
4017                         NAPI_GRO_CB(skb)->csum_valid = 1;
4018                         NAPI_GRO_CB(skb)->csum_cnt = 0;
4019                         break;
4020                 case CHECKSUM_UNNECESSARY:
4021                         NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4022                         NAPI_GRO_CB(skb)->csum_valid = 0;
4023                         break;
4024                 default:
4025                         NAPI_GRO_CB(skb)->csum_cnt = 0;
4026                         NAPI_GRO_CB(skb)->csum_valid = 0;
4027                 }
4028
4029                 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
4030                 break;
4031         }
4032         rcu_read_unlock();
4033
4034         if (&ptype->list == head)
4035                 goto normal;
4036
4037         same_flow = NAPI_GRO_CB(skb)->same_flow;
4038         ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
4039
4040         if (pp) {
4041                 struct sk_buff *nskb = *pp;
4042
4043                 *pp = nskb->next;
4044                 nskb->next = NULL;
4045                 napi_gro_complete(nskb);
4046                 napi->gro_count--;
4047         }
4048
4049         if (same_flow)
4050                 goto ok;
4051
4052         if (NAPI_GRO_CB(skb)->flush)
4053                 goto normal;
4054
4055         if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4056                 struct sk_buff *nskb = napi->gro_list;
4057
4058                 /* locate the end of the list to select the 'oldest' flow */
4059                 while (nskb->next) {
4060                         pp = &nskb->next;
4061                         nskb = *pp;
4062                 }
4063                 *pp = NULL;
4064                 nskb->next = NULL;
4065                 napi_gro_complete(nskb);
4066         } else {
4067                 napi->gro_count++;
4068         }
4069         NAPI_GRO_CB(skb)->count = 1;
4070         NAPI_GRO_CB(skb)->age = jiffies;
4071         NAPI_GRO_CB(skb)->last = skb;
4072         skb_shinfo(skb)->gso_size = skb_gro_len(skb);
4073         skb->next = napi->gro_list;
4074         napi->gro_list = skb;
4075         ret = GRO_HELD;
4076
4077 pull:
4078         grow = skb_gro_offset(skb) - skb_headlen(skb);
4079         if (grow > 0)
4080                 gro_pull_from_frag0(skb, grow);
4081 ok:
4082         return ret;
4083
4084 normal:
4085         ret = GRO_NORMAL;
4086         goto pull;
4087 }
4088
4089 struct packet_offload *gro_find_receive_by_type(__be16 type)
4090 {
4091         struct list_head *offload_head = &offload_base;
4092         struct packet_offload *ptype;
4093
4094         list_for_each_entry_rcu(ptype, offload_head, list) {
4095                 if (ptype->type != type || !ptype->callbacks.gro_receive)
4096                         continue;
4097                 return ptype;
4098         }
4099         return NULL;
4100 }
4101 EXPORT_SYMBOL(gro_find_receive_by_type);
4102
4103 struct packet_offload *gro_find_complete_by_type(__be16 type)
4104 {
4105         struct list_head *offload_head = &offload_base;
4106         struct packet_offload *ptype;
4107
4108         list_for_each_entry_rcu(ptype, offload_head, list) {
4109                 if (ptype->type != type || !ptype->callbacks.gro_complete)
4110                         continue;
4111                 return ptype;
4112         }
4113         return NULL;
4114 }
4115 EXPORT_SYMBOL(gro_find_complete_by_type);
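
/*
 * Illustrative sketch, not part of this file: how an encapsulation offload
 * (in the style of the UDP tunnel offloads) can chain into the inner
 * protocol's offload via gro_find_receive_by_type().  The inner type is
 * assumed to be ETH_P_TEB purely for illustration, and the outer-header
 * parsing a real implementation would do first is omitted.
 */
static struct sk_buff **foo_tunnel_gro_receive(struct sk_buff **head,
                                               struct sk_buff *skb)
{
        struct sk_buff **pp = NULL;
        struct packet_offload *ptype;
        __be16 type = htons(ETH_P_TEB);

        rcu_read_lock();
        ptype = gro_find_receive_by_type(type);
        if (!ptype) {
                NAPI_GRO_CB(skb)->flush |= 1;   /* no inner offload: stop merging */
                goto out_unlock;
        }

        pp = ptype->callbacks.gro_receive(head, skb);

out_unlock:
        rcu_read_unlock();
        return pp;
}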
4116
4117 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
4118 {
4119         switch (ret) {
4120         case GRO_NORMAL:
4121                 if (netif_receive_skb_internal(skb))
4122                         ret = GRO_DROP;
4123                 break;
4124
4125         case GRO_DROP:
4126                 kfree_skb(skb);
4127                 break;
4128
4129         case GRO_MERGED_FREE:
4130                 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4131                         kmem_cache_free(skbuff_head_cache, skb);
4132                 else
4133                         __kfree_skb(skb);
4134                 break;
4135
4136         case GRO_HELD:
4137         case GRO_MERGED:
4138                 break;
4139         }
4140
4141         return ret;
4142 }
4143
4144 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4145 {
4146         trace_napi_gro_receive_entry(skb);
4147
4148         skb_gro_reset_offset(skb);
4149
4150         return napi_skb_finish(dev_gro_receive(napi, skb), skb);
4151 }
4152 EXPORT_SYMBOL(napi_gro_receive);
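
/*
 * Illustrative sketch, not part of this file: a NAPI ->poll() callback that
 * feeds completed frames to GRO.  foo_fetch_rx_skb() is a hypothetical helper
 * that builds an skb (with skb->protocol set via eth_type_trans()) from the
 * RX ring; napi_gro_receive() and napi_complete_done() are the real APIs
 * defined in this file.
 */
static struct sk_buff *foo_fetch_rx_skb(struct net_device *dev); /* hypothetical */

static int foo_poll(struct napi_struct *napi, int budget)
{
        int work = 0;
        struct sk_buff *skb;

        while (work < budget && (skb = foo_fetch_rx_skb(napi->dev)) != NULL) {
                napi_gro_receive(napi, skb);    /* may merge, hold or deliver */
                work++;
        }

        if (work < budget) {
                napi_complete_done(napi, work);
                /* ... re-enable the device's RX interrupts here ... */
        }

        return work;
}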
4153
4154 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
4155 {
4156         if (unlikely(skb->pfmemalloc)) {
4157                 consume_skb(skb);
4158                 return;
4159         }
4160         __skb_pull(skb, skb_headlen(skb));
4161         /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4162         skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
4163         skb->vlan_tci = 0;
4164         skb->dev = napi->dev;
4165         skb->skb_iif = 0;
4166         skb->encapsulation = 0;
4167         skb_shinfo(skb)->gso_type = 0;
4168         skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
4169
4170         napi->skb = skb;
4171 }
4172
4173 struct sk_buff *napi_get_frags(struct napi_struct *napi)
4174 {
4175         struct sk_buff *skb = napi->skb;
4176
4177         if (!skb) {
4178                 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
4179                 napi->skb = skb;
4180         }
4181         return skb;
4182 }
4183 EXPORT_SYMBOL(napi_get_frags);
4184
4185 static gro_result_t napi_frags_finish(struct napi_struct *napi,
4186                                       struct sk_buff *skb,
4187                                       gro_result_t ret)
4188 {
4189         switch (ret) {
4190         case GRO_NORMAL:
4191         case GRO_HELD:
4192                 __skb_push(skb, ETH_HLEN);
4193                 skb->protocol = eth_type_trans(skb, skb->dev);
4194                 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
4195                         ret = GRO_DROP;
4196                 break;
4197
4198         case GRO_DROP:
4199         case GRO_MERGED_FREE:
4200                 napi_reuse_skb(napi, skb);
4201                 break;
4202
4203         case GRO_MERGED:
4204                 break;
4205         }
4206
4207         return ret;
4208 }
4209
4210 /* The upper GRO stack assumes the network header starts at gro_offset=0.
4211  * Drivers could call both napi_gro_frags() and napi_gro_receive(), so
4212  * we copy the ethernet header into skb->data to have a common layout.
4213  */
4214 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
4215 {
4216         struct sk_buff *skb = napi->skb;
4217         const struct ethhdr *eth;
4218         unsigned int hlen = sizeof(*eth);
4219
4220         napi->skb = NULL;
4221
4222         skb_reset_mac_header(skb);
4223         skb_gro_reset_offset(skb);
4224
4225         eth = skb_gro_header_fast(skb, 0);
4226         if (unlikely(skb_gro_header_hard(skb, hlen))) {
4227                 eth = skb_gro_header_slow(skb, hlen, 0);
4228                 if (unlikely(!eth)) {
4229                         napi_reuse_skb(napi, skb);
4230                         return NULL;
4231                 }
4232         } else {
4233                 gro_pull_from_frag0(skb, hlen);
4234                 NAPI_GRO_CB(skb)->frag0 += hlen;
4235                 NAPI_GRO_CB(skb)->frag0_len -= hlen;
4236         }
4237         __skb_pull(skb, hlen);
4238
4239         /*
4240          * This works because the only protocols we care about don't require
4241          * special handling.
4242          * We'll fix it up properly in napi_frags_finish()
4243          */
4244         skb->protocol = eth->h_proto;
4245
4246         return skb;
4247 }
4248
4249 gro_result_t napi_gro_frags(struct napi_struct *napi)
4250 {
4251         struct sk_buff *skb = napi_frags_skb(napi);
4252
4253         if (!skb)
4254                 return GRO_DROP;
4255
4256         trace_napi_gro_frags_entry(skb);
4257
4258         return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
4259 }
4260 EXPORT_SYMBOL(napi_gro_frags);
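
/*
 * Illustrative sketch, not part of this file: a page-based RX path in the
 * style of drivers that use napi_get_frags()/napi_gro_frags().  The page,
 * offset, length and truesize are assumed to come from the device's RX ring;
 * the skb geometry updates mirror what such drivers do by hand.
 */
static void foo_rx_page(struct napi_struct *napi, struct page *page,
                        unsigned int offset, unsigned int len,
                        unsigned int truesize)
{
        struct sk_buff *skb = napi_get_frags(napi);

        if (!skb) {
                put_page(page);         /* no skb available: drop the fragment */
                return;
        }

        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, len);
        skb->len += len;
        skb->data_len += len;
        skb->truesize += truesize;

        /* The GRO frags path parses the ethernet header itself (see
         * napi_frags_skb() above), so the driver does not call
         * eth_type_trans() here.
         */
        napi_gro_frags(napi);
}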
4261
4262 /* Compute the checksum from gro_offset and return the folded value
4263  * after adding in any pseudo checksum.
4264  */
4265 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
4266 {
4267         __wsum wsum;
4268         __sum16 sum;
4269
4270         wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
4271
4272         /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
4273         sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
4274         if (likely(!sum)) {
4275                 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
4276                     !skb->csum_complete_sw)
4277                         netdev_rx_csum_fault(skb->dev);
4278         }
4279
4280         NAPI_GRO_CB(skb)->csum = wsum;
4281         NAPI_GRO_CB(skb)->csum_valid = 1;
4282
4283         return sum;
4284 }
4285 EXPORT_SYMBOL(__skb_gro_checksum_complete);
4286
4287 /*
4288  * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
4289  * Note: called with local irq disabled, but exits with local irq enabled.
4290  */
4291 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4292 {
4293 #ifdef CONFIG_RPS
4294         struct softnet_data *remsd = sd->rps_ipi_list;
4295
4296         if (remsd) {
4297                 sd->rps_ipi_list = NULL;
4298
4299                 local_irq_enable();
4300
4301                 /* Send pending IPIs to kick RPS processing on remote CPUs. */
4302                 while (remsd) {
4303                         struct softnet_data *next = remsd->rps_ipi_next;
4304
4305                         if (cpu_online(remsd->cpu))
4306                                 smp_call_function_single_async(remsd->cpu,
4307                                                            &remsd->csd);
4308                         remsd = next;
4309                 }
4310         } else
4311 #endif
4312                 local_irq_enable();
4313 }
4314
4315 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
4316 {
4317 #ifdef CONFIG_RPS
4318         return sd->rps_ipi_list != NULL;
4319 #else
4320         return false;
4321 #endif
4322 }
4323
4324 static int process_backlog(struct napi_struct *napi, int quota)
4325 {
4326         int work = 0;
4327         struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
4328
4329         /* Check if we have pending IPIs; it is better to send them now
4330          * rather than waiting for net_rx_action() to end.
4331          */
4332         if (sd_has_rps_ipi_waiting(sd)) {
4333                 local_irq_disable();
4334                 net_rps_action_and_irq_enable(sd);
4335         }
4336
4337         napi->weight = weight_p;
4338         local_irq_disable();
4339         while (1) {
4340                 struct sk_buff *skb;
4341
4342                 while ((skb = __skb_dequeue(&sd->process_queue))) {
4343                         local_irq_enable();
4344                         __netif_receive_skb(skb);
4345                         local_irq_disable();
4346                         input_queue_head_incr(sd);
4347                         if (++work >= quota) {
4348                                 local_irq_enable();
4349                                 return work;
4350                         }
4351                 }
4352
4353                 rps_lock(sd);
4354                 if (skb_queue_empty(&sd->input_pkt_queue)) {
4355                         /*
4356                          * Inline a custom version of __napi_complete().
4357                          * Only the current cpu owns and manipulates this napi,
4358                          * and NAPI_STATE_SCHED is the only possible flag set
4359                          * on the backlog.
4360                          * We can use a plain write instead of clear_bit(),
4361                          * and we don't need an smp_mb() memory barrier.
4362                          */
4363                         napi->state = 0;
4364                         rps_unlock(sd);
4365
4366                         break;
4367                 }
4368
4369                 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4370                                            &sd->process_queue);
4371                 rps_unlock(sd);
4372         }
4373         local_irq_enable();
4374
4375         return work;
4376 }
4377
4378 /**
4379  * __napi_schedule - schedule for receive
4380  * @n: entry to schedule
4381  *
4382  * The entry's receive function will be scheduled to run.
4383  * Consider using __napi_schedule_irqoff() if hard irqs are masked.
4384  */
4385 void __napi_schedule(struct napi_struct *n)
4386 {
4387         unsigned long flags;
4388
4389         local_irq_save(flags);
4390         ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4391         local_irq_restore(flags);
4392 }
4393 EXPORT_SYMBOL(__napi_schedule);
4394
4395 /**
4396  * __napi_schedule_irqoff - schedule for receive
4397  * @n: entry to schedule
4398  *
4399  * Variant of __napi_schedule() assuming hard irqs are masked
4400  */
4401 void __napi_schedule_irqoff(struct napi_struct *n)
4402 {
4403         ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4404 }
4405 EXPORT_SYMBOL(__napi_schedule_irqoff);
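
/*
 * Illustrative sketch, not part of this file: how a driver's hard interrupt
 * handler typically hands work to NAPI.  foo_mask_rx_irq() is a hypothetical
 * device-specific helper, and irqreturn_t/IRQ_HANDLED assume
 * <linux/interrupt.h>; napi_schedule_prep() is the inline helper from
 * <linux/netdevice.h> and __napi_schedule_irqoff() is defined above.
 */
static void foo_mask_rx_irq(struct net_device *dev); /* hypothetical */

static irqreturn_t foo_rx_interrupt(int irq, void *dev_id)
{
        struct napi_struct *napi = dev_id;

        if (napi_schedule_prep(napi)) {
                foo_mask_rx_irq(napi->dev);     /* stop further RX interrupts */
                /* Hard irqs are already off here, so the _irqoff variant fits. */
                __napi_schedule_irqoff(napi);
        }
        return IRQ_HANDLED;
}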
4406
4407 void __napi_complete(struct napi_struct *n)
4408 {
4409         BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4410
4411         list_del_init(&n->poll_list);
4412         smp_mb__before_atomic();
4413         clear_bit(NAPI_STATE_SCHED, &n->state);
4414 }
4415 EXPORT_SYMBOL(__napi_complete);
4416
4417 void napi_complete_done(struct napi_struct *n, int work_done)
4418 {
4419         unsigned long flags;
4420
4421         /*
4422          * Don't let napi dequeue from the cpu poll list,
4423          * just in case it is running on a different cpu.
4424          */
4425         if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4426                 return;
4427
4428         if (n->gro_list) {
4429                 unsigned long timeout = 0;
4430
4431                 if (work_done)
4432                         timeout = n->dev->gro_flush_timeout;
4433
4434                 if (timeout)
4435                         hrtimer_start(&n->timer, ns_to_ktime(timeout),
4436                                       HRTIMER_MODE_REL_PINNED);
4437                 else
4438                         napi_gro_flush(n, false);
4439         }
4440         if (likely(list_empty(&n->poll_list))) {
4441                 WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
4442         } else {
4443                 /* If n->poll_list is not empty, we need to mask irqs */
4444                 local_irq_save(flags);
4445                 __napi_complete(n);
4446                 local_irq_restore(flags);
4447         }
4448 }
4449 EXPORT_SYMBOL(napi_complete_done);
4450
4451 /* must be called under rcu_read_lock(), as we dont take a reference */
4452 struct napi_struct *napi_by_id(unsigned int napi_id)
4453 {
4454         unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4455         struct napi_struct *napi;
4456
4457         hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4458                 if (napi->napi_id == napi_id)
4459                         return napi;
4460
4461         return NULL;
4462 }
4463 EXPORT_SYMBOL_GPL(napi_by_id);
4464
4465 void napi_hash_add(struct napi_struct *napi)
4466 {
4467         if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
4468
4469                 spin_lock(&napi_hash_lock);
4470
4471                 /* 0 is not a valid id; we also skip an id that is already
4472                  * taken.  We expect both events to be extremely rare.
4473                  */
4474                 napi->napi_id = 0;
4475                 while (!napi->napi_id) {
4476                         napi->napi_id = ++napi_gen_id;
4477                         if (napi_by_id(napi->napi_id))
4478                                 napi->napi_id = 0;
4479                 }
4480
4481                 hlist_add_head_rcu(&napi->napi_hash_node,
4482                         &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
4483
4484                 spin_unlock(&napi_hash_lock);
4485         }
4486 }
4487 EXPORT_SYMBOL_GPL(napi_hash_add);
4488
4489 /* Warning: the caller is responsible for making sure an RCU grace
4490  * period is respected before freeing the memory containing @napi.
4491  */
4492 void napi_hash_del(struct napi_struct *napi)
4493 {
4494         spin_lock(&napi_hash_lock);
4495
4496         if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
4497                 hlist_del_rcu(&napi->napi_hash_node);
4498
4499         spin_unlock(&napi_hash_lock);
4500 }
4501 EXPORT_SYMBOL_GPL(napi_hash_del);
4502
4503 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
4504 {
4505         struct napi_struct *napi;
4506
4507         napi = container_of(timer, struct napi_struct, timer);
4508         if (napi->gro_list)
4509                 napi_schedule(napi);
4510
4511         return HRTIMER_NORESTART;
4512 }
4513
4514 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4515                     int (*poll)(struct napi_struct *, int), int weight)
4516 {
4517         INIT_LIST_HEAD(&napi->poll_list);
4518         hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
4519         napi->timer.function = napi_watchdog;
4520         napi->gro_count = 0;
4521         napi->gro_list = NULL;
4522         napi->skb = NULL;
4523         napi->poll = poll;
4524         if (weight > NAPI_POLL_WEIGHT)
4525                 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4526                             weight, dev->name);
4527         napi->weight = weight;
4528         list_add(&napi->dev_list, &dev->napi_list);
4529         napi->dev = dev;
4530 #ifdef CONFIG_NETPOLL
4531         spin_lock_init(&napi->poll_lock);
4532         napi->poll_owner = -1;
4533 #endif
4534         set_bit(NAPI_STATE_SCHED, &napi->state);
4535 }
4536 EXPORT_SYMBOL(netif_napi_add);
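
/*
 * Illustrative sketch, not part of this file: typical NAPI setup in a
 * driver's probe path.  struct foo_priv and foo_poll() are hypothetical
 * (foo_poll() would look like the poll sketch near napi_gro_receive() above);
 * netif_napi_add(), NAPI_POLL_WEIGHT and napi_enable() are the real API.
 */
struct foo_priv {
        struct napi_struct napi;
};

static int foo_poll(struct napi_struct *napi, int budget); /* hypothetical */

static void foo_setup_napi(struct net_device *dev, struct foo_priv *priv)
{
        /* NAPI_POLL_WEIGHT (64) is the canonical weight; anything larger
         * triggers the pr_err_once() warning above.
         */
        netif_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
        napi_enable(&priv->napi);       /* clears the SCHED bit set above */
}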
4537
4538 void napi_disable(struct napi_struct *n)
4539 {
4540         might_sleep();
4541         set_bit(NAPI_STATE_DISABLE, &n->state);
4542
4543         while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
4544                 msleep(1);
4545
4546         hrtimer_cancel(&n->timer);
4547
4548         clear_bit(NAPI_STATE_DISABLE, &n->state);
4549 }
4550 EXPORT_SYMBOL(napi_disable);
4551
4552 void netif_napi_del(struct napi_struct *napi)
4553 {
4554         list_del_init(&napi->dev_list);
4555         napi_free_frags(napi);
4556
4557         kfree_skb_list(napi->gro_list);
4558         napi->gro_list = NULL;
4559         napi->gro_count = 0;
4560 }
4561 EXPORT_SYMBOL(netif_napi_del);
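
/*
 * Illustrative sketch, not part of this file: the matching teardown for the
 * setup sketch above, as a driver's close/remove path would do it.  The order
 * matters: quiesce polling with napi_disable() before netif_napi_del()
 * releases the GRO state.
 */
static void foo_teardown_napi(struct foo_priv *priv)
{
        napi_disable(&priv->napi);      /* waits for any in-flight poll */
        netif_napi_del(&priv->napi);    /* frees gro_list and pending frags */
}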
4562
4563 static int napi_poll(struct napi_struct *n, struct list_head *repoll)
4564 {
4565         void *have;
4566         int work, weight;
4567
4568         list_del_init(&n->poll_list);
4569
4570         have = netpoll_poll_lock(n);
4571
4572         weight = n->weight;
4573
4574         /* This NAPI_STATE_SCHED test is for avoiding a race
4575          * with netpoll's poll_napi().  Only the entity which
4576          * obtains the lock and sees NAPI_STATE_SCHED set will
4577          * actually make the ->poll() call.  Therefore we avoid
4578          * accidentally calling ->poll() when NAPI is not scheduled.
4579          */
4580         work = 0;
4581         if (test_bit(NAPI_STATE_SCHED, &n->state)) {
4582                 work = n->poll(n, weight);
4583                 trace_napi_poll(n);
4584         }
4585
4586         WARN_ON_ONCE(work > weight);
4587
4588         if (likely(work < weight))
4589                 goto out_unlock;
4590
4591         /* Drivers must not modify the NAPI state if they
4592          * consume the entire weight.  In such cases this code
4593          * still "owns" the NAPI instance and therefore can
4594          * move the instance around on the list at-will.
4595          */
4596         if (unlikely(napi_disable_pending(n))) {
4597                 napi_complete(n);
4598                 goto out_unlock;
4599         }
4600
4601         if (n->gro_list) {
4602                 /* Flush packets that are too old.
4603                  * If HZ < 1000, flush all packets.
4604                  */
4605                 napi_gro_flush(n, HZ >= 1000);
4606         }
4607
4608         /* Some drivers may have called napi_schedule
4609          * prior to exhausting their budget.
4610          */
4611         if (unlikely(!list_empty(&n->poll_list))) {
4612                 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
4613                              n->dev ? n->dev->name : "backlog");
4614                 goto out_unlock;
4615         }
4616
4617         list_add_tail(&n->poll_list, repoll);
4618
4619 out_unlock:
4620         netpoll_poll_unlock(have);
4621
4622         return work;
4623 }
4624
4625 static void net_rx_action(struct softirq_action *h)
4626 {
4627         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4628         unsigned long time_limit = jiffies + 2;
4629         int budget = netdev_budget;
4630         LIST_HEAD(list);
4631         LIST_HEAD(repoll);
4632
4633         local_irq_disable();
4634         list_splice_init(&sd->poll_list, &list);
4635         local_irq_enable();
4636
4637         for (;;) {
4638                 struct napi_struct *n;
4639
4640                 if (list_empty(&list)) {
4641                         if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
4642                                 return;
4643                         break;
4644                 }
4645
4646                 n = list_first_entry(&list, struct napi_struct, poll_list);
4647                 budget -= napi_poll(n, &repoll);
4648
4649                 /* If softirq window is exhausted then punt.
4650                  * Allow this to run for 2 jiffies, which allows for
4651                  * an average latency of 1.5/HZ.
4652                  */
4653                 if (unlikely(budget <= 0 ||
4654                              time_after_eq(jiffies, time_limit))) {
4655                         sd->time_squeeze++;
4656                         break;
4657                 }
4658         }
4659
4660         local_irq_disable();
4661
4662         list_splice_tail_init(&sd->poll_list, &list);
4663         list_splice_tail(&repoll, &list);
4664         list_splice(&list, &sd->poll_list);
4665         if (!list_empty(&sd->poll_list))
4666                 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4667
4668         net_rps_action_and_irq_enable(sd);
4669 }
4670
4671 struct netdev_adjacent {
4672         struct net_device *dev;
4673
4674         /* upper master flag, there can only be one master device per list */
4675         bool master;
4676
4677         /* counter for the number of times this device was added to us */
4678         u16 ref_nr;
4679
4680         /* private field for the users */
4681         void *private;
4682
4683         struct list_head list;
4684         struct rcu_head rcu;
4685 };
4686
4687 static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
4688                                                  struct net_device *adj_dev,
4689                                                  struct list_head *adj_list)
4690 {
4691         struct netdev_adjacent *adj;
4692
4693         list_for_each_entry(adj, adj_list, list) {
4694                 if (adj->dev == adj_dev)
4695                         return adj;
4696         }
4697         return NULL;
4698 }
4699
4700 /**
4701  * netdev_has_upper_dev - Check if device is linked to an upper device
4702  * @dev: device
4703  * @upper_dev: upper device to check
4704  *
4705  * Find out if a device is linked to the specified upper device and return true
4706  * in case it is. Note that this checks only the immediate upper device,
4707  * not through a complete stack of devices. The caller must hold the RTNL lock.
4708  */
4709 bool netdev_has_upper_dev(struct net_device *dev,
4710                           struct net_device *upper_dev)
4711 {
4712         ASSERT_RTNL();
4713
4714         return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
4715 }
4716 EXPORT_SYMBOL(netdev_has_upper_dev);
4717
4718 /**
4719  * netdev_has_any_upper_dev - Check if device is linked to some device
4720  * @dev: device
4721  *
4722  * Find out if a device is linked to an upper device and return true in case
4723  * it is. The caller must hold the RTNL lock.
4724  */
4725 static bool netdev_has_any_upper_dev(struct net_device *dev)
4726 {
4727         ASSERT_RTNL();
4728
4729         return !list_empty(&dev->all_adj_list.upper);
4730 }
4731
4732 /**
4733  * netdev_master_upper_dev_get - Get master upper device
4734  * @dev: device
4735  *
4736  * Find a master upper device and return pointer to it or NULL in case
4737  * it's not there. The caller must hold the RTNL lock.
4738  */
4739 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4740 {
4741         struct netdev_adjacent *upper;
4742
4743         ASSERT_RTNL();
4744
4745         if (list_empty(&dev->adj_list.upper))
4746                 return NULL;
4747
4748         upper = list_first_entry(&dev->adj_list.upper,
4749                                  struct netdev_adjacent, list);
4750         if (likely(upper->master))
4751                 return upper->dev;
4752         return NULL;
4753 }
4754 EXPORT_SYMBOL(netdev_master_upper_dev_get);
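
/*
 * Usage sketch (hypothetical caller, names made up): looking up the master
 * of a slave device, e.g. from code that already runs under the RTNL lock:
 *
 *	struct net_device *master;
 *
 *	ASSERT_RTNL();
 *	master = netdev_master_upper_dev_get(slave_dev);
 *	if (master)
 *		netdev_info(slave_dev, "enslaved to %s\n", master->name);
 */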
4755
4756 void *netdev_adjacent_get_private(struct list_head *adj_list)
4757 {
4758         struct netdev_adjacent *adj;
4759
4760         adj = list_entry(adj_list, struct netdev_adjacent, list);
4761
4762         return adj->private;
4763 }
4764 EXPORT_SYMBOL(netdev_adjacent_get_private);
4765
4766 /**
4767  * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
4768  * @dev: device
4769  * @iter: list_head ** of the current position
4770  *
4771  * Gets the next device from the dev's upper list, starting from iter
4772  * position. The caller must hold RCU read lock.
4773  */
4774 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4775                                                  struct list_head **iter)
4776 {
4777         struct netdev_adjacent *upper;
4778
4779         WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4780
4781         upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4782
4783         if (&upper->list == &dev->adj_list.upper)
4784                 return NULL;
4785
4786         *iter = &upper->list;
4787
4788         return upper->dev;
4789 }
4790 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
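
/*
 * Usage sketch (hypothetical caller): walking the immediate upper devices
 * under the RCU read lock.  The iterator starts at the list head and is
 * advanced by the helper itself:
 *
 *	struct net_device *upper;
 *	struct list_head *iter;
 *
 *	rcu_read_lock();
 *	for (iter = &dev->adj_list.upper,
 *	     upper = netdev_upper_get_next_dev_rcu(dev, &iter);
 *	     upper;
 *	     upper = netdev_upper_get_next_dev_rcu(dev, &iter))
 *		pr_debug("%s is stacked on %s\n", upper->name, dev->name);
 *	rcu_read_unlock();
 */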
4791
4792 /**
4793  * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
4794  * @dev: device
4795  * @iter: list_head ** of the current position
4796  *
4797  * Gets the next device from the dev's upper list, starting from iter
4798  * position. The caller must hold RCU read lock.
4799  */
4800 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4801                                                      struct list_head **iter)
4802 {
4803         struct netdev_adjacent *upper;
4804
4805         WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4806
4807         upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4808
4809         if (&upper->list == &dev->all_adj_list.upper)
4810                 return NULL;
4811
4812         *iter = &upper->list;
4813
4814         return upper->dev;
4815 }
4816 EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
4817
4818 /**
4819  * netdev_lower_get_next_private - Get the next ->private from the
4820  *                                 lower neighbour list
4821  * @dev: device
4822  * @iter: list_head ** of the current position
4823  *
4824  * Gets the next netdev_adjacent->private from the dev's lower neighbour
4825  * list, starting from iter position. The caller must either hold the
4826  * RTNL lock or its own locking that guarantees that the neighbour lower
4827  * list will remain unchanged.
4828  */
4829 void *netdev_lower_get_next_private(struct net_device *dev,
4830                                     struct list_head **iter)
4831 {
4832         struct netdev_adjacent *lower;
4833
4834         lower = list_entry(*iter, struct netdev_adjacent, list);
4835
4836         if (&lower->list == &dev->adj_list.lower)
4837                 return NULL;
4838
4839         *iter = lower->list.next;
4840
4841         return lower->private;
4842 }
4843 EXPORT_SYMBOL(netdev_lower_get_next_private);
4844
4845 /**
4846  * netdev_lower_get_next_private_rcu - Get the next ->private from the
4847  *                                     lower neighbour list, RCU
4848  *                                     variant
4849  * @dev: device
4850  * @iter: list_head ** of the current position
4851  *
4852  * Gets the next netdev_adjacent->private from the dev's lower neighbour
4853  * list, starting from iter position. The caller must hold RCU read lock.
4854  */
4855 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4856                                         struct list_head **iter)
4857 {
4858         struct netdev_adjacent *lower;
4859
4860         WARN_ON_ONCE(!rcu_read_lock_held());
4861
4862         lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4863
4864         if (&lower->list == &dev->adj_list.lower)
4865                 return NULL;
4866
4867         *iter = &lower->list;
4868
4869         return lower->private;
4870 }
4871 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
4872
4873 /**
4874  * netdev_lower_get_next - Get the next device from the lower neighbour
4875  *                         list
4876  * @dev: device
4877  * @iter: list_head ** of the current position
4878  *
4879  * Gets the next netdev_adjacent from the dev's lower neighbour
4880  * list, starting from iter position. The caller must hold the RTNL lock or
4881  * its own locking that guarantees that the neighbour lower
4882  * list will remain unchanged.
4883  */
4884 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
4885 {
4886         struct netdev_adjacent *lower;
4887
4888         lower = list_entry((*iter)->next, struct netdev_adjacent, list);
4889
4890         if (&lower->list == &dev->adj_list.lower)
4891                 return NULL;
4892
4893         *iter = &lower->list;
4894
4895         return lower->dev;
4896 }
4897 EXPORT_SYMBOL(netdev_lower_get_next);
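
/*
 * Usage sketch (hypothetical caller): walking the immediate lower devices
 * with the helper above while holding the RTNL lock.  Callers normally go
 * through the netdev_for_each_lower_dev() wrapper instead, as
 * dev_get_nest_level() below does:
 *
 *	struct net_device *lower;
 *	struct list_head *iter;
 *
 *	ASSERT_RTNL();
 *	for (iter = &dev->adj_list.lower,
 *	     lower = netdev_lower_get_next(dev, &iter);
 *	     lower;
 *	     lower = netdev_lower_get_next(dev, &iter))
 *		pr_debug("%s sits on top of %s\n", dev->name, lower->name);
 */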
4898
4899 /**
4900  * netdev_lower_get_first_private_rcu - Get the first ->private from the
4901  *                                     lower neighbour list, RCU
4902  *                                     variant
4903  * @dev: device
4904  *
4905  * Gets the first netdev_adjacent->private from the dev's lower neighbour
4906  * list. The caller must hold RCU read lock.
4907  */
4908 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
4909 {
4910         struct netdev_adjacent *lower;
4911
4912         lower = list_first_or_null_rcu(&dev->adj_list.lower,
4913                         struct netdev_adjacent, list);
4914         if (lower)
4915                 return lower->private;
4916         return NULL;
4917 }
4918 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
4919
4920 /**
4921  * netdev_master_upper_dev_get_rcu - Get master upper device
4922  * @dev: device
4923  *
4924  * Find a master upper device and return pointer to it or NULL in case
4925  * it's not there. The caller must hold the RCU read lock.
4926  */
4927 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4928 {
4929         struct netdev_adjacent *upper;
4930
4931         upper = list_first_or_null_rcu(&dev->adj_list.upper,
4932                                        struct netdev_adjacent, list);
4933         if (upper && likely(upper->master))
4934                 return upper->dev;
4935         return NULL;
4936 }
4937 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4938
4939 static int netdev_adjacent_sysfs_add(struct net_device *dev,
4940                               struct net_device *adj_dev,
4941                               struct list_head *dev_list)
4942 {
4943         char linkname[IFNAMSIZ+7];
4944         sprintf(linkname, dev_list == &dev->adj_list.upper ?
4945                 "upper_%s" : "lower_%s", adj_dev->name);
4946         return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
4947                                  linkname);
4948 }
4949 static void netdev_adjacent_sysfs_del(struct net_device *dev,
4950                                char *name,
4951                                struct list_head *dev_list)
4952 {
4953         char linkname[IFNAMSIZ+7];
4954         sprintf(linkname, dev_list == &dev->adj_list.upper ?
4955                 "upper_%s" : "lower_%s", name);
4956         sysfs_remove_link(&(dev->dev.kobj), linkname);
4957 }
4958
4959 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
4960                                                  struct net_device *adj_dev,
4961                                                  struct list_head *dev_list)
4962 {
4963         return (dev_list == &dev->adj_list.upper ||
4964                 dev_list == &dev->adj_list.lower) &&
4965                 net_eq(dev_net(dev), dev_net(adj_dev));
4966 }
4967
4968 static int __netdev_adjacent_dev_insert(struct net_device *dev,
4969                                         struct net_device *adj_dev,
4970                                         struct list_head *dev_list,
4971                                         void *private, bool master)
4972 {
4973         struct netdev_adjacent *adj;
4974         int ret;
4975
4976         adj = __netdev_find_adj(dev, adj_dev, dev_list);
4977
4978         if (adj) {
4979                 adj->ref_nr++;
4980                 return 0;
4981         }
4982
4983         adj = kmalloc(sizeof(*adj), GFP_KERNEL);
4984         if (!adj)
4985                 return -ENOMEM;
4986
4987         adj->dev = adj_dev;
4988         adj->master = master;
4989         adj->ref_nr = 1;
4990         adj->private = private;
4991         dev_hold(adj_dev);
4992
4993         pr_debug("dev_hold for %s, because of link added from %s to %s\n",
4994                  adj_dev->name, dev->name, adj_dev->name);
4995
4996         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
4997                 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
4998                 if (ret)
4999                         goto free_adj;
5000         }
5001
5002         /* Ensure that master link is always the first item in list. */
5003         if (master) {
5004                 ret = sysfs_create_link(&(dev->dev.kobj),
5005                                         &(adj_dev->dev.kobj), "master");
5006                 if (ret)
5007                         goto remove_symlinks;
5008
5009                 list_add_rcu(&adj->list, dev_list);
5010         } else {
5011                 list_add_tail_rcu(&adj->list, dev_list);
5012         }
5013
5014         return 0;
5015
5016 remove_symlinks:
5017         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
5018                 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5019 free_adj:
5020         kfree(adj);
5021         dev_put(adj_dev);
5022
5023         return ret;
5024 }
5025
5026 static void __netdev_adjacent_dev_remove(struct net_device *dev,
5027                                          struct net_device *adj_dev,
5028                                          struct list_head *dev_list)
5029 {
5030         struct netdev_adjacent *adj;
5031
5032         adj = __netdev_find_adj(dev, adj_dev, dev_list);
5033
5034         if (!adj) {
5035                 pr_err("tried to remove device %s from %s\n",
5036                        dev->name, adj_dev->name);
5037                 BUG();
5038         }
5039
5040         if (adj->ref_nr > 1) {
5041                 pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
5042                          adj->ref_nr-1);
5043                 adj->ref_nr--;
5044                 return;
5045         }
5046
5047         if (adj->master)
5048                 sysfs_remove_link(&(dev->dev.kobj), "master");
5049
5050         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
5051                 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5052
5053         list_del_rcu(&adj->list);
5054         pr_debug("dev_put for %s, because link removed from %s to %s\n",
5055                  adj_dev->name, dev->name, adj_dev->name);
5056         dev_put(adj_dev);
5057         kfree_rcu(adj, rcu);
5058 }
5059
5060 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
5061                                             struct net_device *upper_dev,
5062                                             struct list_head *up_list,
5063                                             struct list_head *down_list,
5064                                             void *private, bool master)
5065 {
5066         int ret;
5067
5068         ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
5069                                            master);
5070         if (ret)
5071                 return ret;
5072
5073         ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
5074                                            false);
5075         if (ret) {
5076                 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5077                 return ret;
5078         }
5079
5080         return 0;
5081 }
5082
5083 static int __netdev_adjacent_dev_link(struct net_device *dev,
5084                                       struct net_device *upper_dev)
5085 {
5086         return __netdev_adjacent_dev_link_lists(dev, upper_dev,
5087                                                 &dev->all_adj_list.upper,
5088                                                 &upper_dev->all_adj_list.lower,
5089                                                 NULL, false);
5090 }
5091
5092 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
5093                                                struct net_device *upper_dev,
5094                                                struct list_head *up_list,
5095                                                struct list_head *down_list)
5096 {
5097         __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5098         __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
5099 }
5100
5101 static void __netdev_adjacent_dev_unlink(struct net_device *dev,
5102                                          struct net_device *upper_dev)
5103 {
5104         __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5105                                            &dev->all_adj_list.upper,
5106                                            &upper_dev->all_adj_list.lower);
5107 }
5108
5109 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
5110                                                 struct net_device *upper_dev,
5111                                                 void *private, bool master)
5112 {
5113         int ret = __netdev_adjacent_dev_link(dev, upper_dev);
5114
5115         if (ret)
5116                 return ret;
5117
5118         ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
5119                                                &dev->adj_list.upper,
5120                                                &upper_dev->adj_list.lower,
5121                                                private, master);
5122         if (ret) {
5123                 __netdev_adjacent_dev_unlink(dev, upper_dev);
5124                 return ret;
5125         }
5126
5127         return 0;
5128 }
5129
5130 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
5131                                                    struct net_device *upper_dev)
5132 {
5133         __netdev_adjacent_dev_unlink(dev, upper_dev);
5134         __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5135                                            &dev->adj_list.upper,
5136                                            &upper_dev->adj_list.lower);
5137 }
5138
5139 static int __netdev_upper_dev_link(struct net_device *dev,
5140                                    struct net_device *upper_dev, bool master,
5141                                    void *private)
5142 {
5143         struct netdev_adjacent *i, *j, *to_i, *to_j;
5144         int ret = 0;
5145
5146         ASSERT_RTNL();
5147
5148         if (dev == upper_dev)
5149                 return -EBUSY;
5150
5151         /* To prevent loops, check that dev is not an upper device to upper_dev. */
5152         if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
5153                 return -EBUSY;
5154
5155         if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
5156                 return -EEXIST;
5157
5158         if (master && netdev_master_upper_dev_get(dev))
5159                 return -EBUSY;
5160
5161         ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
5162                                                    master);
5163         if (ret)
5164                 return ret;
5165
5166         /* Now that we linked these devs, make all the upper_dev's
5167          * all_adj_list.upper visible to every dev's all_adj_list.lower and
5168          * vice versa, and don't forget the devices themselves. All of these
5169          * links are non-neighbours.
5170          */
5171         list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5172                 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5173                         pr_debug("Interlinking %s with %s, non-neighbour\n",
5174                                  i->dev->name, j->dev->name);
5175                         ret = __netdev_adjacent_dev_link(i->dev, j->dev);
5176                         if (ret)
5177                                 goto rollback_mesh;
5178                 }
5179         }
5180
5181         /* add dev to every upper_dev's upper device */
5182         list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5183                 pr_debug("linking %s's upper device %s with %s\n",
5184                          upper_dev->name, i->dev->name, dev->name);
5185                 ret = __netdev_adjacent_dev_link(dev, i->dev);
5186                 if (ret)
5187                         goto rollback_upper_mesh;
5188         }
5189
5190         /* add upper_dev to every dev's lower device */
5191         list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5192                 pr_debug("linking %s's lower device %s with %s\n", dev->name,
5193                          i->dev->name, upper_dev->name);
5194                 ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
5195                 if (ret)
5196                         goto rollback_lower_mesh;
5197         }
5198
5199         call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
5200         return 0;
5201
5202 rollback_lower_mesh:
5203         to_i = i;
5204         list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5205                 if (i == to_i)
5206                         break;
5207                 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5208         }
5209
5210         i = NULL;
5211
5212 rollback_upper_mesh:
5213         to_i = i;
5214         list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5215                 if (i == to_i)
5216                         break;
5217                 __netdev_adjacent_dev_unlink(dev, i->dev);
5218         }
5219
5220         i = j = NULL;
5221
5222 rollback_mesh:
5223         to_i = i;
5224         to_j = j;
5225         list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5226                 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5227                         if (i == to_i && j == to_j)
5228                                 break;
5229                         __netdev_adjacent_dev_unlink(i->dev, j->dev);
5230                 }
5231                 if (i == to_i)
5232                         break;
5233         }
5234
5235         __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5236
5237         return ret;
5238 }
5239
5240 /**
5241  * netdev_upper_dev_link - Add a link to the upper device
5242  * @dev: device
5243  * @upper_dev: new upper device
5244  *
5245  * Adds a link to a device which is upper to this one. The caller must hold
5246  * the RTNL lock. On a failure a negative errno code is returned.
5247  * On success the reference counts are adjusted and the function
5248  * returns zero.
5249  */
5250 int netdev_upper_dev_link(struct net_device *dev,
5251                           struct net_device *upper_dev)
5252 {
5253         return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
5254 }
5255 EXPORT_SYMBOL(netdev_upper_dev_link);
5256
5257 /**
5258  * netdev_master_upper_dev_link - Add a master link to the upper device
5259  * @dev: device
5260  * @upper_dev: new upper device
5261  *
5262  * Adds a link to a device which is upper to this one. In this case, only
5263  * one master upper device can be linked, although other non-master devices
5264  * might be linked as well. The caller must hold the RTNL lock.
5265  * On a failure a negative errno code is returned. On success the reference
5266  * counts are adjusted and the function returns zero.
5267  */
5268 int netdev_master_upper_dev_link(struct net_device *dev,
5269                                  struct net_device *upper_dev)
5270 {
5271         return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
5272 }
5273 EXPORT_SYMBOL(netdev_master_upper_dev_link);
5274
5275 int netdev_master_upper_dev_link_private(struct net_device *dev,
5276                                          struct net_device *upper_dev,
5277                                          void *private)
5278 {
5279         return __netdev_upper_dev_link(dev, upper_dev, true, private);
5280 }
5281 EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
5282
5283 /**
5284  * netdev_upper_dev_unlink - Removes a link to upper device
5285  * @dev: device
5286  * @upper_dev: upper device to remove the link to
5287  *
5288  * Removes a link to a device which is upper to this one. The caller must hold
5289  * the RTNL lock.
5290  */
5291 void netdev_upper_dev_unlink(struct net_device *dev,
5292                              struct net_device *upper_dev)
5293 {
5294         struct netdev_adjacent *i, *j;
5295         ASSERT_RTNL();
5296
5297         __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5298
5299         /* Here is the tricky part. We must remove all dev's lower
5300          * devices from all upper_dev's upper devices and vice
5301          * versa, to maintain the graph relationship.
5302          */
5303         list_for_each_entry(i, &dev->all_adj_list.lower, list)
5304                 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
5305                         __netdev_adjacent_dev_unlink(i->dev, j->dev);
5306
5307         /* also remove the devices themselves from the lower/upper device
5308          * lists
5309          */
5310         list_for_each_entry(i, &dev->all_adj_list.lower, list)
5311                 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5312
5313         list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
5314                 __netdev_adjacent_dev_unlink(dev, i->dev);
5315
5316         call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
5317 }
5318 EXPORT_SYMBOL(netdev_upper_dev_unlink);
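
/*
 * Usage sketch (hypothetical master/slave driver, error unwinding of the
 * surrounding code omitted): the master link is created while enslaving and
 * removed again on release, both under RTNL:
 *
 *	int err;
 *
 *	ASSERT_RTNL();
 *	err = netdev_master_upper_dev_link(slave_dev, master_dev);
 *	if (err)
 *		return err;
 *
 *	// ... later, when releasing the slave:
 *	netdev_upper_dev_unlink(slave_dev, master_dev);
 */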
5319
5320 void netdev_adjacent_add_links(struct net_device *dev)
5321 {
5322         struct netdev_adjacent *iter;
5323
5324         struct net *net = dev_net(dev);
5325
5326         list_for_each_entry(iter, &dev->adj_list.upper, list) {
5327                 if (!net_eq(net, dev_net(iter->dev)))
5328                         continue;
5329                 netdev_adjacent_sysfs_add(iter->dev, dev,
5330                                           &iter->dev->adj_list.lower);
5331                 netdev_adjacent_sysfs_add(dev, iter->dev,
5332                                           &dev->adj_list.upper);
5333         }
5334
5335         list_for_each_entry(iter, &dev->adj_list.lower, list) {
5336                 if (!net_eq(net, dev_net(iter->dev)))
5337                         continue;
5338                 netdev_adjacent_sysfs_add(iter->dev, dev,
5339                                           &iter->dev->adj_list.upper);
5340                 netdev_adjacent_sysfs_add(dev, iter->dev,
5341                                           &dev->adj_list.lower);
5342         }
5343 }
5344
5345 void netdev_adjacent_del_links(struct net_device *dev)
5346 {
5347         struct netdev_adjacent *iter;
5348
5349         struct net *net = dev_net(dev);
5350
5351         list_for_each_entry(iter, &dev->adj_list.upper, list) {
5352                 if (!net_eq(net, dev_net(iter->dev)))
5353                         continue;
5354                 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5355                                           &iter->dev->adj_list.lower);
5356                 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5357                                           &dev->adj_list.upper);
5358         }
5359
5360         list_for_each_entry(iter, &dev->adj_list.lower, list) {
5361                 if (!net_eq(net, dev_net(iter->dev)))
5362                         continue;
5363                 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5364                                           &iter->dev->adj_list.upper);
5365                 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5366                                           &dev->adj_list.lower);
5367         }
5368 }
5369
5370 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
5371 {
5372         struct netdev_adjacent *iter;
5373
5374         struct net *net = dev_net(dev);
5375
5376         list_for_each_entry(iter, &dev->adj_list.upper, list) {
5377                 if (!net_eq(net, dev_net(iter->dev)))
5378                         continue;
5379                 netdev_adjacent_sysfs_del(iter->dev, oldname,
5380                                           &iter->dev->adj_list.lower);
5381                 netdev_adjacent_sysfs_add(iter->dev, dev,
5382                                           &iter->dev->adj_list.lower);
5383         }
5384
5385         list_for_each_entry(iter, &dev->adj_list.lower, list) {
5386                 if (!net_eq(net, dev_net(iter->dev)))
5387                         continue;
5388                 netdev_adjacent_sysfs_del(iter->dev, oldname,
5389                                           &iter->dev->adj_list.upper);
5390                 netdev_adjacent_sysfs_add(iter->dev, dev,
5391                                           &iter->dev->adj_list.upper);
5392         }
5393 }
5394
5395 void *netdev_lower_dev_get_private(struct net_device *dev,
5396                                    struct net_device *lower_dev)
5397 {
5398         struct netdev_adjacent *lower;
5399
5400         if (!lower_dev)
5401                 return NULL;
5402         lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
5403         if (!lower)
5404                 return NULL;
5405
5406         return lower->private;
5407 }
5408 EXPORT_SYMBOL(netdev_lower_dev_get_private);
5409
5410
5411 int dev_get_nest_level(struct net_device *dev,
5412                        bool (*type_check)(struct net_device *dev))
5413 {
5414         struct net_device *lower = NULL;
5415         struct list_head *iter;
5416         int max_nest = -1;
5417         int nest;
5418
5419         ASSERT_RTNL();
5420
5421         netdev_for_each_lower_dev(dev, lower, iter) {
5422                 nest = dev_get_nest_level(lower, type_check);
5423                 if (max_nest < nest)
5424                         max_nest = nest;
5425         }
5426
5427         if (type_check(dev))
5428                 max_nest++;
5429
5430         return max_nest;
5431 }
5432 EXPORT_SYMBOL(dev_get_nest_level);
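
/*
 * Usage sketch: dev_get_nest_level() expects a predicate for one kind of
 * stacked device so that, for instance, lockdep subclasses can be chosen
 * per nesting depth.  is_my_stacked_dev() below is a made-up stand-in for
 * a real check such as is_vlan_dev():
 *
 *	static bool is_my_stacked_dev(struct net_device *dev)
 *	{
 *		return dev->priv_flags & IFF_MACVLAN;	// example flag only
 *	}
 *
 *	// under RTNL:
 *	int nest = dev_get_nest_level(dev, is_my_stacked_dev);
 */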
5433
5434 static void dev_change_rx_flags(struct net_device *dev, int flags)
5435 {
5436         const struct net_device_ops *ops = dev->netdev_ops;
5437
5438         if (ops->ndo_change_rx_flags)
5439                 ops->ndo_change_rx_flags(dev, flags);
5440 }
5441
5442 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
5443 {
5444         unsigned int old_flags = dev->flags;
5445         kuid_t uid;
5446         kgid_t gid;
5447
5448         ASSERT_RTNL();
5449
5450         dev->flags |= IFF_PROMISC;
5451         dev->promiscuity += inc;
5452         if (dev->promiscuity == 0) {
5453                 /*
5454                  * Avoid overflow.
5455                  * If inc causes overflow, untouch promisc and return error.
5456                  */
5457                 if (inc < 0)
5458                         dev->flags &= ~IFF_PROMISC;
5459                 else {
5460                         dev->promiscuity -= inc;
5461                         pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
5462                                 dev->name);
5463                         return -EOVERFLOW;
5464                 }
5465         }
5466         if (dev->flags != old_flags) {
5467                 pr_info("device %s %s promiscuous mode\n",
5468                         dev->name,
5469                         dev->flags & IFF_PROMISC ? "entered" : "left");
5470                 if (audit_enabled) {
5471                         current_uid_gid(&uid, &gid);
5472                         audit_log(current->audit_context, GFP_ATOMIC,
5473                                 AUDIT_ANOM_PROMISCUOUS,
5474                                 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5475                                 dev->name, (dev->flags & IFF_PROMISC),
5476                                 (old_flags & IFF_PROMISC),
5477                                 from_kuid(&init_user_ns, audit_get_loginuid(current)),
5478                                 from_kuid(&init_user_ns, uid),
5479                                 from_kgid(&init_user_ns, gid),
5480                                 audit_get_sessionid(current));
5481                 }
5482
5483                 dev_change_rx_flags(dev, IFF_PROMISC);
5484         }
5485         if (notify)
5486                 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
5487         return 0;
5488 }
5489
5490 /**
5491  *      dev_set_promiscuity     - update promiscuity count on a device
5492  *      @dev: device
5493  *      @inc: modifier
5494  *
5495  *      Add or remove promiscuity from a device. While the count in the device
5496  *      remains above zero the interface remains promiscuous. Once it hits zero
5497  *      the device reverts back to normal filtering operation. A negative inc
5498  *      value is used to drop promiscuity on the device.
5499  *      Return 0 if successful or a negative errno code on error.
5500  */
5501 int dev_set_promiscuity(struct net_device *dev, int inc)
5502 {
5503         unsigned int old_flags = dev->flags;
5504         int err;
5505
5506         err = __dev_set_promiscuity(dev, inc, true);
5507         if (err < 0)
5508                 return err;
5509         if (dev->flags != old_flags)
5510                 dev_set_rx_mode(dev);
5511         return err;
5512 }
5513 EXPORT_SYMBOL(dev_set_promiscuity);
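
/*
 * Usage sketch (hypothetical caller): the promiscuity count is reference
 * counted, so every increment needs a matching decrement, both under RTNL:
 *
 *	int err;
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);	// start capturing
 *	rtnl_unlock();
 *
 *	// ... and when done:
 *	rtnl_lock();
 *	if (!err)
 *		dev_set_promiscuity(dev, -1);	// stop capturing
 *	rtnl_unlock();
 */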
5514
5515 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
5516 {
5517         unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
5518
5519         ASSERT_RTNL();
5520
5521         dev->flags |= IFF_ALLMULTI;
5522         dev->allmulti += inc;
5523         if (dev->allmulti == 0) {
5524                 /*
5525                  * Avoid overflow.
5526                  * If inc causes overflow, untouch allmulti and return error.
5527                  */
5528                 if (inc < 0)
5529                         dev->flags &= ~IFF_ALLMULTI;
5530                 else {
5531                         dev->allmulti -= inc;
5532                         pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
5533                                 dev->name);
5534                         return -EOVERFLOW;
5535                 }
5536         }
5537         if (dev->flags ^ old_flags) {
5538                 dev_change_rx_flags(dev, IFF_ALLMULTI);
5539                 dev_set_rx_mode(dev);
5540                 if (notify)
5541                         __dev_notify_flags(dev, old_flags,
5542                                            dev->gflags ^ old_gflags);
5543         }
5544         return 0;
5545 }
5546
5547 /**
5548  *      dev_set_allmulti        - update allmulti count on a device
5549  *      @dev: device
5550  *      @inc: modifier
5551  *
5552  *      Add or remove reception of all multicast frames to a device. While the
5553  *      count in the device remains above zero the interface remains listening
5554  *      to all multicast frames. Once it hits zero the device reverts back to normal
5555  *      filtering operation. A negative @inc value is used to drop the counter
5556  *      when releasing a resource needing all multicasts.
5557  *      Return 0 if successful or a negative errno code on error.
5558  */
5559
5560 int dev_set_allmulti(struct net_device *dev, int inc)
5561 {
5562         return __dev_set_allmulti(dev, inc, true);
5563 }
5564 EXPORT_SYMBOL(dev_set_allmulti);
5565
5566 /*
5567  *      Upload unicast and multicast address lists to device and
5568  *      configure RX filtering. When the device doesn't support unicast
5569  *      filtering it is put in promiscuous mode while unicast addresses
5570  *      are present.
5571  */
5572 void __dev_set_rx_mode(struct net_device *dev)
5573 {
5574         const struct net_device_ops *ops = dev->netdev_ops;
5575
5576         /* dev_open will call this function so the list will stay sane. */
5577         if (!(dev->flags&IFF_UP))
5578                 return;
5579
5580         if (!netif_device_present(dev))
5581                 return;
5582
5583         if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
5584                 /* Unicast address changes may only happen under the rtnl,
5585                  * therefore calling __dev_set_promiscuity here is safe.
5586                  */
5587                 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
5588                         __dev_set_promiscuity(dev, 1, false);
5589                         dev->uc_promisc = true;
5590                 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
5591                         __dev_set_promiscuity(dev, -1, false);
5592                         dev->uc_promisc = false;
5593                 }
5594         }
5595
5596         if (ops->ndo_set_rx_mode)
5597                 ops->ndo_set_rx_mode(dev);
5598 }
5599
5600 void dev_set_rx_mode(struct net_device *dev)
5601 {
5602         netif_addr_lock_bh(dev);
5603         __dev_set_rx_mode(dev);
5604         netif_addr_unlock_bh(dev);
5605 }
5606
5607 /**
5608  *      dev_get_flags - get flags reported to userspace
5609  *      @dev: device
5610  *
5611  *      Get the combination of flag bits exported through APIs to userspace.
5612  */
5613 unsigned int dev_get_flags(const struct net_device *dev)
5614 {
5615         unsigned int flags;
5616
5617         flags = (dev->flags & ~(IFF_PROMISC |
5618                                 IFF_ALLMULTI |
5619                                 IFF_RUNNING |
5620                                 IFF_LOWER_UP |
5621                                 IFF_DORMANT)) |
5622                 (dev->gflags & (IFF_PROMISC |
5623                                 IFF_ALLMULTI));
5624
5625         if (netif_running(dev)) {
5626                 if (netif_oper_up(dev))
5627                         flags |= IFF_RUNNING;
5628                 if (netif_carrier_ok(dev))
5629                         flags |= IFF_LOWER_UP;
5630                 if (netif_dormant(dev))
5631                         flags |= IFF_DORMANT;
5632         }
5633
5634         return flags;
5635 }
5636 EXPORT_SYMBOL(dev_get_flags);
5637
5638 int __dev_change_flags(struct net_device *dev, unsigned int flags)
5639 {
5640         unsigned int old_flags = dev->flags;
5641         int ret;
5642
5643         ASSERT_RTNL();
5644
5645         /*
5646          *      Set the flags on our device.
5647          */
5648
5649         dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5650                                IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5651                                IFF_AUTOMEDIA)) |
5652                      (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5653                                     IFF_ALLMULTI));
5654
5655         /*
5656          *      Load in the correct multicast list now the flags have changed.
5657          */
5658
5659         if ((old_flags ^ flags) & IFF_MULTICAST)
5660                 dev_change_rx_flags(dev, IFF_MULTICAST);
5661
5662         dev_set_rx_mode(dev);
5663
5664         /*
5665          *      Have we downed the interface? We handle IFF_UP ourselves
5666          *      according to user attempts to set it, rather than blindly
5667          *      setting it.
5668          */
5669
5670         ret = 0;
5671         if ((old_flags ^ flags) & IFF_UP)
5672                 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
5673
5674         if ((flags ^ dev->gflags) & IFF_PROMISC) {
5675                 int inc = (flags & IFF_PROMISC) ? 1 : -1;
5676                 unsigned int old_flags = dev->flags;
5677
5678                 dev->gflags ^= IFF_PROMISC;
5679
5680                 if (__dev_set_promiscuity(dev, inc, false) >= 0)
5681                         if (dev->flags != old_flags)
5682                                 dev_set_rx_mode(dev);
5683         }
5684
5685         /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
5686            is important. Some (broken) drivers set IFF_PROMISC when
5687            IFF_ALLMULTI is requested, without asking us and without reporting it.
5688          */
5689         if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
5690                 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5691
5692                 dev->gflags ^= IFF_ALLMULTI;
5693                 __dev_set_allmulti(dev, inc, false);
5694         }
5695
5696         return ret;
5697 }
5698
5699 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
5700                         unsigned int gchanges)
5701 {
5702         unsigned int changes = dev->flags ^ old_flags;
5703
5704         if (gchanges)
5705                 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
5706
5707         if (changes & IFF_UP) {
5708                 if (dev->flags & IFF_UP)
5709                         call_netdevice_notifiers(NETDEV_UP, dev);
5710                 else
5711                         call_netdevice_notifiers(NETDEV_DOWN, dev);
5712         }
5713
5714         if (dev->flags & IFF_UP &&
5715             (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
5716                 struct netdev_notifier_change_info change_info;
5717
5718                 change_info.flags_changed = changes;
5719                 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
5720                                               &change_info.info);
5721         }
5722 }
5723
5724 /**
5725  *      dev_change_flags - change device settings
5726  *      @dev: device
5727  *      @flags: device state flags
5728  *
5729  *      Change settings on device based state flags. The flags are
5730  *      in the userspace exported format.
5731  */
5732 int dev_change_flags(struct net_device *dev, unsigned int flags)
5733 {
5734         int ret;
5735         unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
5736
5737         ret = __dev_change_flags(dev, flags);
5738         if (ret < 0)
5739                 return ret;
5740
5741         changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
5742         __dev_notify_flags(dev, old_flags, changes);
5743         return ret;
5744 }
5745 EXPORT_SYMBOL(dev_change_flags);
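
/*
 * Usage sketch (hypothetical caller): bringing an interface up from kernel
 * code by feeding the userspace-format flags back in with IFF_UP set,
 * mirroring what a SIOCGIFFLAGS/SIOCSIFFLAGS pair does, under RTNL:
 *
 *	int err;
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
 *	rtnl_unlock();
 */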
5746
5747 static int __dev_set_mtu(struct net_device *dev, int new_mtu)
5748 {
5749         const struct net_device_ops *ops = dev->netdev_ops;
5750
5751         if (ops->ndo_change_mtu)
5752                 return ops->ndo_change_mtu(dev, new_mtu);
5753
5754         dev->mtu = new_mtu;
5755         return 0;
5756 }
5757
5758 /**
5759  *      dev_set_mtu - Change maximum transfer unit
5760  *      @dev: device
5761  *      @new_mtu: new transfer unit
5762  *
5763  *      Change the maximum transfer size of the network device.
5764  */
5765 int dev_set_mtu(struct net_device *dev, int new_mtu)
5766 {
5767         int err, orig_mtu;
5768
5769         if (new_mtu == dev->mtu)
5770                 return 0;
5771
5772         /*      MTU must not be negative.        */
5773         if (new_mtu < 0)
5774                 return -EINVAL;
5775
5776         if (!netif_device_present(dev))
5777                 return -ENODEV;
5778
5779         err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
5780         err = notifier_to_errno(err);
5781         if (err)
5782                 return err;
5783
5784         orig_mtu = dev->mtu;
5785         err = __dev_set_mtu(dev, new_mtu);
5786
5787         if (!err) {
5788                 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5789                 err = notifier_to_errno(err);
5790                 if (err) {
5791                         /* setting mtu back and notifying everyone again,
5792                          * so that they have a chance to revert changes.
5793                          */
5794                         __dev_set_mtu(dev, orig_mtu);
5795                         call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5796                 }
5797         }
5798         return err;
5799 }
5800 EXPORT_SYMBOL(dev_set_mtu);
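
/*
 * Usage sketch (hypothetical caller with RTNL held; 9000 is just an example
 * value): a negative return means the driver or a notifier rejected the new
 * MTU and the old value is still in place:
 *
 *	int err;
 *
 *	err = dev_set_mtu(dev, 9000);
 *	if (err)
 *		netdev_warn(dev, "cannot enable jumbo frames: %d\n", err);
 */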
5801
5802 /**
5803  *      dev_set_group - Change group this device belongs to
5804  *      @dev: device
5805  *      @new_group: group this device should belong to
5806  */
5807 void dev_set_group(struct net_device *dev, int new_group)
5808 {
5809         dev->group = new_group;
5810 }
5811 EXPORT_SYMBOL(dev_set_group);
5812
5813 /**
5814  *      dev_set_mac_address - Change Media Access Control Address
5815  *      @dev: device
5816  *      @sa: new address
5817  *
5818  *      Change the hardware (MAC) address of the device
5819  */
5820 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
5821 {
5822         const struct net_device_ops *ops = dev->netdev_ops;
5823         int err;
5824
5825         if (!ops->ndo_set_mac_address)
5826                 return -EOPNOTSUPP;
5827         if (sa->sa_family != dev->type)
5828                 return -EINVAL;
5829         if (!netif_device_present(dev))
5830                 return -ENODEV;
5831         err = ops->ndo_set_mac_address(dev, sa);
5832         if (err)
5833                 return err;
5834         dev->addr_assign_type = NET_ADDR_SET;
5835         call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
5836         add_device_randomness(dev->dev_addr, dev->addr_len);
5837         return 0;
5838 }
5839 EXPORT_SYMBOL(dev_set_mac_address);
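
/*
 * Usage sketch (hypothetical caller, example address only): setting a new
 * hardware address under RTNL.  sa_family must match dev->type, which for
 * Ethernet devices is ARPHRD_ETHER:
 *
 *	static const u8 new_addr[ETH_ALEN] = {
 *		0x02, 0x00, 0x00, 0x00, 0x00, 0x01
 *	};
 *	struct sockaddr sa;
 *	int err;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_addr, ETH_ALEN);
 *	err = dev_set_mac_address(dev, &sa);
 */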
5840
5841 /**
5842  *      dev_change_carrier - Change device carrier
5843  *      @dev: device
5844  *      @new_carrier: new value
5845  *
5846  *      Change device carrier
5847  */
5848 int dev_change_carrier(struct net_device *dev, bool new_carrier)
5849 {
5850         const struct net_device_ops *ops = dev->netdev_ops;
5851
5852         if (!ops->ndo_change_carrier)
5853                 return -EOPNOTSUPP;
5854         if (!netif_device_present(dev))
5855                 return -ENODEV;
5856         return ops->ndo_change_carrier(dev, new_carrier);
5857 }
5858 EXPORT_SYMBOL(dev_change_carrier);
5859
5860 /**
5861  *      dev_get_phys_port_id - Get device physical port ID
5862  *      @dev: device
5863  *      @ppid: port ID
5864  *
5865  *      Get device physical port ID
5866  */
5867 int dev_get_phys_port_id(struct net_device *dev,
5868                          struct netdev_phys_item_id *ppid)
5869 {
5870         const struct net_device_ops *ops = dev->netdev_ops;
5871
5872         if (!ops->ndo_get_phys_port_id)
5873                 return -EOPNOTSUPP;
5874         return ops->ndo_get_phys_port_id(dev, ppid);
5875 }
5876 EXPORT_SYMBOL(dev_get_phys_port_id);
5877
5878 /**
5879  *      dev_new_index   -       allocate an ifindex
5880  *      @net: the applicable net namespace
5881  *
5882  *      Returns a suitable unique value for a new device interface
5883  *      number.  The caller must hold the rtnl semaphore or the
5884  *      dev_base_lock to be sure it remains unique.
5885  */
5886 static int dev_new_index(struct net *net)
5887 {
5888         int ifindex = net->ifindex;
5889         for (;;) {
5890                 if (++ifindex <= 0)
5891                         ifindex = 1;
5892                 if (!__dev_get_by_index(net, ifindex))
5893                         return net->ifindex = ifindex;
5894         }
5895 }
5896
5897 /* Delayed registration/unregisteration */
5898 static LIST_HEAD(net_todo_list);
5899 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
5900
5901 static void net_set_todo(struct net_device *dev)
5902 {
5903         list_add_tail(&dev->todo_list, &net_todo_list);
5904         dev_net(dev)->dev_unreg_count++;
5905 }
5906
5907 static void rollback_registered_many(struct list_head *head)
5908 {
5909         struct net_device *dev, *tmp;
5910         LIST_HEAD(close_head);
5911
5912         BUG_ON(dev_boot_phase);
5913         ASSERT_RTNL();
5914
5915         list_for_each_entry_safe(dev, tmp, head, unreg_list) {
5916                 /* Some devices end up here without ever having been
5917                  * registered, as part of initialization unwind. Remove
5918                  * those devices and proceed with the remaining.
5919                  */
5920                 if (dev->reg_state == NETREG_UNINITIALIZED) {
5921                         pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5922                                  dev->name, dev);
5923
5924                         WARN_ON(1);
5925                         list_del(&dev->unreg_list);
5926                         continue;
5927                 }
5928                 dev->dismantle = true;
5929                 BUG_ON(dev->reg_state != NETREG_REGISTERED);
5930         }
5931
5932         /* If device is running, close it first. */
5933         list_for_each_entry(dev, head, unreg_list)
5934                 list_add_tail(&dev->close_list, &close_head);
5935         dev_close_many(&close_head);
5936
5937         list_for_each_entry(dev, head, unreg_list) {
5938                 /* And unlink it from device chain. */
5939                 unlist_netdevice(dev);
5940
5941                 dev->reg_state = NETREG_UNREGISTERING;
5942         }
5943
5944         synchronize_net();
5945
5946         list_for_each_entry(dev, head, unreg_list) {
5947                 struct sk_buff *skb = NULL;
5948
5949                 /* Shutdown queueing discipline. */
5950                 dev_shutdown(dev);
5951
5953                 /* Notify protocols that we are about to destroy
5954                  * this device. They should clean up all of their state.
5955                  */
5956                 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5957
5958                 if (!dev->rtnl_link_ops ||
5959                     dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5960                         skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U,
5961                                                      GFP_KERNEL);
5962
5963                 /*
5964                  *      Flush the unicast and multicast chains
5965                  */
5966                 dev_uc_flush(dev);
5967                 dev_mc_flush(dev);
5968
5969                 if (dev->netdev_ops->ndo_uninit)
5970                         dev->netdev_ops->ndo_uninit(dev);
5971
5972                 if (skb)
5973                         rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
5974
5975                 /* The notifier chain MUST have detached all upper devices from us. */
5976                 WARN_ON(netdev_has_any_upper_dev(dev));
5977
5978                 /* Remove entries from kobject tree */
5979                 netdev_unregister_kobject(dev);
5980 #ifdef CONFIG_XPS
5981                 /* Remove XPS queueing entries */
5982                 netif_reset_xps_queues_gt(dev, 0);
5983 #endif
5984         }
5985
5986         synchronize_net();
5987
5988         list_for_each_entry(dev, head, unreg_list)
5989                 dev_put(dev);
5990 }
5991
5992 static void rollback_registered(struct net_device *dev)
5993 {
5994         LIST_HEAD(single);
5995
5996         list_add(&dev->unreg_list, &single);
5997         rollback_registered_many(&single);
5998         list_del(&single);
5999 }
6000
6001 static netdev_features_t netdev_fix_features(struct net_device *dev,
6002         netdev_features_t features)
6003 {
6004         /* Fix illegal checksum combinations */
6005         if ((features & NETIF_F_HW_CSUM) &&
6006             (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6007                 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
6008                 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
6009         }
6010
6011         /* TSO requires that SG is present as well. */
6012         if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
6013                 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
6014                 features &= ~NETIF_F_ALL_TSO;
6015         }
6016
6017         if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
6018                                         !(features & NETIF_F_IP_CSUM)) {
6019                 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
6020                 features &= ~NETIF_F_TSO;
6021                 features &= ~NETIF_F_TSO_ECN;
6022         }
6023
6024         if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
6025                                          !(features & NETIF_F_IPV6_CSUM)) {
6026                 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
6027                 features &= ~NETIF_F_TSO6;
6028         }
6029
6030         /* TSO ECN requires that TSO is present as well. */
6031         if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
6032                 features &= ~NETIF_F_TSO_ECN;
6033
6034         /* Software GSO depends on SG. */
6035         if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
6036                 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
6037                 features &= ~NETIF_F_GSO;
6038         }
6039
6040         /* UFO needs SG and checksumming */
6041         if (features & NETIF_F_UFO) {
6042                 /* maybe split UFO into V4 and V6? */
6043                 if (!((features & NETIF_F_GEN_CSUM) ||
6044                     (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
6045                             == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6046                         netdev_dbg(dev,
6047                                 "Dropping NETIF_F_UFO since no checksum offload features.\n");
6048                         features &= ~NETIF_F_UFO;
6049                 }
6050
6051                 if (!(features & NETIF_F_SG)) {
6052                         netdev_dbg(dev,
6053                                 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
6054                         features &= ~NETIF_F_UFO;
6055                 }
6056         }
6057
6058 #ifdef CONFIG_NET_RX_BUSY_POLL
6059         if (dev->netdev_ops->ndo_busy_poll)
6060                 features |= NETIF_F_BUSY_POLL;
6061         else
6062 #endif
6063                 features &= ~NETIF_F_BUSY_POLL;
6064
6065         return features;
6066 }
6067
6068 int __netdev_update_features(struct net_device *dev)
6069 {
6070         netdev_features_t features;
6071         int err = 0;
6072
6073         ASSERT_RTNL();
6074
6075         features = netdev_get_wanted_features(dev);
6076
6077         if (dev->netdev_ops->ndo_fix_features)
6078                 features = dev->netdev_ops->ndo_fix_features(dev, features);
6079
6080         /* driver might be less strict about feature dependencies */
6081         features = netdev_fix_features(dev, features);
6082
6083         if (dev->features == features)
6084                 return 0;
6085
6086         netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
6087                 &dev->features, &features);
6088
6089         if (dev->netdev_ops->ndo_set_features)
6090                 err = dev->netdev_ops->ndo_set_features(dev, features);
6091
6092         if (unlikely(err < 0)) {
6093                 netdev_err(dev,
6094                         "set_features() failed (%d); wanted %pNF, left %pNF\n",
6095                         err, &features, &dev->features);
6096                 return -1;
6097         }
6098
6099         if (!err)
6100                 dev->features = features;
6101
6102         return 1;
6103 }
6104
6105 /**
6106  *      netdev_update_features - recalculate device features
6107  *      @dev: the device to check
6108  *
6109  *      Recalculate dev->features set and send notifications if it
6110  *      has changed. Should be called after driver- or hardware-dependent
6111  *      conditions that influence the features might have changed.
6112  */
6113 void netdev_update_features(struct net_device *dev)
6114 {
6115         if (__netdev_update_features(dev))
6116                 netdev_features_change(dev);
6117 }
6118 EXPORT_SYMBOL(netdev_update_features);
6119
6120 /**
6121  *      netdev_change_features - recalculate device features
6122  *      @dev: the device to check
6123  *
6124  *      Recalculate dev->features set and send notifications even
6125  *      if they have not changed. Should be called instead of
6126  *      netdev_update_features() if also dev->vlan_features might
6127  *      have changed to allow the changes to be propagated to stacked
6128  *      VLAN devices.
6129  */
6130 void netdev_change_features(struct net_device *dev)
6131 {
6132         __netdev_update_features(dev);
6133         netdev_features_change(dev);
6134 }
6135 EXPORT_SYMBOL(netdev_change_features);
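/*
 * Example: a minimal sketch of how a driver might plug into the feature
 * recalculation above.  struct feat_sketch_priv, its no_tso flag and
 * feat_sketch_set_mode() are hypothetical; ndo_fix_features and
 * netdev_update_features() are the interfaces __netdev_update_features()
 * actually uses, and netdev_fix_features() then enforces the generic
 * dependencies on whatever the driver returns.
 */
struct feat_sketch_priv {
	bool no_tso;		/* hypothetical: hardware mode without TSO */
};

static netdev_features_t feat_sketch_fix_features(struct net_device *dev,
						  netdev_features_t features)
{
	struct feat_sketch_priv *priv = netdev_priv(dev);

	if (priv->no_tso)
		features &= ~NETIF_F_ALL_TSO;
	return features;
}

static void feat_sketch_set_mode(struct net_device *dev, bool no_tso)
{
	struct feat_sketch_priv *priv = netdev_priv(dev);

	ASSERT_RTNL();		/* __netdev_update_features() asserts this */
	priv->no_tso = no_tso;
	netdev_update_features(dev);	/* notifies only if dev->features changed */
}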
6136
6137 /**
6138  *      netif_stacked_transfer_operstate -      transfer operstate
6139  *      @rootdev: the root or lower level device to transfer state from
6140  *      @dev: the device to transfer operstate to
6141  *
6142  *      Transfer operational state from root to device. This is normally
6143  *      called when a stacking relationship exists between the root
6144  *      device and the device (a leaf device).
6145  */
6146 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
6147                                         struct net_device *dev)
6148 {
6149         if (rootdev->operstate == IF_OPER_DORMANT)
6150                 netif_dormant_on(dev);
6151         else
6152                 netif_dormant_off(dev);
6153
6154         if (netif_carrier_ok(rootdev)) {
6155                 if (!netif_carrier_ok(dev))
6156                         netif_carrier_on(dev);
6157         } else {
6158                 if (netif_carrier_ok(dev))
6159                         netif_carrier_off(dev);
6160         }
6161 }
6162 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
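/*
 * Example: a sketch of a stacking driver propagating lower-device state
 * from its netdevice notifier.  stk_sketch_find_upper() is a hypothetical
 * lookup of the upper device built on top of the lower one.
 */
static struct net_device *stk_sketch_find_upper(struct net_device *lower);

static int stk_sketch_notifier(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct net_device *lower = netdev_notifier_info_to_dev(ptr);
	struct net_device *upper = stk_sketch_find_upper(lower);

	if (!upper)
		return NOTIFY_DONE;

	if (event == NETDEV_UP || event == NETDEV_CHANGE)
		netif_stacked_transfer_operstate(lower, upper);
	return NOTIFY_DONE;
}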
6163
6164 #ifdef CONFIG_SYSFS
6165 static int netif_alloc_rx_queues(struct net_device *dev)
6166 {
6167         unsigned int i, count = dev->num_rx_queues;
6168         struct netdev_rx_queue *rx;
6169
6170         BUG_ON(count < 1);
6171
6172         rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
6173         if (!rx)
6174                 return -ENOMEM;
6175
6176         dev->_rx = rx;
6177
6178         for (i = 0; i < count; i++)
6179                 rx[i].dev = dev;
6180         return 0;
6181 }
6182 #endif
6183
6184 static void netdev_init_one_queue(struct net_device *dev,
6185                                   struct netdev_queue *queue, void *_unused)
6186 {
6187         /* Initialize queue lock */
6188         spin_lock_init(&queue->_xmit_lock);
6189         netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
6190         queue->xmit_lock_owner = -1;
6191         netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
6192         queue->dev = dev;
6193 #ifdef CONFIG_BQL
6194         dql_init(&queue->dql, HZ);
6195 #endif
6196 }
6197
6198 static void netif_free_tx_queues(struct net_device *dev)
6199 {
6200         kvfree(dev->_tx);
6201 }
6202
6203 static int netif_alloc_netdev_queues(struct net_device *dev)
6204 {
6205         unsigned int count = dev->num_tx_queues;
6206         struct netdev_queue *tx;
6207         size_t sz = count * sizeof(*tx);
6208
6209         BUG_ON(count < 1 || count > 0xffff);
6210
6211         tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6212         if (!tx) {
6213                 tx = vzalloc(sz);
6214                 if (!tx)
6215                         return -ENOMEM;
6216         }
6217         dev->_tx = tx;
6218
6219         netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
6220         spin_lock_init(&dev->tx_global_lock);
6221
6222         return 0;
6223 }
6224
6225 /**
6226  *      register_netdevice      - register a network device
6227  *      @dev: device to register
6228  *
6229  *      Take a completed network device structure and add it to the kernel
6230  *      interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6231  *      chain. 0 is returned on success. A negative errno code is returned
6232  *      on a failure to set up the device, or if the name is a duplicate.
6233  *
6234  *      Callers must hold the rtnl semaphore. You may want
6235  *      register_netdev() instead of this.
6236  *
6237  *      BUGS:
6238  *      The locking appears insufficient to guarantee two parallel registers
6239  *      will not get the same name.
6240  */
6241
6242 int register_netdevice(struct net_device *dev)
6243 {
6244         int ret;
6245         struct net *net = dev_net(dev);
6246
6247         BUG_ON(dev_boot_phase);
6248         ASSERT_RTNL();
6249
6250         might_sleep();
6251
6252         /* When net_device structures are persistent, this will be fatal. */
6253         BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
6254         BUG_ON(!net);
6255
6256         spin_lock_init(&dev->addr_list_lock);
6257         netdev_set_addr_lockdep_class(dev);
6258
6259         dev->iflink = -1;
6260
6261         ret = dev_get_valid_name(net, dev, dev->name);
6262         if (ret < 0)
6263                 goto out;
6264
6265         /* Init, if this function is available */
6266         if (dev->netdev_ops->ndo_init) {
6267                 ret = dev->netdev_ops->ndo_init(dev);
6268                 if (ret) {
6269                         if (ret > 0)
6270                                 ret = -EIO;
6271                         goto out;
6272                 }
6273         }
6274
6275         if (((dev->hw_features | dev->features) &
6276              NETIF_F_HW_VLAN_CTAG_FILTER) &&
6277             (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
6278              !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
6279                 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
6280                 ret = -EINVAL;
6281                 goto err_uninit;
6282         }
6283
6284         ret = -EBUSY;
6285         if (!dev->ifindex)
6286                 dev->ifindex = dev_new_index(net);
6287         else if (__dev_get_by_index(net, dev->ifindex))
6288                 goto err_uninit;
6289
6290         if (dev->iflink == -1)
6291                 dev->iflink = dev->ifindex;
6292
6293         /* Transfer changeable features to wanted_features and enable
6294          * software offloads (GSO and GRO).
6295          */
6296         dev->hw_features |= NETIF_F_SOFT_FEATURES;
6297         dev->features |= NETIF_F_SOFT_FEATURES;
6298         dev->wanted_features = dev->features & dev->hw_features;
6299
6300         if (!(dev->flags & IFF_LOOPBACK)) {
6301                 dev->hw_features |= NETIF_F_NOCACHE_COPY;
6302         }
6303
6304         /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
6305          */
6306         dev->vlan_features |= NETIF_F_HIGHDMA;
6307
6308         /* Make NETIF_F_SG inheritable to tunnel devices.
6309          */
6310         dev->hw_enc_features |= NETIF_F_SG;
6311
6312         /* Make NETIF_F_SG inheritable to MPLS.
6313          */
6314         dev->mpls_features |= NETIF_F_SG;
6315
6316         ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6317         ret = notifier_to_errno(ret);
6318         if (ret)
6319                 goto err_uninit;
6320
6321         ret = netdev_register_kobject(dev);
6322         if (ret)
6323                 goto err_uninit;
6324         dev->reg_state = NETREG_REGISTERED;
6325
6326         __netdev_update_features(dev);
6327
6328         /*
6329          *      Default initial state at registration is that the
6330          *      device is present.
6331          */
6332
6333         set_bit(__LINK_STATE_PRESENT, &dev->state);
6334
6335         linkwatch_init_dev(dev);
6336
6337         dev_init_scheduler(dev);
6338         dev_hold(dev);
6339         list_netdevice(dev);
6340         add_device_randomness(dev->dev_addr, dev->addr_len);
6341
6342         /* If the device has a permanent device address, the driver should
6343          * set dev_addr and leave addr_assign_type set to
6344          * NET_ADDR_PERM (the default value).
6345          */
6346         if (dev->addr_assign_type == NET_ADDR_PERM)
6347                 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6348
6349         /* Notify protocols that a new device appeared. */
6350         ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
6351         ret = notifier_to_errno(ret);
6352         if (ret) {
6353                 rollback_registered(dev);
6354                 dev->reg_state = NETREG_UNREGISTERED;
6355         }
6356         /*
6357          *      Prevent userspace races by waiting until the network
6358          *      device is fully set up before sending notifications.
6359          */
6360         if (!dev->rtnl_link_ops ||
6361             dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6362                 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
6363
6364 out:
6365         return ret;
6366
6367 err_uninit:
6368         if (dev->netdev_ops->ndo_uninit)
6369                 dev->netdev_ops->ndo_uninit(dev);
6370         goto out;
6371 }
6372 EXPORT_SYMBOL(register_netdevice);
6373
6374 /**
6375  *      init_dummy_netdev       - init a dummy network device for NAPI
6376  *      @dev: device to init
6377  *
6378  *      This takes a network device structure and initializes the minimum
6379  *      number of fields so it can be used to schedule NAPI polls without
6380  *      registering a full blown interface. This is to be used by drivers
6381  *      that need to tie several hardware interfaces to a single NAPI
6382  *      poll scheduler due to HW limitations.
6383  */
6384 int init_dummy_netdev(struct net_device *dev)
6385 {
6386         /* Clear everything. Note we don't initialize spinlocks
6387          * as they aren't supposed to be taken by any of the
6388          * NAPI code and this dummy netdev is supposed to be
6389          * only ever used for NAPI polls
6390          */
6391         memset(dev, 0, sizeof(struct net_device));
6392
6393         /* make sure we BUG if trying to hit standard
6394          * register/unregister code path
6395          */
6396         dev->reg_state = NETREG_DUMMY;
6397
6398         /* NAPI wants this */
6399         INIT_LIST_HEAD(&dev->napi_list);
6400
6401         /* a dummy interface is started by default */
6402         set_bit(__LINK_STATE_PRESENT, &dev->state);
6403         set_bit(__LINK_STATE_START, &dev->state);
6404
6405         /* Note: We don't allocate pcpu_refcnt for dummy devices,
6406          * because users of this 'device' don't need to change
6407          * its refcount.
6408          */
6409
6410         return 0;
6411 }
6412 EXPORT_SYMBOL_GPL(init_dummy_netdev);
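/*
 * Example: a sketch of the intended use of init_dummy_netdev(): hosting
 * NAPI contexts without a registered interface.  struct dummy_sketch_hw
 * and dummy_sketch_poll() are hypothetical; netif_napi_add() and
 * napi_enable() are the regular NAPI entry points.
 */
struct dummy_sketch_hw {
	struct net_device napi_dev;	/* dummy device, never registered */
	struct napi_struct napi;
};

static int dummy_sketch_poll(struct napi_struct *napi, int budget);

static void dummy_sketch_init_napi(struct dummy_sketch_hw *hw)
{
	init_dummy_netdev(&hw->napi_dev);
	netif_napi_add(&hw->napi_dev, &hw->napi, dummy_sketch_poll,
		       NAPI_POLL_WEIGHT);
	napi_enable(&hw->napi);
}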
6413
6414
6415 /**
6416  *      register_netdev - register a network device
6417  *      @dev: device to register
6418  *
6419  *      Take a completed network device structure and add it to the kernel
6420  *      interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6421  *      chain. 0 is returned on success. A negative errno code is returned
6422  *      on a failure to set up the device, or if the name is a duplicate.
6423  *
6424  *      This is a wrapper around register_netdevice that takes the rtnl semaphore
6425  *      and expands the device name if you passed a format string to
6426  *      alloc_netdev.
6427  */
6428 int register_netdev(struct net_device *dev)
6429 {
6430         int err;
6431
6432         rtnl_lock();
6433         err = register_netdevice(dev);
6434         rtnl_unlock();
6435         return err;
6436 }
6437 EXPORT_SYMBOL(register_netdev);
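/*
 * Example: a sketch of the register/unregister pairing most drivers use.
 * The device is assumed to have been allocated with alloc_netdev_mqs()
 * (see below) or one of its wrappers; reg_sketch_probe() and
 * reg_sketch_remove() are illustrative names.
 */
static int reg_sketch_probe(struct net_device *dev)
{
	int err;

	err = register_netdev(dev);	/* takes and releases the rtnl itself */
	if (err)
		free_netdev(dev);	/* never registered, free directly */
	return err;
}

static void reg_sketch_remove(struct net_device *dev)
{
	unregister_netdev(dev);		/* waits for outstanding references */
	free_netdev(dev);		/* final release of the device */
}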
6438
6439 int netdev_refcnt_read(const struct net_device *dev)
6440 {
6441         int i, refcnt = 0;
6442
6443         for_each_possible_cpu(i)
6444                 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
6445         return refcnt;
6446 }
6447 EXPORT_SYMBOL(netdev_refcnt_read);
6448
6449 /**
6450  * netdev_wait_allrefs - wait until all references are gone.
6451  * @dev: target net_device
6452  *
6453  * This is called when unregistering network devices.
6454  *
6455  * Any protocol or device that holds a reference should register
6456  * for netdevice notification, and clean up and put back the
6457  * reference if they receive an UNREGISTER event.
6458  * We can get stuck here if buggy protocols don't correctly
6459  * call dev_put.
6460  */
6461 static void netdev_wait_allrefs(struct net_device *dev)
6462 {
6463         unsigned long rebroadcast_time, warning_time;
6464         int refcnt;
6465
6466         linkwatch_forget_dev(dev);
6467
6468         rebroadcast_time = warning_time = jiffies;
6469         refcnt = netdev_refcnt_read(dev);
6470
6471         while (refcnt != 0) {
6472                 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6473                         rtnl_lock();
6474
6475                         /* Rebroadcast unregister notification */
6476                         call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6477
6478                         __rtnl_unlock();
6479                         rcu_barrier();
6480                         rtnl_lock();
6481
6482                         call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6483                         if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6484                                      &dev->state)) {
6485                                 /* We must not have linkwatch events
6486                                  * pending on unregister. If this
6487                                  * happens, we simply run the queue
6488                                  * unscheduled, resulting in a noop
6489                                  * for this device.
6490                                  */
6491                                 linkwatch_run_queue();
6492                         }
6493
6494                         __rtnl_unlock();
6495
6496                         rebroadcast_time = jiffies;
6497                 }
6498
6499                 msleep(250);
6500
6501                 refcnt = netdev_refcnt_read(dev);
6502
6503                 if (time_after(jiffies, warning_time + 10 * HZ)) {
6504                         pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6505                                  dev->name, refcnt);
6506                         warning_time = jiffies;
6507                 }
6508         }
6509 }
6510
6511 /* The sequence is:
6512  *
6513  *      rtnl_lock();
6514  *      ...
6515  *      register_netdevice(x1);
6516  *      register_netdevice(x2);
6517  *      ...
6518  *      unregister_netdevice(y1);
6519  *      unregister_netdevice(y2);
6520  *      ...
6521  *      rtnl_unlock();
6522  *      free_netdev(y1);
6523  *      free_netdev(y2);
6524  *
6525  * We are invoked by rtnl_unlock().
6526  * This allows us to deal with problems:
6527  * 1) We can delete sysfs objects which invoke hotplug
6528  *    without deadlocking with linkwatch via keventd.
6529  * 2) Since we run with the RTNL semaphore not held, we can sleep
6530  *    safely in order to wait for the netdev refcnt to drop to zero.
6531  *
6532  * We must not return until all unregister events added during
6533  * the interval the lock was held have been completed.
6534  */
6535 void netdev_run_todo(void)
6536 {
6537         struct list_head list;
6538
6539         /* Snapshot list, allow later requests */
6540         list_replace_init(&net_todo_list, &list);
6541
6542         __rtnl_unlock();
6543
6544
6545         /* Wait for rcu callbacks to finish before next phase */
6546         if (!list_empty(&list))
6547                 rcu_barrier();
6548
6549         while (!list_empty(&list)) {
6550                 struct net_device *dev
6551                         = list_first_entry(&list, struct net_device, todo_list);
6552                 list_del(&dev->todo_list);
6553
6554                 rtnl_lock();
6555                 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6556                 __rtnl_unlock();
6557
6558                 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
6559                         pr_err("network todo '%s' but state %d\n",
6560                                dev->name, dev->reg_state);
6561                         dump_stack();
6562                         continue;
6563                 }
6564
6565                 dev->reg_state = NETREG_UNREGISTERED;
6566
6567                 on_each_cpu(flush_backlog, dev, 1);
6568
6569                 netdev_wait_allrefs(dev);
6570
6571                 /* paranoia */
6572                 BUG_ON(netdev_refcnt_read(dev));
6573                 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6574                 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
6575                 WARN_ON(dev->dn_ptr);
6576
6577                 if (dev->destructor)
6578                         dev->destructor(dev);
6579
6580                 /* Report a network device has been unregistered */
6581                 rtnl_lock();
6582                 dev_net(dev)->dev_unreg_count--;
6583                 __rtnl_unlock();
6584                 wake_up(&netdev_unregistering_wq);
6585
6586                 /* Free network device */
6587                 kobject_put(&dev->dev.kobj);
6588         }
6589 }
6590
6591 /* Convert net_device_stats to rtnl_link_stats64.  They have the same
6592  * fields in the same order, with only the type differing.
6593  */
6594 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6595                              const struct net_device_stats *netdev_stats)
6596 {
6597 #if BITS_PER_LONG == 64
6598         BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6599         memcpy(stats64, netdev_stats, sizeof(*stats64));
6600 #else
6601         size_t i, n = sizeof(*stats64) / sizeof(u64);
6602         const unsigned long *src = (const unsigned long *)netdev_stats;
6603         u64 *dst = (u64 *)stats64;
6604
6605         BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6606                      sizeof(*stats64) / sizeof(u64));
6607         for (i = 0; i < n; i++)
6608                 dst[i] = src[i];
6609 #endif
6610 }
6611 EXPORT_SYMBOL(netdev_stats_to_stats64);
6612
6613 /**
6614  *      dev_get_stats   - get network device statistics
6615  *      @dev: device to get statistics from
6616  *      @storage: place to store stats
6617  *
6618  *      Get network statistics from device. Return @storage.
6619  *      The device driver may provide its own method by setting
6620  *      dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
6621  *      otherwise the internal statistics structure is used.
6622  */
6623 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6624                                         struct rtnl_link_stats64 *storage)
6625 {
6626         const struct net_device_ops *ops = dev->netdev_ops;
6627
6628         if (ops->ndo_get_stats64) {
6629                 memset(storage, 0, sizeof(*storage));
6630                 ops->ndo_get_stats64(dev, storage);
6631         } else if (ops->ndo_get_stats) {
6632                 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
6633         } else {
6634                 netdev_stats_to_stats64(storage, &dev->stats);
6635         }
6636         storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
6637         storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
6638         return storage;
6639 }
6640 EXPORT_SYMBOL(dev_get_stats);
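/*
 * Example: a sketch of a caller reading cumulative statistics; only a few
 * of the rtnl_link_stats64 fields are shown and stats_sketch_dump() is an
 * illustrative name.
 */
static void stats_sketch_dump(struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	netdev_info(dev, "rx %llu pkts / %llu bytes, tx %llu pkts / %llu bytes\n",
		    stats->rx_packets, stats->rx_bytes,
		    stats->tx_packets, stats->tx_bytes);
}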
6641
6642 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
6643 {
6644         struct netdev_queue *queue = dev_ingress_queue(dev);
6645
6646 #ifdef CONFIG_NET_CLS_ACT
6647         if (queue)
6648                 return queue;
6649         queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6650         if (!queue)
6651                 return NULL;
6652         netdev_init_one_queue(dev, queue, NULL);
6653         queue->qdisc = &noop_qdisc;
6654         queue->qdisc_sleeping = &noop_qdisc;
6655         rcu_assign_pointer(dev->ingress_queue, queue);
6656 #endif
6657         return queue;
6658 }
6659
6660 static const struct ethtool_ops default_ethtool_ops;
6661
6662 void netdev_set_default_ethtool_ops(struct net_device *dev,
6663                                     const struct ethtool_ops *ops)
6664 {
6665         if (dev->ethtool_ops == &default_ethtool_ops)
6666                 dev->ethtool_ops = ops;
6667 }
6668 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
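/*
 * Example: a sketch of a lower layer installing fallback ethtool_ops
 * without clobbering a driver that already set its own (the helper above
 * only replaces the default_ethtool_ops placeholder).  The ops table below
 * is illustrative and assumes <linux/ethtool.h> has been included.
 */
static const struct ethtool_ops eth_sketch_default_ops = {
	.get_link	= ethtool_op_get_link,
};

static void eth_sketch_attach(struct net_device *dev)
{
	netdev_set_default_ethtool_ops(dev, &eth_sketch_default_ops);
}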
6669
6670 void netdev_freemem(struct net_device *dev)
6671 {
6672         char *addr = (char *)dev - dev->padded;
6673
6674         kvfree(addr);
6675 }
6676
6677 /**
6678  *      alloc_netdev_mqs - allocate network device
6679  *      @sizeof_priv:           size of private data to allocate space for
6680  *      @name:                  device name format string
6681  *      @name_assign_type:      origin of device name
6682  *      @setup:                 callback to initialize device
6683  *      @txqs:                  the number of TX subqueues to allocate
6684  *      @rxqs:                  the number of RX subqueues to allocate
6685  *
6686  *      Allocates a struct net_device with private data area for driver use
6687  *      and performs basic initialization.  Also allocates subqueue structs
6688  *      for each queue on the device.
6689  */
6690 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6691                 unsigned char name_assign_type,
6692                 void (*setup)(struct net_device *),
6693                 unsigned int txqs, unsigned int rxqs)
6694 {
6695         struct net_device *dev;
6696         size_t alloc_size;
6697         struct net_device *p;
6698
6699         BUG_ON(strlen(name) >= sizeof(dev->name));
6700
6701         if (txqs < 1) {
6702                 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
6703                 return NULL;
6704         }
6705
6706 #ifdef CONFIG_SYSFS
6707         if (rxqs < 1) {
6708                 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
6709                 return NULL;
6710         }
6711 #endif
6712
6713         alloc_size = sizeof(struct net_device);
6714         if (sizeof_priv) {
6715                 /* ensure 32-byte alignment of private area */
6716                 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
6717                 alloc_size += sizeof_priv;
6718         }
6719         /* ensure 32-byte alignment of whole construct */
6720         alloc_size += NETDEV_ALIGN - 1;
6721
6722         p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6723         if (!p)
6724                 p = vzalloc(alloc_size);
6725         if (!p)
6726                 return NULL;
6727
6728         dev = PTR_ALIGN(p, NETDEV_ALIGN);
6729         dev->padded = (char *)dev - (char *)p;
6730
6731         dev->pcpu_refcnt = alloc_percpu(int);
6732         if (!dev->pcpu_refcnt)
6733                 goto free_dev;
6734
6735         if (dev_addr_init(dev))
6736                 goto free_pcpu;
6737
6738         dev_mc_init(dev);
6739         dev_uc_init(dev);
6740
6741         dev_net_set(dev, &init_net);
6742
6743         dev->gso_max_size = GSO_MAX_SIZE;
6744         dev->gso_max_segs = GSO_MAX_SEGS;
6745         dev->gso_min_segs = 0;
6746
6747         INIT_LIST_HEAD(&dev->napi_list);
6748         INIT_LIST_HEAD(&dev->unreg_list);
6749         INIT_LIST_HEAD(&dev->close_list);
6750         INIT_LIST_HEAD(&dev->link_watch_list);
6751         INIT_LIST_HEAD(&dev->adj_list.upper);
6752         INIT_LIST_HEAD(&dev->adj_list.lower);
6753         INIT_LIST_HEAD(&dev->all_adj_list.upper);
6754         INIT_LIST_HEAD(&dev->all_adj_list.lower);
6755         dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
6756         setup(dev);
6757
6758         dev->num_tx_queues = txqs;
6759         dev->real_num_tx_queues = txqs;
6760         if (netif_alloc_netdev_queues(dev))
6761                 goto free_all;
6762
6763 #ifdef CONFIG_SYSFS
6764         dev->num_rx_queues = rxqs;
6765         dev->real_num_rx_queues = rxqs;
6766         if (netif_alloc_rx_queues(dev))
6767                 goto free_all;
6768 #endif
6769
6770         strcpy(dev->name, name);
6771         dev->name_assign_type = name_assign_type;
6772         dev->group = INIT_NETDEV_GROUP;
6773         if (!dev->ethtool_ops)
6774                 dev->ethtool_ops = &default_ethtool_ops;
6775         return dev;
6776
6777 free_all:
6778         free_netdev(dev);
6779         return NULL;
6780
6781 free_pcpu:
6782         free_percpu(dev->pcpu_refcnt);
6783 free_dev:
6784         netdev_freemem(dev);
6785         return NULL;
6786 }
6787 EXPORT_SYMBOL(alloc_netdev_mqs);
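/*
 * Example: a sketch of allocating a multiqueue device with a private data
 * area.  struct alloc_sketch_priv, alloc_sketch_setup() and the "sketch%d"
 * name template are hypothetical; NET_NAME_UNKNOWN and ether_setup() are
 * assumed to be available in this tree (<linux/etherdevice.h>).
 */
struct alloc_sketch_priv {
	spinlock_t lock;		/* hypothetical per-device state */
};

static void alloc_sketch_setup(struct net_device *dev)
{
	ether_setup(dev);		/* Ethernet-style defaults */
}

static struct net_device *alloc_sketch(unsigned int txqs, unsigned int rxqs)
{
	struct net_device *dev;
	struct alloc_sketch_priv *priv;

	dev = alloc_netdev_mqs(sizeof(*priv), "sketch%d", NET_NAME_UNKNOWN,
			       alloc_sketch_setup, txqs, rxqs);
	if (!dev)
		return NULL;

	priv = netdev_priv(dev);
	spin_lock_init(&priv->lock);
	return dev;
}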
6788
6789 /**
6790  *      free_netdev - free network device
6791  *      @dev: device
6792  *
6793  *      This function does the last stage of destroying an allocated device
6794  *      interface. The reference to the device object is released.
6795  *      If this is the last reference then it will be freed.
6796  */
6797 void free_netdev(struct net_device *dev)
6798 {
6799         struct napi_struct *p, *n;
6800
6801         release_net(dev_net(dev));
6802
6803         netif_free_tx_queues(dev);
6804 #ifdef CONFIG_SYSFS
6805         kfree(dev->_rx);
6806 #endif
6807
6808         kfree(rcu_dereference_protected(dev->ingress_queue, 1));
6809
6810         /* Flush device addresses */
6811         dev_addr_flush(dev);
6812
6813         list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6814                 netif_napi_del(p);
6815
6816         free_percpu(dev->pcpu_refcnt);
6817         dev->pcpu_refcnt = NULL;
6818
6819         /*  Compatibility with error handling in drivers */
6820         if (dev->reg_state == NETREG_UNINITIALIZED) {
6821                 netdev_freemem(dev);
6822                 return;
6823         }
6824
6825         BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6826         dev->reg_state = NETREG_RELEASED;
6827
6828         /* will free via device release */
6829         put_device(&dev->dev);
6830 }
6831 EXPORT_SYMBOL(free_netdev);
6832
6833 /**
6834  *      synchronize_net -  Synchronize with packet receive processing
6835  *
6836  *      Wait for packets currently being received to be done.
6837  *      Does not block later packets from starting.
6838  */
6839 void synchronize_net(void)
6840 {
6841         might_sleep();
6842         if (rtnl_is_locked())
6843                 synchronize_rcu_expedited();
6844         else
6845                 synchronize_rcu();
6846 }
6847 EXPORT_SYMBOL(synchronize_net);
6848
6849 /**
6850  *      unregister_netdevice_queue - remove device from the kernel
6851  *      @dev: device
6852  *      @head: list
6853  *
6854  *      This function shuts down a device interface and removes it
6855  *      from the kernel tables.
6856  *      If head is not NULL, the device is queued to be unregistered later.
6857  *
6858  *      Callers must hold the rtnl semaphore.  You may want
6859  *      unregister_netdev() instead of this.
6860  */
6861
6862 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
6863 {
6864         ASSERT_RTNL();
6865
6866         if (head) {
6867                 list_move_tail(&dev->unreg_list, head);
6868         } else {
6869                 rollback_registered(dev);
6870                 /* Finish processing unregister after unlock */
6871                 net_set_todo(dev);
6872         }
6873 }
6874 EXPORT_SYMBOL(unregister_netdevice_queue);
6875
6876 /**
6877  *      unregister_netdevice_many - unregister many devices
6878  *      @head: list of devices
6879  *
6880  *  Note: As most callers use a stack-allocated list_head,
6881  *  we force a list_del() to make sure the stack won't be corrupted later.
6882  */
6883 void unregister_netdevice_many(struct list_head *head)
6884 {
6885         struct net_device *dev;
6886
6887         if (!list_empty(head)) {
6888                 rollback_registered_many(head);
6889                 list_for_each_entry(dev, head, unreg_list)
6890                         net_set_todo(dev);
6891                 list_del(head);
6892         }
6893 }
6894 EXPORT_SYMBOL(unregister_netdevice_many);
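/*
 * Example: a sketch of batched unregistration, mirroring
 * default_device_exit_batch() below: queue devices on a local list under
 * the rtnl and tear them down together so the expensive synchronization is
 * shared.  batch_sketch_mine() is a hypothetical ownership test.
 */
static bool batch_sketch_mine(const struct net_device *dev);

static void batch_sketch_cleanup(struct net *net)
{
	struct net_device *dev, *aux;
	LIST_HEAD(kill_list);

	rtnl_lock();
	for_each_netdev_safe(net, dev, aux)
		if (batch_sketch_mine(dev))
			unregister_netdevice_queue(dev, &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}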
6895
6896 /**
6897  *      unregister_netdev - remove device from the kernel
6898  *      @dev: device
6899  *
6900  *      This function shuts down a device interface and removes it
6901  *      from the kernel tables.
6902  *
6903  *      This is just a wrapper for unregister_netdevice that takes
6904  *      the rtnl semaphore.  In general you want to use this and not
6905  *      unregister_netdevice.
6906  */
6907 void unregister_netdev(struct net_device *dev)
6908 {
6909         rtnl_lock();
6910         unregister_netdevice(dev);
6911         rtnl_unlock();
6912 }
6913 EXPORT_SYMBOL(unregister_netdev);
6914
6915 /**
6916  *      dev_change_net_namespace - move device to a different network namespace
6917  *      @dev: device
6918  *      @net: network namespace
6919  *      @pat: If not NULL name pattern to try if the current device name
6920  *            is already taken in the destination network namespace.
6921  *
6922  *      This function shuts down a device interface and moves it
6923  *      to a new network namespace. On success 0 is returned, on
6924  *      a failure a negative errno code is returned.
6925  *
6926  *      Callers must hold the rtnl semaphore.
6927  */
6928
6929 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6930 {
6931         int err;
6932
6933         ASSERT_RTNL();
6934
6935         /* Don't allow namespace local devices to be moved. */
6936         err = -EINVAL;
6937         if (dev->features & NETIF_F_NETNS_LOCAL)
6938                 goto out;
6939
6940         /* Ensure the device has been registered */
6941         if (dev->reg_state != NETREG_REGISTERED)
6942                 goto out;
6943
6944         /* Get out if there is nothing to do */
6945         err = 0;
6946         if (net_eq(dev_net(dev), net))
6947                 goto out;
6948
6949         /* Pick the destination device name, and ensure
6950          * we can use it in the destination network namespace.
6951          */
6952         err = -EEXIST;
6953         if (__dev_get_by_name(net, dev->name)) {
6954                 /* We get here if we can't use the current device name */
6955                 if (!pat)
6956                         goto out;
6957                 if (dev_get_valid_name(net, dev, pat) < 0)
6958                         goto out;
6959         }
6960
6961         /*
6962          * And now a mini version of register_netdevice and unregister_netdevice.
6963          */
6964
6965         /* If device is running close it first. */
6966         dev_close(dev);
6967
6968         /* And unlink it from device chain */
6969         err = -ENODEV;
6970         unlist_netdevice(dev);
6971
6972         synchronize_net();
6973
6974         /* Shutdown queueing discipline. */
6975         dev_shutdown(dev);
6976
6977         /* Notify protocols that we are about to destroy
6978            this device. They should clean up all of their state.
6979
6980            Note that dev->reg_state stays at NETREG_REGISTERED.
6981            This is wanted because this way 8021q and macvlan know
6982            the device is just moving and can keep their slaves up.
6983         */
6984         call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6985         rcu_barrier();
6986         call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6987         rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
6988
6989         /*
6990          *      Flush the unicast and multicast chains
6991          */
6992         dev_uc_flush(dev);
6993         dev_mc_flush(dev);
6994
6995         /* Send a netdev-removed uevent to the old namespace */
6996         kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
6997         netdev_adjacent_del_links(dev);
6998
6999         /* Actually switch the network namespace */
7000         dev_net_set(dev, net);
7001
7002         /* If there is an ifindex conflict assign a new one */
7003         if (__dev_get_by_index(net, dev->ifindex)) {
7004                 int iflink = (dev->iflink == dev->ifindex);
7005                 dev->ifindex = dev_new_index(net);
7006                 if (iflink)
7007                         dev->iflink = dev->ifindex;
7008         }
7009
7010         /* Send a netdev-add uevent to the new namespace */
7011         kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
7012         netdev_adjacent_add_links(dev);
7013
7014         /* Fixup kobjects */
7015         err = device_rename(&dev->dev, dev->name);
7016         WARN_ON(err);
7017
7018         /* Add the device back in the hashes */
7019         list_netdevice(dev);
7020
7021         /* Notify protocols that a new device appeared. */
7022         call_netdevice_notifiers(NETDEV_REGISTER, dev);
7023
7024         /*
7025          *      Prevent userspace races by waiting until the network
7026          *      device is fully set up before sending notifications.
7027          */
7028         rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
7029
7030         synchronize_net();
7031         err = 0;
7032 out:
7033         return err;
7034 }
7035 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
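/*
 * Example: a sketch of moving a device into another namespace from code
 * that does not already hold the rtnl.  The caller is assumed to have
 * looked up and to hold a reference on @net; "eth%d" is only the fallback
 * pattern used if the current name is taken in the target namespace.
 */
static int move_sketch(struct net_device *dev, struct net *net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "eth%d");
	rtnl_unlock();
	return err;
}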
7036
7037 static int dev_cpu_callback(struct notifier_block *nfb,
7038                             unsigned long action,
7039                             void *ocpu)
7040 {
7041         struct sk_buff **list_skb;
7042         struct sk_buff *skb;
7043         unsigned int cpu, oldcpu = (unsigned long)ocpu;
7044         struct softnet_data *sd, *oldsd;
7045
7046         if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
7047                 return NOTIFY_OK;
7048
7049         local_irq_disable();
7050         cpu = smp_processor_id();
7051         sd = &per_cpu(softnet_data, cpu);
7052         oldsd = &per_cpu(softnet_data, oldcpu);
7053
7054         /* Find end of our completion_queue. */
7055         list_skb = &sd->completion_queue;
7056         while (*list_skb)
7057                 list_skb = &(*list_skb)->next;
7058         /* Append completion queue from offline CPU. */
7059         *list_skb = oldsd->completion_queue;
7060         oldsd->completion_queue = NULL;
7061
7062         /* Append output queue from offline CPU. */
7063         if (oldsd->output_queue) {
7064                 *sd->output_queue_tailp = oldsd->output_queue;
7065                 sd->output_queue_tailp = oldsd->output_queue_tailp;
7066                 oldsd->output_queue = NULL;
7067                 oldsd->output_queue_tailp = &oldsd->output_queue;
7068         }
7069         /* Append NAPI poll list from offline CPU. */
7070         if (!list_empty(&oldsd->poll_list)) {
7071                 list_splice_init(&oldsd->poll_list, &sd->poll_list);
7072                 raise_softirq_irqoff(NET_RX_SOFTIRQ);
7073         }
7074
7075         raise_softirq_irqoff(NET_TX_SOFTIRQ);
7076         local_irq_enable();
7077
7078         /* Process offline CPU's input_pkt_queue */
7079         while ((skb = __skb_dequeue(&oldsd->process_queue))) {
7080                 netif_rx_internal(skb);
7081                 input_queue_head_incr(oldsd);
7082         }
7083         while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
7084                 netif_rx_internal(skb);
7085                 input_queue_head_incr(oldsd);
7086         }
7087
7088         return NOTIFY_OK;
7089 }
7090
7091
7092 /**
7093  *      netdev_increment_features - increment feature set by one
7094  *      @all: current feature set
7095  *      @one: new feature set
7096  *      @mask: mask feature set
7097  *
7098  *      Computes a new feature set after adding a device with feature set
7099  *      @one to the master device with current feature set @all.  Will not
7100  *      enable anything that is off in @mask. Returns the new feature set.
7101  */
7102 netdev_features_t netdev_increment_features(netdev_features_t all,
7103         netdev_features_t one, netdev_features_t mask)
7104 {
7105         if (mask & NETIF_F_GEN_CSUM)
7106                 mask |= NETIF_F_ALL_CSUM;
7107         mask |= NETIF_F_VLAN_CHALLENGED;
7108
7109         all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
7110         all &= one | ~NETIF_F_ALL_FOR_ALL;
7111
7112         /* If one device supports hw checksumming, set for all. */
7113         if (all & NETIF_F_GEN_CSUM)
7114                 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
7115
7116         return all;
7117 }
7118 EXPORT_SYMBOL(netdev_increment_features);
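/*
 * Example: a sketch of a master device (bond/bridge-like) folding the
 * features of its lower devices together with the helper above.  The slave
 * bookkeeping, the list walk and MASTER_SKETCH_FEATURES are hypothetical;
 * real users pass their own feature masks.
 */
#define MASTER_SKETCH_FEATURES (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA)

struct master_sketch_slave {
	struct list_head list;
	struct net_device *dev;
};

static netdev_features_t master_sketch_features(struct list_head *slaves)
{
	netdev_features_t features = MASTER_SKETCH_FEATURES;
	struct master_sketch_slave *s;

	list_for_each_entry(s, slaves, list)
		features = netdev_increment_features(features,
						     s->dev->features,
						     MASTER_SKETCH_FEATURES);
	return features;
}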
7119
7120 static struct hlist_head * __net_init netdev_create_hash(void)
7121 {
7122         int i;
7123         struct hlist_head *hash;
7124
7125         hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
7126         if (hash != NULL)
7127                 for (i = 0; i < NETDEV_HASHENTRIES; i++)
7128                         INIT_HLIST_HEAD(&hash[i]);
7129
7130         return hash;
7131 }
7132
7133 /* Initialize per network namespace state */
7134 static int __net_init netdev_init(struct net *net)
7135 {
7136         if (net != &init_net)
7137                 INIT_LIST_HEAD(&net->dev_base_head);
7138
7139         net->dev_name_head = netdev_create_hash();
7140         if (net->dev_name_head == NULL)
7141                 goto err_name;
7142
7143         net->dev_index_head = netdev_create_hash();
7144         if (net->dev_index_head == NULL)
7145                 goto err_idx;
7146
7147         return 0;
7148
7149 err_idx:
7150         kfree(net->dev_name_head);
7151 err_name:
7152         return -ENOMEM;
7153 }
7154
7155 /**
7156  *      netdev_drivername - network driver for the device
7157  *      @dev: network device
7158  *
7159  *      Determine network driver for device.
7160  */
7161 const char *netdev_drivername(const struct net_device *dev)
7162 {
7163         const struct device_driver *driver;
7164         const struct device *parent;
7165         const char *empty = "";
7166
7167         parent = dev->dev.parent;
7168         if (!parent)
7169                 return empty;
7170
7171         driver = parent->driver;
7172         if (driver && driver->name)
7173                 return driver->name;
7174         return empty;
7175 }
7176
7177 static void __netdev_printk(const char *level, const struct net_device *dev,
7178                             struct va_format *vaf)
7179 {
7180         if (dev && dev->dev.parent) {
7181                 dev_printk_emit(level[1] - '0',
7182                                 dev->dev.parent,
7183                                 "%s %s %s%s: %pV",
7184                                 dev_driver_string(dev->dev.parent),
7185                                 dev_name(dev->dev.parent),
7186                                 netdev_name(dev), netdev_reg_state(dev),
7187                                 vaf);
7188         } else if (dev) {
7189                 printk("%s%s%s: %pV",
7190                        level, netdev_name(dev), netdev_reg_state(dev), vaf);
7191         } else {
7192                 printk("%s(NULL net_device): %pV", level, vaf);
7193         }
7194 }
7195
7196 void netdev_printk(const char *level, const struct net_device *dev,
7197                    const char *format, ...)
7198 {
7199         struct va_format vaf;
7200         va_list args;
7201
7202         va_start(args, format);
7203
7204         vaf.fmt = format;
7205         vaf.va = &args;
7206
7207         __netdev_printk(level, dev, &vaf);
7208
7209         va_end(args);
7210 }
7211 EXPORT_SYMBOL(netdev_printk);
7212
7213 #define define_netdev_printk_level(func, level)                 \
7214 void func(const struct net_device *dev, const char *fmt, ...)   \
7215 {                                                               \
7216         struct va_format vaf;                                   \
7217         va_list args;                                           \
7218                                                                 \
7219         va_start(args, fmt);                                    \
7220                                                                 \
7221         vaf.fmt = fmt;                                          \
7222         vaf.va = &args;                                         \
7223                                                                 \
7224         __netdev_printk(level, dev, &vaf);                      \
7225                                                                 \
7226         va_end(args);                                           \
7227 }                                                               \
7228 EXPORT_SYMBOL(func);
7229
7230 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
7231 define_netdev_printk_level(netdev_alert, KERN_ALERT);
7232 define_netdev_printk_level(netdev_crit, KERN_CRIT);
7233 define_netdev_printk_level(netdev_err, KERN_ERR);
7234 define_netdev_printk_level(netdev_warn, KERN_WARNING);
7235 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
7236 define_netdev_printk_level(netdev_info, KERN_INFO);
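/*
 * Example: the per-level helpers generated above are used like printk(),
 * but prefix the message with the driver, bus and interface names.
 * log_sketch_open() is an illustrative caller.
 */
static int log_sketch_open(struct net_device *dev)
{
	netdev_info(dev, "opening interface\n");
	if (!netif_carrier_ok(dev))
		netdev_warn(dev, "no carrier yet, waiting for link\n");
	return 0;
}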
7237
7238 static void __net_exit netdev_exit(struct net *net)
7239 {
7240         kfree(net->dev_name_head);
7241         kfree(net->dev_index_head);
7242 }
7243
7244 static struct pernet_operations __net_initdata netdev_net_ops = {
7245         .init = netdev_init,
7246         .exit = netdev_exit,
7247 };
7248
7249 static void __net_exit default_device_exit(struct net *net)
7250 {
7251         struct net_device *dev, *aux;
7252         /*
7253          * Push all migratable network devices back to the
7254          * initial network namespace
7255          */
7256         rtnl_lock();
7257         for_each_netdev_safe(net, dev, aux) {
7258                 int err;
7259                 char fb_name[IFNAMSIZ];
7260
7261                 /* Ignore unmovable devices (e.g. loopback) */
7262                 if (dev->features & NETIF_F_NETNS_LOCAL)
7263                         continue;
7264
7265                 /* Leave virtual devices for the generic cleanup */
7266                 if (dev->rtnl_link_ops)
7267                         continue;
7268
7269                 /* Push remaining network devices to init_net */
7270                 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
7271                 err = dev_change_net_namespace(dev, &init_net, fb_name);
7272                 if (err) {
7273                         pr_emerg("%s: failed to move %s to init_net: %d\n",
7274                                  __func__, dev->name, err);
7275                         BUG();
7276                 }
7277         }
7278         rtnl_unlock();
7279 }
7280
7281 static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
7282 {
7283         /* Return with the rtnl_lock held when there are no network
7284          * devices unregistering in any network namespace in net_list.
7285          */
7286         struct net *net;
7287         bool unregistering;
7288         DEFINE_WAIT_FUNC(wait, woken_wake_function);
7289
7290         add_wait_queue(&netdev_unregistering_wq, &wait);
7291         for (;;) {
7292                 unregistering = false;
7293                 rtnl_lock();
7294                 list_for_each_entry(net, net_list, exit_list) {
7295                         if (net->dev_unreg_count > 0) {
7296                                 unregistering = true;
7297                                 break;
7298                         }
7299                 }
7300                 if (!unregistering)
7301                         break;
7302                 __rtnl_unlock();
7303
7304                 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
7305         }
7306         remove_wait_queue(&netdev_unregistering_wq, &wait);
7307 }
7308
7309 static void __net_exit default_device_exit_batch(struct list_head *net_list)
7310 {
7311         /* At exit all network devices must be removed from a network
7312          * namespace.  Do this in the reverse order of registration.
7313          * Do this across as many network namespaces as possible to
7314          * improve batching efficiency.
7315          */
7316         struct net_device *dev;
7317         struct net *net;
7318         LIST_HEAD(dev_kill_list);
7319
7320         /* To prevent network device cleanup code from dereferencing
7321          * loopback devices or network devices that have been freed,
7322          * wait here for all pending unregistrations to complete
7323          * before unregistering the loopback device and allowing the
7324          * network namespace to be freed.
7325          *
7326          * The netdev todo list containing all network devices
7327          * unregistrations that happen in default_device_exit_batch
7328          * will run in the rtnl_unlock() at the end of
7329          * default_device_exit_batch.
7330          */
7331         rtnl_lock_unregistering(net_list);
7332         list_for_each_entry(net, net_list, exit_list) {
7333                 for_each_netdev_reverse(net, dev) {
7334                         if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
7335                                 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
7336                         else
7337                                 unregister_netdevice_queue(dev, &dev_kill_list);
7338                 }
7339         }
7340         unregister_netdevice_many(&dev_kill_list);
7341         rtnl_unlock();
7342 }
7343
7344 static struct pernet_operations __net_initdata default_device_ops = {
7345         .exit = default_device_exit,
7346         .exit_batch = default_device_exit_batch,
7347 };
7348
7349 /*
7350  *      Initialize the DEV module. At boot time this walks the device list and
7351  *      unhooks any devices that fail to initialise (normally hardware not
7352  *      present) and leaves us with a valid list of present and active devices.
7353  *
7354  */
7355
7356 /*
7357  *       This is called single threaded during boot, so no need
7358  *       to take the rtnl semaphore.
7359  */
7360 static int __init net_dev_init(void)
7361 {
7362         int i, rc = -ENOMEM;
7363
7364         BUG_ON(!dev_boot_phase);
7365
7366         if (dev_proc_init())
7367                 goto out;
7368
7369         if (netdev_kobject_init())
7370                 goto out;
7371
7372         INIT_LIST_HEAD(&ptype_all);
7373         for (i = 0; i < PTYPE_HASH_SIZE; i++)
7374                 INIT_LIST_HEAD(&ptype_base[i]);
7375
7376         INIT_LIST_HEAD(&offload_base);
7377
7378         if (register_pernet_subsys(&netdev_net_ops))
7379                 goto out;
7380
7381         /*
7382          *      Initialise the packet receive queues.
7383          */
7384
7385         for_each_possible_cpu(i) {
7386                 struct softnet_data *sd = &per_cpu(softnet_data, i);
7387
7388                 skb_queue_head_init(&sd->input_pkt_queue);
7389                 skb_queue_head_init(&sd->process_queue);
7390                 INIT_LIST_HEAD(&sd->poll_list);
7391                 sd->output_queue_tailp = &sd->output_queue;
7392 #ifdef CONFIG_RPS
7393                 sd->csd.func = rps_trigger_softirq;
7394                 sd->csd.info = sd;
7395                 sd->cpu = i;
7396 #endif
7397
7398                 sd->backlog.poll = process_backlog;
7399                 sd->backlog.weight = weight_p;
7400         }
7401
7402         dev_boot_phase = 0;
7403
7404         /* The loopback device is special: if any other network device
7405          * is present in a network namespace, the loopback device must
7406          * be present. Since we now dynamically allocate and free the
7407          * loopback device, ensure this invariant is maintained by
7408          * keeping the loopback device as the first device on the
7409          * list of network devices, ensuring the loopback device
7410          * is the first device that appears and the last network device
7411          * that disappears.
7412          */
7413         if (register_pernet_device(&loopback_net_ops))
7414                 goto out;
7415
7416         if (register_pernet_device(&default_device_ops))
7417                 goto out;
7418
7419         open_softirq(NET_TX_SOFTIRQ, net_tx_action);
7420         open_softirq(NET_RX_SOFTIRQ, net_rx_action);
7421
7422         hotcpu_notifier(dev_cpu_callback, 0);
7423         dst_init();
7424         rc = 0;
7425 out:
7426         return rc;
7427 }
7428
7429 subsys_initcall(net_dev_init);