net/core/net-sysfs.c
1 /*
2  * net-sysfs.c - network device class and attributes
3  *
4  * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
5  *
6  *      This program is free software; you can redistribute it and/or
7  *      modify it under the terms of the GNU General Public License
8  *      as published by the Free Software Foundation; either version
9  *      2 of the License, or (at your option) any later version.
10  */
11
12 #include <linux/capability.h>
13 #include <linux/kernel.h>
14 #include <linux/netdevice.h>
15 #include <linux/if_arp.h>
16 #include <linux/slab.h>
17 #include <linux/nsproxy.h>
18 #include <net/sock.h>
19 #include <net/net_namespace.h>
20 #include <linux/rtnetlink.h>
21 #include <linux/wireless.h>
22 #include <linux/vmalloc.h>
23 #include <linux/export.h>
24 #include <net/wext.h>
25
26 #include "net-sysfs.h"
27
28 #ifdef CONFIG_SYSFS
29 static const char fmt_hex[] = "%#x\n";
30 static const char fmt_long_hex[] = "%#lx\n";
31 static const char fmt_dec[] = "%d\n";
32 static const char fmt_udec[] = "%u\n";
33 static const char fmt_ulong[] = "%lu\n";
34 static const char fmt_u64[] = "%llu\n";
35
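/*
 * A device is "alive" while it is either not yet registered
 * (NETREG_UNINITIALIZED) or fully registered (NETREG_REGISTERED);
 * once unregistration starts, the sysfs handlers below stop
 * reporting its fields and return -EINVAL instead.
 */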
36 static inline int dev_isalive(const struct net_device *dev)
37 {
38         return dev->reg_state <= NETREG_REGISTERED;
39 }
40
41 /* use same locking rules as GIF* ioctls */
42 static ssize_t netdev_show(const struct device *dev,
43                            struct device_attribute *attr, char *buf,
44                            ssize_t (*format)(const struct net_device *, char *))
45 {
46         struct net_device *net = to_net_dev(dev);
47         ssize_t ret = -EINVAL;
48
49         read_lock(&dev_base_lock);
50         if (dev_isalive(net))
51                 ret = (*format)(net, buf);
52         read_unlock(&dev_base_lock);
53
54         return ret;
55 }
56
57 /* generate a show function for simple field */
58 #define NETDEVICE_SHOW(field, format_string)                            \
59 static ssize_t format_##field(const struct net_device *net, char *buf)  \
60 {                                                                       \
61         return sprintf(buf, format_string, net->field);                 \
62 }                                                                       \
63 static ssize_t show_##field(struct device *dev,                         \
64                             struct device_attribute *attr, char *buf)   \
65 {                                                                       \
66         return netdev_show(dev, attr, buf, format_##field);             \
67 }
68
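/*
 * Example: NETDEVICE_SHOW(mtu, fmt_dec) expands to format_mtu() and
 * show_mtu(); show_mtu() is what the __ATTR(mtu, ...) entry in
 * net_class_attributes further below points at.
 */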
69
70 /* use same locking and permission rules as SIF* ioctls */
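/*
 * If the RTNL is contended, restart_syscall() makes the write(2) return
 * and be restarted from userspace instead of sleeping on the lock; this
 * avoids deadlocking against paths that hold the RTNL while removing
 * these sysfs files during device unregistration.
 */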
71 static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
72                             const char *buf, size_t len,
73                             int (*set)(struct net_device *, unsigned long))
74 {
75         struct net_device *net = to_net_dev(dev);
76         char *endp;
77         unsigned long new;
78         int ret = -EINVAL;
79
80         if (!capable(CAP_NET_ADMIN))
81                 return -EPERM;
82
83         new = simple_strtoul(buf, &endp, 0);
84         if (endp == buf)
85                 goto err;
86
87         if (!rtnl_trylock())
88                 return restart_syscall();
89
90         if (dev_isalive(net)) {
91                 if ((ret = (*set)(net, new)) == 0)
92                         ret = len;
93         }
94         rtnl_unlock();
95  err:
96         return ret;
97 }
98
99 NETDEVICE_SHOW(dev_id, fmt_hex);
100 NETDEVICE_SHOW(addr_assign_type, fmt_dec);
101 NETDEVICE_SHOW(addr_len, fmt_dec);
102 NETDEVICE_SHOW(iflink, fmt_dec);
103 NETDEVICE_SHOW(ifindex, fmt_dec);
104 NETDEVICE_SHOW(type, fmt_dec);
105 NETDEVICE_SHOW(link_mode, fmt_dec);
106
107 /* use same locking rules as the GIFHWADDR ioctl */
108 static ssize_t show_address(struct device *dev, struct device_attribute *attr,
109                             char *buf)
110 {
111         struct net_device *net = to_net_dev(dev);
112         ssize_t ret = -EINVAL;
113
114         read_lock(&dev_base_lock);
115         if (dev_isalive(net))
116                 ret = sysfs_format_mac(buf, net->dev_addr, net->addr_len);
117         read_unlock(&dev_base_lock);
118         return ret;
119 }
120
121 static ssize_t show_broadcast(struct device *dev,
122                             struct device_attribute *attr, char *buf)
123 {
124         struct net_device *net = to_net_dev(dev);
125         if (dev_isalive(net))
126                 return sysfs_format_mac(buf, net->broadcast, net->addr_len);
127         return -EINVAL;
128 }
129
130 static ssize_t show_carrier(struct device *dev,
131                             struct device_attribute *attr, char *buf)
132 {
133         struct net_device *netdev = to_net_dev(dev);
134         if (netif_running(netdev)) {
135                 return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
136         }
137         return -EINVAL;
138 }
139
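/*
 * speed and duplex are obtained from the driver via
 * __ethtool_get_settings(), which expects the RTNL to be held, so these
 * handlers use rtnl_trylock()/restart_syscall() rather than
 * dev_base_lock.
 */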
140 static ssize_t show_speed(struct device *dev,
141                           struct device_attribute *attr, char *buf)
142 {
143         struct net_device *netdev = to_net_dev(dev);
144         int ret = -EINVAL;
145
146         if (!rtnl_trylock())
147                 return restart_syscall();
148
149         if (netif_running(netdev)) {
150                 struct ethtool_cmd cmd;
151                 if (!__ethtool_get_settings(netdev, &cmd))
152                         ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
153         }
154         rtnl_unlock();
155         return ret;
156 }
157
158 static ssize_t show_duplex(struct device *dev,
159                            struct device_attribute *attr, char *buf)
160 {
161         struct net_device *netdev = to_net_dev(dev);
162         int ret = -EINVAL;
163
164         if (!rtnl_trylock())
165                 return restart_syscall();
166
167         if (netif_running(netdev)) {
168                 struct ethtool_cmd cmd;
169                 if (!__ethtool_get_settings(netdev, &cmd))
170                         ret = sprintf(buf, "%s\n",
171                                       cmd.duplex ? "full" : "half");
172         }
173         rtnl_unlock();
174         return ret;
175 }
176
177 static ssize_t show_dormant(struct device *dev,
178                             struct device_attribute *attr, char *buf)
179 {
180         struct net_device *netdev = to_net_dev(dev);
181
182         if (netif_running(netdev))
183                 return sprintf(buf, fmt_dec, !!netif_dormant(netdev));
184
185         return -EINVAL;
186 }
187
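/*
 * RFC 2863 operational state strings, indexed by the IF_OPER_* values
 * from <linux/if.h>; the order here must stay in sync with that enum.
 */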
188 static const char *const operstates[] = {
189         "unknown",
190         "notpresent", /* currently unused */
191         "down",
192         "lowerlayerdown",
193         "testing", /* currently unused */
194         "dormant",
195         "up"
196 };
197
198 static ssize_t show_operstate(struct device *dev,
199                               struct device_attribute *attr, char *buf)
200 {
201         const struct net_device *netdev = to_net_dev(dev);
202         unsigned char operstate;
203
204         read_lock(&dev_base_lock);
205         operstate = netdev->operstate;
206         if (!netif_running(netdev))
207                 operstate = IF_OPER_DOWN;
208         read_unlock(&dev_base_lock);
209
210         if (operstate >= ARRAY_SIZE(operstates))
211                 return -EINVAL; /* should not happen */
212
213         return sprintf(buf, "%s\n", operstates[operstate]);
214 }
215
216 /* read-write attributes */
217 NETDEVICE_SHOW(mtu, fmt_dec);
218
219 static int change_mtu(struct net_device *net, unsigned long new_mtu)
220 {
221         return dev_set_mtu(net, (int) new_mtu);
222 }
223
224 static ssize_t store_mtu(struct device *dev, struct device_attribute *attr,
225                          const char *buf, size_t len)
226 {
227         return netdev_store(dev, attr, buf, len, change_mtu);
228 }
229
230 NETDEVICE_SHOW(flags, fmt_hex);
231
232 static int change_flags(struct net_device *net, unsigned long new_flags)
233 {
234         return dev_change_flags(net, (unsigned) new_flags);
235 }
236
237 static ssize_t store_flags(struct device *dev, struct device_attribute *attr,
238                            const char *buf, size_t len)
239 {
240         return netdev_store(dev, attr, buf, len, change_flags);
241 }
242
243 NETDEVICE_SHOW(tx_queue_len, fmt_ulong);
244
245 static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
246 {
247         net->tx_queue_len = new_len;
248         return 0;
249 }
250
251 static ssize_t store_tx_queue_len(struct device *dev,
252                                   struct device_attribute *attr,
253                                   const char *buf, size_t len)
254 {
255         return netdev_store(dev, attr, buf, len, change_tx_queue_len);
256 }
257
258 static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
259                              const char *buf, size_t len)
260 {
261         struct net_device *netdev = to_net_dev(dev);
262         size_t count = len;
263         ssize_t ret;
264
265         if (!capable(CAP_NET_ADMIN))
266                 return -EPERM;
267
268         /* ignore trailing newline */
269         if (len > 0 && buf[len - 1] == '\n')
270                 --count;
271
272         if (!rtnl_trylock())
273                 return restart_syscall();
274         ret = dev_set_alias(netdev, buf, count);
275         rtnl_unlock();
276
277         return ret < 0 ? ret : len;
278 }
279
280 static ssize_t show_ifalias(struct device *dev,
281                             struct device_attribute *attr, char *buf)
282 {
283         const struct net_device *netdev = to_net_dev(dev);
284         ssize_t ret = 0;
285
286         if (!rtnl_trylock())
287                 return restart_syscall();
288         if (netdev->ifalias)
289                 ret = sprintf(buf, "%s\n", netdev->ifalias);
290         rtnl_unlock();
291         return ret;
292 }
293
294 NETDEVICE_SHOW(group, fmt_dec);
295
296 static int change_group(struct net_device *net, unsigned long new_group)
297 {
298         dev_set_group(net, (int) new_group);
299         return 0;
300 }
301
302 static ssize_t store_group(struct device *dev, struct device_attribute *attr,
303                          const char *buf, size_t len)
304 {
305         return netdev_store(dev, attr, buf, len, change_group);
306 }
307
308 static struct device_attribute net_class_attributes[] = {
309         __ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL),
310         __ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
311         __ATTR(dev_id, S_IRUGO, show_dev_id, NULL),
312         __ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias),
313         __ATTR(iflink, S_IRUGO, show_iflink, NULL),
314         __ATTR(ifindex, S_IRUGO, show_ifindex, NULL),
315         __ATTR(type, S_IRUGO, show_type, NULL),
316         __ATTR(link_mode, S_IRUGO, show_link_mode, NULL),
317         __ATTR(address, S_IRUGO, show_address, NULL),
318         __ATTR(broadcast, S_IRUGO, show_broadcast, NULL),
319         __ATTR(carrier, S_IRUGO, show_carrier, NULL),
320         __ATTR(speed, S_IRUGO, show_speed, NULL),
321         __ATTR(duplex, S_IRUGO, show_duplex, NULL),
322         __ATTR(dormant, S_IRUGO, show_dormant, NULL),
323         __ATTR(operstate, S_IRUGO, show_operstate, NULL),
324         __ATTR(mtu, S_IRUGO | S_IWUSR, show_mtu, store_mtu),
325         __ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags),
326         __ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
327                store_tx_queue_len),
328         __ATTR(netdev_group, S_IRUGO | S_IWUSR, show_group, store_group),
329         {}
330 };
331
332 /* Show a given attribute in the statistics group */
333 static ssize_t netstat_show(const struct device *d,
334                             struct device_attribute *attr, char *buf,
335                             unsigned long offset)
336 {
337         struct net_device *dev = to_net_dev(d);
338         ssize_t ret = -EINVAL;
339
340         WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
341                         offset % sizeof(u64) != 0);
342
343         read_lock(&dev_base_lock);
344         if (dev_isalive(dev)) {
345                 struct rtnl_link_stats64 temp;
346                 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
347
348                 ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
349         }
350         read_unlock(&dev_base_lock);
351         return ret;
352 }
353
354 /* generate a read-only statistics attribute */
355 #define NETSTAT_ENTRY(name)                                             \
356 static ssize_t show_##name(struct device *d,                            \
357                            struct device_attribute *attr, char *buf)    \
358 {                                                                       \
359         return netstat_show(d, attr, buf,                               \
360                             offsetof(struct rtnl_link_stats64, name));  \
361 }                                                                       \
362 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
363
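/*
 * Example: NETSTAT_ENTRY(rx_packets) defines show_rx_packets() and
 * dev_attr_rx_packets; the attribute name must match a u64 field of
 * struct rtnl_link_stats64, since netstat_show() reads the value at
 * offsetof(struct rtnl_link_stats64, rx_packets).
 */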
364 NETSTAT_ENTRY(rx_packets);
365 NETSTAT_ENTRY(tx_packets);
366 NETSTAT_ENTRY(rx_bytes);
367 NETSTAT_ENTRY(tx_bytes);
368 NETSTAT_ENTRY(rx_errors);
369 NETSTAT_ENTRY(tx_errors);
370 NETSTAT_ENTRY(rx_dropped);
371 NETSTAT_ENTRY(tx_dropped);
372 NETSTAT_ENTRY(multicast);
373 NETSTAT_ENTRY(collisions);
374 NETSTAT_ENTRY(rx_length_errors);
375 NETSTAT_ENTRY(rx_over_errors);
376 NETSTAT_ENTRY(rx_crc_errors);
377 NETSTAT_ENTRY(rx_frame_errors);
378 NETSTAT_ENTRY(rx_fifo_errors);
379 NETSTAT_ENTRY(rx_missed_errors);
380 NETSTAT_ENTRY(tx_aborted_errors);
381 NETSTAT_ENTRY(tx_carrier_errors);
382 NETSTAT_ENTRY(tx_fifo_errors);
383 NETSTAT_ENTRY(tx_heartbeat_errors);
384 NETSTAT_ENTRY(tx_window_errors);
385 NETSTAT_ENTRY(rx_compressed);
386 NETSTAT_ENTRY(tx_compressed);
387
388 static struct attribute *netstat_attrs[] = {
389         &dev_attr_rx_packets.attr,
390         &dev_attr_tx_packets.attr,
391         &dev_attr_rx_bytes.attr,
392         &dev_attr_tx_bytes.attr,
393         &dev_attr_rx_errors.attr,
394         &dev_attr_tx_errors.attr,
395         &dev_attr_rx_dropped.attr,
396         &dev_attr_tx_dropped.attr,
397         &dev_attr_multicast.attr,
398         &dev_attr_collisions.attr,
399         &dev_attr_rx_length_errors.attr,
400         &dev_attr_rx_over_errors.attr,
401         &dev_attr_rx_crc_errors.attr,
402         &dev_attr_rx_frame_errors.attr,
403         &dev_attr_rx_fifo_errors.attr,
404         &dev_attr_rx_missed_errors.attr,
405         &dev_attr_tx_aborted_errors.attr,
406         &dev_attr_tx_carrier_errors.attr,
407         &dev_attr_tx_fifo_errors.attr,
408         &dev_attr_tx_heartbeat_errors.attr,
409         &dev_attr_tx_window_errors.attr,
410         &dev_attr_rx_compressed.attr,
411         &dev_attr_tx_compressed.attr,
412         NULL
413 };
414
415
416 static struct attribute_group netstat_group = {
417         .name  = "statistics",
418         .attrs  = netstat_attrs,
419 };
420
421 #ifdef CONFIG_WIRELESS_EXT_SYSFS
422 /* helper function that does all the locking etc for wireless stats */
423 static ssize_t wireless_show(struct device *d, char *buf,
424                              ssize_t (*format)(const struct iw_statistics *,
425                                                char *))
426 {
427         struct net_device *dev = to_net_dev(d);
428         const struct iw_statistics *iw;
429         ssize_t ret = -EINVAL;
430
431         if (!rtnl_trylock())
432                 return restart_syscall();
433         if (dev_isalive(dev)) {
434                 iw = get_wireless_stats(dev);
435                 if (iw)
436                         ret = (*format)(iw, buf);
437         }
438         rtnl_unlock();
439
440         return ret;
441 }
442
443 /* show function template for wireless fields */
444 #define WIRELESS_SHOW(name, field, format_string)                       \
445 static ssize_t format_iw_##name(const struct iw_statistics *iw, char *buf) \
446 {                                                                       \
447         return sprintf(buf, format_string, iw->field);                  \
448 }                                                                       \
449 static ssize_t show_iw_##name(struct device *d,                         \
450                               struct device_attribute *attr, char *buf) \
451 {                                                                       \
452         return wireless_show(d, buf, format_iw_##name);                 \
453 }                                                                       \
454 static DEVICE_ATTR(name, S_IRUGO, show_iw_##name, NULL)
455
456 WIRELESS_SHOW(status, status, fmt_hex);
457 WIRELESS_SHOW(link, qual.qual, fmt_dec);
458 WIRELESS_SHOW(level, qual.level, fmt_dec);
459 WIRELESS_SHOW(noise, qual.noise, fmt_dec);
460 WIRELESS_SHOW(nwid, discard.nwid, fmt_dec);
461 WIRELESS_SHOW(crypt, discard.code, fmt_dec);
462 WIRELESS_SHOW(fragment, discard.fragment, fmt_dec);
463 WIRELESS_SHOW(misc, discard.misc, fmt_dec);
464 WIRELESS_SHOW(retries, discard.retries, fmt_dec);
465 WIRELESS_SHOW(beacon, miss.beacon, fmt_dec);
466
467 static struct attribute *wireless_attrs[] = {
468         &dev_attr_status.attr,
469         &dev_attr_link.attr,
470         &dev_attr_level.attr,
471         &dev_attr_noise.attr,
472         &dev_attr_nwid.attr,
473         &dev_attr_crypt.attr,
474         &dev_attr_fragment.attr,
475         &dev_attr_retries.attr,
476         &dev_attr_misc.attr,
477         &dev_attr_beacon.attr,
478         NULL
479 };
480
481 static struct attribute_group wireless_group = {
482         .name = "wireless",
483         .attrs = wireless_attrs,
484 };
485 #endif
486 #endif /* CONFIG_SYSFS */
487
488 #ifdef CONFIG_RPS
489 /*
490  * RX queue sysfs structures and functions.
491  */
492 struct rx_queue_attribute {
493         struct attribute attr;
494         ssize_t (*show)(struct netdev_rx_queue *queue,
495             struct rx_queue_attribute *attr, char *buf);
496         ssize_t (*store)(struct netdev_rx_queue *queue,
497             struct rx_queue_attribute *attr, const char *buf, size_t len);
498 };
499 #define to_rx_queue_attr(_attr) container_of(_attr,             \
500     struct rx_queue_attribute, attr)
501
502 #define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)
503
504 static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
505                                   char *buf)
506 {
507         struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
508         struct netdev_rx_queue *queue = to_rx_queue(kobj);
509
510         if (!attribute->show)
511                 return -EIO;
512
513         return attribute->show(queue, attribute, buf);
514 }
515
516 static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
517                                    const char *buf, size_t count)
518 {
519         struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
520         struct netdev_rx_queue *queue = to_rx_queue(kobj);
521
522         if (!attribute->store)
523                 return -EIO;
524
525         return attribute->store(queue, attribute, buf, count);
526 }
527
528 static const struct sysfs_ops rx_queue_sysfs_ops = {
529         .show = rx_queue_attr_show,
530         .store = rx_queue_attr_store,
531 };
532
533 static ssize_t show_rps_map(struct netdev_rx_queue *queue,
534                             struct rx_queue_attribute *attribute, char *buf)
535 {
536         struct rps_map *map;
537         cpumask_var_t mask;
538         size_t len = 0;
539         int i;
540
541         if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
542                 return -ENOMEM;
543
544         rcu_read_lock();
545         map = rcu_dereference(queue->rps_map);
546         if (map)
547                 for (i = 0; i < map->len; i++)
548                         cpumask_set_cpu(map->cpus[i], mask);
549
550         len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
551         if (PAGE_SIZE - len < 3) {
552                 rcu_read_unlock();
553                 free_cpumask_var(mask);
554                 return -EINVAL;
555         }
556         rcu_read_unlock();
557
558         free_cpumask_var(mask);
559         len += sprintf(buf + len, "\n");
560         return len;
561 }
562
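/*
 * Writing rps_cpus takes a hexadecimal CPU mask: it is parsed with
 * bitmap_parse(), a new rps_map holding the online CPUs from that mask
 * is built, and the map is published with rcu_assign_pointer() under
 * rps_map_lock; the previous map is freed after an RCU grace period.
 */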
563 static ssize_t store_rps_map(struct netdev_rx_queue *queue,
564                       struct rx_queue_attribute *attribute,
565                       const char *buf, size_t len)
566 {
567         struct rps_map *old_map, *map;
568         cpumask_var_t mask;
569         int err, cpu, i;
570         static DEFINE_SPINLOCK(rps_map_lock);
571
572         if (!capable(CAP_NET_ADMIN))
573                 return -EPERM;
574
575         if (!alloc_cpumask_var(&mask, GFP_KERNEL))
576                 return -ENOMEM;
577
578         err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
579         if (err) {
580                 free_cpumask_var(mask);
581                 return err;
582         }
583
584         map = kzalloc(max_t(unsigned,
585             RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
586             GFP_KERNEL);
587         if (!map) {
588                 free_cpumask_var(mask);
589                 return -ENOMEM;
590         }
591
592         i = 0;
593         for_each_cpu_and(cpu, mask, cpu_online_mask)
594                 map->cpus[i++] = cpu;
595
596         if (i)
597                 map->len = i;
598         else {
599                 kfree(map);
600                 map = NULL;
601         }
602
603         spin_lock(&rps_map_lock);
604         old_map = rcu_dereference_protected(queue->rps_map,
605                                             lockdep_is_held(&rps_map_lock));
606         rcu_assign_pointer(queue->rps_map, map);
607         spin_unlock(&rps_map_lock);
608
609         if (old_map)
610                 kfree_rcu(old_map, rcu);
611
612         free_cpumask_var(mask);
613         return len;
614 }
615
616 static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
617                                            struct rx_queue_attribute *attr,
618                                            char *buf)
619 {
620         struct rps_dev_flow_table *flow_table;
621         unsigned int val = 0;
622
623         rcu_read_lock();
624         flow_table = rcu_dereference(queue->rps_flow_table);
625         if (flow_table)
626                 val = flow_table->mask + 1;
627         rcu_read_unlock();
628
629         return sprintf(buf, "%u\n", val);
630 }
631
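/*
 * The flow table is vmalloc()ed, and vfree() may not be called from the
 * softirq context in which RCU callbacks run, so the RCU callback below
 * defers the actual vfree() to a workqueue.
 */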
632 static void rps_dev_flow_table_release_work(struct work_struct *work)
633 {
634         struct rps_dev_flow_table *table = container_of(work,
635             struct rps_dev_flow_table, free_work);
636
637         vfree(table);
638 }
639
640 static void rps_dev_flow_table_release(struct rcu_head *rcu)
641 {
642         struct rps_dev_flow_table *table = container_of(rcu,
643             struct rps_dev_flow_table, rcu);
644
645         INIT_WORK(&table->free_work, rps_dev_flow_table_release_work);
646         schedule_work(&table->free_work);
647 }
648
649 static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
650                                      struct rx_queue_attribute *attr,
651                                      const char *buf, size_t len)
652 {
653         unsigned int count;
654         char *endp;
655         struct rps_dev_flow_table *table, *old_table;
656         static DEFINE_SPINLOCK(rps_dev_flow_lock);
657
658         if (!capable(CAP_NET_ADMIN))
659                 return -EPERM;
660
661         count = simple_strtoul(buf, &endp, 0);
662         if (endp == buf)
663                 return -EINVAL;
664
665         if (count) {
666                 int i;
667
668                 if (count > 1<<30) {
669                         /* Enforce a limit to prevent overflow */
670                         return -EINVAL;
671                 }
672                 count = roundup_pow_of_two(count);
673                 table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
674                 if (!table)
675                         return -ENOMEM;
676
677                 table->mask = count - 1;
678                 for (i = 0; i < count; i++)
679                         table->flows[i].cpu = RPS_NO_CPU;
680         } else
681                 table = NULL;
682
683         spin_lock(&rps_dev_flow_lock);
684         old_table = rcu_dereference_protected(queue->rps_flow_table,
685                                               lockdep_is_held(&rps_dev_flow_lock));
686         rcu_assign_pointer(queue->rps_flow_table, table);
687         spin_unlock(&rps_dev_flow_lock);
688
689         if (old_table)
690                 call_rcu(&old_table->rcu, rps_dev_flow_table_release);
691
692         return len;
693 }
694
695 static struct rx_queue_attribute rps_cpus_attribute =
696         __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);
697
698
699 static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
700         __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
701             show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
702
703 static struct attribute *rx_queue_default_attrs[] = {
704         &rps_cpus_attribute.attr,
705         &rps_dev_flow_table_cnt_attribute.attr,
706         NULL
707 };
708
709 static void rx_queue_release(struct kobject *kobj)
710 {
711         struct netdev_rx_queue *queue = to_rx_queue(kobj);
712         struct rps_map *map;
713         struct rps_dev_flow_table *flow_table;
714
715
716         map = rcu_dereference_protected(queue->rps_map, 1);
717         if (map) {
718                 RCU_INIT_POINTER(queue->rps_map, NULL);
719                 kfree_rcu(map, rcu);
720         }
721
722         flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
723         if (flow_table) {
724                 RCU_INIT_POINTER(queue->rps_flow_table, NULL);
725                 call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
726         }
727
728         memset(kobj, 0, sizeof(*kobj));
729         dev_put(queue->dev);
730 }
731
732 static struct kobj_type rx_queue_ktype = {
733         .sysfs_ops = &rx_queue_sysfs_ops,
734         .release = rx_queue_release,
735         .default_attrs = rx_queue_default_attrs,
736 };
737
738 static int rx_queue_add_kobject(struct net_device *net, int index)
739 {
740         struct netdev_rx_queue *queue = net->_rx + index;
741         struct kobject *kobj = &queue->kobj;
742         int error = 0;
743
744         kobj->kset = net->queues_kset;
745         error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
746             "rx-%u", index);
747         if (error) {
748                 kobject_put(kobj);
749                 return error;
750         }
751
752         kobject_uevent(kobj, KOBJ_ADD);
753         dev_hold(queue->dev);
754
755         return error;
756 }
757 #endif /* CONFIG_RPS */
758
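/*
 * Create "rx-<n>" kobjects for queues [old_num, new_num) and drop the
 * kobjects of any queues past new_num; if adding one fails, the queues
 * added so far are rolled back.  netdev_queue_update_kobjects() below
 * does the same for the "tx-<n>" directories.
 */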
759 int
760 net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
761 {
762 #ifdef CONFIG_RPS
763         int i;
764         int error = 0;
765
766         for (i = old_num; i < new_num; i++) {
767                 error = rx_queue_add_kobject(net, i);
768                 if (error) {
769                         new_num = old_num;
770                         break;
771                 }
772         }
773
774         while (--i >= new_num)
775                 kobject_put(&net->_rx[i].kobj);
776
777         return error;
778 #else
779         return 0;
780 #endif
781 }
782
783 #ifdef CONFIG_XPS
784 /*
785  * netdev_queue sysfs structures and functions.
786  */
787 struct netdev_queue_attribute {
788         struct attribute attr;
789         ssize_t (*show)(struct netdev_queue *queue,
790             struct netdev_queue_attribute *attr, char *buf);
791         ssize_t (*store)(struct netdev_queue *queue,
792             struct netdev_queue_attribute *attr, const char *buf, size_t len);
793 };
794 #define to_netdev_queue_attr(_attr) container_of(_attr,         \
795     struct netdev_queue_attribute, attr)
796
797 #define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)
798
799 static ssize_t netdev_queue_attr_show(struct kobject *kobj,
800                                       struct attribute *attr, char *buf)
801 {
802         struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
803         struct netdev_queue *queue = to_netdev_queue(kobj);
804
805         if (!attribute->show)
806                 return -EIO;
807
808         return attribute->show(queue, attribute, buf);
809 }
810
811 static ssize_t netdev_queue_attr_store(struct kobject *kobj,
812                                        struct attribute *attr,
813                                        const char *buf, size_t count)
814 {
815         struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
816         struct netdev_queue *queue = to_netdev_queue(kobj);
817
818         if (!attribute->store)
819                 return -EIO;
820
821         return attribute->store(queue, attribute, buf, count);
822 }
823
824 static const struct sysfs_ops netdev_queue_sysfs_ops = {
825         .show = netdev_queue_attr_show,
826         .store = netdev_queue_attr_store,
827 };
828
829 static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
830 {
831         struct net_device *dev = queue->dev;
832         int i;
833
834         for (i = 0; i < dev->num_tx_queues; i++)
835                 if (queue == &dev->_tx[i])
836                         break;
837
838         BUG_ON(i >= dev->num_tx_queues);
839
840         return i;
841 }
842
843
844 static ssize_t show_xps_map(struct netdev_queue *queue,
845                             struct netdev_queue_attribute *attribute, char *buf)
846 {
847         struct net_device *dev = queue->dev;
848         struct xps_dev_maps *dev_maps;
849         cpumask_var_t mask;
850         unsigned long index;
851         size_t len = 0;
852         int i;
853
854         if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
855                 return -ENOMEM;
856
857         index = get_netdev_queue_index(queue);
858
859         rcu_read_lock();
860         dev_maps = rcu_dereference(dev->xps_maps);
861         if (dev_maps) {
862                 for_each_possible_cpu(i) {
863                         struct xps_map *map =
864                             rcu_dereference(dev_maps->cpu_map[i]);
865                         if (map) {
866                                 int j;
867                                 for (j = 0; j < map->len; j++) {
868                                         if (map->queues[j] == index) {
869                                                 cpumask_set_cpu(i, mask);
870                                                 break;
871                                         }
872                                 }
873                         }
874                 }
875         }
876         rcu_read_unlock();
877
878         len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
879         if (PAGE_SIZE - len < 3) {
880                 free_cpumask_var(mask);
881                 return -EINVAL;
882         }
883
884         free_cpumask_var(mask);
885         len += sprintf(buf + len, "\n");
886         return len;
887 }
888
889 static DEFINE_MUTEX(xps_map_mutex);
890 #define xmap_dereference(P)             \
891         rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
892
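/*
 * Writing xps_cpus rebuilds the per-CPU transmit maps: for every
 * possible CPU the queue is added to or removed from that CPU's
 * xps_map according to the written mask, the new xps_dev_maps is
 * published under xps_map_mutex, replaced maps are freed after an RCU
 * grace period, and the queue's NUMA node is recorded when all selected
 * CPUs sit on one node (NUMA_NO_NODE otherwise).
 */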
893 static ssize_t store_xps_map(struct netdev_queue *queue,
894                       struct netdev_queue_attribute *attribute,
895                       const char *buf, size_t len)
896 {
897         struct net_device *dev = queue->dev;
898         cpumask_var_t mask;
899         int err, i, cpu, pos, map_len, alloc_len, need_set;
900         unsigned long index;
901         struct xps_map *map, *new_map;
902         struct xps_dev_maps *dev_maps, *new_dev_maps;
903         int nonempty = 0;
904         int numa_node = -2;
905
906         if (!capable(CAP_NET_ADMIN))
907                 return -EPERM;
908
909         if (!alloc_cpumask_var(&mask, GFP_KERNEL))
910                 return -ENOMEM;
911
912         index = get_netdev_queue_index(queue);
913
914         err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
915         if (err) {
916                 free_cpumask_var(mask);
917                 return err;
918         }
919
920         new_dev_maps = kzalloc(max_t(unsigned,
921             XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
922         if (!new_dev_maps) {
923                 free_cpumask_var(mask);
924                 return -ENOMEM;
925         }
926
927         mutex_lock(&xps_map_mutex);
928
929         dev_maps = xmap_dereference(dev->xps_maps);
930
931         for_each_possible_cpu(cpu) {
932                 map = dev_maps ?
933                         xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
934                 new_map = map;
935                 if (map) {
936                         for (pos = 0; pos < map->len; pos++)
937                                 if (map->queues[pos] == index)
938                                         break;
939                         map_len = map->len;
940                         alloc_len = map->alloc_len;
941                 } else
942                         pos = map_len = alloc_len = 0;
943
944                 need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu);
945 #ifdef CONFIG_NUMA
946                 if (need_set) {
947                         if (numa_node == -2)
948                                 numa_node = cpu_to_node(cpu);
949                         else if (numa_node != cpu_to_node(cpu))
950                                 numa_node = -1;
951                 }
952 #endif
953                 if (need_set && pos >= map_len) {
954                         /* Need to add queue to this CPU's map */
955                         if (map_len >= alloc_len) {
956                                 alloc_len = alloc_len ?
957                                     2 * alloc_len : XPS_MIN_MAP_ALLOC;
958                                 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len),
959                                                        GFP_KERNEL,
960                                                        cpu_to_node(cpu));
961                                 if (!new_map)
962                                         goto error;
963                                 new_map->alloc_len = alloc_len;
964                                 for (i = 0; i < map_len; i++)
965                                         new_map->queues[i] = map->queues[i];
966                                 new_map->len = map_len;
967                         }
968                         new_map->queues[new_map->len++] = index;
969                 } else if (!need_set && pos < map_len) {
970                         /* Need to remove queue from this CPU's map */
971                         if (map_len > 1)
972                                 new_map->queues[pos] =
973                                     new_map->queues[--new_map->len];
974                         else
975                                 new_map = NULL;
976                 }
977                 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
978         }
979
980         /* Cleanup old maps */
981         for_each_possible_cpu(cpu) {
982                 map = dev_maps ?
983                         xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
984                 if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
985                         kfree_rcu(map, rcu);
986                 if (new_dev_maps->cpu_map[cpu])
987                         nonempty = 1;
988         }
989
990         if (nonempty)
991                 RCU_INIT_POINTER(dev->xps_maps, new_dev_maps);
992         else {
993                 kfree(new_dev_maps);
994                 RCU_INIT_POINTER(dev->xps_maps, NULL);
995         }
996
997         if (dev_maps)
998                 kfree_rcu(dev_maps, rcu);
999
1000         netdev_queue_numa_node_write(queue, (numa_node >= 0) ? numa_node :
1001                                             NUMA_NO_NODE);
1002
1003         mutex_unlock(&xps_map_mutex);
1004
1005         free_cpumask_var(mask);
1006         return len;
1007
1008 error:
1009         mutex_unlock(&xps_map_mutex);
1010
1011         if (new_dev_maps)
1012                 for_each_possible_cpu(i)
1013                         kfree(rcu_dereference_protected(
1014                                 new_dev_maps->cpu_map[i],
1015                                 1));
1016         kfree(new_dev_maps);
1017         free_cpumask_var(mask);
1018         return -ENOMEM;
1019 }
1020
1021 static struct netdev_queue_attribute xps_cpus_attribute =
1022     __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
1023
1024 static struct attribute *netdev_queue_default_attrs[] = {
1025         &xps_cpus_attribute.attr,
1026         NULL
1027 };
1028
1029 static void netdev_queue_release(struct kobject *kobj)
1030 {
1031         struct netdev_queue *queue = to_netdev_queue(kobj);
1032         struct net_device *dev = queue->dev;
1033         struct xps_dev_maps *dev_maps;
1034         struct xps_map *map;
1035         unsigned long index;
1036         int i, pos, nonempty = 0;
1037
1038         index = get_netdev_queue_index(queue);
1039
1040         mutex_lock(&xps_map_mutex);
1041         dev_maps = xmap_dereference(dev->xps_maps);
1042
1043         if (dev_maps) {
1044                 for_each_possible_cpu(i) {
1045                         map = xmap_dereference(dev_maps->cpu_map[i]);
1046                         if (!map)
1047                                 continue;
1048
1049                         for (pos = 0; pos < map->len; pos++)
1050                                 if (map->queues[pos] == index)
1051                                         break;
1052
1053                         if (pos < map->len) {
1054                                 if (map->len > 1)
1055                                         map->queues[pos] =
1056                                             map->queues[--map->len];
1057                                 else {
1058                                         RCU_INIT_POINTER(dev_maps->cpu_map[i],
1059                                             NULL);
1060                                         kfree_rcu(map, rcu);
1061                                         map = NULL;
1062                                 }
1063                         }
1064                         if (map)
1065                                 nonempty = 1;
1066                 }
1067
1068                 if (!nonempty) {
1069                         RCU_INIT_POINTER(dev->xps_maps, NULL);
1070                         kfree_rcu(dev_maps, rcu);
1071                 }
1072         }
1073
1074         mutex_unlock(&xps_map_mutex);
1075
1076         memset(kobj, 0, sizeof(*kobj));
1077         dev_put(queue->dev);
1078 }
1079
1080 static struct kobj_type netdev_queue_ktype = {
1081         .sysfs_ops = &netdev_queue_sysfs_ops,
1082         .release = netdev_queue_release,
1083         .default_attrs = netdev_queue_default_attrs,
1084 };
1085
1086 static int netdev_queue_add_kobject(struct net_device *net, int index)
1087 {
1088         struct netdev_queue *queue = net->_tx + index;
1089         struct kobject *kobj = &queue->kobj;
1090         int error = 0;
1091
1092         kobj->kset = net->queues_kset;
1093         error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
1094             "tx-%u", index);
1095         if (error) {
1096                 kobject_put(kobj);
1097                 return error;
1098         }
1099
1100         kobject_uevent(kobj, KOBJ_ADD);
1101         dev_hold(queue->dev);
1102
1103         return error;
1104 }
1105 #endif /* CONFIG_XPS */
1106
1107 int
1108 netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
1109 {
1110 #ifdef CONFIG_XPS
1111         int i;
1112         int error = 0;
1113
1114         for (i = old_num; i < new_num; i++) {
1115                 error = netdev_queue_add_kobject(net, i);
1116                 if (error) {
1117                         new_num = old_num;
1118                         break;
1119                 }
1120         }
1121
1122         while (--i >= new_num)
1123                 kobject_put(&net->_tx[i].kobj);
1124
1125         return error;
1126 #else
1127         return 0;
1128 #endif
1129 }
1130
1131 static int register_queue_kobjects(struct net_device *net)
1132 {
1133         int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
1134
1135 #if defined(CONFIG_RPS) || defined(CONFIG_XPS)
1136         net->queues_kset = kset_create_and_add("queues",
1137             NULL, &net->dev.kobj);
1138         if (!net->queues_kset)
1139                 return -ENOMEM;
1140 #endif
1141
1142 #ifdef CONFIG_RPS
1143         real_rx = net->real_num_rx_queues;
1144 #endif
1145         real_tx = net->real_num_tx_queues;
1146
1147         error = net_rx_queue_update_kobjects(net, 0, real_rx);
1148         if (error)
1149                 goto error;
1150         rxq = real_rx;
1151
1152         error = netdev_queue_update_kobjects(net, 0, real_tx);
1153         if (error)
1154                 goto error;
1155         txq = real_tx;
1156
1157         return 0;
1158
1159 error:
1160         netdev_queue_update_kobjects(net, txq, 0);
1161         net_rx_queue_update_kobjects(net, rxq, 0);
1162         return error;
1163 }
1164
1165 static void remove_queue_kobjects(struct net_device *net)
1166 {
1167         int real_rx = 0, real_tx = 0;
1168
1169 #ifdef CONFIG_RPS
1170         real_rx = net->real_num_rx_queues;
1171 #endif
1172         real_tx = net->real_num_tx_queues;
1173
1174         net_rx_queue_update_kobjects(net, real_rx, 0);
1175         netdev_queue_update_kobjects(net, real_tx, 0);
1176 #if defined(CONFIG_RPS) || defined(CONFIG_XPS)
1177         kset_unregister(net->queues_kset);
1178 #endif
1179 }
1180
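/*
 * kobj namespace operations for the "net" class: with CONFIG_NET_NS
 * these let sysfs tag each device directory with its network namespace,
 * so a namespace only sees its own devices under /sys/class/net.
 */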
1181 static void *net_grab_current_ns(void)
1182 {
1183         struct net *ns = current->nsproxy->net_ns;
1184 #ifdef CONFIG_NET_NS
1185         if (ns)
1186                 atomic_inc(&ns->passive);
1187 #endif
1188         return ns;
1189 }
1190
1191 static const void *net_initial_ns(void)
1192 {
1193         return &init_net;
1194 }
1195
1196 static const void *net_netlink_ns(struct sock *sk)
1197 {
1198         return sock_net(sk);
1199 }
1200
1201 struct kobj_ns_type_operations net_ns_type_operations = {
1202         .type = KOBJ_NS_TYPE_NET,
1203         .grab_current_ns = net_grab_current_ns,
1204         .netlink_ns = net_netlink_ns,
1205         .initial_ns = net_initial_ns,
1206         .drop_ns = net_drop_ns,
1207 };
1208 EXPORT_SYMBOL_GPL(net_ns_type_operations);
1209
1210 #ifdef CONFIG_HOTPLUG
1211 static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
1212 {
1213         struct net_device *dev = to_net_dev(d);
1214         int retval;
1215
1216         /* pass interface to uevent. */
1217         retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
1218         if (retval)
1219                 goto exit;
1220
1221         /* pass ifindex to uevent.
1222          * ifindex is useful as it won't change (interface name may change)
1223          * and is what RtNetlink uses natively. */
1224         retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);
1225
1226 exit:
1227         return retval;
1228 }
1229 #endif
1230
1231 /*
1232  *      netdev_release -- destroy and free a dead device.
1233  *      Called when last reference to device kobject is gone.
1234  */
1235 static void netdev_release(struct device *d)
1236 {
1237         struct net_device *dev = to_net_dev(d);
1238
1239         BUG_ON(dev->reg_state != NETREG_RELEASED);
1240
1241         kfree(dev->ifalias);
1242         kfree((char *)dev - dev->padded);
1243 }
1244
1245 static const void *net_namespace(struct device *d)
1246 {
1247         struct net_device *dev;
1248         dev = container_of(d, struct net_device, dev);
1249         return dev_net(dev);
1250 }
1251
1252 static struct class net_class = {
1253         .name = "net",
1254         .dev_release = netdev_release,
1255 #ifdef CONFIG_SYSFS
1256         .dev_attrs = net_class_attributes,
1257 #endif /* CONFIG_SYSFS */
1258 #ifdef CONFIG_HOTPLUG
1259         .dev_uevent = netdev_uevent,
1260 #endif
1261         .ns_type = &net_ns_type_operations,
1262         .namespace = net_namespace,
1263 };
1264
1265 /* Delete sysfs entries but hold kobject reference until after all
1266  * netdev references are gone.
1267  */
1268 void netdev_unregister_kobject(struct net_device *net)
1269 {
1270         struct device *dev = &(net->dev);
1271
1272         kobject_get(&dev->kobj);
1273
1274         remove_queue_kobjects(net);
1275
1276         device_del(dev);
1277 }
1278
1279 /* Create sysfs entries for network device. */
1280 int netdev_register_kobject(struct net_device *net)
1281 {
1282         struct device *dev = &(net->dev);
1283         const struct attribute_group **groups = net->sysfs_groups;
1284         int error = 0;
1285
1286         device_initialize(dev);
1287         dev->class = &net_class;
1288         dev->platform_data = net;
1289         dev->groups = groups;
1290
1291         dev_set_name(dev, "%s", net->name);
1292
1293 #ifdef CONFIG_SYSFS
1294         /* Allow for a device specific group */
1295         if (*groups)
1296                 groups++;
1297
1298         *groups++ = &netstat_group;
1299 #ifdef CONFIG_WIRELESS_EXT_SYSFS
1300         if (net->ieee80211_ptr)
1301                 *groups++ = &wireless_group;
1302 #ifdef CONFIG_WIRELESS_EXT
1303         else if (net->wireless_handlers)
1304                 *groups++ = &wireless_group;
1305 #endif
1306 #endif
1307 #endif /* CONFIG_SYSFS */
1308
1309         error = device_add(dev);
1310         if (error)
1311                 return error;
1312
1313         error = register_queue_kobjects(net);
1314         if (error) {
1315                 device_del(dev);
1316                 return error;
1317         }
1318
1319         return error;
1320 }
1321
1322 int netdev_class_create_file(struct class_attribute *class_attr)
1323 {
1324         return class_create_file(&net_class, class_attr);
1325 }
1326 EXPORT_SYMBOL(netdev_class_create_file);
1327
1328 void netdev_class_remove_file(struct class_attribute *class_attr)
1329 {
1330         class_remove_file(&net_class, class_attr);
1331 }
1332 EXPORT_SYMBOL(netdev_class_remove_file);
1333
1334 int netdev_kobject_init(void)
1335 {
1336         kobj_ns_type_register(&net_ns_type_operations);
1337         return class_register(&net_class);
1338 }