1 /*
2  * Copyright (c) 2007-2012 Nicira, Inc.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16  * 02110-1301, USA
17  */
18
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/if_arp.h>
24 #include <linux/if_vlan.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/jhash.h>
28 #include <linux/delay.h>
29 #include <linux/time.h>
30 #include <linux/etherdevice.h>
31 #include <linux/genetlink.h>
32 #include <linux/kernel.h>
33 #include <linux/kthread.h>
34 #include <linux/mutex.h>
35 #include <linux/percpu.h>
36 #include <linux/rcupdate.h>
37 #include <linux/tcp.h>
38 #include <linux/udp.h>
39 #include <linux/ethtool.h>
40 #include <linux/wait.h>
41 #include <asm/div64.h>
42 #include <linux/highmem.h>
43 #include <linux/netfilter_bridge.h>
44 #include <linux/netfilter_ipv4.h>
45 #include <linux/inetdevice.h>
46 #include <linux/list.h>
47 #include <linux/openvswitch.h>
48 #include <linux/rculist.h>
49 #include <linux/dmi.h>
50 #include <linux/workqueue.h>
51 #include <net/genetlink.h>
52 #include <net/net_namespace.h>
53 #include <net/netns/generic.h>
54
55 #include "datapath.h"
56 #include "flow.h"
57 #include "vport-internal_dev.h"
58
59 /**
60  * struct ovs_net - Per net-namespace data for ovs.
61  * @dps: List of datapaths, kept so that they can all be dumped out.
62  * Protected by genl_mutex.
63  */
64 struct ovs_net {
65         struct list_head dps;
66 };
67
68 static int ovs_net_id __read_mostly;
69
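/* The flow tables are rehashed periodically: rehash_flow_table() runs from a
 * delayed work item at REHASH_FLOW_INTERVAL (10 minute) intervals. */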
70 #define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
71 static void rehash_flow_table(struct work_struct *work);
72 static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
73
74 /**
75  * DOC: Locking:
76  *
77  * Writes to device state (add/remove datapath, port, set operations on vports,
78  * etc.) are protected by RTNL.
79  *
80  * Writes to other state (flow table modifications, set miscellaneous datapath
81  * parameters, etc.) are protected by genl_mutex.  The RTNL lock nests inside
82  * genl_mutex.
83  *
84  * Reads are protected by RCU.
85  *
86  * There are a few special cases (mostly stats) that have their own
87  * synchronization but they nest under all of the above and don't interact with
88  * each other.
89  */
90
91 static struct vport *new_vport(const struct vport_parms *);
92 static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
93                              const struct dp_upcall_info *);
94 static int queue_userspace_packet(struct net *, int dp_ifindex,
95                                   struct sk_buff *,
96                                   const struct dp_upcall_info *);
97
98 /* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
99 static struct datapath *get_dp(struct net *net, int dp_ifindex)
100 {
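        /* A datapath is identified by the ifindex of its local internal
         * port: find that net_device, map it back to its vport, and take
         * the vport's datapath. */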
101         struct datapath *dp = NULL;
102         struct net_device *dev;
103
104         rcu_read_lock();
105         dev = dev_get_by_index_rcu(net, dp_ifindex);
106         if (dev) {
107                 struct vport *vport = ovs_internal_dev_get_vport(dev);
108                 if (vport)
109                         dp = vport->dp;
110         }
111         rcu_read_unlock();
112
113         return dp;
114 }
115
116 /* Must be called with rcu_read_lock or RTNL lock. */
117 const char *ovs_dp_name(const struct datapath *dp)
118 {
119         struct vport *vport = rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]);
120         return vport->ops->get_name(vport);
121 }
122
123 static int get_dpifindex(struct datapath *dp)
124 {
125         struct vport *local;
126         int ifindex;
127
128         rcu_read_lock();
129
130         local = rcu_dereference(dp->ports[OVSP_LOCAL]);
131         if (local)
132                 ifindex = local->ops->get_ifindex(local);
133         else
134                 ifindex = 0;
135
136         rcu_read_unlock();
137
138         return ifindex;
139 }
140
141 static void destroy_dp_rcu(struct rcu_head *rcu)
142 {
143         struct datapath *dp = container_of(rcu, struct datapath, rcu);
144
145         ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
146         free_percpu(dp->stats_percpu);
147         release_net(ovs_dp_get_net(dp));
148         kfree(dp);
149 }
150
151 /* Called with RTNL lock and genl_lock. */
152 static struct vport *new_vport(const struct vport_parms *parms)
153 {
154         struct vport *vport;
155
156         vport = ovs_vport_add(parms);
157         if (!IS_ERR(vport)) {
158                 struct datapath *dp = parms->dp;
159
160                 rcu_assign_pointer(dp->ports[parms->port_no], vport);
161                 list_add(&vport->node, &dp->port_list);
162         }
163
164         return vport;
165 }
166
167 /* Called with RTNL lock. */
168 void ovs_dp_detach_port(struct vport *p)
169 {
170         ASSERT_RTNL();
171
172         /* First drop references to device. */
173         list_del(&p->node);
174         rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
175
176         /* Then destroy it. */
177         ovs_vport_del(p);
178 }
179
180 /* Must be called with rcu_read_lock. */
181 void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
182 {
183         struct datapath *dp = p->dp;
184         struct sw_flow *flow;
185         struct dp_stats_percpu *stats;
186         struct sw_flow_key key;
187         u64 *stats_counter;
188         int error;
189         int key_len;
190
191         stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
192
193         /* Extract flow from 'skb' into 'key'. */
194         error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
195         if (unlikely(error)) {
196                 kfree_skb(skb);
197                 return;
198         }
199
200         /* Look up flow. */
201         flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
202         if (unlikely(!flow)) {
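                /* Flow table miss: queue the packet to userspace as an
                 * OVS_PACKET_CMD_MISS upcall and account it as a miss. */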
203                 struct dp_upcall_info upcall;
204
205                 upcall.cmd = OVS_PACKET_CMD_MISS;
206                 upcall.key = &key;
207                 upcall.userdata = NULL;
208                 upcall.pid = p->upcall_pid;
209                 ovs_dp_upcall(dp, skb, &upcall);
210                 consume_skb(skb);
211                 stats_counter = &stats->n_missed;
212                 goto out;
213         }
214
215         OVS_CB(skb)->flow = flow;
216
217         stats_counter = &stats->n_hit;
218         ovs_flow_used(OVS_CB(skb)->flow, skb);
219         ovs_execute_actions(dp, skb);
220
221 out:
222         /* Update datapath statistics. */
223         u64_stats_update_begin(&stats->sync);
224         (*stats_counter)++;
225         u64_stats_update_end(&stats->sync);
226 }
227
228 static struct genl_family dp_packet_genl_family = {
229         .id = GENL_ID_GENERATE,
230         .hdrsize = sizeof(struct ovs_header),
231         .name = OVS_PACKET_FAMILY,
232         .version = OVS_PACKET_VERSION,
233         .maxattr = OVS_PACKET_ATTR_MAX,
234         .netnsok = true
235 };
236
237 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
238                   const struct dp_upcall_info *upcall_info)
239 {
240         struct dp_stats_percpu *stats;
241         int dp_ifindex;
242         int err;
243
244         if (upcall_info->pid == 0) {
245                 err = -ENOTCONN;
246                 goto err;
247         }
248
249         dp_ifindex = get_dpifindex(dp);
250         if (!dp_ifindex) {
251                 err = -ENODEV;
252                 goto err;
253         }
254
255         if (!skb_is_gso(skb))
256                 err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
257         else
258                 err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
259         if (err)
260                 goto err;
261
262         return 0;
263
264 err:
265         stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
266
267         u64_stats_update_begin(&stats->sync);
268         stats->n_lost++;
269         u64_stats_update_end(&stats->sync);
270
271         return err;
272 }
273
274 static int queue_gso_packets(struct net *net, int dp_ifindex,
275                              struct sk_buff *skb,
276                              const struct dp_upcall_info *upcall_info)
277 {
278         unsigned short gso_type = skb_shinfo(skb)->gso_type;
279         struct dp_upcall_info later_info;
280         struct sw_flow_key later_key;
281         struct sk_buff *segs, *nskb;
282         int err;
283
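        /* Segment the GSO skb in software so that each segment is queued to
         * userspace as an ordinary, MTU-sized packet. */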
284         segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
285         if (IS_ERR(segs))
286                 return PTR_ERR(segs);
287
288         /* Queue all of the segments. */
289         skb = segs;
290         do {
291                 err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
292                 if (err)
293                         break;
294
295                 if (skb == segs && gso_type & SKB_GSO_UDP) {
296                         /* The initial flow key extracted by ovs_flow_extract()
297                          * in this case is for the first fragment, so we need to
298                          * properly mark later fragments.
299                          */
300                         later_key = *upcall_info->key;
301                         later_key.ip.frag = OVS_FRAG_TYPE_LATER;
302
303                         later_info = *upcall_info;
304                         later_info.key = &later_key;
305                         upcall_info = &later_info;
306                 }
307         } while ((skb = skb->next));
308
309         /* Free all of the segments. */
310         skb = segs;
311         do {
312                 nskb = skb->next;
313                 if (err)
314                         kfree_skb(skb);
315                 else
316                         consume_skb(skb);
317         } while ((skb = nskb));
318         return err;
319 }
320
321 static int queue_userspace_packet(struct net *net, int dp_ifindex,
322                                   struct sk_buff *skb,
323                                   const struct dp_upcall_info *upcall_info)
324 {
325         struct ovs_header *upcall;
326         struct sk_buff *nskb = NULL;
327         struct sk_buff *user_skb; /* to be queued to userspace */
328         struct nlattr *nla;
329         unsigned int len;
330         int err;
331
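        /* If the VLAN tag is held out of band in the skb, push it back into
         * the packet data on a clone so that userspace sees the tagged
         * frame. */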
332         if (vlan_tx_tag_present(skb)) {
333                 nskb = skb_clone(skb, GFP_ATOMIC);
334                 if (!nskb)
335                         return -ENOMEM;
336
337                 nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
338                 if (!nskb)
339                         return -ENOMEM;
340
341                 nskb->vlan_tci = 0;
342                 skb = nskb;
343         }
344
345         if (nla_attr_size(skb->len) > USHRT_MAX) {
346                 err = -EFBIG;
347                 goto out;
348         }
349
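        /* Size the upcall message: the ovs_header, the packet contents, the
         * flow key, and, for OVS_PACKET_CMD_ACTION upcalls, an 8-byte
         * userdata attribute. */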
350         len = sizeof(struct ovs_header);
351         len += nla_total_size(skb->len);
352         len += nla_total_size(FLOW_BUFSIZE);
353         if (upcall_info->cmd == OVS_PACKET_CMD_ACTION)
354                 len += nla_total_size(8);
355
356         user_skb = genlmsg_new(len, GFP_ATOMIC);
357         if (!user_skb) {
358                 err = -ENOMEM;
359                 goto out;
360         }
361
362         upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
363                              0, upcall_info->cmd);
364         upcall->dp_ifindex = dp_ifindex;
365
366         nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
367         ovs_flow_to_nlattrs(upcall_info->key, user_skb);
368         nla_nest_end(user_skb, nla);
369
370         if (upcall_info->userdata)
371                 nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA,
372                             nla_get_u64(upcall_info->userdata));
373
374         nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
375
376         skb_copy_and_csum_dev(skb, nla_data(nla));
377
378         err = genlmsg_unicast(net, user_skb, upcall_info->pid);
379
380 out:
381         kfree_skb(nskb);
382         return err;
383 }
384
385 /* Called with genl_mutex. */
386 static int flush_flows(struct datapath *dp)
387 {
388         struct flow_table *old_table;
389         struct flow_table *new_table;
390
391         old_table = genl_dereference(dp->table);
392         new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
393         if (!new_table)
394                 return -ENOMEM;
395
396         rcu_assign_pointer(dp->table, new_table);
397
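        /* Concurrent readers may still be using the old table, so its
         * destruction is deferred until after an RCU grace period. */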
398         ovs_flow_tbl_deferred_destroy(old_table);
399         return 0;
400 }
401
402 static int validate_actions(const struct nlattr *attr,
403                                 const struct sw_flow_key *key, int depth);
404
405 static int validate_sample(const struct nlattr *attr,
406                                 const struct sw_flow_key *key, int depth)
407 {
408         const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
409         const struct nlattr *probability, *actions;
410         const struct nlattr *a;
411         int rem;
412
413         memset(attrs, 0, sizeof(attrs));
414         nla_for_each_nested(a, attr, rem) {
415                 int type = nla_type(a);
416                 if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
417                         return -EINVAL;
418                 attrs[type] = a;
419         }
420         if (rem)
421                 return -EINVAL;
422
423         probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
424         if (!probability || nla_len(probability) != sizeof(u32))
425                 return -EINVAL;
426
427         actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
428         if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
429                 return -EINVAL;
430         return validate_actions(actions, key, depth + 1);
431 }
432
433 static int validate_tp_port(const struct sw_flow_key *flow_key)
434 {
435         if (flow_key->eth.type == htons(ETH_P_IP)) {
436                 if (flow_key->ipv4.tp.src && flow_key->ipv4.tp.dst)
437                         return 0;
438         } else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
439                 if (flow_key->ipv6.tp.src && flow_key->ipv6.tp.dst)
440                         return 0;
441         }
442
443         return -EINVAL;
444 }
445
446 static int validate_set(const struct nlattr *a,
447                         const struct sw_flow_key *flow_key)
448 {
449         const struct nlattr *ovs_key = nla_data(a);
450         int key_type = nla_type(ovs_key);
451
452         /* There can be only one key in an action */
453         if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
454                 return -EINVAL;
455
456         if (key_type > OVS_KEY_ATTR_MAX ||
457             nla_len(ovs_key) != ovs_key_lens[key_type])
458                 return -EINVAL;
459
460         switch (key_type) {
461         const struct ovs_key_ipv4 *ipv4_key;
462
463         case OVS_KEY_ATTR_PRIORITY:
464         case OVS_KEY_ATTR_ETHERNET:
465                 break;
466
467         case OVS_KEY_ATTR_IPV4:
468                 if (flow_key->eth.type != htons(ETH_P_IP))
469                         return -EINVAL;
470
471                 if (!flow_key->ipv4.addr.src || !flow_key->ipv4.addr.dst)
472                         return -EINVAL;
473
474                 ipv4_key = nla_data(ovs_key);
475                 if (ipv4_key->ipv4_proto != flow_key->ip.proto)
476                         return -EINVAL;
477
478                 if (ipv4_key->ipv4_frag != flow_key->ip.frag)
479                         return -EINVAL;
480
481                 break;
482
483         case OVS_KEY_ATTR_TCP:
484                 if (flow_key->ip.proto != IPPROTO_TCP)
485                         return -EINVAL;
486
487                 return validate_tp_port(flow_key);
488
489         case OVS_KEY_ATTR_UDP:
490                 if (flow_key->ip.proto != IPPROTO_UDP)
491                         return -EINVAL;
492
493                 return validate_tp_port(flow_key);
494
495         default:
496                 return -EINVAL;
497         }
498
499         return 0;
500 }
501
502 static int validate_userspace(const struct nlattr *attr)
503 {
504         static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] =   {
505                 [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
506                 [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 },
507         };
508         struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
509         int error;
510
511         error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
512                                  attr, userspace_policy);
513         if (error)
514                 return error;
515
516         if (!a[OVS_USERSPACE_ATTR_PID] ||
517             !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
518                 return -EINVAL;
519
520         return 0;
521 }
522
523 static int validate_actions(const struct nlattr *attr,
524                                 const struct sw_flow_key *key,  int depth)
525 {
526         const struct nlattr *a;
527         int rem, err;
528
529         if (depth >= SAMPLE_ACTION_DEPTH)
530                 return -EOVERFLOW;
531
532         nla_for_each_nested(a, attr, rem) {
533                 /* Expected argument lengths, (u32)-1 for variable length. */
534                 static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
535                         [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
536                         [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
537                         [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
538                         [OVS_ACTION_ATTR_POP_VLAN] = 0,
539                         [OVS_ACTION_ATTR_SET] = (u32)-1,
540                         [OVS_ACTION_ATTR_SAMPLE] = (u32)-1
541                 };
542                 const struct ovs_action_push_vlan *vlan;
543                 int type = nla_type(a);
544
545                 if (type > OVS_ACTION_ATTR_MAX ||
546                     (action_lens[type] != nla_len(a) &&
547                      action_lens[type] != (u32)-1))
548                         return -EINVAL;
549
550                 switch (type) {
551                 case OVS_ACTION_ATTR_UNSPEC:
552                         return -EINVAL;
553
554                 case OVS_ACTION_ATTR_USERSPACE:
555                         err = validate_userspace(a);
556                         if (err)
557                                 return err;
558                         break;
559
560                 case OVS_ACTION_ATTR_OUTPUT:
561                         if (nla_get_u32(a) >= DP_MAX_PORTS)
562                                 return -EINVAL;
563                         break;
564
565
566                 case OVS_ACTION_ATTR_POP_VLAN:
567                         break;
568
569                 case OVS_ACTION_ATTR_PUSH_VLAN:
570                         vlan = nla_data(a);
571                         if (vlan->vlan_tpid != htons(ETH_P_8021Q))
572                                 return -EINVAL;
573                         if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
574                                 return -EINVAL;
575                         break;
576
577                 case OVS_ACTION_ATTR_SET:
578                         err = validate_set(a, key);
579                         if (err)
580                                 return err;
581                         break;
582
583                 case OVS_ACTION_ATTR_SAMPLE:
584                         err = validate_sample(a, key, depth);
585                         if (err)
586                                 return err;
587                         break;
588
589                 default:
590                         return -EINVAL;
591                 }
592         }
593
594         if (rem > 0)
595                 return -EINVAL;
596
597         return 0;
598 }
599
600 static void clear_stats(struct sw_flow *flow)
601 {
602         flow->used = 0;
603         flow->tcp_flags = 0;
604         flow->packet_count = 0;
605         flow->byte_count = 0;
606 }
607
608 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
609 {
610         struct ovs_header *ovs_header = info->userhdr;
611         struct nlattr **a = info->attrs;
612         struct sw_flow_actions *acts;
613         struct sk_buff *packet;
614         struct sw_flow *flow;
615         struct datapath *dp;
616         struct ethhdr *eth;
617         int len;
618         int err;
619         int key_len;
620
621         err = -EINVAL;
622         if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
623             !a[OVS_PACKET_ATTR_ACTIONS] ||
624             nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN)
625                 goto err;
626
627         len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
628         packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
629         err = -ENOMEM;
630         if (!packet)
631                 goto err;
632         skb_reserve(packet, NET_IP_ALIGN);
633
634         memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len);
635
636         skb_reset_mac_header(packet);
637         eth = eth_hdr(packet);
638
639         /* Normally, setting the skb 'protocol' field would be handled by a
640          * call to eth_type_trans(), but it assumes there's a sending
641          * device, which we may not have. */
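        /* EtherType values start at 1536 (0x600); smaller values are 802.3
         * length fields, so fall back to treating the frame as raw 802.2. */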
642         if (ntohs(eth->h_proto) >= 1536)
643                 packet->protocol = eth->h_proto;
644         else
645                 packet->protocol = htons(ETH_P_802_2);
646
647         /* Build an sw_flow for sending this packet. */
648         flow = ovs_flow_alloc();
649         err = PTR_ERR(flow);
650         if (IS_ERR(flow))
651                 goto err_kfree_skb;
652
653         err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
654         if (err)
655                 goto err_flow_free;
656
657         err = ovs_flow_metadata_from_nlattrs(&flow->key.phy.priority,
658                                              &flow->key.phy.in_port,
659                                              a[OVS_PACKET_ATTR_KEY]);
660         if (err)
661                 goto err_flow_free;
662
663         err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0);
664         if (err)
665                 goto err_flow_free;
666
667         flow->hash = ovs_flow_hash(&flow->key, key_len);
668
669         acts = ovs_flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
670         err = PTR_ERR(acts);
671         if (IS_ERR(acts))
672                 goto err_flow_free;
673         rcu_assign_pointer(flow->sf_acts, acts);
674
675         OVS_CB(packet)->flow = flow;
676         packet->priority = flow->key.phy.priority;
677
678         rcu_read_lock();
679         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
680         err = -ENODEV;
681         if (!dp)
682                 goto err_unlock;
683
684         local_bh_disable();
685         err = ovs_execute_actions(dp, packet);
686         local_bh_enable();
687         rcu_read_unlock();
688
689         ovs_flow_free(flow);
690         return err;
691
692 err_unlock:
693         rcu_read_unlock();
694 err_flow_free:
695         ovs_flow_free(flow);
696 err_kfree_skb:
697         kfree_skb(packet);
698 err:
699         return err;
700 }
701
702 static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
703         [OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
704         [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
705         [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
706 };
707
708 static struct genl_ops dp_packet_genl_ops[] = {
709         { .cmd = OVS_PACKET_CMD_EXECUTE,
710           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
711           .policy = packet_policy,
712           .doit = ovs_packet_cmd_execute
713         }
714 };
715
716 static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
717 {
718         int i;
719         struct flow_table *table = genl_dereference(dp->table);
720
721         stats->n_flows = ovs_flow_tbl_count(table);
722
723         stats->n_hit = stats->n_missed = stats->n_lost = 0;
724         for_each_possible_cpu(i) {
725                 const struct dp_stats_percpu *percpu_stats;
726                 struct dp_stats_percpu local_stats;
727                 unsigned int start;
728
729                 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
730
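                /* Snapshot this CPU's counters under the u64_stats seqcount,
                 * retrying if a writer updated them concurrently. */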
731                 do {
732                         start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
733                         local_stats = *percpu_stats;
734                 } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
735
736                 stats->n_hit += local_stats.n_hit;
737                 stats->n_missed += local_stats.n_missed;
738                 stats->n_lost += local_stats.n_lost;
739         }
740 }
741
742 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
743         [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
744         [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
745         [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
746 };
747
748 static struct genl_family dp_flow_genl_family = {
749         .id = GENL_ID_GENERATE,
750         .hdrsize = sizeof(struct ovs_header),
751         .name = OVS_FLOW_FAMILY,
752         .version = OVS_FLOW_VERSION,
753         .maxattr = OVS_FLOW_ATTR_MAX,
754         .netnsok = true
755 };
756
757 static struct genl_multicast_group ovs_dp_flow_multicast_group = {
758         .name = OVS_FLOW_MCGROUP
759 };
760
761 /* Called with genl_lock. */
762 static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
763                                   struct sk_buff *skb, u32 pid,
764                                   u32 seq, u32 flags, u8 cmd)
765 {
766         const int skb_orig_len = skb->len;
767         const struct sw_flow_actions *sf_acts;
768         struct ovs_flow_stats stats;
769         struct ovs_header *ovs_header;
770         struct nlattr *nla;
771         unsigned long used;
772         u8 tcp_flags;
773         int err;
774
775         sf_acts = rcu_dereference_protected(flow->sf_acts,
776                                             lockdep_genl_is_held());
777
778         ovs_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd);
779         if (!ovs_header)
780                 return -EMSGSIZE;
781
782         ovs_header->dp_ifindex = get_dpifindex(dp);
783
784         nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
785         if (!nla)
786                 goto nla_put_failure;
787         err = ovs_flow_to_nlattrs(&flow->key, skb);
788         if (err)
789                 goto error;
790         nla_nest_end(skb, nla);
791
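        /* Take a consistent snapshot of the flow statistics under the flow's
         * lock before emitting them as attributes. */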
792         spin_lock_bh(&flow->lock);
793         used = flow->used;
794         stats.n_packets = flow->packet_count;
795         stats.n_bytes = flow->byte_count;
796         tcp_flags = flow->tcp_flags;
797         spin_unlock_bh(&flow->lock);
798
799         if (used &&
800             nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
801                 goto nla_put_failure;
802
803         if (stats.n_packets &&
804             nla_put(skb, OVS_FLOW_ATTR_STATS,
805                     sizeof(struct ovs_flow_stats), &stats))
806                 goto nla_put_failure;
807
808         if (tcp_flags &&
809             nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
810                 goto nla_put_failure;
811
812         /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
813          * this is the first flow to be dumped into 'skb'.  This is unusual for
814          * Netlink but individual action lists can be longer than
815          * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
816          * The userspace caller can always fetch the actions separately if it
817          * really wants them.  (Most userspace callers in fact don't care.)
818          *
819          * This can only fail for dump operations because the skb is always
820          * properly sized for single flows.
821          */
822         err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len,
823                       sf_acts->actions);
824         if (err < 0 && skb_orig_len)
825                 goto error;
826
827         return genlmsg_end(skb, ovs_header);
828
829 nla_put_failure:
830         err = -EMSGSIZE;
831 error:
832         genlmsg_cancel(skb, ovs_header);
833         return err;
834 }
835
836 static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
837 {
838         const struct sw_flow_actions *sf_acts;
839         int len;
840
841         sf_acts = rcu_dereference_protected(flow->sf_acts,
842                                             lockdep_genl_is_held());
843
844         /* OVS_FLOW_ATTR_KEY */
845         len = nla_total_size(FLOW_BUFSIZE);
846         /* OVS_FLOW_ATTR_ACTIONS */
847         len += nla_total_size(sf_acts->actions_len);
848         /* OVS_FLOW_ATTR_STATS */
849         len += nla_total_size(sizeof(struct ovs_flow_stats));
850         /* OVS_FLOW_ATTR_TCP_FLAGS */
851         len += nla_total_size(1);
852         /* OVS_FLOW_ATTR_USED */
853         len += nla_total_size(8);
854
855         len += NLMSG_ALIGN(sizeof(struct ovs_header));
856
857         return genlmsg_new(len, GFP_KERNEL);
858 }
859
860 static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
861                                                struct datapath *dp,
862                                                u32 pid, u32 seq, u8 cmd)
863 {
864         struct sk_buff *skb;
865         int retval;
866
867         skb = ovs_flow_cmd_alloc_info(flow);
868         if (!skb)
869                 return ERR_PTR(-ENOMEM);
870
871         retval = ovs_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd);
872         BUG_ON(retval < 0);
873         return skb;
874 }
875
876 static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
877 {
878         struct nlattr **a = info->attrs;
879         struct ovs_header *ovs_header = info->userhdr;
880         struct sw_flow_key key;
881         struct sw_flow *flow;
882         struct sk_buff *reply;
883         struct datapath *dp;
884         struct flow_table *table;
885         int error;
886         int key_len;
887
888         /* Extract key. */
889         error = -EINVAL;
890         if (!a[OVS_FLOW_ATTR_KEY])
891                 goto error;
892         error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
893         if (error)
894                 goto error;
895
896         /* Validate actions. */
897         if (a[OVS_FLOW_ATTR_ACTIONS]) {
898                 error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS], &key,  0);
899                 if (error)
900                         goto error;
901         } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
902                 error = -EINVAL;
903                 goto error;
904         }
905
906         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
907         error = -ENODEV;
908         if (!dp)
909                 goto error;
910
911         table = genl_dereference(dp->table);
912         flow = ovs_flow_tbl_lookup(table, &key, key_len);
913         if (!flow) {
914                 struct sw_flow_actions *acts;
915
916                 /* Bail out if we're not allowed to create a new flow. */
917                 error = -ENOENT;
918                 if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
919                         goto error;
920
921                 /* Expand table, if necessary, to make room. */
922                 if (ovs_flow_tbl_need_to_expand(table)) {
923                         struct flow_table *new_table;
924
925                         new_table = ovs_flow_tbl_expand(table);
926                         if (!IS_ERR(new_table)) {
927                                 rcu_assign_pointer(dp->table, new_table);
928                                 ovs_flow_tbl_deferred_destroy(table);
929                                 table = genl_dereference(dp->table);
930                         }
931                 }
932
933                 /* Allocate flow. */
934                 flow = ovs_flow_alloc();
935                 if (IS_ERR(flow)) {
936                         error = PTR_ERR(flow);
937                         goto error;
938                 }
939                 flow->key = key;
940                 clear_stats(flow);
941
942                 /* Obtain actions. */
943                 acts = ovs_flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
944                 error = PTR_ERR(acts);
945                 if (IS_ERR(acts))
946                         goto error_free_flow;
947                 rcu_assign_pointer(flow->sf_acts, acts);
948
949                 /* Put flow in bucket. */
950                 flow->hash = ovs_flow_hash(&key, key_len);
951                 ovs_flow_tbl_insert(table, flow);
952
953                 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
954                                                 info->snd_seq,
955                                                 OVS_FLOW_CMD_NEW);
956         } else {
957                 /* We found a matching flow. */
958                 struct sw_flow_actions *old_acts;
959                 struct nlattr *acts_attrs;
960
961                 /* Bail out if we're not allowed to modify an existing flow.
962                  * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
963                  * because Generic Netlink treats the latter as a dump
964                  * request.  We also accept NLM_F_EXCL in case that bug ever
965                  * gets fixed.
966                  */
967                 error = -EEXIST;
968                 if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
969                     info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
970                         goto error;
971
972                 /* Update actions. */
973                 old_acts = rcu_dereference_protected(flow->sf_acts,
974                                                      lockdep_genl_is_held());
975                 acts_attrs = a[OVS_FLOW_ATTR_ACTIONS];
976                 if (acts_attrs &&
977                    (old_acts->actions_len != nla_len(acts_attrs) ||
978                    memcmp(old_acts->actions, nla_data(acts_attrs),
979                           old_acts->actions_len))) {
980                         struct sw_flow_actions *new_acts;
981
982                         new_acts = ovs_flow_actions_alloc(acts_attrs);
983                         error = PTR_ERR(new_acts);
984                         if (IS_ERR(new_acts))
985                                 goto error;
986
987                         rcu_assign_pointer(flow->sf_acts, new_acts);
988                         ovs_flow_deferred_free_acts(old_acts);
989                 }
990
991                 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
992                                                info->snd_seq, OVS_FLOW_CMD_NEW);
993
994                 /* Clear stats. */
995                 if (a[OVS_FLOW_ATTR_CLEAR]) {
996                         spin_lock_bh(&flow->lock);
997                         clear_stats(flow);
998                         spin_unlock_bh(&flow->lock);
999                 }
1000         }
1001
1002         if (!IS_ERR(reply))
1003                 genl_notify(reply, genl_info_net(info), info->snd_pid,
1004                            ovs_dp_flow_multicast_group.id, info->nlhdr,
1005                            GFP_KERNEL);
1006         else
1007                 netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
1008                                 ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
1009         return 0;
1010
1011 error_free_flow:
1012         ovs_flow_free(flow);
1013 error:
1014         return error;
1015 }
1016
1017 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1018 {
1019         struct nlattr **a = info->attrs;
1020         struct ovs_header *ovs_header = info->userhdr;
1021         struct sw_flow_key key;
1022         struct sk_buff *reply;
1023         struct sw_flow *flow;
1024         struct datapath *dp;
1025         struct flow_table *table;
1026         int err;
1027         int key_len;
1028
1029         if (!a[OVS_FLOW_ATTR_KEY])
1030                 return -EINVAL;
1031         err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
1032         if (err)
1033                 return err;
1034
1035         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1036         if (!dp)
1037                 return -ENODEV;
1038
1039         table = genl_dereference(dp->table);
1040         flow = ovs_flow_tbl_lookup(table, &key, key_len);
1041         if (!flow)
1042                 return -ENOENT;
1043
1044         reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
1045                                         info->snd_seq, OVS_FLOW_CMD_NEW);
1046         if (IS_ERR(reply))
1047                 return PTR_ERR(reply);
1048
1049         return genlmsg_reply(reply, info);
1050 }
1051
1052 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1053 {
1054         struct nlattr **a = info->attrs;
1055         struct ovs_header *ovs_header = info->userhdr;
1056         struct sw_flow_key key;
1057         struct sk_buff *reply;
1058         struct sw_flow *flow;
1059         struct datapath *dp;
1060         struct flow_table *table;
1061         int err;
1062         int key_len;
1063
1064         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1065         if (!dp)
1066                 return -ENODEV;
1067
1068         if (!a[OVS_FLOW_ATTR_KEY])
1069                 return flush_flows(dp);
1070
1071         err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
1072         if (err)
1073                 return err;
1074
1075         table = genl_dereference(dp->table);
1076         flow = ovs_flow_tbl_lookup(table, &key, key_len);
1077         if (!flow)
1078                 return -ENOENT;
1079
1080         reply = ovs_flow_cmd_alloc_info(flow);
1081         if (!reply)
1082                 return -ENOMEM;
1083
1084         ovs_flow_tbl_remove(table, flow);
1085
1086         err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
1087                                      info->snd_seq, 0, OVS_FLOW_CMD_DEL);
1088         BUG_ON(err < 0);
1089
1090         ovs_flow_deferred_free(flow);
1091
1092         genl_notify(reply, genl_info_net(info), info->snd_pid,
1093                     ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
1094         return 0;
1095 }
1096
1097 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1098 {
1099         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1100         struct datapath *dp;
1101         struct flow_table *table;
1102
1103         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1104         if (!dp)
1105                 return -ENODEV;
1106
1107         table = genl_dereference(dp->table);
1108
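        /* cb->args[] holds the dump cursor: args[0] is the current hash
         * bucket and args[1] the position within that bucket. */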
1109         for (;;) {
1110                 struct sw_flow *flow;
1111                 u32 bucket, obj;
1112
1113                 bucket = cb->args[0];
1114                 obj = cb->args[1];
1115                 flow = ovs_flow_tbl_next(table, &bucket, &obj);
1116                 if (!flow)
1117                         break;
1118
1119                 if (ovs_flow_cmd_fill_info(flow, dp, skb,
1120                                            NETLINK_CB(cb->skb).pid,
1121                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
1122                                            OVS_FLOW_CMD_NEW) < 0)
1123                         break;
1124
1125                 cb->args[0] = bucket;
1126                 cb->args[1] = obj;
1127         }
1128         return skb->len;
1129 }
1130
1131 static struct genl_ops dp_flow_genl_ops[] = {
1132         { .cmd = OVS_FLOW_CMD_NEW,
1133           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1134           .policy = flow_policy,
1135           .doit = ovs_flow_cmd_new_or_set
1136         },
1137         { .cmd = OVS_FLOW_CMD_DEL,
1138           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1139           .policy = flow_policy,
1140           .doit = ovs_flow_cmd_del
1141         },
1142         { .cmd = OVS_FLOW_CMD_GET,
1143           .flags = 0,               /* OK for unprivileged users. */
1144           .policy = flow_policy,
1145           .doit = ovs_flow_cmd_get,
1146           .dumpit = ovs_flow_cmd_dump
1147         },
1148         { .cmd = OVS_FLOW_CMD_SET,
1149           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1150           .policy = flow_policy,
1151           .doit = ovs_flow_cmd_new_or_set,
1152         },
1153 };
1154
1155 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1156         [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1157         [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1158 };
1159
1160 static struct genl_family dp_datapath_genl_family = {
1161         .id = GENL_ID_GENERATE,
1162         .hdrsize = sizeof(struct ovs_header),
1163         .name = OVS_DATAPATH_FAMILY,
1164         .version = OVS_DATAPATH_VERSION,
1165         .maxattr = OVS_DP_ATTR_MAX,
1166         .netnsok = true
1167 };
1168
1169 static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
1170         .name = OVS_DATAPATH_MCGROUP
1171 };
1172
1173 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1174                                 u32 pid, u32 seq, u32 flags, u8 cmd)
1175 {
1176         struct ovs_header *ovs_header;
1177         struct ovs_dp_stats dp_stats;
1178         int err;
1179
1180         ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
1181                                    flags, cmd);
1182         if (!ovs_header)
1183                 goto error;
1184
1185         ovs_header->dp_ifindex = get_dpifindex(dp);
1186
1187         rcu_read_lock();
1188         err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1189         rcu_read_unlock();
1190         if (err)
1191                 goto nla_put_failure;
1192
1193         get_dp_stats(dp, &dp_stats);
1194         if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
1195                 goto nla_put_failure;
1196
1197         return genlmsg_end(skb, ovs_header);
1198
1199 nla_put_failure:
1200         genlmsg_cancel(skb, ovs_header);
1201 error:
1202         return -EMSGSIZE;
1203 }
1204
1205 static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
1206                                              u32 seq, u8 cmd)
1207 {
1208         struct sk_buff *skb;
1209         int retval;
1210
1211         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1212         if (!skb)
1213                 return ERR_PTR(-ENOMEM);
1214
1215         retval = ovs_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd);
1216         if (retval < 0) {
1217                 kfree_skb(skb);
1218                 return ERR_PTR(retval);
1219         }
1220         return skb;
1221 }
1222
1223 /* Called with genl_mutex and optionally with RTNL lock also. */
1224 static struct datapath *lookup_datapath(struct net *net,
1225                                         struct ovs_header *ovs_header,
1226                                         struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1227 {
1228         struct datapath *dp;
1229
1230         if (!a[OVS_DP_ATTR_NAME])
1231                 dp = get_dp(net, ovs_header->dp_ifindex);
1232         else {
1233                 struct vport *vport;
1234
1235                 rcu_read_lock();
1236                 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1237                 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1238                 rcu_read_unlock();
1239         }
1240         return dp ? dp : ERR_PTR(-ENODEV);
1241 }
1242
1243 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1244 {
1245         struct nlattr **a = info->attrs;
1246         struct vport_parms parms;
1247         struct sk_buff *reply;
1248         struct datapath *dp;
1249         struct vport *vport;
1250         struct ovs_net *ovs_net;
1251         int err;
1252
1253         err = -EINVAL;
1254         if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1255                 goto err;
1256
1257         rtnl_lock();
1258
1259         err = -ENOMEM;
1260         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1261         if (dp == NULL)
1262                 goto err_unlock_rtnl;
1263
1264         INIT_LIST_HEAD(&dp->port_list);
1265         ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
1266
1267         /* Allocate table. */
1268         err = -ENOMEM;
1269         rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
1270         if (!dp->table)
1271                 goto err_free_dp;
1272
1273         dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
1274         if (!dp->stats_percpu) {
1275                 err = -ENOMEM;
1276                 goto err_destroy_table;
1277         }
1278
1279         /* Set up our datapath device. */
1280         parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1281         parms.type = OVS_VPORT_TYPE_INTERNAL;
1282         parms.options = NULL;
1283         parms.dp = dp;
1284         parms.port_no = OVSP_LOCAL;
1285         parms.upcall_pid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
1286
1287         vport = new_vport(&parms);
1288         if (IS_ERR(vport)) {
1289                 err = PTR_ERR(vport);
1290                 if (err == -EBUSY)
1291                         err = -EEXIST;
1292
1293                 goto err_destroy_percpu;
1294         }
1295
1296         reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
1297                                       info->snd_seq, OVS_DP_CMD_NEW);
1298         err = PTR_ERR(reply);
1299         if (IS_ERR(reply))
1300                 goto err_destroy_local_port;
1301
1302         ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1303         list_add_tail(&dp->list_node, &ovs_net->dps);
1304         rtnl_unlock();
1305
1306         genl_notify(reply, genl_info_net(info), info->snd_pid,
1307                     ovs_dp_datapath_multicast_group.id, info->nlhdr,
1308                     GFP_KERNEL);
1309         return 0;
1310
1311 err_destroy_local_port:
1312         ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
1313 err_destroy_percpu:
1314         free_percpu(dp->stats_percpu);
1315 err_destroy_table:
1316         ovs_flow_tbl_destroy(genl_dereference(dp->table));
1317 err_free_dp:
1318         release_net(ovs_dp_get_net(dp));
1319         kfree(dp);
1320 err_unlock_rtnl:
1321         rtnl_unlock();
1322 err:
1323         return err;
1324 }
1325
1326 /* Called with genl_mutex. */
1327 static void __dp_destroy(struct datapath *dp)
1328 {
1329         struct vport *vport, *next_vport;
1330
1331         rtnl_lock();
1332         list_for_each_entry_safe(vport, next_vport, &dp->port_list, node)
1333                 if (vport->port_no != OVSP_LOCAL)
1334                         ovs_dp_detach_port(vport);
1335
1336         list_del(&dp->list_node);
1337         ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
1338
1339         /* rtnl_unlock() will wait until all the references to devices that
1340          * are pending unregistration have been dropped.  We do it here to
1341          * ensure that any internal devices (which contain DP pointers) are
1342          * fully destroyed before freeing the datapath.
1343          */
1344         rtnl_unlock();
1345
1346         call_rcu(&dp->rcu, destroy_dp_rcu);
1347 }
1348
1349 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1350 {
1351         struct sk_buff *reply;
1352         struct datapath *dp;
1353         int err;
1354
1355         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1356         err = PTR_ERR(dp);
1357         if (IS_ERR(dp))
1358                 return err;
1359
1360         reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
1361                                       info->snd_seq, OVS_DP_CMD_DEL);
1362         err = PTR_ERR(reply);
1363         if (IS_ERR(reply))
1364                 return err;
1365
1366         __dp_destroy(dp);
1367
1368         genl_notify(reply, genl_info_net(info), info->snd_pid,
1369                     ovs_dp_datapath_multicast_group.id, info->nlhdr,
1370                     GFP_KERNEL);
1371
1372         return 0;
1373 }
1374
1375 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1376 {
1377         struct sk_buff *reply;
1378         struct datapath *dp;
1379         int err;
1380
1381         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1382         if (IS_ERR(dp))
1383                 return PTR_ERR(dp);
1384
1385         reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
1386                                       info->snd_seq, OVS_DP_CMD_NEW);
1387         if (IS_ERR(reply)) {
1388                 err = PTR_ERR(reply);
1389                 netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
1390                                 ovs_dp_datapath_multicast_group.id, err);
1391                 return 0;
1392         }
1393
1394         genl_notify(reply, genl_info_net(info), info->snd_pid,
1395                     ovs_dp_datapath_multicast_group.id, info->nlhdr,
1396                     GFP_KERNEL);
1397
1398         return 0;
1399 }
1400
1401 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1402 {
1403         struct sk_buff *reply;
1404         struct datapath *dp;
1405
1406         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1407         if (IS_ERR(dp))
1408                 return PTR_ERR(dp);
1409
1410         reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
1411                                       info->snd_seq, OVS_DP_CMD_NEW);
1412         if (IS_ERR(reply))
1413                 return PTR_ERR(reply);
1414
1415         return genlmsg_reply(reply, info);
1416 }
1417
1418 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1419 {
1420         struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1421         struct datapath *dp;
1422         int skip = cb->args[0];
1423         int i = 0;
1424
1425         list_for_each_entry(dp, &ovs_net->dps, list_node) {
1426                 if (i >= skip &&
1427                     ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
1428                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
1429                                          OVS_DP_CMD_NEW) < 0)
1430                         break;
1431                 i++;
1432         }
1433
1434         cb->args[0] = i;
1435
1436         return skb->len;
1437 }
1438
1439 static struct genl_ops dp_datapath_genl_ops[] = {
1440         { .cmd = OVS_DP_CMD_NEW,
1441           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1442           .policy = datapath_policy,
1443           .doit = ovs_dp_cmd_new
1444         },
1445         { .cmd = OVS_DP_CMD_DEL,
1446           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1447           .policy = datapath_policy,
1448           .doit = ovs_dp_cmd_del
1449         },
1450         { .cmd = OVS_DP_CMD_GET,
1451           .flags = 0,               /* OK for unprivileged users. */
1452           .policy = datapath_policy,
1453           .doit = ovs_dp_cmd_get,
1454           .dumpit = ovs_dp_cmd_dump
1455         },
1456         { .cmd = OVS_DP_CMD_SET,
1457           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1458           .policy = datapath_policy,
1459           .doit = ovs_dp_cmd_set,
1460         },
1461 };
1462
1463 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
1464         [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1465         [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
1466         [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
1467         [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
1468         [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1469         [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
1470 };
1471
1472 static struct genl_family dp_vport_genl_family = {
1473         .id = GENL_ID_GENERATE,
1474         .hdrsize = sizeof(struct ovs_header),
1475         .name = OVS_VPORT_FAMILY,
1476         .version = OVS_VPORT_VERSION,
1477         .maxattr = OVS_VPORT_ATTR_MAX,
1478         .netnsok = true
1479 };
1480
1481 struct genl_multicast_group ovs_dp_vport_multicast_group = {
1482         .name = OVS_VPORT_MCGROUP
1483 };
1484
1485 /* Called with RTNL lock or RCU read lock. */
1486 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1487                                    u32 pid, u32 seq, u32 flags, u8 cmd)
1488 {
1489         struct ovs_header *ovs_header;
1490         struct ovs_vport_stats vport_stats;
1491         int err;
1492
1493         ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
1494                                  flags, cmd);
1495         if (!ovs_header)
1496                 return -EMSGSIZE;
1497
1498         ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1499
1500         if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1501             nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1502             nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
1503             nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid))
1504                 goto nla_put_failure;
1505
1506         ovs_vport_get_stats(vport, &vport_stats);
1507         if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
1508                     &vport_stats))
1509                 goto nla_put_failure;
1510
1511         err = ovs_vport_get_options(vport, skb);
1512         if (err == -EMSGSIZE)
1513                 goto error;
1514
1515         return genlmsg_end(skb, ovs_header);
1516
1517 nla_put_failure:
1518         err = -EMSGSIZE;
1519 error:
1520         genlmsg_cancel(skb, ovs_header);
1521         return err;
1522 }
1523
1524 /* Called with RTNL lock or RCU read lock. */
1525 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
1526                                          u32 seq, u8 cmd)
1527 {
1528         struct sk_buff *skb;
1529         int retval;
1530
1531         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1532         if (!skb)
1533                 return ERR_PTR(-ENOMEM);
1534
1535         retval = ovs_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd);
1536         if (retval < 0) {
1537                 kfree_skb(skb);
1538                 return ERR_PTR(retval);
1539         }
1540         return skb;
1541 }
1542
1543 /* Called with RTNL lock or RCU read lock. */
1544 static struct vport *lookup_vport(struct net *net,
1545                                   struct ovs_header *ovs_header,
1546                                   struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1547 {
1548         struct datapath *dp;
1549         struct vport *vport;
1550
1551         if (a[OVS_VPORT_ATTR_NAME]) {
1552                 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
1553                 if (!vport)
1554                         return ERR_PTR(-ENODEV);
1555                 if (ovs_header->dp_ifindex &&
1556                     ovs_header->dp_ifindex != get_dpifindex(vport->dp))
1557                         return ERR_PTR(-ENODEV);
1558                 return vport;
1559         } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1560                 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1561
1562                 if (port_no >= DP_MAX_PORTS)
1563                         return ERR_PTR(-EFBIG);
1564
1565                 dp = get_dp(net, ovs_header->dp_ifindex);
1566                 if (!dp)
1567                         return ERR_PTR(-ENODEV);
1568
1569                 vport = rcu_dereference_rtnl(dp->ports[port_no]);
1570                 if (!vport)
1571                         return ERR_PTR(-ENOENT);
1572                 return vport;
1573         } else
1574                 return ERR_PTR(-EINVAL);
1575 }
1576
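/*
 * OVS_VPORT_CMD_NEW: create a vport in an existing datapath.  The request
 * must carry a name, a type and an upcall pid; if OVS_VPORT_ATTR_PORT_NO is
 * absent, the first free port number starting from 1 is chosen.  On success
 * the new vport is announced on the vport multicast group.
 */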
1577 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1578 {
1579         struct nlattr **a = info->attrs;
1580         struct ovs_header *ovs_header = info->userhdr;
1581         struct vport_parms parms;
1582         struct sk_buff *reply;
1583         struct vport *vport;
1584         struct datapath *dp;
1585         u32 port_no;
1586         int err;
1587
1588         err = -EINVAL;
1589         if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
1590             !a[OVS_VPORT_ATTR_UPCALL_PID])
1591                 goto exit;
1592
1593         rtnl_lock();
1594         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1595         err = -ENODEV;
1596         if (!dp)
1597                 goto exit_unlock;
1598
1599         if (a[OVS_VPORT_ATTR_PORT_NO]) {
1600                 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1601
1602                 err = -EFBIG;
1603                 if (port_no >= DP_MAX_PORTS)
1604                         goto exit_unlock;
1605
1606                 vport = rtnl_dereference(dp->ports[port_no]);
1607                 err = -EBUSY;
1608                 if (vport)
1609                         goto exit_unlock;
1610         } else {
1611                 for (port_no = 1; ; port_no++) {
1612                         if (port_no >= DP_MAX_PORTS) {
1613                                 err = -EFBIG;
1614                                 goto exit_unlock;
1615                         }
1616                         vport = rtnl_dereference(dp->ports[port_no]);
1617                         if (!vport)
1618                                 break;
1619                 }
1620         }
1621
1622         parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
1623         parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
1624         parms.options = a[OVS_VPORT_ATTR_OPTIONS];
1625         parms.dp = dp;
1626         parms.port_no = port_no;
1627         parms.upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
1628
1629         vport = new_vport(&parms);
1630         err = PTR_ERR(vport);
1631         if (IS_ERR(vport))
1632                 goto exit_unlock;
1633
1634         reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1635                                          OVS_VPORT_CMD_NEW);
1636         if (IS_ERR(reply)) {
1637                 err = PTR_ERR(reply);
1638                 ovs_dp_detach_port(vport);
1639                 goto exit_unlock;
1640         }
        err = 0;        /* success: don't return the stale PTR_ERR(vport) value */
1641         genl_notify(reply, genl_info_net(info), info->snd_pid,
1642                     ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1643
1644 exit_unlock:
1645         rtnl_unlock();
1646 exit:
1647         return err;
1648 }
1649
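/*
 * OVS_VPORT_CMD_SET: modify an existing vport.  The type cannot be changed
 * (a differing OVS_VPORT_ATTR_TYPE is rejected with -EINVAL); options and
 * the upcall pid may be updated.  A notification with the new state is sent
 * to the vport multicast group; if building that notification fails,
 * listeners are informed through netlink_set_err() instead.
 */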
1650 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1651 {
1652         struct nlattr **a = info->attrs;
1653         struct sk_buff *reply;
1654         struct vport *vport;
1655         int err;
1656
1657         rtnl_lock();
1658         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
1659         err = PTR_ERR(vport);
1660         if (IS_ERR(vport))
1661                 goto exit_unlock;
1662
1663         err = 0;
1664         if (a[OVS_VPORT_ATTR_TYPE] &&
1665             nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
1666                 err = -EINVAL;
1667
1668         if (!err && a[OVS_VPORT_ATTR_OPTIONS])
1669                 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
1670         if (err)
1671                 goto exit_unlock;
1672         if (a[OVS_VPORT_ATTR_UPCALL_PID])
1673                 vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
1674
1675         reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1676                                          OVS_VPORT_CMD_NEW);
1677         if (IS_ERR(reply)) {
1678                 netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
1679                                 ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
1680                 goto exit_unlock;
1681         }
1682
1683         genl_notify(reply, genl_info_net(info), info->snd_pid,
1684                     ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1685
1686 exit_unlock:
1687         rtnl_unlock();
1688         return err;
1689 }
1690
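/*
 * OVS_VPORT_CMD_DEL: detach a vport from its datapath.  Deleting the local
 * port (OVSP_LOCAL) is refused with -EINVAL; any other vport is detached
 * and the deletion announced on the vport multicast group.
 */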
1691 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
1692 {
1693         struct nlattr **a = info->attrs;
1694         struct sk_buff *reply;
1695         struct vport *vport;
1696         int err;
1697
1698         rtnl_lock();
1699         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
1700         err = PTR_ERR(vport);
1701         if (IS_ERR(vport))
1702                 goto exit_unlock;
1703
1704         if (vport->port_no == OVSP_LOCAL) {
1705                 err = -EINVAL;
1706                 goto exit_unlock;
1707         }
1708
1709         reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1710                                          OVS_VPORT_CMD_DEL);
1711         err = PTR_ERR(reply);
1712         if (IS_ERR(reply))
1713                 goto exit_unlock;
1714
        err = 0;        /* success: don't return the stale PTR_ERR(reply) value */
1715         ovs_dp_detach_port(vport);
1716
1717         genl_notify(reply, genl_info_net(info), info->snd_pid,
1718                     ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1719
1720 exit_unlock:
1721         rtnl_unlock();
1722         return err;
1723 }
1724
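/*
 * OVS_VPORT_CMD_GET: look up one vport (by name or port number) under the
 * RCU read lock and unicast a description of it back to the requester.
 */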
1725 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
1726 {
1727         struct nlattr **a = info->attrs;
1728         struct ovs_header *ovs_header = info->userhdr;
1729         struct sk_buff *reply;
1730         struct vport *vport;
1731         int err;
1732
1733         rcu_read_lock();
1734         vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
1735         err = PTR_ERR(vport);
1736         if (IS_ERR(vport))
1737                 goto exit_unlock;
1738
1739         reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1740                                          OVS_VPORT_CMD_NEW);
1741         err = PTR_ERR(reply);
1742         if (IS_ERR(reply))
1743                 goto exit_unlock;
1744
1745         rcu_read_unlock();
1746
1747         return genlmsg_reply(reply, info);
1748
1749 exit_unlock:
1750         rcu_read_unlock();
1751         return err;
1752 }
1753
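/*
 * Dump handler for OVS_VPORT_CMD_GET: walk the port table of the requested
 * datapath and emit one message per vport.  cb->args[0] records the next
 * port number to visit so the walk can resume on the following dump call.
 */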
1754 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1755 {
1756         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1757         struct datapath *dp;
1758         u32 port_no;
1759         int retval;
1760
1761         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1762         if (!dp)
1763                 return -ENODEV;
1764
1765         rcu_read_lock();
1766         for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) {
1767                 struct vport *vport;
1768
1769                 vport = rcu_dereference(dp->ports[port_no]);
1770                 if (!vport)
1771                         continue;
1772
1773                 if (ovs_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid,
1774                                             cb->nlh->nlmsg_seq, NLM_F_MULTI,
1775                                             OVS_VPORT_CMD_NEW) < 0)
1776                         break;
1777         }
1778         rcu_read_unlock();
1779
1780         cb->args[0] = port_no;
1781         retval = skb->len;
1782
1783         return retval;
1784 }
1785
1786 static struct genl_ops dp_vport_genl_ops[] = {
1787         { .cmd = OVS_VPORT_CMD_NEW,
1788           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1789           .policy = vport_policy,
1790           .doit = ovs_vport_cmd_new
1791         },
1792         { .cmd = OVS_VPORT_CMD_DEL,
1793           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1794           .policy = vport_policy,
1795           .doit = ovs_vport_cmd_del
1796         },
1797         { .cmd = OVS_VPORT_CMD_GET,
1798           .flags = 0,               /* OK for unprivileged users. */
1799           .policy = vport_policy,
1800           .doit = ovs_vport_cmd_get,
1801           .dumpit = ovs_vport_cmd_dump
1802         },
1803         { .cmd = OVS_VPORT_CMD_SET,
1804           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1805           .policy = vport_policy,
1806           .doit = ovs_vport_cmd_set,
1807         },
1808 };
1809
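/*
 * Table tying each generic netlink family to its operations and (for all
 * but the packet family) its multicast group; consumed by
 * dp_register_genl() and dp_unregister_genl() below.
 */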
1810 struct genl_family_and_ops {
1811         struct genl_family *family;
1812         struct genl_ops *ops;
1813         int n_ops;
1814         struct genl_multicast_group *group;
1815 };
1816
1817 static const struct genl_family_and_ops dp_genl_families[] = {
1818         { &dp_datapath_genl_family,
1819           dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
1820           &ovs_dp_datapath_multicast_group },
1821         { &dp_vport_genl_family,
1822           dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
1823           &ovs_dp_vport_multicast_group },
1824         { &dp_flow_genl_family,
1825           dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
1826           &ovs_dp_flow_multicast_group },
1827         { &dp_packet_genl_family,
1828           dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
1829           NULL },
1830 };
1831
1832 static void dp_unregister_genl(int n_families)
1833 {
1834         int i;
1835
1836         for (i = 0; i < n_families; i++)
1837                 genl_unregister_family(dp_genl_families[i].family);
1838 }
1839
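/*
 * Register every family in dp_genl_families[] along with its multicast
 * group.  On any failure the families registered so far are unregistered
 * again before the error is returned.
 */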
1840 static int dp_register_genl(void)
1841 {
1842         int n_registered;
1843         int err;
1844         int i;
1845
1846         n_registered = 0;
1847         for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
1848                 const struct genl_family_and_ops *f = &dp_genl_families[i];
1849
1850                 err = genl_register_family_with_ops(f->family, f->ops,
1851                                                     f->n_ops);
1852                 if (err)
1853                         goto error;
1854                 n_registered++;
1855
1856                 if (f->group) {
1857                         err = genl_register_mc_group(f->family, f->group);
1858                         if (err)
1859                                 goto error;
1860                 }
1861         }
1862
1863         return 0;
1864
1865 error:
1866         dp_unregister_genl(n_registered);
1867         return err;
1868 }
1869
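/*
 * Delayed work: periodically replace each datapath's flow table with a
 * rehashed copy (ovs_flow_tbl_rehash()) and destroy the old table after an
 * RCU grace period, then re-arm to run again in REHASH_FLOW_INTERVAL.
 */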
1870 static void rehash_flow_table(struct work_struct *work)
1871 {
1872         struct datapath *dp;
1873         struct net *net;
1874
1875         genl_lock();
1876         rtnl_lock();
1877         for_each_net(net) {
1878                 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
1879
1880                 list_for_each_entry(dp, &ovs_net->dps, list_node) {
1881                         struct flow_table *old_table = genl_dereference(dp->table);
1882                         struct flow_table *new_table;
1883
1884                         new_table = ovs_flow_tbl_rehash(old_table);
1885                         if (!IS_ERR(new_table)) {
1886                                 rcu_assign_pointer(dp->table, new_table);
1887                                 ovs_flow_tbl_deferred_destroy(old_table);
1888                         }
1889                 }
1890         }
1891         rtnl_unlock();
1892         genl_unlock();
1893
1894         schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
1895 }
1896
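/*
 * Per-network-namespace setup and teardown: each namespace gets its own
 * list of datapaths, and all of them are destroyed when the namespace
 * exits.
 */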
1897 static int __net_init ovs_init_net(struct net *net)
1898 {
1899         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
1900
1901         INIT_LIST_HEAD(&ovs_net->dps);
1902         return 0;
1903 }
1904
1905 static void __net_exit ovs_exit_net(struct net *net)
1906 {
1907         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
1908         struct datapath *dp, *dp_next;
1909
1910         genl_lock();
1911         list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
1912                 __dp_destroy(dp);
1913         genl_unlock();
1914 }
1915
1916 static struct pernet_operations ovs_net_ops = {
1917         .init = ovs_init_net,
1918         .exit = ovs_exit_net,
1919         .id   = &ovs_net_id,
1920         .size = sizeof(struct ovs_net),
1921 };
1922
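/*
 * Module init: bring up the flow and vport subsystems, register the pernet
 * device, the netdevice notifier and the generic netlink families, then
 * kick off the periodic flow-table rehash.  Each step unwinds the previous
 * ones on failure.
 */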
1923 static int __init dp_init(void)
1924 {
1925         struct sk_buff *dummy_skb;
1926         int err;
1927
1928         BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));
1929
1930         pr_info("Open vSwitch switching datapath\n");
1931
1932         err = ovs_flow_init();
1933         if (err)
1934                 goto error;
1935
1936         err = ovs_vport_init();
1937         if (err)
1938                 goto error_flow_exit;
1939
1940         err = register_pernet_device(&ovs_net_ops);
1941         if (err)
1942                 goto error_vport_exit;
1943
1944         err = register_netdevice_notifier(&ovs_dp_device_notifier);
1945         if (err)
1946                 goto error_netns_exit;
1947
1948         err = dp_register_genl();
1949         if (err < 0)
1950                 goto error_unreg_notifier;
1951
1952         schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
1953
1954         return 0;
1955
1956 error_unreg_notifier:
1957         unregister_netdevice_notifier(&ovs_dp_device_notifier);
1958 error_netns_exit:
1959         unregister_pernet_device(&ovs_net_ops);
1960 error_vport_exit:
1961         ovs_vport_exit();
1962 error_flow_exit:
1963         ovs_flow_exit();
1964 error:
1965         return err;
1966 }
1967
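/*
 * Module exit: undo dp_init() in reverse order.  rcu_barrier() ensures all
 * outstanding RCU callbacks (such as deferred flow-table frees) have run
 * before the vport and flow caches are torn down.
 */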
1968 static void dp_cleanup(void)
1969 {
1970         cancel_delayed_work_sync(&rehash_flow_wq);
1971         dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
1972         unregister_netdevice_notifier(&ovs_dp_device_notifier);
1973         unregister_pernet_device(&ovs_net_ops);
1974         rcu_barrier();
1975         ovs_vport_exit();
1976         ovs_flow_exit();
1977 }
1978
1979 module_init(dp_init);
1980 module_exit(dp_cleanup);
1981
1982 MODULE_DESCRIPTION("Open vSwitch switching datapath");
1983 MODULE_LICENSE("GPL");