/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 * (C) 2007 by Patrick McHardy <kaber@trash.net>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/netfilter/nf_queue.h>
#include <net/netfilter/nfnetlink_queue.h>

#include <linux/atomic.h>

#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024

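/*
 * One nfqnl_instance exists per queue number bound by a userspace
 * listener.  Instances live in an RCU-protected hash table keyed by
 * queue number; the per-packet state (queue_list, queue_total,
 * id_sequence) is protected by ->lock.
 */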
struct nfqnl_instance {
        struct hlist_node hlist;                /* global list of queues */
        struct rcu_head rcu;

        int peer_pid;
        unsigned int queue_maxlen;
        unsigned int copy_range;
        unsigned int queue_dropped;
        unsigned int queue_user_dropped;

        u_int16_t queue_num;                    /* number of this queue */
        u_int8_t copy_mode;
        u_int32_t flags;                        /* Set using NFQA_CFG_FLAGS */
/*
 * Following fields are dirtied for each queued packet,
 * keep them in same cache line if possible.
 */
        spinlock_t      lock;
        unsigned int    queue_total;
        unsigned int    id_sequence;            /* 'sequence' of pkt ids */
        struct list_head queue_list;            /* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);

static DEFINE_SPINLOCK(instances_lock);

#define INSTANCE_BUCKETS        16
static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly;

static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
        return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
}

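/* Find a queue instance by number.  Callers run under rcu_read_lock()
 * or hold instances_lock, so the RCU hlist walk is safe. */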
static struct nfqnl_instance *
instance_lookup(u_int16_t queue_num)
{
        struct hlist_head *head;
        struct hlist_node *pos;
        struct nfqnl_instance *inst;

        head = &instance_table[instance_hashfn(queue_num)];
        hlist_for_each_entry_rcu(inst, pos, head, hlist) {
                if (inst->queue_num == queue_num)
                        return inst;
        }
        return NULL;
}

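/* Allocate a new queue instance, bind it to the netlink pid of the
 * requesting listener and publish it in the instance table.  Fails
 * with -EEXIST if the queue number is already taken. */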
static struct nfqnl_instance *
instance_create(u_int16_t queue_num, int pid)
{
        struct nfqnl_instance *inst;
        unsigned int h;
        int err;

        spin_lock(&instances_lock);
        if (instance_lookup(queue_num)) {
                err = -EEXIST;
                goto out_unlock;
        }

        inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
        if (!inst) {
                err = -ENOMEM;
                goto out_unlock;
        }

        inst->queue_num = queue_num;
        inst->peer_pid = pid;
        inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
        inst->copy_range = 0xfffff;
        inst->copy_mode = NFQNL_COPY_NONE;
        spin_lock_init(&inst->lock);
        INIT_LIST_HEAD(&inst->queue_list);

        if (!try_module_get(THIS_MODULE)) {
                err = -EAGAIN;
                goto out_free;
        }

        h = instance_hashfn(queue_num);
        hlist_add_head_rcu(&inst->hlist, &instance_table[h]);

        spin_unlock(&instances_lock);

        return inst;

out_free:
        kfree(inst);
out_unlock:
        spin_unlock(&instances_lock);
        return ERR_PTR(err);
}

static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
                        unsigned long data);

static void
instance_destroy_rcu(struct rcu_head *head)
{
        struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
                                                   rcu);

        nfqnl_flush(inst, NULL, 0);
        kfree(inst);
        module_put(THIS_MODULE);
}

static void
__instance_destroy(struct nfqnl_instance *inst)
{
        hlist_del_rcu(&inst->hlist);
        call_rcu(&inst->rcu, instance_destroy_rcu);
}

static void
instance_destroy(struct nfqnl_instance *inst)
{
        spin_lock(&instances_lock);
        __instance_destroy(inst);
        spin_unlock(&instances_lock);
}

static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
        list_add_tail(&entry->list, &queue->queue_list);
        queue->queue_total++;
}

static void
__dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
        list_del(&entry->list);
        queue->queue_total--;
}

static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
        struct nf_queue_entry *entry = NULL, *i;

        spin_lock_bh(&queue->lock);

        list_for_each_entry(i, &queue->queue_list, list) {
                if (i->id == id) {
                        entry = i;
                        break;
                }
        }

        if (entry)
                __dequeue_entry(queue, entry);

        spin_unlock_bh(&queue->lock);

        return entry;
}

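/* Remove and NF_DROP-reinject every queued entry for which cmpfn
 * returns true; a NULL cmpfn flushes the whole queue. */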
static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
        struct nf_queue_entry *entry, *next;

        spin_lock_bh(&queue->lock);
        list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
                if (!cmpfn || cmpfn(entry, data)) {
                        list_del(&entry->list);
                        queue->queue_total--;
                        nf_reinject(entry, NF_DROP);
                }
        }
        spin_unlock_bh(&queue->lock);
}

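/*
 * Build the NFQNL_MSG_PACKET message sent to userspace for one queued
 * packet.  The layout, as assembled below, is:
 *
 *   struct nlmsghdr
 *   struct nfgenmsg          (family, version, queue number)
 *   NFQA_PACKET_HDR          (packet id, hook, hw protocol)
 *   NFQA_IFINDEX_*           (optional input/output ifindexes)
 *   NFQA_MARK                (optional skb mark)
 *   NFQA_HWADDR              (optional link-layer address)
 *   NFQA_TIMESTAMP           (optional receive timestamp)
 *   NFQA_PAYLOAD             (packet data, NFQNL_COPY_PACKET mode only)
 *
 * The packet id is filled in later under the queue lock, via
 * *packet_id_ptr.
 */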
static struct sk_buff *
nfqnl_build_packet_message(struct nfqnl_instance *queue,
                           struct nf_queue_entry *entry,
                           __be32 **packet_id_ptr)
{
        sk_buff_data_t old_tail;
        size_t size;
        size_t data_len = 0;
        struct sk_buff *skb;
        struct nlattr *nla;
        struct nfqnl_msg_packet_hdr *pmsg;
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
        struct sk_buff *entskb = entry->skb;
        struct net_device *indev;
        struct net_device *outdev;
        struct nf_conn *ct = NULL;
        enum ip_conntrack_info uninitialized_var(ctinfo);

        size =    NLMSG_SPACE(sizeof(struct nfgenmsg))
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
#endif
                + nla_total_size(sizeof(u_int32_t))     /* mark */
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
                + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));

        outdev = entry->outdev;

        switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
        case NFQNL_COPY_META:
        case NFQNL_COPY_NONE:
                break;

        case NFQNL_COPY_PACKET:
                if (entskb->ip_summed == CHECKSUM_PARTIAL &&
                    skb_checksum_help(entskb))
                        return NULL;

                data_len = ACCESS_ONCE(queue->copy_range);
                if (data_len == 0 || data_len > entskb->len)
                        data_len = entskb->len;

                size += nla_total_size(data_len);
                break;
        }

        if (queue->flags & NFQA_CFG_F_CONNTRACK)
                ct = nfqnl_ct_get(entskb, &size, &ctinfo);

        skb = alloc_skb(size, GFP_ATOMIC);
        if (!skb)
                return NULL;

        old_tail = skb->tail;
        nlh = nlmsg_put(skb, 0, 0,
                        NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
                        sizeof(struct nfgenmsg), 0);
        if (!nlh) {
                kfree_skb(skb);
                return NULL;
        }
        nfmsg = nlmsg_data(nlh);
        nfmsg->nfgen_family = entry->pf;
        nfmsg->version = NFNETLINK_V0;
        nfmsg->res_id = htons(queue->queue_num);

        nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
        pmsg = nla_data(nla);
        pmsg->hw_protocol       = entskb->protocol;
        pmsg->hook              = entry->hook;
        *packet_id_ptr          = &pmsg->packet_id;

        indev = entry->indev;
        if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
                if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
                        goto nla_put_failure;
#else
                if (entry->pf == PF_BRIDGE) {
                        /* Case 1: indev is physical input device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
                                         htonl(indev->ifindex)) ||
                        /* this is the bridge group "brX" */
                        /* rcu_read_lock()ed by __nf_queue */
                            nla_put_be32(skb, NFQA_IFINDEX_INDEV,
                                         htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
                                goto nla_put_failure;
                } else {
                        /* Case 2: indev is bridge group, we need to look for
                         * physical device (when called from ipv4) */
                        if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
                                         htonl(indev->ifindex)))
                                goto nla_put_failure;
                        if (entskb->nf_bridge && entskb->nf_bridge->physindev &&
                            nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
                                         htonl(entskb->nf_bridge->physindev->ifindex)))
                                goto nla_put_failure;
                }
#endif
        }

        if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
                if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
                        goto nla_put_failure;
#else
                if (entry->pf == PF_BRIDGE) {
                        /* Case 1: outdev is physical output device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                         htonl(outdev->ifindex)) ||
                        /* this is the bridge group "brX" */
                        /* rcu_read_lock()ed by __nf_queue */
                            nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
                                         htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
                                goto nla_put_failure;
                } else {
                        /* Case 2: outdev is bridge group, we need to look for
                         * physical output device (when called from ipv4) */
                        if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
                                         htonl(outdev->ifindex)))
                                goto nla_put_failure;
                        if (entskb->nf_bridge && entskb->nf_bridge->physoutdev &&
                            nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                         htonl(entskb->nf_bridge->physoutdev->ifindex)))
                                goto nla_put_failure;
                }
#endif
        }

        if (entskb->mark &&
            nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
                goto nla_put_failure;

        if (indev && entskb->dev &&
            entskb->mac_header != entskb->network_header) {
                struct nfqnl_msg_packet_hw phw;
                int len = dev_parse_header(entskb, phw.hw_addr);
                if (len) {
                        phw.hw_addrlen = htons(len);
                        if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
                                goto nla_put_failure;
                }
        }

        if (entskb->tstamp.tv64) {
                struct nfqnl_msg_packet_timestamp ts;
                struct timeval tv = ktime_to_timeval(entskb->tstamp);
                ts.sec = cpu_to_be64(tv.tv_sec);
                ts.usec = cpu_to_be64(tv.tv_usec);

                if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
                        goto nla_put_failure;
        }

        if (data_len) {
                struct nlattr *nla;
                int sz = nla_attr_size(data_len);

                if (skb_tailroom(skb) < nla_total_size(data_len)) {
                        printk(KERN_WARNING "nf_queue: no tailroom!\n");
                        kfree_skb(skb);
                        return NULL;
                }

                nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
                nla->nla_type = NFQA_PAYLOAD;
                nla->nla_len = sz;

                if (skb_copy_bits(entskb, 0, nla_data(nla), data_len))
                        BUG();
        }

        if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0)
                goto nla_put_failure;

        nlh->nlmsg_len = skb->tail - old_tail;
        return skb;

nla_put_failure:
        if (skb)
                kfree_skb(skb);
        net_err_ratelimited("nf_queue: error creating packet message\n");
        return NULL;
}

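/*
 * Queue handler entry point, called via nf_queue() for every packet
 * that a ruleset verdict of NF_QUEUE directed to this queue number.
 * Builds the netlink message, unicasts it to the bound listener and,
 * on success, parks the entry on the queue until a verdict arrives.
 * With NFQA_CFG_F_FAIL_OPEN set, a full queue accepts packets instead
 * of dropping them.
 */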
static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
        struct sk_buff *nskb;
        struct nfqnl_instance *queue;
        int err = -ENOBUFS;
        __be32 *packet_id_ptr;
        int failopen = 0;

        /* rcu_read_lock()ed by nf_hook_slow() */
        queue = instance_lookup(queuenum);
        if (!queue) {
                err = -ESRCH;
                goto err_out;
        }

        if (queue->copy_mode == NFQNL_COPY_NONE) {
                err = -EINVAL;
                goto err_out;
        }

        nskb = nfqnl_build_packet_message(queue, entry, &packet_id_ptr);
        if (nskb == NULL) {
                err = -ENOMEM;
                goto err_out;
        }
        spin_lock_bh(&queue->lock);

        if (!queue->peer_pid) {
                err = -EINVAL;
                goto err_out_free_nskb;
        }
        if (queue->queue_total >= queue->queue_maxlen) {
                if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
                        failopen = 1;
                        err = 0;
                } else {
                        queue->queue_dropped++;
                        net_warn_ratelimited("nf_queue: full at %d entries, dropping packet(s)\n",
                                             queue->queue_total);
                }
                goto err_out_free_nskb;
        }
        entry->id = ++queue->id_sequence;
        *packet_id_ptr = htonl(entry->id);

        /* nfnetlink_unicast will either free the nskb or add it to a socket */
        err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT);
        if (err < 0) {
                queue->queue_user_dropped++;
                goto err_out_unlock;
        }

        __enqueue_entry(queue, entry);

        spin_unlock_bh(&queue->lock);
        return 0;

err_out_free_nskb:
        kfree_skb(nskb);
err_out_unlock:
        spin_unlock_bh(&queue->lock);
        if (failopen)
                nf_reinject(entry, NF_ACCEPT);
err_out:
        return err;
}

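/* Replace the queued packet's payload with the data supplied in an
 * NFQA_PAYLOAD attribute, growing or trimming the skb as needed.
 * The checksum is invalidated since the contents changed. */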
static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
{
        struct sk_buff *nskb;

        if (diff < 0) {
                if (pskb_trim(e->skb, data_len))
                        return -ENOMEM;
        } else if (diff > 0) {
                if (data_len > 0xFFFF)
                        return -EINVAL;
                if (diff > skb_tailroom(e->skb)) {
                        nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
                                               diff, GFP_ATOMIC);
                        if (!nskb) {
                                printk(KERN_WARNING "nf_queue: OOM "
                                      "in mangle, dropping packet\n");
                                return -ENOMEM;
                        }
                        kfree_skb(e->skb);
                        e->skb = nskb;
                }
                skb_put(e->skb, diff);
        }
        if (!skb_make_writable(e->skb, data_len))
                return -ENOMEM;
        skb_copy_to_linear_data(e->skb, data, data_len);
        e->skb->ip_summed = CHECKSUM_NONE;
        return 0;
}

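/* Apply an NFQA_CFG_PARAMS request.  NFQNL_COPY_NONE queues nothing,
 * NFQNL_COPY_META sends metadata only, and NFQNL_COPY_PACKET also
 * copies up to copy_range bytes of payload to userspace. */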
static int
nfqnl_set_mode(struct nfqnl_instance *queue,
               unsigned char mode, unsigned int range)
{
        int status = 0;

        spin_lock_bh(&queue->lock);
        switch (mode) {
        case NFQNL_COPY_NONE:
        case NFQNL_COPY_META:
                queue->copy_mode = mode;
                queue->copy_range = 0;
                break;

        case NFQNL_COPY_PACKET:
                queue->copy_mode = mode;
                /* we're using struct nlattr which has 16bit nla_len */
                if (range > 0xffff)
                        queue->copy_range = 0xffff;
                else
                        queue->copy_range = range;
                break;

        default:
                status = -EINVAL;
        }
        spin_unlock_bh(&queue->lock);

        return status;
}

static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
        if (entry->indev)
                if (entry->indev->ifindex == ifindex)
                        return 1;
        if (entry->outdev)
                if (entry->outdev->ifindex == ifindex)
                        return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
        if (entry->skb->nf_bridge) {
                if (entry->skb->nf_bridge->physindev &&
                    entry->skb->nf_bridge->physindev->ifindex == ifindex)
                        return 1;
                if (entry->skb->nf_bridge->physoutdev &&
                    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
                        return 1;
        }
#endif
        return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(int ifindex)
{
        int i;

        rcu_read_lock();

        for (i = 0; i < INSTANCE_BUCKETS; i++) {
                struct hlist_node *tmp;
                struct nfqnl_instance *inst;
                struct hlist_head *head = &instance_table[i];

                hlist_for_each_entry_rcu(inst, tmp, head, hlist)
                        nfqnl_flush(inst, dev_cmp, ifindex);
        }

        rcu_read_unlock();
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
                    unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;

        /* Drop any packets associated with the downed device */
        if (event == NETDEV_DOWN)
                nfqnl_dev_drop(dev->ifindex);
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
        .notifier_call  = nfqnl_rcv_dev_event,
};

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
                   unsigned long event, void *ptr)
{
        struct netlink_notify *n = ptr;

        if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
                int i;

                /* destroy all instances for this pid */
                spin_lock(&instances_lock);
                for (i = 0; i < INSTANCE_BUCKETS; i++) {
                        struct hlist_node *tmp, *t2;
                        struct nfqnl_instance *inst;
                        struct hlist_head *head = &instance_table[i];

                        hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
                                if ((n->net == &init_net) &&
                                    (n->pid == inst->peer_pid))
                                        __instance_destroy(inst);
                        }
                }
                spin_unlock(&instances_lock);
        }
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
        .notifier_call  = nfqnl_rcv_nl_event,
};

static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
        [NFQA_VERDICT_HDR]      = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
        [NFQA_MARK]             = { .type = NLA_U32 },
        [NFQA_PAYLOAD]          = { .type = NLA_UNSPEC },
        [NFQA_CT]               = { .type = NLA_UNSPEC },
};

static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
        [NFQA_VERDICT_HDR]      = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
        [NFQA_MARK]             = { .type = NLA_U32 },
};

static struct nfqnl_instance *verdict_instance_lookup(u16 queue_num, int nlpid)
{
        struct nfqnl_instance *queue;

        queue = instance_lookup(queue_num);
        if (!queue)
                return ERR_PTR(-ENODEV);

        if (queue->peer_pid != nlpid)
                return ERR_PTR(-EPERM);

        return queue;
}

static struct nfqnl_msg_verdict_hdr *
verdicthdr_get(const struct nlattr * const nfqa[])
{
        struct nfqnl_msg_verdict_hdr *vhdr;
        unsigned int verdict;

        if (!nfqa[NFQA_VERDICT_HDR])
                return NULL;

        vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
        verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
        if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
                return NULL;
        return vhdr;
}

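/* Wraparound-safe sequence comparison: true if id comes after max,
 * treating the 32-bit packet-id space as circular. */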
static int nfq_id_after(unsigned int id, unsigned int max)
{
        return (int)(id - max) > 0;
}

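/* NFQNL_MSG_VERDICT_BATCH: apply one verdict to every queued packet
 * whose id is not after the id in the verdict header.  Entries are
 * moved off the queue under the lock, then reinjected outside it. */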
static int
nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb,
                         const struct nlmsghdr *nlh,
                         const struct nlattr * const nfqa[])
{
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        struct nf_queue_entry *entry, *tmp;
        unsigned int verdict, maxid;
        struct nfqnl_msg_verdict_hdr *vhdr;
        struct nfqnl_instance *queue;
        LIST_HEAD(batch_list);
        u16 queue_num = ntohs(nfmsg->res_id);

        queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).pid);
        if (IS_ERR(queue))
                return PTR_ERR(queue);

        vhdr = verdicthdr_get(nfqa);
        if (!vhdr)
                return -EINVAL;

        verdict = ntohl(vhdr->verdict);
        maxid = ntohl(vhdr->id);

        spin_lock_bh(&queue->lock);

        list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
                if (nfq_id_after(entry->id, maxid))
                        break;
                __dequeue_entry(queue, entry);
                list_add_tail(&entry->list, &batch_list);
        }

        spin_unlock_bh(&queue->lock);

        if (list_empty(&batch_list))
                return -ENOENT;

        list_for_each_entry_safe(entry, tmp, &batch_list, list) {
                if (nfqa[NFQA_MARK])
                        entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
                nf_reinject(entry, verdict);
        }
        return 0;
}

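/* NFQNL_MSG_VERDICT: look up the queued packet by id, optionally
 * update conntrack state, mangle the payload and set the mark, then
 * reinject it into the stack with the requested verdict. */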
static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
                   const struct nlmsghdr *nlh,
                   const struct nlattr * const nfqa[])
{
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);

        struct nfqnl_msg_verdict_hdr *vhdr;
        struct nfqnl_instance *queue;
        unsigned int verdict;
        struct nf_queue_entry *entry;
        enum ip_conntrack_info uninitialized_var(ctinfo);
        struct nf_conn *ct = NULL;

        queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).pid);
        if (IS_ERR(queue))
                return PTR_ERR(queue);

        vhdr = verdicthdr_get(nfqa);
        if (!vhdr)
                return -EINVAL;

        verdict = ntohl(vhdr->verdict);

        entry = find_dequeue_entry(queue, ntohl(vhdr->id));
        if (entry == NULL)
                return -ENOENT;

        rcu_read_lock();
        if (nfqa[NFQA_CT] && (queue->flags & NFQA_CFG_F_CONNTRACK))
                ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo);

        if (nfqa[NFQA_PAYLOAD]) {
                u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
                int diff = payload_len - entry->skb->len;

                if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
                                 payload_len, entry, diff) < 0)
                        verdict = NF_DROP;

                if (ct)
                        nfqnl_ct_seq_adjust(skb, ct, ctinfo, diff);
        }
        rcu_read_unlock();

        if (nfqa[NFQA_MARK])
                entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

        nf_reinject(entry, verdict);
        return 0;
}

static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
                  const struct nlmsghdr *nlh,
                  const struct nlattr * const nfqa[])
{
        return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
        [NFQA_CFG_CMD]          = { .len = sizeof(struct nfqnl_msg_config_cmd) },
        [NFQA_CFG_PARAMS]       = { .len = sizeof(struct nfqnl_msg_config_params) },
};

static const struct nf_queue_handler nfqh = {
        .name   = "nf_queue",
        .outfn  = &nfqnl_enqueue_packet,
};

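/*
 * NFQNL_MSG_CONFIG: bind/unbind a queue or a protocol family to this
 * handler and update per-queue parameters (copy mode/range, maximum
 * queue length, flags).  Flag updates require an accompanying
 * NFQA_CFG_MASK naming the bits being changed.
 */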
static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
                  const struct nlmsghdr *nlh,
                  const struct nlattr * const nfqa[])
{
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);
        struct nfqnl_instance *queue;
        struct nfqnl_msg_config_cmd *cmd = NULL;
        int ret = 0;

        if (nfqa[NFQA_CFG_CMD]) {
                cmd = nla_data(nfqa[NFQA_CFG_CMD]);

                /* Commands without queue context - might sleep */
                switch (cmd->command) {
                case NFQNL_CFG_CMD_PF_BIND:
                        return nf_register_queue_handler(ntohs(cmd->pf),
                                                         &nfqh);
                case NFQNL_CFG_CMD_PF_UNBIND:
                        return nf_unregister_queue_handler(ntohs(cmd->pf),
                                                           &nfqh);
                }
        }

        rcu_read_lock();
        queue = instance_lookup(queue_num);
        if (queue && queue->peer_pid != NETLINK_CB(skb).pid) {
                ret = -EPERM;
                goto err_out_unlock;
        }

        if (cmd != NULL) {
                switch (cmd->command) {
                case NFQNL_CFG_CMD_BIND:
                        if (queue) {
                                ret = -EBUSY;
                                goto err_out_unlock;
                        }
                        queue = instance_create(queue_num, NETLINK_CB(skb).pid);
                        if (IS_ERR(queue)) {
                                ret = PTR_ERR(queue);
                                goto err_out_unlock;
                        }
                        break;
                case NFQNL_CFG_CMD_UNBIND:
                        if (!queue) {
                                ret = -ENODEV;
                                goto err_out_unlock;
                        }
                        instance_destroy(queue);
                        break;
                case NFQNL_CFG_CMD_PF_BIND:
                case NFQNL_CFG_CMD_PF_UNBIND:
                        break;
                default:
                        ret = -ENOTSUPP;
                        break;
                }
        }

        if (nfqa[NFQA_CFG_PARAMS]) {
                struct nfqnl_msg_config_params *params;

                if (!queue) {
                        ret = -ENODEV;
                        goto err_out_unlock;
                }
                params = nla_data(nfqa[NFQA_CFG_PARAMS]);
                nfqnl_set_mode(queue, params->copy_mode,
                                ntohl(params->copy_range));
        }

        if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
                __be32 *queue_maxlen;

                if (!queue) {
                        ret = -ENODEV;
                        goto err_out_unlock;
                }
                queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
                spin_lock_bh(&queue->lock);
                queue->queue_maxlen = ntohl(*queue_maxlen);
                spin_unlock_bh(&queue->lock);
        }

        if (nfqa[NFQA_CFG_FLAGS]) {
                __u32 flags, mask;

                if (!queue) {
                        ret = -ENODEV;
                        goto err_out_unlock;
                }

                if (!nfqa[NFQA_CFG_MASK]) {
                        /* A mask is needed to specify which flags are being
                         * changed.
                         */
                        ret = -EINVAL;
                        goto err_out_unlock;
                }

                flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
                mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));

                if (flags >= NFQA_CFG_F_MAX) {
                        ret = -EOPNOTSUPP;
                        goto err_out_unlock;
                }

                spin_lock_bh(&queue->lock);
                queue->flags &= ~mask;
                queue->flags |= flags & mask;
                spin_unlock_bh(&queue->lock);
        }

err_out_unlock:
        rcu_read_unlock();
        return ret;
}

static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
        [NFQNL_MSG_PACKET]      = { .call_rcu = nfqnl_recv_unsupp,
                                    .attr_count = NFQA_MAX, },
        [NFQNL_MSG_VERDICT]     = { .call_rcu = nfqnl_recv_verdict,
                                    .attr_count = NFQA_MAX,
                                    .policy = nfqa_verdict_policy },
        [NFQNL_MSG_CONFIG]      = { .call = nfqnl_recv_config,
                                    .attr_count = NFQA_CFG_MAX,
                                    .policy = nfqa_cfg_policy },
        [NFQNL_MSG_VERDICT_BATCH] = { .call_rcu = nfqnl_recv_verdict_batch,
                                    .attr_count = NFQA_MAX,
                                    .policy = nfqa_verdict_batch_policy },
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
        .name           = "nf_queue",
        .subsys_id      = NFNL_SUBSYS_QUEUE,
        .cb_count       = NFQNL_MSG_MAX,
        .cb             = nfqnl_cb,
};

#ifdef CONFIG_PROC_FS
struct iter_state {
        unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
        struct iter_state *st = seq->private;

        if (!st)
                return NULL;

        for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
                if (!hlist_empty(&instance_table[st->bucket]))
                        return instance_table[st->bucket].first;
        }
        return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
        struct iter_state *st = seq->private;

        h = h->next;
        while (!h) {
                if (++st->bucket >= INSTANCE_BUCKETS)
                        return NULL;

                h = instance_table[st->bucket].first;
        }
        return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head;
        head = get_first(seq);

        if (head)
                while (pos && (head = get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(instances_lock)
{
        spin_lock(&instances_lock);
        return get_idx(seq, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        (*pos)++;
        return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
        __releases(instances_lock)
{
        spin_unlock(&instances_lock);
}

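/* One line per queue instance in /proc/net/netfilter/nfnetlink_queue:
 * queue number, peer pid, queue total, copy mode, copy range,
 * queue_dropped, queue_user_dropped, id sequence, and a trailing
 * constant 1. */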
static int seq_show(struct seq_file *s, void *v)
{
        const struct nfqnl_instance *inst = v;

        return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
                          inst->queue_num,
                          inst->peer_pid, inst->queue_total,
                          inst->copy_mode, inst->copy_range,
                          inst->queue_dropped, inst->queue_user_dropped,
                          inst->id_sequence, 1);
}

static const struct seq_operations nfqnl_seq_ops = {
        .start  = seq_start,
        .next   = seq_next,
        .stop   = seq_stop,
        .show   = seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
        return seq_open_private(file, &nfqnl_seq_ops,
                        sizeof(struct iter_state));
}

static const struct file_operations nfqnl_file_ops = {
        .owner   = THIS_MODULE,
        .open    = nfqnl_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

#endif /* PROC_FS */

static int __init nfnetlink_queue_init(void)
{
        int i, status = -ENOMEM;

        for (i = 0; i < INSTANCE_BUCKETS; i++)
                INIT_HLIST_HEAD(&instance_table[i]);

        netlink_register_notifier(&nfqnl_rtnl_notifier);
        status = nfnetlink_subsys_register(&nfqnl_subsys);
        if (status < 0) {
                printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
                goto cleanup_netlink_notifier;
        }

#ifdef CONFIG_PROC_FS
        if (!proc_create("nfnetlink_queue", 0440,
                         proc_net_netfilter, &nfqnl_file_ops)) {
                status = -ENOMEM;
                goto cleanup_subsys;
        }
#endif

        register_netdevice_notifier(&nfqnl_dev_notifier);
        return status;

#ifdef CONFIG_PROC_FS
cleanup_subsys:
        nfnetlink_subsys_unregister(&nfqnl_subsys);
#endif
cleanup_netlink_notifier:
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
        return status;
}

static void __exit nfnetlink_queue_fini(void)
{
        nf_unregister_queue_handlers(&nfqh);
        unregister_netdevice_notifier(&nfqnl_dev_notifier);
#ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
#endif
        nfnetlink_subsys_unregister(&nfqnl_subsys);
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);

        rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);