/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 * (C) 2007 by Patrick McHardy <kaber@trash.net>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
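
/*
 * Illustrative userspace counterpart (not part of this module): a minimal
 * sketch of how a program might bind to queue 0 via libnetfilter_queue,
 * assuming the conventional nfq_* API; setup details and error handling
 * are omitted.
 *
 *	#include <libnetfilter_queue/libnetfilter_queue.h>
 *
 *	static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
 *		      struct nfq_data *nfa, void *data)
 *	{
 *		u_int32_t id = ntohl(nfq_get_msg_packet_hdr(nfa)->packet_id);
 *		return nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
 *	}
 *
 *	h = nfq_open();
 *	qh = nfq_create_queue(h, 0, &cb, NULL);
 *	nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);
 *	while ((rv = recv(nfq_fd(h), buf, sizeof(buf), 0)) >= 0)
 *		nfq_handle_packet(h, buf, rv);
 */
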
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/netfilter/nf_queue.h>

#include <linux/atomic.h>

#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024

struct nfqnl_instance {
        struct hlist_node hlist;                /* global list of queues */
        struct rcu_head rcu;

        int peer_pid;
        unsigned int queue_maxlen;
        unsigned int copy_range;
        unsigned int queue_dropped;
        unsigned int queue_user_dropped;

        u_int16_t queue_num;                    /* number of this queue */
        u_int8_t copy_mode;
/*
 * Following fields are dirtied for each queued packet,
 * keep them in same cache line if possible.
 */
        spinlock_t      lock;
        unsigned int    queue_total;
        unsigned int    id_sequence;            /* 'sequence' of pkt ids */
        struct list_head queue_list;            /* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);

static DEFINE_SPINLOCK(instances_lock);

#define INSTANCE_BUCKETS        16
static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly;

static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
        return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
}
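
/*
 * Worked example (for illustration): queue_num 0x0110 hashes to
 * ((0x0110 >> 8) | 0x0110) % 16 = (0x01 | 0x0110) % 16 = 0x0111 % 16 = 1,
 * so both bytes of the queue number influence the bucket choice.
 */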

static struct nfqnl_instance *
instance_lookup(u_int16_t queue_num)
{
        struct hlist_head *head;
        struct hlist_node *pos;
        struct nfqnl_instance *inst;

        head = &instance_table[instance_hashfn(queue_num)];
        hlist_for_each_entry_rcu(inst, pos, head, hlist) {
                if (inst->queue_num == queue_num)
                        return inst;
        }
        return NULL;
}

static struct nfqnl_instance *
instance_create(u_int16_t queue_num, int pid)
{
        struct nfqnl_instance *inst;
        unsigned int h;
        int err;

        spin_lock(&instances_lock);
        if (instance_lookup(queue_num)) {
                err = -EEXIST;
                goto out_unlock;
        }

        inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
        if (!inst) {
                err = -ENOMEM;
                goto out_unlock;
        }

        inst->queue_num = queue_num;
        inst->peer_pid = pid;
        inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
        inst->copy_range = 0xfffff;
        inst->copy_mode = NFQNL_COPY_NONE;
        spin_lock_init(&inst->lock);
        INIT_LIST_HEAD(&inst->queue_list);

        if (!try_module_get(THIS_MODULE)) {
                err = -EAGAIN;
                goto out_free;
        }

        h = instance_hashfn(queue_num);
        hlist_add_head_rcu(&inst->hlist, &instance_table[h]);

        spin_unlock(&instances_lock);

        return inst;

out_free:
        kfree(inst);
out_unlock:
        spin_unlock(&instances_lock);
        return ERR_PTR(err);
}

static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
                        unsigned long data);

static void
instance_destroy_rcu(struct rcu_head *head)
{
        struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
                                                   rcu);

        nfqnl_flush(inst, NULL, 0);
        kfree(inst);
        module_put(THIS_MODULE);
}

static void
__instance_destroy(struct nfqnl_instance *inst)
{
        hlist_del_rcu(&inst->hlist);
        call_rcu(&inst->rcu, instance_destroy_rcu);
}

static void
instance_destroy(struct nfqnl_instance *inst)
{
        spin_lock(&instances_lock);
        __instance_destroy(inst);
        spin_unlock(&instances_lock);
}

static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
        list_add_tail(&entry->list, &queue->queue_list);
        queue->queue_total++;
}

static void
__dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
        list_del(&entry->list);
        queue->queue_total--;
}

static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
        struct nf_queue_entry *entry = NULL, *i;

        spin_lock_bh(&queue->lock);

        list_for_each_entry(i, &queue->queue_list, list) {
                if (i->id == id) {
                        entry = i;
                        break;
                }
        }

        if (entry)
                __dequeue_entry(queue, entry);

        spin_unlock_bh(&queue->lock);

        return entry;
}

static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
        struct nf_queue_entry *entry, *next;

        spin_lock_bh(&queue->lock);
        list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
                if (!cmpfn || cmpfn(entry, data)) {
                        list_del(&entry->list);
                        queue->queue_total--;
                        nf_reinject(entry, NF_DROP);
                }
        }
        spin_unlock_bh(&queue->lock);
}

static struct sk_buff *
nfqnl_build_packet_message(struct nfqnl_instance *queue,
                           struct nf_queue_entry *entry,
                           __be32 **packet_id_ptr)
{
        sk_buff_data_t old_tail;
        size_t size;
        size_t data_len = 0;
        struct sk_buff *skb;
        struct nlattr *nla;
        struct nfqnl_msg_packet_hdr *pmsg;
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
        struct sk_buff *entskb = entry->skb;
        struct net_device *indev;
        struct net_device *outdev;

        size =    NLMSG_SPACE(sizeof(struct nfgenmsg))
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
#endif
                + nla_total_size(sizeof(u_int32_t))     /* mark */
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
                + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));

        outdev = entry->outdev;

        switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
        case NFQNL_COPY_META:
        case NFQNL_COPY_NONE:
                break;

        case NFQNL_COPY_PACKET:
                if (entskb->ip_summed == CHECKSUM_PARTIAL &&
                    skb_checksum_help(entskb))
                        return NULL;

                data_len = ACCESS_ONCE(queue->copy_range);
                if (data_len == 0 || data_len > entskb->len)
                        data_len = entskb->len;

                size += nla_total_size(data_len);
                break;
        }

        skb = alloc_skb(size, GFP_ATOMIC);
        if (!skb)
                goto nlmsg_failure;

        old_tail = skb->tail;
        nlh = NLMSG_PUT(skb, 0, 0,
                        NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
                        sizeof(struct nfgenmsg));
        nfmsg = NLMSG_DATA(nlh);
        nfmsg->nfgen_family = entry->pf;
        nfmsg->version = NFNETLINK_V0;
        nfmsg->res_id = htons(queue->queue_num);

        nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
        pmsg = nla_data(nla);
        pmsg->hw_protocol       = entskb->protocol;
        pmsg->hook              = entry->hook;
        *packet_id_ptr          = &pmsg->packet_id;

        indev = entry->indev;
        if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
                if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
                        goto nla_put_failure;
#else
                if (entry->pf == PF_BRIDGE) {
                        /* Case 1: indev is physical input device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
                                         htonl(indev->ifindex)) ||
                        /* this is the bridge group "brX" */
                        /* rcu_read_lock()ed by __nf_queue */
                            nla_put_be32(skb, NFQA_IFINDEX_INDEV,
                                         htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
                                goto nla_put_failure;
                } else {
                        /* Case 2: indev is bridge group, we need to look for
                         * physical device (when called from ipv4) */
                        if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
                                         htonl(indev->ifindex)))
                                goto nla_put_failure;
                        if (entskb->nf_bridge && entskb->nf_bridge->physindev &&
                            nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
                                         htonl(entskb->nf_bridge->physindev->ifindex)))
                                goto nla_put_failure;
                }
#endif
        }

        if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
                if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
                        goto nla_put_failure;
#else
                if (entry->pf == PF_BRIDGE) {
                        /* Case 1: outdev is physical output device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                         htonl(outdev->ifindex)) ||
                        /* this is the bridge group "brX" */
                        /* rcu_read_lock()ed by __nf_queue */
                            nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
                                         htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
                                goto nla_put_failure;
                } else {
                        /* Case 2: outdev is bridge group, we need to look for
                         * physical output device (when called from ipv4) */
                        if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
                                         htonl(outdev->ifindex)))
                                goto nla_put_failure;
                        if (entskb->nf_bridge && entskb->nf_bridge->physoutdev &&
                            nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                         htonl(entskb->nf_bridge->physoutdev->ifindex)))
                                goto nla_put_failure;
                }
#endif
        }

        if (entskb->mark &&
            nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
                goto nla_put_failure;

        if (indev && entskb->dev &&
            entskb->mac_header != entskb->network_header) {
                struct nfqnl_msg_packet_hw phw;
                int len = dev_parse_header(entskb, phw.hw_addr);
                if (len) {
                        phw.hw_addrlen = htons(len);
                        if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
                                goto nla_put_failure;
                }
        }

        if (entskb->tstamp.tv64) {
                struct nfqnl_msg_packet_timestamp ts;
                struct timeval tv = ktime_to_timeval(entskb->tstamp);
                ts.sec = cpu_to_be64(tv.tv_sec);
                ts.usec = cpu_to_be64(tv.tv_usec);

                if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
                        goto nla_put_failure;
        }

        if (data_len) {
                struct nlattr *nla;
                int sz = nla_attr_size(data_len);

                if (skb_tailroom(skb) < nla_total_size(data_len)) {
                        printk(KERN_WARNING "nf_queue: no tailroom!\n");
                        goto nlmsg_failure;
                }

                nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
                nla->nla_type = NFQA_PAYLOAD;
                nla->nla_len = sz;

                if (skb_copy_bits(entskb, 0, nla_data(nla), data_len))
                        BUG();
        }

        nlh->nlmsg_len = skb->tail - old_tail;
        return skb;

nlmsg_failure:
nla_put_failure:
        if (skb)
                kfree_skb(skb);
        net_err_ratelimited("nf_queue: error creating packet message\n");
        return NULL;
}

static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
        struct sk_buff *nskb;
        struct nfqnl_instance *queue;
        int err = -ENOBUFS;
        __be32 *packet_id_ptr;

        /* rcu_read_lock()ed by nf_hook_slow() */
        queue = instance_lookup(queuenum);
        if (!queue) {
                err = -ESRCH;
                goto err_out;
        }

        if (queue->copy_mode == NFQNL_COPY_NONE) {
                err = -EINVAL;
                goto err_out;
        }

        nskb = nfqnl_build_packet_message(queue, entry, &packet_id_ptr);
        if (nskb == NULL) {
                err = -ENOMEM;
                goto err_out;
        }
        spin_lock_bh(&queue->lock);

        if (!queue->peer_pid) {
                err = -EINVAL;
                goto err_out_free_nskb;
        }
        if (queue->queue_total >= queue->queue_maxlen) {
                queue->queue_dropped++;
                net_warn_ratelimited("nf_queue: full at %d entries, dropping packet(s)\n",
                                     queue->queue_total);
                goto err_out_free_nskb;
        }
        entry->id = ++queue->id_sequence;
        *packet_id_ptr = htonl(entry->id);

        /* nfnetlink_unicast will either free the nskb or add it to a socket */
        err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT);
        if (err < 0) {
                queue->queue_user_dropped++;
                goto err_out_unlock;
        }

        __enqueue_entry(queue, entry);

        spin_unlock_bh(&queue->lock);
        return 0;

err_out_free_nskb:
        kfree_skb(nskb);
err_out_unlock:
        spin_unlock_bh(&queue->lock);
err_out:
        return err;
}

static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e)
{
        struct sk_buff *nskb;
        int diff;

        diff = data_len - e->skb->len;
        if (diff < 0) {
                if (pskb_trim(e->skb, data_len))
                        return -ENOMEM;
        } else if (diff > 0) {
                if (data_len > 0xFFFF)
                        return -EINVAL;
                if (diff > skb_tailroom(e->skb)) {
                        nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
                                               diff, GFP_ATOMIC);
                        if (!nskb) {
                                printk(KERN_WARNING "nf_queue: OOM "
                                      "in mangle, dropping packet\n");
                                return -ENOMEM;
                        }
                        kfree_skb(e->skb);
                        e->skb = nskb;
                }
                skb_put(e->skb, diff);
        }
        if (!skb_make_writable(e->skb, data_len))
                return -ENOMEM;
        skb_copy_to_linear_data(e->skb, data, data_len);
        e->skb->ip_summed = CHECKSUM_NONE;
        return 0;
}

static int
nfqnl_set_mode(struct nfqnl_instance *queue,
               unsigned char mode, unsigned int range)
{
        int status = 0;

        spin_lock_bh(&queue->lock);
        switch (mode) {
        case NFQNL_COPY_NONE:
        case NFQNL_COPY_META:
                queue->copy_mode = mode;
                queue->copy_range = 0;
                break;

        case NFQNL_COPY_PACKET:
                queue->copy_mode = mode;
                /* we're using struct nlattr which has 16bit nla_len */
                if (range > 0xffff)
                        queue->copy_range = 0xffff;
                else
                        queue->copy_range = range;
                break;

        default:
                status = -EINVAL;
        }
        spin_unlock_bh(&queue->lock);

        return status;
}
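
/*
 * For reference, userspace typically drives this through NFQNL_MSG_CONFIG
 * with an NFQA_CFG_PARAMS attribute; with libnetfilter_queue that is a
 * single call (a sketch, assuming the usual API):
 *
 *	nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);
 *
 * which arrives here as mode == NFQNL_COPY_PACKET, range == 0xffff.
 */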

static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
        if (entry->indev)
                if (entry->indev->ifindex == ifindex)
                        return 1;
        if (entry->outdev)
                if (entry->outdev->ifindex == ifindex)
                        return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
        if (entry->skb->nf_bridge) {
                if (entry->skb->nf_bridge->physindev &&
                    entry->skb->nf_bridge->physindev->ifindex == ifindex)
                        return 1;
                if (entry->skb->nf_bridge->physoutdev &&
                    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
                        return 1;
        }
#endif
        return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(int ifindex)
{
        int i;

        rcu_read_lock();

        for (i = 0; i < INSTANCE_BUCKETS; i++) {
                struct hlist_node *tmp;
                struct nfqnl_instance *inst;
                struct hlist_head *head = &instance_table[i];

                hlist_for_each_entry_rcu(inst, tmp, head, hlist)
                        nfqnl_flush(inst, dev_cmp, ifindex);
        }

        rcu_read_unlock();
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
                    unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;

        /* Drop any packets associated with the downed device */
        if (event == NETDEV_DOWN)
                nfqnl_dev_drop(dev->ifindex);
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
        .notifier_call  = nfqnl_rcv_dev_event,
};

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
                   unsigned long event, void *ptr)
{
        struct netlink_notify *n = ptr;

        if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
                int i;

                /* destroy all instances for this pid */
                spin_lock(&instances_lock);
                for (i = 0; i < INSTANCE_BUCKETS; i++) {
                        struct hlist_node *tmp, *t2;
                        struct nfqnl_instance *inst;
                        struct hlist_head *head = &instance_table[i];

                        hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
                                if ((n->net == &init_net) &&
                                    (n->pid == inst->peer_pid))
                                        __instance_destroy(inst);
                        }
                }
                spin_unlock(&instances_lock);
        }
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
        .notifier_call  = nfqnl_rcv_nl_event,
};

static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
        [NFQA_VERDICT_HDR]      = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
        [NFQA_MARK]             = { .type = NLA_U32 },
        [NFQA_PAYLOAD]          = { .type = NLA_UNSPEC },
};

static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
        [NFQA_VERDICT_HDR]      = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
        [NFQA_MARK]             = { .type = NLA_U32 },
};

static struct nfqnl_instance *verdict_instance_lookup(u16 queue_num, int nlpid)
{
        struct nfqnl_instance *queue;

        queue = instance_lookup(queue_num);
        if (!queue)
                return ERR_PTR(-ENODEV);

        if (queue->peer_pid != nlpid)
                return ERR_PTR(-EPERM);

        return queue;
}

static struct nfqnl_msg_verdict_hdr*
verdicthdr_get(const struct nlattr * const nfqa[])
{
        struct nfqnl_msg_verdict_hdr *vhdr;
        unsigned int verdict;

        if (!nfqa[NFQA_VERDICT_HDR])
                return NULL;

        vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
        verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
        if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
                return NULL;
        return vhdr;
}

static int nfq_id_after(unsigned int id, unsigned int max)
{
        return (int)(id - max) > 0;
}
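
/*
 * Example: with 32-bit wraparound, id 0x00000002 is "after" max 0xfffffffe
 * because (int)(0x00000002 - 0xfffffffe) == 4 > 0, so batch verdicts keep
 * working across an id_sequence rollover.
 */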

static int
nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb,
                         const struct nlmsghdr *nlh,
                         const struct nlattr * const nfqa[])
{
        struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
        struct nf_queue_entry *entry, *tmp;
        unsigned int verdict, maxid;
        struct nfqnl_msg_verdict_hdr *vhdr;
        struct nfqnl_instance *queue;
        LIST_HEAD(batch_list);
        u16 queue_num = ntohs(nfmsg->res_id);

        queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).pid);
        if (IS_ERR(queue))
                return PTR_ERR(queue);

        vhdr = verdicthdr_get(nfqa);
        if (!vhdr)
                return -EINVAL;

        verdict = ntohl(vhdr->verdict);
        maxid = ntohl(vhdr->id);

        spin_lock_bh(&queue->lock);

        list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
                if (nfq_id_after(entry->id, maxid))
                        break;
                __dequeue_entry(queue, entry);
                list_add_tail(&entry->list, &batch_list);
        }

        spin_unlock_bh(&queue->lock);

        if (list_empty(&batch_list))
                return -ENOENT;

        list_for_each_entry_safe(entry, tmp, &batch_list, list) {
                if (nfqa[NFQA_MARK])
                        entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
                nf_reinject(entry, verdict);
        }
        return 0;
}

static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
                   const struct nlmsghdr *nlh,
                   const struct nlattr * const nfqa[])
{
        struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);

        struct nfqnl_msg_verdict_hdr *vhdr;
        struct nfqnl_instance *queue;
        unsigned int verdict;
        struct nf_queue_entry *entry;

        queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).pid);
        if (IS_ERR(queue))
                return PTR_ERR(queue);

        vhdr = verdicthdr_get(nfqa);
        if (!vhdr)
                return -EINVAL;

        verdict = ntohl(vhdr->verdict);

        entry = find_dequeue_entry(queue, ntohl(vhdr->id));
        if (entry == NULL)
                return -ENOENT;

        if (nfqa[NFQA_PAYLOAD]) {
                if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
                                 nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0)
                        verdict = NF_DROP;
        }

        if (nfqa[NFQA_MARK])
                entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

        nf_reinject(entry, verdict);
        return 0;
}

static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
                  const struct nlmsghdr *nlh,
                  const struct nlattr * const nfqa[])
{
        return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
        [NFQA_CFG_CMD]          = { .len = sizeof(struct nfqnl_msg_config_cmd) },
        [NFQA_CFG_PARAMS]       = { .len = sizeof(struct nfqnl_msg_config_params) },
};

static const struct nf_queue_handler nfqh = {
        .name   = "nf_queue",
        .outfn  = &nfqnl_enqueue_packet,
};

static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
                  const struct nlmsghdr *nlh,
                  const struct nlattr * const nfqa[])
{
        struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);
        struct nfqnl_instance *queue;
        struct nfqnl_msg_config_cmd *cmd = NULL;
        int ret = 0;

        if (nfqa[NFQA_CFG_CMD]) {
                cmd = nla_data(nfqa[NFQA_CFG_CMD]);

                /* Commands without queue context - might sleep */
                switch (cmd->command) {
                case NFQNL_CFG_CMD_PF_BIND:
                        return nf_register_queue_handler(ntohs(cmd->pf),
                                                         &nfqh);
                case NFQNL_CFG_CMD_PF_UNBIND:
                        return nf_unregister_queue_handler(ntohs(cmd->pf),
                                                           &nfqh);
                }
        }

        rcu_read_lock();
        queue = instance_lookup(queue_num);
        if (queue && queue->peer_pid != NETLINK_CB(skb).pid) {
                ret = -EPERM;
                goto err_out_unlock;
        }

        if (cmd != NULL) {
                switch (cmd->command) {
                case NFQNL_CFG_CMD_BIND:
                        if (queue) {
                                ret = -EBUSY;
                                goto err_out_unlock;
                        }
                        queue = instance_create(queue_num, NETLINK_CB(skb).pid);
                        if (IS_ERR(queue)) {
                                ret = PTR_ERR(queue);
                                goto err_out_unlock;
                        }
                        break;
                case NFQNL_CFG_CMD_UNBIND:
                        if (!queue) {
                                ret = -ENODEV;
                                goto err_out_unlock;
                        }
                        instance_destroy(queue);
                        break;
                case NFQNL_CFG_CMD_PF_BIND:
                case NFQNL_CFG_CMD_PF_UNBIND:
                        break;
                default:
                        ret = -ENOTSUPP;
                        break;
                }
        }

        if (nfqa[NFQA_CFG_PARAMS]) {
                struct nfqnl_msg_config_params *params;

                if (!queue) {
                        ret = -ENODEV;
                        goto err_out_unlock;
                }
                params = nla_data(nfqa[NFQA_CFG_PARAMS]);
                nfqnl_set_mode(queue, params->copy_mode,
                                ntohl(params->copy_range));
        }

        if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
                __be32 *queue_maxlen;

                if (!queue) {
                        ret = -ENODEV;
                        goto err_out_unlock;
                }
                queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
                spin_lock_bh(&queue->lock);
                queue->queue_maxlen = ntohl(*queue_maxlen);
                spin_unlock_bh(&queue->lock);
        }

err_out_unlock:
        rcu_read_unlock();
        return ret;
}

static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
        [NFQNL_MSG_PACKET]      = { .call_rcu = nfqnl_recv_unsupp,
                                    .attr_count = NFQA_MAX, },
        [NFQNL_MSG_VERDICT]     = { .call_rcu = nfqnl_recv_verdict,
                                    .attr_count = NFQA_MAX,
                                    .policy = nfqa_verdict_policy },
        [NFQNL_MSG_CONFIG]      = { .call = nfqnl_recv_config,
                                    .attr_count = NFQA_CFG_MAX,
                                    .policy = nfqa_cfg_policy },
        [NFQNL_MSG_VERDICT_BATCH] = { .call_rcu = nfqnl_recv_verdict_batch,
                                    .attr_count = NFQA_MAX,
                                    .policy = nfqa_verdict_batch_policy },
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
        .name           = "nf_queue",
        .subsys_id      = NFNL_SUBSYS_QUEUE,
        .cb_count       = NFQNL_MSG_MAX,
        .cb             = nfqnl_cb,
};

#ifdef CONFIG_PROC_FS
struct iter_state {
        unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
        struct iter_state *st = seq->private;

        if (!st)
                return NULL;

        for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
                if (!hlist_empty(&instance_table[st->bucket]))
                        return instance_table[st->bucket].first;
        }
        return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
        struct iter_state *st = seq->private;

        h = h->next;
        while (!h) {
                if (++st->bucket >= INSTANCE_BUCKETS)
                        return NULL;

                h = instance_table[st->bucket].first;
        }
        return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head;
        head = get_first(seq);

        if (head)
                while (pos && (head = get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(instances_lock)
{
        spin_lock(&instances_lock);
        return get_idx(seq, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        (*pos)++;
        return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
        __releases(instances_lock)
{
        spin_unlock(&instances_lock);
}

static int seq_show(struct seq_file *s, void *v)
{
        const struct nfqnl_instance *inst = v;

        return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
                          inst->queue_num,
                          inst->peer_pid, inst->queue_total,
                          inst->copy_mode, inst->copy_range,
                          inst->queue_dropped, inst->queue_user_dropped,
                          inst->id_sequence, 1);
}

static const struct seq_operations nfqnl_seq_ops = {
        .start  = seq_start,
        .next   = seq_next,
        .stop   = seq_stop,
        .show   = seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
        return seq_open_private(file, &nfqnl_seq_ops,
                        sizeof(struct iter_state));
}

static const struct file_operations nfqnl_file_ops = {
        .owner   = THIS_MODULE,
        .open    = nfqnl_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

#endif /* PROC_FS */

static int __init nfnetlink_queue_init(void)
{
        int i, status = -ENOMEM;

        for (i = 0; i < INSTANCE_BUCKETS; i++)
                INIT_HLIST_HEAD(&instance_table[i]);

        netlink_register_notifier(&nfqnl_rtnl_notifier);
        status = nfnetlink_subsys_register(&nfqnl_subsys);
        if (status < 0) {
                printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
                goto cleanup_netlink_notifier;
        }

#ifdef CONFIG_PROC_FS
        if (!proc_create("nfnetlink_queue", 0440,
                         proc_net_netfilter, &nfqnl_file_ops))
                goto cleanup_subsys;
#endif

        register_netdevice_notifier(&nfqnl_dev_notifier);
        return status;

#ifdef CONFIG_PROC_FS
cleanup_subsys:
        nfnetlink_subsys_unregister(&nfqnl_subsys);
#endif
cleanup_netlink_notifier:
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
        return status;
}

static void __exit nfnetlink_queue_fini(void)
{
        nf_unregister_queue_handlers(&nfqh);
        unregister_netdevice_notifier(&nfqnl_dev_notifier);
#ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
#endif
        nfnetlink_subsys_unregister(&nfqnl_subsys);
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);

        rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);