netfilter: don't use INIT_RCU_HEAD()
net/netfilter/nfnetlink_queue.c
/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 * (C) 2007 by Patrick McHardy <kaber@trash.net>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
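
/*
 * Userspace interaction in a nutshell: packets are diverted here by the
 * NFQUEUE target (e.g. "iptables -A FORWARD -j NFQUEUE --queue-num 0").
 * A listener bound to the queue over NETLINK_NETFILTER receives
 * NFQNL_MSG_PACKET messages and answers each with an NFQNL_MSG_VERDICT
 * carrying NF_ACCEPT, NF_DROP etc., optionally with a rewritten payload
 * or a new mark.  Illustrative userspace sketch using libnetfilter_queue
 * (not part of this module; my_callback is a placeholder):
 *
 *    h  = nfq_open();
 *    qh = nfq_create_queue(h, 0, &my_callback, NULL);
 *    nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);
 *    ...and inside my_callback():
 *    nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
 */
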
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/netfilter/nf_queue.h>

#include <asm/atomic.h>

#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024

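/*
 * One nfqnl_instance exists per bound queue number.  Instances hang off
 * a small hash table (instance_table below), with instances_lock
 * serializing writers and RCU protecting readers; the per-instance lock
 * guards queue_list and the counters.
 */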
struct nfqnl_instance {
        struct hlist_node hlist;                /* global list of queues */
        struct rcu_head rcu;

        int peer_pid;
        unsigned int queue_maxlen;
        unsigned int copy_range;
        unsigned int queue_total;
        unsigned int queue_dropped;
        unsigned int queue_user_dropped;

        unsigned int id_sequence;               /* 'sequence' of pkt ids */

        u_int16_t queue_num;                    /* number of this queue */
        u_int8_t copy_mode;

        spinlock_t lock;

        struct list_head queue_list;            /* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);

static DEFINE_SPINLOCK(instances_lock);

#define INSTANCE_BUCKETS        16
static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly;

static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
        return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
}

static struct nfqnl_instance *
instance_lookup(u_int16_t queue_num)
{
        struct hlist_head *head;
        struct hlist_node *pos;
        struct nfqnl_instance *inst;

        head = &instance_table[instance_hashfn(queue_num)];
        hlist_for_each_entry_rcu(inst, pos, head, hlist) {
                if (inst->queue_num == queue_num)
                        return inst;
        }
        return NULL;
}

static struct nfqnl_instance *
instance_create(u_int16_t queue_num, int pid)
{
        struct nfqnl_instance *inst;
        unsigned int h;
        int err;

        spin_lock(&instances_lock);
        if (instance_lookup(queue_num)) {
                err = -EEXIST;
                goto out_unlock;
        }

        inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
        if (!inst) {
                err = -ENOMEM;
                goto out_unlock;
        }

        inst->queue_num = queue_num;
        inst->peer_pid = pid;
        inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
        inst->copy_range = 0xfffff;
        inst->copy_mode = NFQNL_COPY_NONE;
        spin_lock_init(&inst->lock);
        INIT_LIST_HEAD(&inst->queue_list);

        if (!try_module_get(THIS_MODULE)) {
                err = -EAGAIN;
                goto out_free;
        }

        h = instance_hashfn(queue_num);
        hlist_add_head_rcu(&inst->hlist, &instance_table[h]);

        spin_unlock(&instances_lock);

        return inst;

out_free:
        kfree(inst);
out_unlock:
        spin_unlock(&instances_lock);
        return ERR_PTR(err);
}

static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
                        unsigned long data);

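/*
 * Teardown is RCU-deferred: __instance_destroy() unhooks the instance
 * with hlist_del_rcu() and the actual freeing happens here, so lookups
 * running under rcu_read_lock() never see a freed instance.  Packets
 * still queued at that point are flushed, i.e. reinjected as NF_DROP.
 */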
static void
instance_destroy_rcu(struct rcu_head *head)
{
        struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
                                                   rcu);

        nfqnl_flush(inst, NULL, 0);
        kfree(inst);
        module_put(THIS_MODULE);
}

static void
__instance_destroy(struct nfqnl_instance *inst)
{
        hlist_del_rcu(&inst->hlist);
        call_rcu(&inst->rcu, instance_destroy_rcu);
}

static void
instance_destroy(struct nfqnl_instance *inst)
{
        spin_lock(&instances_lock);
        __instance_destroy(inst);
        spin_unlock(&instances_lock);
}

static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
        list_add_tail(&entry->list, &queue->queue_list);
        queue->queue_total++;
}

static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
        struct nf_queue_entry *entry = NULL, *i;

        spin_lock_bh(&queue->lock);

        list_for_each_entry(i, &queue->queue_list, list) {
                if (i->id == id) {
                        entry = i;
                        break;
                }
        }

        if (entry) {
                list_del(&entry->list);
                queue->queue_total--;
        }

        spin_unlock_bh(&queue->lock);

        return entry;
}

static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
        struct nf_queue_entry *entry, *next;

        spin_lock_bh(&queue->lock);
        list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
                if (!cmpfn || cmpfn(entry, data)) {
                        list_del(&entry->list);
                        queue->queue_total--;
                        nf_reinject(entry, NF_DROP);
                }
        }
        spin_unlock_bh(&queue->lock);
}

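/*
 * Build the NFQNL_MSG_PACKET message for one queued packet.  The
 * message carries an nfqnl_msg_packet_hdr plus NFQA_* attributes:
 * input/output ifindexes (and the bridge physdevs when bridge
 * netfilter is compiled in), the skb mark, hardware address and
 * timestamp, and in NFQNL_COPY_PACKET mode up to copy_range bytes of
 * payload.
 */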
static struct sk_buff *
nfqnl_build_packet_message(struct nfqnl_instance *queue,
                           struct nf_queue_entry *entry)
{
        sk_buff_data_t old_tail;
        size_t size;
        size_t data_len = 0;
        struct sk_buff *skb;
        struct nfqnl_msg_packet_hdr pmsg;
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
        struct sk_buff *entskb = entry->skb;
        struct net_device *indev;
        struct net_device *outdev;

        size =    NLMSG_SPACE(sizeof(struct nfgenmsg))
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
#endif
                + nla_total_size(sizeof(u_int32_t))     /* mark */
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
                + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));

        outdev = entry->outdev;

        spin_lock_bh(&queue->lock);

        switch ((enum nfqnl_config_mode)queue->copy_mode) {
        case NFQNL_COPY_META:
        case NFQNL_COPY_NONE:
                break;

        case NFQNL_COPY_PACKET:
                if ((entskb->ip_summed == CHECKSUM_PARTIAL ||
                     entskb->ip_summed == CHECKSUM_COMPLETE) &&
                    skb_checksum_help(entskb)) {
                        spin_unlock_bh(&queue->lock);
                        return NULL;
                }
                if (queue->copy_range == 0
                    || queue->copy_range > entskb->len)
                        data_len = entskb->len;
                else
                        data_len = queue->copy_range;

                size += nla_total_size(data_len);
                break;
        }

        entry->id = queue->id_sequence++;

        spin_unlock_bh(&queue->lock);

        skb = alloc_skb(size, GFP_ATOMIC);
        if (!skb)
                goto nlmsg_failure;

        old_tail = skb->tail;
        nlh = NLMSG_PUT(skb, 0, 0,
                        NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
                        sizeof(struct nfgenmsg));
        nfmsg = NLMSG_DATA(nlh);
        nfmsg->nfgen_family = entry->pf;
        nfmsg->version = NFNETLINK_V0;
        nfmsg->res_id = htons(queue->queue_num);

        pmsg.packet_id          = htonl(entry->id);
        pmsg.hw_protocol        = entskb->protocol;
        pmsg.hook               = entry->hook;

        NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg);

        indev = entry->indev;
        if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
                NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex));
#else
                if (entry->pf == PF_BRIDGE) {
                        /* Case 1: indev is physical input device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
                                     htonl(indev->ifindex));
                        /* this is the bridge group "brX" */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
                                     htonl(indev->br_port->br->dev->ifindex));
                } else {
                        /* Case 2: indev is bridge group, we need to look for
                         * physical device (when called from ipv4) */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
                                     htonl(indev->ifindex));
                        if (entskb->nf_bridge && entskb->nf_bridge->physindev)
                                NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
                                             htonl(entskb->nf_bridge->physindev->ifindex));
                }
#endif
        }

        if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
                NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex));
#else
                if (entry->pf == PF_BRIDGE) {
                        /* Case 1: outdev is physical output device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                     htonl(outdev->ifindex));
                        /* this is the bridge group "brX" */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
                                     htonl(outdev->br_port->br->dev->ifindex));
                } else {
                        /* Case 2: outdev is bridge group, we need to look for
                         * physical output device (when called from ipv4) */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
                                     htonl(outdev->ifindex));
                        if (entskb->nf_bridge && entskb->nf_bridge->physoutdev)
                                NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                             htonl(entskb->nf_bridge->physoutdev->ifindex));
                }
#endif
        }

        if (entskb->mark)
                NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark));

        if (indev && entskb->dev) {
                struct nfqnl_msg_packet_hw phw;
                int len = dev_parse_header(entskb, phw.hw_addr);
                if (len) {
                        phw.hw_addrlen = htons(len);
                        NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
                }
        }

        if (entskb->tstamp.tv64) {
                struct nfqnl_msg_packet_timestamp ts;
                struct timeval tv = ktime_to_timeval(entskb->tstamp);
                ts.sec = cpu_to_be64(tv.tv_sec);
                ts.usec = cpu_to_be64(tv.tv_usec);

                NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
        }

        if (data_len) {
                struct nlattr *nla;
                int sz = nla_attr_size(data_len);

                if (skb_tailroom(skb) < nla_total_size(data_len)) {
                        printk(KERN_WARNING "nf_queue: no tailroom!\n");
                        goto nlmsg_failure;
                }

                nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
                nla->nla_type = NFQA_PAYLOAD;
                nla->nla_len = sz;

                if (skb_copy_bits(entskb, 0, nla_data(nla), data_len))
                        BUG();
        }

        nlh->nlmsg_len = skb->tail - old_tail;
        return skb;

nlmsg_failure:
nla_put_failure:
        if (skb)
                kfree_skb(skb);
        if (net_ratelimit())
                printk(KERN_ERR "nf_queue: error creating packet message\n");
        return NULL;
}

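/*
 * Queue handler callback (nfqh.outfn), called under rcu_read_lock()
 * from the nf_queue core.  The netlink message is unicast to the bound
 * peer first; only if that succeeds is the entry parked on queue_list
 * to await its verdict.  A full backlog bumps queue_dropped, a failed
 * unicast bumps queue_user_dropped.
 */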
static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
        struct sk_buff *nskb;
        struct nfqnl_instance *queue;
        int err;

        /* rcu_read_lock()ed by nf_hook_slow() */
        queue = instance_lookup(queuenum);
        if (!queue)
                goto err_out;

        if (queue->copy_mode == NFQNL_COPY_NONE)
                goto err_out;

        nskb = nfqnl_build_packet_message(queue, entry);
        if (nskb == NULL)
                goto err_out;

        spin_lock_bh(&queue->lock);

        if (!queue->peer_pid)
                goto err_out_free_nskb;

        if (queue->queue_total >= queue->queue_maxlen) {
                queue->queue_dropped++;
                if (net_ratelimit())
                        printk(KERN_WARNING "nf_queue: full at %d entries, "
                               "dropping packet(s).\n",
                               queue->queue_total);
                goto err_out_free_nskb;
        }

        /* nfnetlink_unicast will either free the nskb or add it to a socket */
        err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT);
        if (err < 0) {
                queue->queue_user_dropped++;
                goto err_out_unlock;
        }

        __enqueue_entry(queue, entry);

        spin_unlock_bh(&queue->lock);
        return 0;

err_out_free_nskb:
        kfree_skb(nskb);
err_out_unlock:
        spin_unlock_bh(&queue->lock);
err_out:
        return -1;
}

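/*
 * Replace the payload of a queued skb with the data supplied in a
 * verdict's NFQA_PAYLOAD attribute, trimming or growing the skb as
 * needed and invalidating its checksum.
 */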
static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e)
{
        struct sk_buff *nskb;
        int diff;

        diff = data_len - e->skb->len;
        if (diff < 0) {
                if (pskb_trim(e->skb, data_len))
                        return -ENOMEM;
        } else if (diff > 0) {
                if (data_len > 0xFFFF)
                        return -EINVAL;
                if (diff > skb_tailroom(e->skb)) {
                        nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
                                               diff, GFP_ATOMIC);
                        if (!nskb) {
                                printk(KERN_WARNING "nf_queue: OOM "
                                       "in mangle, dropping packet\n");
                                return -ENOMEM;
                        }
                        kfree_skb(e->skb);
                        e->skb = nskb;
                }
                skb_put(e->skb, diff);
        }
        if (!skb_make_writable(e->skb, data_len))
                return -ENOMEM;
        skb_copy_to_linear_data(e->skb, data, data_len);
        e->skb->ip_summed = CHECKSUM_NONE;
        return 0;
}

static int
nfqnl_set_mode(struct nfqnl_instance *queue,
               unsigned char mode, unsigned int range)
{
        int status = 0;

        spin_lock_bh(&queue->lock);
        switch (mode) {
        case NFQNL_COPY_NONE:
        case NFQNL_COPY_META:
                queue->copy_mode = mode;
                queue->copy_range = 0;
                break;

        case NFQNL_COPY_PACKET:
                queue->copy_mode = mode;
                /* we're using struct nlattr which has 16bit nla_len */
                if (range > 0xffff)
                        queue->copy_range = 0xffff;
                else
                        queue->copy_range = range;
                break;

        default:
                status = -EINVAL;

        }
        spin_unlock_bh(&queue->lock);

        return status;
}

static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
        if (entry->indev)
                if (entry->indev->ifindex == ifindex)
                        return 1;
        if (entry->outdev)
                if (entry->outdev->ifindex == ifindex)
                        return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
        if (entry->skb->nf_bridge) {
                if (entry->skb->nf_bridge->physindev &&
                    entry->skb->nf_bridge->physindev->ifindex == ifindex)
                        return 1;
                if (entry->skb->nf_bridge->physoutdev &&
                    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
                        return 1;
        }
#endif
        return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(int ifindex)
{
        int i;

        rcu_read_lock();

        for (i = 0; i < INSTANCE_BUCKETS; i++) {
                struct hlist_node *tmp;
                struct nfqnl_instance *inst;
                struct hlist_head *head = &instance_table[i];

                hlist_for_each_entry_rcu(inst, tmp, head, hlist)
                        nfqnl_flush(inst, dev_cmp, ifindex);
        }

        rcu_read_unlock();
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
                    unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;

        /* Drop any packets associated with the downed device */
        if (event == NETDEV_DOWN)
                nfqnl_dev_drop(dev->ifindex);
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
        .notifier_call  = nfqnl_rcv_dev_event,
};

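/*
 * When a NETLINK_NETFILTER socket is released (NETLINK_URELEASE), every
 * queue instance owned by that pid is destroyed, so a queue never
 * outlives its userspace listener.
 */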
static int
nfqnl_rcv_nl_event(struct notifier_block *this,
                   unsigned long event, void *ptr)
{
        struct netlink_notify *n = ptr;

        if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
                int i;

                /* destroy all instances for this pid */
                spin_lock(&instances_lock);
                for (i = 0; i < INSTANCE_BUCKETS; i++) {
                        struct hlist_node *tmp, *t2;
                        struct nfqnl_instance *inst;
                        struct hlist_head *head = &instance_table[i];

                        hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
                                if ((n->net == &init_net) &&
                                    (n->pid == inst->peer_pid))
                                        __instance_destroy(inst);
                        }
                }
                spin_unlock(&instances_lock);
        }
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
        .notifier_call  = nfqnl_rcv_nl_event,
};

static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
        [NFQA_VERDICT_HDR]      = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
        [NFQA_MARK]             = { .type = NLA_U32 },
        [NFQA_PAYLOAD]          = { .type = NLA_UNSPEC },
};

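/*
 * NFQNL_MSG_VERDICT handler.  The queue is looked up from the nfgenmsg
 * res_id and must belong to the sending pid.  The entry is then matched
 * by the packet id from NFQA_VERDICT_HDR, optionally mangled
 * (NFQA_PAYLOAD) or re-marked (NFQA_MARK), and reinjected with the
 * requested verdict.
 */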
static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
                   const struct nlmsghdr *nlh,
                   const struct nlattr * const nfqa[])
{
        struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);

        struct nfqnl_msg_verdict_hdr *vhdr;
        struct nfqnl_instance *queue;
        unsigned int verdict;
        struct nf_queue_entry *entry;
        int err;

        rcu_read_lock();
        queue = instance_lookup(queue_num);
        if (!queue) {
                err = -ENODEV;
                goto err_out_unlock;
        }

        if (queue->peer_pid != NETLINK_CB(skb).pid) {
                err = -EPERM;
                goto err_out_unlock;
        }

        if (!nfqa[NFQA_VERDICT_HDR]) {
                err = -EINVAL;
                goto err_out_unlock;
        }

        vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
        verdict = ntohl(vhdr->verdict);

        if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) {
                err = -EINVAL;
                goto err_out_unlock;
        }

        entry = find_dequeue_entry(queue, ntohl(vhdr->id));
        if (entry == NULL) {
                err = -ENOENT;
                goto err_out_unlock;
        }
        rcu_read_unlock();

        if (nfqa[NFQA_PAYLOAD]) {
                if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
                                 nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0)
                        verdict = NF_DROP;
        }

        if (nfqa[NFQA_MARK])
                entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

        nf_reinject(entry, verdict);
        return 0;

err_out_unlock:
        rcu_read_unlock();
        return err;
}

static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
                  const struct nlmsghdr *nlh,
                  const struct nlattr * const nfqa[])
{
        return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
        [NFQA_CFG_CMD]          = { .len = sizeof(struct nfqnl_msg_config_cmd) },
        [NFQA_CFG_PARAMS]       = { .len = sizeof(struct nfqnl_msg_config_params) },
};

static const struct nf_queue_handler nfqh = {
        .name   = "nf_queue",
        .outfn  = &nfqnl_enqueue_packet,
};

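/*
 * NFQNL_MSG_CONFIG handler.  PF_BIND/PF_UNBIND (un)register this module
 * as the queue handler for a protocol family; BIND/UNBIND create or
 * destroy the sender's per-queue instance; NFQA_CFG_PARAMS and
 * NFQA_CFG_QUEUE_MAXLEN adjust the copy mode/range and the backlog
 * limit.
 */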
static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
                  const struct nlmsghdr *nlh,
                  const struct nlattr * const nfqa[])
{
        struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);
        struct nfqnl_instance *queue;
        struct nfqnl_msg_config_cmd *cmd = NULL;
        int ret = 0;

        if (nfqa[NFQA_CFG_CMD]) {
                cmd = nla_data(nfqa[NFQA_CFG_CMD]);

                /* Commands without queue context - might sleep */
                switch (cmd->command) {
                case NFQNL_CFG_CMD_PF_BIND:
                        return nf_register_queue_handler(ntohs(cmd->pf),
                                                         &nfqh);
                case NFQNL_CFG_CMD_PF_UNBIND:
                        return nf_unregister_queue_handler(ntohs(cmd->pf),
                                                           &nfqh);
                }
        }

        rcu_read_lock();
        queue = instance_lookup(queue_num);
        if (queue && queue->peer_pid != NETLINK_CB(skb).pid) {
                ret = -EPERM;
                goto err_out_unlock;
        }

        if (cmd != NULL) {
                switch (cmd->command) {
                case NFQNL_CFG_CMD_BIND:
                        if (queue) {
                                ret = -EBUSY;
                                goto err_out_unlock;
                        }
                        queue = instance_create(queue_num, NETLINK_CB(skb).pid);
                        if (IS_ERR(queue)) {
                                ret = PTR_ERR(queue);
                                goto err_out_unlock;
                        }
                        break;
                case NFQNL_CFG_CMD_UNBIND:
                        if (!queue) {
                                ret = -ENODEV;
                                goto err_out_unlock;
                        }
                        instance_destroy(queue);
                        break;
                case NFQNL_CFG_CMD_PF_BIND:
                case NFQNL_CFG_CMD_PF_UNBIND:
                        break;
                default:
                        ret = -ENOTSUPP;
                        break;
                }
        }

        if (nfqa[NFQA_CFG_PARAMS]) {
                struct nfqnl_msg_config_params *params;

                if (!queue) {
                        ret = -ENODEV;
                        goto err_out_unlock;
                }
                params = nla_data(nfqa[NFQA_CFG_PARAMS]);
                nfqnl_set_mode(queue, params->copy_mode,
                               ntohl(params->copy_range));
        }

        if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
                __be32 *queue_maxlen;

                if (!queue) {
                        ret = -ENODEV;
                        goto err_out_unlock;
                }
                queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
                spin_lock_bh(&queue->lock);
                queue->queue_maxlen = ntohl(*queue_maxlen);
                spin_unlock_bh(&queue->lock);
        }

err_out_unlock:
        rcu_read_unlock();
        return ret;
}

static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
        [NFQNL_MSG_PACKET]      = { .call = nfqnl_recv_unsupp,
                                    .attr_count = NFQA_MAX, },
        [NFQNL_MSG_VERDICT]     = { .call = nfqnl_recv_verdict,
                                    .attr_count = NFQA_MAX,
                                    .policy = nfqa_verdict_policy },
        [NFQNL_MSG_CONFIG]      = { .call = nfqnl_recv_config,
                                    .attr_count = NFQA_CFG_MAX,
                                    .policy = nfqa_cfg_policy },
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
        .name           = "nf_queue",
        .subsys_id      = NFNL_SUBSYS_QUEUE,
        .cb_count       = NFQNL_MSG_MAX,
        .cb             = nfqnl_cb,
};

#ifdef CONFIG_PROC_FS
struct iter_state {
        unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
        struct iter_state *st = seq->private;

        if (!st)
                return NULL;

        for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
                if (!hlist_empty(&instance_table[st->bucket]))
                        return instance_table[st->bucket].first;
        }
        return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
        struct iter_state *st = seq->private;

        h = h->next;
        while (!h) {
                if (++st->bucket >= INSTANCE_BUCKETS)
                        return NULL;

                h = instance_table[st->bucket].first;
        }
        return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head;
        head = get_first(seq);

        if (head)
                while (pos && (head = get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(instances_lock)
{
        spin_lock(&instances_lock);
        return get_idx(seq, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        (*pos)++;
        return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
        __releases(instances_lock)
{
        spin_unlock(&instances_lock);
}

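/*
 * /proc/net/netfilter/nfnetlink_queue: one line per instance with the
 * queue number, peer pid, packets currently queued, copy mode, copy
 * range, queue-full drops, failed-delivery drops, the next packet id
 * and a constant 1.
 */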
static int seq_show(struct seq_file *s, void *v)
{
        const struct nfqnl_instance *inst = v;

        return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
                          inst->queue_num,
                          inst->peer_pid, inst->queue_total,
                          inst->copy_mode, inst->copy_range,
                          inst->queue_dropped, inst->queue_user_dropped,
                          inst->id_sequence, 1);
}

static const struct seq_operations nfqnl_seq_ops = {
        .start  = seq_start,
        .next   = seq_next,
        .stop   = seq_stop,
        .show   = seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
        return seq_open_private(file, &nfqnl_seq_ops,
                        sizeof(struct iter_state));
}

static const struct file_operations nfqnl_file_ops = {
        .owner   = THIS_MODULE,
        .open    = nfqnl_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

#endif /* PROC_FS */

static int __init nfnetlink_queue_init(void)
{
        int i, status = -ENOMEM;

        for (i = 0; i < INSTANCE_BUCKETS; i++)
                INIT_HLIST_HEAD(&instance_table[i]);

        netlink_register_notifier(&nfqnl_rtnl_notifier);
        status = nfnetlink_subsys_register(&nfqnl_subsys);
        if (status < 0) {
                printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
                goto cleanup_netlink_notifier;
        }

#ifdef CONFIG_PROC_FS
        if (!proc_create("nfnetlink_queue", 0440,
                         proc_net_netfilter, &nfqnl_file_ops))
                goto cleanup_subsys;
#endif

        register_netdevice_notifier(&nfqnl_dev_notifier);
        return status;

#ifdef CONFIG_PROC_FS
cleanup_subsys:
        nfnetlink_subsys_unregister(&nfqnl_subsys);
#endif
cleanup_netlink_notifier:
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
        return status;
}

static void __exit nfnetlink_queue_fini(void)
{
        nf_unregister_queue_handlers(&nfqh);
        unregister_netdevice_notifier(&nfqnl_dev_notifier);
#ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
#endif
        nfnetlink_subsys_unregister(&nfqnl_subsys);
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);

        rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);