/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 * (C) 2007 by Patrick McHardy <kaber@trash.net>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/netfilter/nf_queue.h>

#include <asm/atomic.h>

#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024

struct nfqnl_instance {
        struct hlist_node hlist;                /* global list of queues */
        struct rcu_head rcu;

        int peer_pid;
        unsigned int queue_maxlen;
        unsigned int copy_range;
        unsigned int queue_dropped;
        unsigned int queue_user_dropped;

        u_int16_t queue_num;                    /* number of this queue */
        u_int8_t copy_mode;
/*
 * Following fields are dirtied for each queued packet,
 * keep them in same cache line if possible.
 */
        spinlock_t      lock;
        unsigned int    queue_total;
        atomic_t        id_sequence;            /* 'sequence' of pkt ids */
        struct list_head queue_list;            /* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);

static DEFINE_SPINLOCK(instances_lock);

#define INSTANCE_BUCKETS        16
static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly;

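/* Hash a 16-bit queue number into one of the INSTANCE_BUCKETS chains,
 * folding the high byte into the low byte before the modulo so queues
 * differing only in the high byte do not all share a bucket. */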
static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
        return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
}

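/* Lockless lookup in the instance hash; callers must hold rcu_read_lock()
 * (or instances_lock on the update side, as in instance_create()). */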
static struct nfqnl_instance *
instance_lookup(u_int16_t queue_num)
{
        struct hlist_head *head;
        struct hlist_node *pos;
        struct nfqnl_instance *inst;

        head = &instance_table[instance_hashfn(queue_num)];
        hlist_for_each_entry_rcu(inst, pos, head, hlist) {
                if (inst->queue_num == queue_num)
                        return inst;
        }
        return NULL;
}

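/* Instance creation is serialized by instances_lock; the allocation uses
 * GFP_ATOMIC because it runs with that spinlock held.  A module reference
 * is taken per instance so the module cannot be unloaded while a
 * userspace queue is bound. */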
static struct nfqnl_instance *
instance_create(u_int16_t queue_num, int pid)
{
        struct nfqnl_instance *inst;
        unsigned int h;
        int err;

        spin_lock(&instances_lock);
        if (instance_lookup(queue_num)) {
                err = -EEXIST;
                goto out_unlock;
        }

        inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
        if (!inst) {
                err = -ENOMEM;
                goto out_unlock;
        }

        inst->queue_num = queue_num;
        inst->peer_pid = pid;
        inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
        inst->copy_range = 0xfffff;
        inst->copy_mode = NFQNL_COPY_NONE;
        spin_lock_init(&inst->lock);
        INIT_LIST_HEAD(&inst->queue_list);

        if (!try_module_get(THIS_MODULE)) {
                err = -EAGAIN;
                goto out_free;
        }

        h = instance_hashfn(queue_num);
        hlist_add_head_rcu(&inst->hlist, &instance_table[h]);

        spin_unlock(&instances_lock);

        return inst;

out_free:
        kfree(inst);
out_unlock:
        spin_unlock(&instances_lock);
        return ERR_PTR(err);
}

static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
                        unsigned long data);

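/* Teardown is RCU-deferred: the instance is unhashed immediately, but the
 * flush of still-queued entries (reinjected with NF_DROP) and the final
 * kfree() wait for a grace period, so concurrent RCU readers never see a
 * freed instance. */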
static void
instance_destroy_rcu(struct rcu_head *head)
{
        struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
                                                   rcu);

        nfqnl_flush(inst, NULL, 0);
        kfree(inst);
        module_put(THIS_MODULE);
}

static void
__instance_destroy(struct nfqnl_instance *inst)
{
        hlist_del_rcu(&inst->hlist);
        call_rcu(&inst->rcu, instance_destroy_rcu);
}

static void
instance_destroy(struct nfqnl_instance *inst)
{
        spin_lock(&instances_lock);
        __instance_destroy(inst);
        spin_unlock(&instances_lock);
}

static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
        list_add_tail(&entry->list, &queue->queue_list);
        queue->queue_total++;
}

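/* Find the queued entry matching a packet id and unlink it, all under the
 * per-instance lock; the verdict path then owns the entry exclusively. */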
static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
        struct nf_queue_entry *entry = NULL, *i;

        spin_lock_bh(&queue->lock);

        list_for_each_entry(i, &queue->queue_list, list) {
                if (i->id == id) {
                        entry = i;
                        break;
                }
        }

        if (entry) {
                list_del(&entry->list);
                queue->queue_total--;
        }

        spin_unlock_bh(&queue->lock);

        return entry;
}

static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
        struct nf_queue_entry *entry, *next;

        spin_lock_bh(&queue->lock);
        list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
                if (!cmpfn || cmpfn(entry, data)) {
                        list_del(&entry->list);
                        queue->queue_total--;
                        nf_reinject(entry, NF_DROP);
                }
        }
        spin_unlock_bh(&queue->lock);
}

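/* Build the NFQNL_MSG_PACKET netlink message for one queued packet.  The
 * attribute space is sized up front for the worst case (all metadata
 * attributes plus, in NFQNL_COPY_PACKET mode, up to copy_range bytes of
 * payload); the payload path still double-checks tailroom before copying. */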
static struct sk_buff *
nfqnl_build_packet_message(struct nfqnl_instance *queue,
                           struct nf_queue_entry *entry)
{
        sk_buff_data_t old_tail;
        size_t size;
        size_t data_len = 0;
        struct sk_buff *skb;
        struct nfqnl_msg_packet_hdr pmsg;
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
        struct sk_buff *entskb = entry->skb;
        struct net_device *indev;
        struct net_device *outdev;

        size =    NLMSG_SPACE(sizeof(struct nfgenmsg))
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
#endif
                + nla_total_size(sizeof(u_int32_t))     /* mark */
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
                + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));

        outdev = entry->outdev;

        switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
        case NFQNL_COPY_META:
        case NFQNL_COPY_NONE:
                break;

        case NFQNL_COPY_PACKET:
                if (entskb->ip_summed == CHECKSUM_PARTIAL &&
                    skb_checksum_help(entskb))
                        return NULL;

                data_len = ACCESS_ONCE(queue->copy_range);
                if (data_len == 0 || data_len > entskb->len)
                        data_len = entskb->len;

                size += nla_total_size(data_len);
                break;
        }

        skb = alloc_skb(size, GFP_ATOMIC);
        if (!skb)
                goto nlmsg_failure;

        old_tail = skb->tail;
        nlh = NLMSG_PUT(skb, 0, 0,
                        NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
                        sizeof(struct nfgenmsg));
        nfmsg = NLMSG_DATA(nlh);
        nfmsg->nfgen_family = entry->pf;
        nfmsg->version = NFNETLINK_V0;
        nfmsg->res_id = htons(queue->queue_num);

        entry->id = atomic_inc_return(&queue->id_sequence);
        pmsg.packet_id          = htonl(entry->id);
        pmsg.hw_protocol        = entskb->protocol;
        pmsg.hook               = entry->hook;

        NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg);

        indev = entry->indev;
        if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
                NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex));
#else
                if (entry->pf == PF_BRIDGE) {
                        /* Case 1: indev is physical input device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
                                     htonl(indev->ifindex));
                        /* this is the bridge group "brX" */
                        /* rcu_read_lock()ed by __nf_queue */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
                                     htonl(br_port_get_rcu(indev)->br->dev->ifindex));
                } else {
                        /* Case 2: indev is bridge group, we need to look for
                         * physical device (when called from ipv4) */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
                                     htonl(indev->ifindex));
                        if (entskb->nf_bridge && entskb->nf_bridge->physindev)
                                NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
                                             htonl(entskb->nf_bridge->physindev->ifindex));
                }
#endif
        }

        if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
                NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex));
#else
                if (entry->pf == PF_BRIDGE) {
                        /* Case 1: outdev is physical output device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                     htonl(outdev->ifindex));
                        /* this is the bridge group "brX" */
                        /* rcu_read_lock()ed by __nf_queue */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
                                     htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
                } else {
                        /* Case 2: outdev is bridge group, we need to look for
                         * physical output device (when called from ipv4) */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
                                     htonl(outdev->ifindex));
                        if (entskb->nf_bridge && entskb->nf_bridge->physoutdev)
                                NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                             htonl(entskb->nf_bridge->physoutdev->ifindex));
                }
#endif
        }

        if (entskb->mark)
                NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark));

        if (indev && entskb->dev) {
                struct nfqnl_msg_packet_hw phw;
                int len = dev_parse_header(entskb, phw.hw_addr);
                if (len) {
                        phw.hw_addrlen = htons(len);
                        NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
                }
        }

        if (entskb->tstamp.tv64) {
                struct nfqnl_msg_packet_timestamp ts;
                struct timeval tv = ktime_to_timeval(entskb->tstamp);
                ts.sec = cpu_to_be64(tv.tv_sec);
                ts.usec = cpu_to_be64(tv.tv_usec);

                NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
        }

        if (data_len) {
                struct nlattr *nla;
                int sz = nla_attr_size(data_len);

                if (skb_tailroom(skb) < nla_total_size(data_len)) {
                        printk(KERN_WARNING "nf_queue: no tailroom!\n");
                        goto nlmsg_failure;
                }

                nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
                nla->nla_type = NFQA_PAYLOAD;
                nla->nla_len = sz;

                if (skb_copy_bits(entskb, 0, nla_data(nla), data_len))
                        BUG();
        }

        nlh->nlmsg_len = skb->tail - old_tail;
        return skb;

nlmsg_failure:
nla_put_failure:
        if (skb)
                kfree_skb(skb);
        if (net_ratelimit())
                printk(KERN_ERR "nf_queue: error creating packet message\n");
        return NULL;
}

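/* Queue-handler callback invoked by the netfilter core for each packet
 * directed to this queue (e.g. by an iptables NFQUEUE rule).  The netlink
 * message is built first; the entry is added to the queue list, under the
 * instance lock, only after nfnetlink_unicast() has accepted the skb, so a
 * failed send never leaves a stale entry behind. */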
static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
        struct sk_buff *nskb;
        struct nfqnl_instance *queue;
        int err = -ENOBUFS;

        /* rcu_read_lock()ed by nf_hook_slow() */
        queue = instance_lookup(queuenum);
        if (!queue) {
                err = -ESRCH;
                goto err_out;
        }

        if (queue->copy_mode == NFQNL_COPY_NONE) {
                err = -EINVAL;
                goto err_out;
        }

        nskb = nfqnl_build_packet_message(queue, entry);
        if (nskb == NULL) {
                err = -ENOMEM;
                goto err_out;
        }
        spin_lock_bh(&queue->lock);

        if (!queue->peer_pid) {
                err = -EINVAL;
                goto err_out_free_nskb;
        }
        if (queue->queue_total >= queue->queue_maxlen) {
                queue->queue_dropped++;
                if (net_ratelimit())
                        printk(KERN_WARNING "nf_queue: full at %d entries, "
                               "dropping packet(s).\n",
                               queue->queue_total);
                goto err_out_free_nskb;
        }

        /* nfnetlink_unicast will either free the nskb or add it to a socket */
        err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT);
        if (err < 0) {
                queue->queue_user_dropped++;
                goto err_out_unlock;
        }

        __enqueue_entry(queue, entry);

        spin_unlock_bh(&queue->lock);
        return 0;

err_out_free_nskb:
        kfree_skb(nskb);
err_out_unlock:
        spin_unlock_bh(&queue->lock);
err_out:
        return err;
}

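/* Apply a replacement payload supplied with a verdict: trim or grow the skb
 * to the new length (re-allocating when tailroom is short), copy the new
 * data over, and reset ip_summed since any prior checksum state no longer
 * matches the data. */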
static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e)
{
        struct sk_buff *nskb;
        int diff;

        diff = data_len - e->skb->len;
        if (diff < 0) {
                if (pskb_trim(e->skb, data_len))
                        return -ENOMEM;
        } else if (diff > 0) {
                if (data_len > 0xFFFF)
                        return -EINVAL;
                if (diff > skb_tailroom(e->skb)) {
                        nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
                                               diff, GFP_ATOMIC);
                        if (!nskb) {
                                printk(KERN_WARNING "nf_queue: OOM "
                                      "in mangle, dropping packet\n");
                                return -ENOMEM;
                        }
                        kfree_skb(e->skb);
                        e->skb = nskb;
                }
                skb_put(e->skb, diff);
        }
        if (!skb_make_writable(e->skb, data_len))
                return -ENOMEM;
        skb_copy_to_linear_data(e->skb, data, data_len);
        e->skb->ip_summed = CHECKSUM_NONE;
        return 0;
}

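/* Switch the instance between the three copy modes: NONE (packets are not
 * queued), META (metadata only) and PACKET (metadata plus up to 'range'
 * bytes of payload, capped at 0xffff by the 16-bit nla_len below). */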
static int
nfqnl_set_mode(struct nfqnl_instance *queue,
               unsigned char mode, unsigned int range)
{
        int status = 0;

        spin_lock_bh(&queue->lock);
        switch (mode) {
        case NFQNL_COPY_NONE:
        case NFQNL_COPY_META:
                queue->copy_mode = mode;
                queue->copy_range = 0;
                break;

        case NFQNL_COPY_PACKET:
                queue->copy_mode = mode;
                /* we're using struct nlattr which has 16bit nla_len */
                if (range > 0xffff)
                        queue->copy_range = 0xffff;
                else
                        queue->copy_range = range;
                break;

        default:
                status = -EINVAL;
        }
        spin_unlock_bh(&queue->lock);

        return status;
}

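/* nfqnl_flush() match callback: true if the entry entered or will leave
 * through the given interface, including the bridge physical ports when
 * bridge netfilter is compiled in. */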
static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
        if (entry->indev)
                if (entry->indev->ifindex == ifindex)
                        return 1;
        if (entry->outdev)
                if (entry->outdev->ifindex == ifindex)
                        return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
        if (entry->skb->nf_bridge) {
                if (entry->skb->nf_bridge->physindev &&
                    entry->skb->nf_bridge->physindev->ifindex == ifindex)
                        return 1;
                if (entry->skb->nf_bridge->physoutdev &&
                    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
                        return 1;
        }
#endif
        return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(int ifindex)
{
        int i;

        rcu_read_lock();

        for (i = 0; i < INSTANCE_BUCKETS; i++) {
                struct hlist_node *tmp;
                struct nfqnl_instance *inst;
                struct hlist_head *head = &instance_table[i];

                hlist_for_each_entry_rcu(inst, tmp, head, hlist)
                        nfqnl_flush(inst, dev_cmp, ifindex);
        }

        rcu_read_unlock();
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
                    unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;

        /* Drop any packets associated with the downed device */
        if (event == NETDEV_DOWN)
                nfqnl_dev_drop(dev->ifindex);
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
        .notifier_call  = nfqnl_rcv_dev_event,
};

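/* Netlink notifier: when a NETLINK_NETFILTER socket is released
 * (NETLINK_URELEASE), destroy every queue instance bound to that pid so
 * queued packets do not linger with no userspace listener. */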
static int
nfqnl_rcv_nl_event(struct notifier_block *this,
                   unsigned long event, void *ptr)
{
        struct netlink_notify *n = ptr;

        if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
                int i;

                /* destroy all instances for this pid */
                spin_lock(&instances_lock);
                for (i = 0; i < INSTANCE_BUCKETS; i++) {
                        struct hlist_node *tmp, *t2;
                        struct nfqnl_instance *inst;
                        struct hlist_head *head = &instance_table[i];

                        hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
                                if ((n->net == &init_net) &&
                                    (n->pid == inst->peer_pid))
                                        __instance_destroy(inst);
                        }
                }
                spin_unlock(&instances_lock);
        }
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
        .notifier_call  = nfqnl_rcv_nl_event,
};

static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
        [NFQA_VERDICT_HDR]      = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
        [NFQA_MARK]             = { .type = NLA_U32 },
        [NFQA_PAYLOAD]          = { .type = NLA_UNSPEC },
};

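/* Handle an NFQNL_MSG_VERDICT from userspace: look up the instance, check
 * that the sender is the bound peer, dequeue the entry by packet id, apply
 * an optional replacement payload and mark, then reinject the packet with
 * the requested verdict. */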
static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
                   const struct nlmsghdr *nlh,
                   const struct nlattr * const nfqa[])
{
        struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);

        struct nfqnl_msg_verdict_hdr *vhdr;
        struct nfqnl_instance *queue;
        unsigned int verdict;
        struct nf_queue_entry *entry;
        int err;

        rcu_read_lock();
        queue = instance_lookup(queue_num);
        if (!queue) {
                err = -ENODEV;
                goto err_out_unlock;
        }

        if (queue->peer_pid != NETLINK_CB(skb).pid) {
                err = -EPERM;
                goto err_out_unlock;
        }

        if (!nfqa[NFQA_VERDICT_HDR]) {
                err = -EINVAL;
                goto err_out_unlock;
        }

        vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
        verdict = ntohl(vhdr->verdict);

        if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) {
                err = -EINVAL;
                goto err_out_unlock;
        }

        entry = find_dequeue_entry(queue, ntohl(vhdr->id));
        if (entry == NULL) {
                err = -ENOENT;
                goto err_out_unlock;
        }
        rcu_read_unlock();

        if (nfqa[NFQA_PAYLOAD]) {
                if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
                                 nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0)
                        verdict = NF_DROP;
        }

        if (nfqa[NFQA_MARK])
                entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

        nf_reinject(entry, verdict);
        return 0;

err_out_unlock:
        rcu_read_unlock();
        return err;
}

static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
                  const struct nlmsghdr *nlh,
                  const struct nlattr * const nfqa[])
{
        return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
        [NFQA_CFG_CMD]          = { .len = sizeof(struct nfqnl_msg_config_cmd) },
        [NFQA_CFG_PARAMS]       = { .len = sizeof(struct nfqnl_msg_config_params) },
};

static const struct nf_queue_handler nfqh = {
        .name   = "nf_queue",
        .outfn  = &nfqnl_enqueue_packet,
};

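/* Handle NFQNL_MSG_CONFIG: PF_(UN)BIND (un)registers the queue handler for
 * a protocol family and may sleep, so it runs before rcu_read_lock() is
 * taken; (UN)BIND creates or destroys the per-queue instance, and the
 * params/maxlen attributes tune an existing instance. */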
static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
                  const struct nlmsghdr *nlh,
                  const struct nlattr * const nfqa[])
{
        struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);
        struct nfqnl_instance *queue;
        struct nfqnl_msg_config_cmd *cmd = NULL;
        int ret = 0;

        if (nfqa[NFQA_CFG_CMD]) {
                cmd = nla_data(nfqa[NFQA_CFG_CMD]);

                /* Commands without queue context - might sleep */
                switch (cmd->command) {
                case NFQNL_CFG_CMD_PF_BIND:
                        return nf_register_queue_handler(ntohs(cmd->pf),
                                                         &nfqh);
                case NFQNL_CFG_CMD_PF_UNBIND:
                        return nf_unregister_queue_handler(ntohs(cmd->pf),
                                                           &nfqh);
                }
        }

        rcu_read_lock();
        queue = instance_lookup(queue_num);
        if (queue && queue->peer_pid != NETLINK_CB(skb).pid) {
                ret = -EPERM;
                goto err_out_unlock;
        }

        if (cmd != NULL) {
                switch (cmd->command) {
                case NFQNL_CFG_CMD_BIND:
                        if (queue) {
                                ret = -EBUSY;
                                goto err_out_unlock;
                        }
                        queue = instance_create(queue_num, NETLINK_CB(skb).pid);
                        if (IS_ERR(queue)) {
                                ret = PTR_ERR(queue);
                                goto err_out_unlock;
                        }
                        break;
                case NFQNL_CFG_CMD_UNBIND:
                        if (!queue) {
                                ret = -ENODEV;
                                goto err_out_unlock;
                        }
                        instance_destroy(queue);
                        break;
                case NFQNL_CFG_CMD_PF_BIND:
                case NFQNL_CFG_CMD_PF_UNBIND:
                        break;
                default:
                        ret = -ENOTSUPP;
                        break;
                }
        }

        if (nfqa[NFQA_CFG_PARAMS]) {
                struct nfqnl_msg_config_params *params;

                if (!queue) {
                        ret = -ENODEV;
                        goto err_out_unlock;
                }
                params = nla_data(nfqa[NFQA_CFG_PARAMS]);
                nfqnl_set_mode(queue, params->copy_mode,
                                ntohl(params->copy_range));
        }

        if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
                __be32 *queue_maxlen;

                if (!queue) {
                        ret = -ENODEV;
                        goto err_out_unlock;
                }
                queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
                spin_lock_bh(&queue->lock);
                queue->queue_maxlen = ntohl(*queue_maxlen);
                spin_unlock_bh(&queue->lock);
        }

err_out_unlock:
        rcu_read_unlock();
        return ret;
}

static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
        [NFQNL_MSG_PACKET]      = { .call = nfqnl_recv_unsupp,
                                    .attr_count = NFQA_MAX, },
        [NFQNL_MSG_VERDICT]     = { .call = nfqnl_recv_verdict,
                                    .attr_count = NFQA_MAX,
                                    .policy = nfqa_verdict_policy },
        [NFQNL_MSG_CONFIG]      = { .call = nfqnl_recv_config,
                                    .attr_count = NFQA_CFG_MAX,
                                    .policy = nfqa_cfg_policy },
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
        .name           = "nf_queue",
        .subsys_id      = NFNL_SUBSYS_QUEUE,
        .cb_count       = NFQNL_MSG_MAX,
        .cb             = nfqnl_cb,
};

#ifdef CONFIG_PROC_FS
struct iter_state {
        unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
        struct iter_state *st = seq->private;

        if (!st)
                return NULL;

        for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
                if (!hlist_empty(&instance_table[st->bucket]))
                        return instance_table[st->bucket].first;
        }
        return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
        struct iter_state *st = seq->private;

        h = h->next;
        while (!h) {
                if (++st->bucket >= INSTANCE_BUCKETS)
                        return NULL;

                h = instance_table[st->bucket].first;
        }
        return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head;
        head = get_first(seq);

        if (head)
                while (pos && (head = get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(instances_lock)
{
        spin_lock(&instances_lock);
        return get_idx(seq, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        (*pos)++;
        return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
        __releases(instances_lock)
{
        spin_unlock(&instances_lock);
}

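/* One line per instance in /proc/net/netfilter/nfnetlink_queue: queue
 * number, peer pid, current queue length, copy mode, copy range, drops
 * (queue full), drops (send to userspace failed), last packet id, and a
 * constant trailing 1. */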
static int seq_show(struct seq_file *s, void *v)
{
        const struct nfqnl_instance *inst = v;

        return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
                          inst->queue_num,
                          inst->peer_pid, inst->queue_total,
                          inst->copy_mode, inst->copy_range,
                          inst->queue_dropped, inst->queue_user_dropped,
                          atomic_read(&inst->id_sequence), 1);
}

static const struct seq_operations nfqnl_seq_ops = {
        .start  = seq_start,
        .next   = seq_next,
        .stop   = seq_stop,
        .show   = seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
        return seq_open_private(file, &nfqnl_seq_ops,
                        sizeof(struct iter_state));
}

static const struct file_operations nfqnl_file_ops = {
        .owner   = THIS_MODULE,
        .open    = nfqnl_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

#endif /* CONFIG_PROC_FS */

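/* Module init: set up the instance hash, register the netlink-release
 * notifier and the nfnetlink subsystem, create the proc entry, and hook
 * device-down events.  The queue handler itself is only registered per
 * protocol family via NFQNL_CFG_CMD_PF_BIND from userspace. */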
static int __init nfnetlink_queue_init(void)
{
        int i, status = -ENOMEM;

        for (i = 0; i < INSTANCE_BUCKETS; i++)
                INIT_HLIST_HEAD(&instance_table[i]);

        netlink_register_notifier(&nfqnl_rtnl_notifier);
        status = nfnetlink_subsys_register(&nfqnl_subsys);
        if (status < 0) {
                printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
                goto cleanup_netlink_notifier;
        }

#ifdef CONFIG_PROC_FS
        if (!proc_create("nfnetlink_queue", 0440,
                         proc_net_netfilter, &nfqnl_file_ops))
                goto cleanup_subsys;
#endif

        register_netdevice_notifier(&nfqnl_dev_notifier);
        return status;

#ifdef CONFIG_PROC_FS
cleanup_subsys:
        nfnetlink_subsys_unregister(&nfqnl_subsys);
#endif
cleanup_netlink_notifier:
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
        return status;
}

static void __exit nfnetlink_queue_fini(void)
{
        nf_unregister_queue_handlers(&nfqh);
        unregister_netdevice_notifier(&nfqnl_dev_notifier);
#ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
#endif
        nfnetlink_subsys_unregister(&nfqnl_subsys);
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);

        rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);