/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 * (C) 2007 by Patrick McHardy <kaber@trash.net>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/netfilter/nf_queue.h>
#include <net/netfilter/nfnetlink_queue.h>

#include <linux/atomic.h>

#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024

struct nfqnl_instance {
        struct hlist_node hlist;                /* global list of queues */
        struct rcu_head rcu;

        int peer_portid;
        unsigned int queue_maxlen;
        unsigned int copy_range;
        unsigned int queue_dropped;
        unsigned int queue_user_dropped;


        u_int16_t queue_num;                    /* number of this queue */
        u_int8_t copy_mode;
        u_int32_t flags;                        /* Set using NFQA_CFG_FLAGS */
/*
 * Following fields are dirtied for each queued packet,
 * keep them in same cache line if possible.
 */
        spinlock_t      lock;
        unsigned int    queue_total;
        unsigned int    id_sequence;            /* 'sequence' of pkt ids */
        struct list_head queue_list;            /* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);

static DEFINE_SPINLOCK(instances_lock);

#define INSTANCE_BUCKETS        16
static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly;

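/* Map a 16-bit queue number onto one of the INSTANCE_BUCKETS hash chains.
 * OR-ing the high byte into the low byte before the modulo helps spread
 * queue numbers that differ only in their upper byte across buckets.
 */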
static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
        return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
}

static struct nfqnl_instance *
instance_lookup(u_int16_t queue_num)
{
        struct hlist_head *head;
        struct nfqnl_instance *inst;

        head = &instance_table[instance_hashfn(queue_num)];
        hlist_for_each_entry_rcu(inst, head, hlist) {
                if (inst->queue_num == queue_num)
                        return inst;
        }
        return NULL;
}

static struct nfqnl_instance *
instance_create(u_int16_t queue_num, int portid)
{
        struct nfqnl_instance *inst;
        unsigned int h;
        int err;

        spin_lock(&instances_lock);
        if (instance_lookup(queue_num)) {
                err = -EEXIST;
                goto out_unlock;
        }

        inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
        if (!inst) {
                err = -ENOMEM;
                goto out_unlock;
        }

        inst->queue_num = queue_num;
        inst->peer_portid = portid;
        inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
        inst->copy_range = 0xfffff;
        inst->copy_mode = NFQNL_COPY_NONE;
        spin_lock_init(&inst->lock);
        INIT_LIST_HEAD(&inst->queue_list);

        if (!try_module_get(THIS_MODULE)) {
                err = -EAGAIN;
                goto out_free;
        }

        h = instance_hashfn(queue_num);
        hlist_add_head_rcu(&inst->hlist, &instance_table[h]);

        spin_unlock(&instances_lock);

        return inst;

out_free:
        kfree(inst);
out_unlock:
        spin_unlock(&instances_lock);
        return ERR_PTR(err);
}

static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
                        unsigned long data);

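/* Deferred destructor, run after an RCU grace period: at this point no
 * reader can still hold a reference obtained through instance_lookup(),
 * so the queue can safely be flushed (dropping all pending packets) and
 * freed.
 */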
static void
instance_destroy_rcu(struct rcu_head *head)
{
        struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
                                                   rcu);

        nfqnl_flush(inst, NULL, 0);
        kfree(inst);
        module_put(THIS_MODULE);
}

static void
__instance_destroy(struct nfqnl_instance *inst)
{
        hlist_del_rcu(&inst->hlist);
        call_rcu(&inst->rcu, instance_destroy_rcu);
}

static void
instance_destroy(struct nfqnl_instance *inst)
{
        spin_lock(&instances_lock);
        __instance_destroy(inst);
        spin_unlock(&instances_lock);
}

static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
        list_add_tail(&entry->list, &queue->queue_list);
        queue->queue_total++;
}

static void
__dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
        list_del(&entry->list);
        queue->queue_total--;
}

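/* Find the queued entry with the given packet id and unlink it from the
 * queue, all under queue->lock.  Returns NULL if no packet with that id
 * is currently queued.
 */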
static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
        struct nf_queue_entry *entry = NULL, *i;

        spin_lock_bh(&queue->lock);

        list_for_each_entry(i, &queue->queue_list, list) {
                if (i->id == id) {
                        entry = i;
                        break;
                }
        }

        if (entry)
                __dequeue_entry(queue, entry);

        spin_unlock_bh(&queue->lock);

        return entry;
}

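/* Drop (NF_DROP) every queued entry for which cmpfn(entry, data) returns
 * true; a NULL cmpfn matches everything and thus empties the queue.
 */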
static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
        struct nf_queue_entry *entry, *next;

        spin_lock_bh(&queue->lock);
        list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
                if (!cmpfn || cmpfn(entry, data)) {
                        list_del(&entry->list);
                        queue->queue_total--;
                        nf_reinject(entry, NF_DROP);
                }
        }
        spin_unlock_bh(&queue->lock);
}

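/* Build the NFQNL_MSG_PACKET netlink message for one queued packet.  The
 * packet id attribute is filled in later by the caller under the queue
 * lock, which is why a pointer to it is handed back via *packet_id_ptr.
 */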
static struct sk_buff *
nfqnl_build_packet_message(struct nfqnl_instance *queue,
                           struct nf_queue_entry *entry,
                           __be32 **packet_id_ptr)
{
        sk_buff_data_t old_tail;
        size_t size;
        size_t data_len = 0, cap_len = 0;
        struct sk_buff *skb;
        struct nlattr *nla;
        struct nfqnl_msg_packet_hdr *pmsg;
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
        struct sk_buff *entskb = entry->skb;
        struct net_device *indev;
        struct net_device *outdev;
        struct nf_conn *ct = NULL;
        enum ip_conntrack_info uninitialized_var(ctinfo);

        size =    NLMSG_SPACE(sizeof(struct nfgenmsg))
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
#endif
                + nla_total_size(sizeof(u_int32_t))     /* mark */
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
                + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp))
                + nla_total_size(sizeof(u_int32_t));    /* cap_len */

        outdev = entry->outdev;

        switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
        case NFQNL_COPY_META:
        case NFQNL_COPY_NONE:
                break;

        case NFQNL_COPY_PACKET:
                if (entskb->ip_summed == CHECKSUM_PARTIAL &&
                    skb_checksum_help(entskb))
                        return NULL;

                data_len = ACCESS_ONCE(queue->copy_range);
                if (data_len == 0 || data_len > entskb->len)
                        data_len = entskb->len;

                size += nla_total_size(data_len);
                cap_len = entskb->len;
                break;
        }

        if (queue->flags & NFQA_CFG_F_CONNTRACK)
                ct = nfqnl_ct_get(entskb, &size, &ctinfo);

        skb = alloc_skb(size, GFP_ATOMIC);
        if (!skb)
                return NULL;

        old_tail = skb->tail;
        nlh = nlmsg_put(skb, 0, 0,
                        NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
                        sizeof(struct nfgenmsg), 0);
        if (!nlh) {
                kfree_skb(skb);
                return NULL;
        }
        nfmsg = nlmsg_data(nlh);
        nfmsg->nfgen_family = entry->pf;
        nfmsg->version = NFNETLINK_V0;
        nfmsg->res_id = htons(queue->queue_num);

        nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
        pmsg = nla_data(nla);
        pmsg->hw_protocol       = entskb->protocol;
        pmsg->hook              = entry->hook;
        *packet_id_ptr          = &pmsg->packet_id;

        indev = entry->indev;
        if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
                if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
                        goto nla_put_failure;
#else
                if (entry->pf == PF_BRIDGE) {
                        /* Case 1: indev is physical input device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
                                         htonl(indev->ifindex)) ||
                        /* this is the bridge group "brX" */
                        /* rcu_read_lock()ed by __nf_queue */
                            nla_put_be32(skb, NFQA_IFINDEX_INDEV,
                                         htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
                                goto nla_put_failure;
                } else {
                        /* Case 2: indev is bridge group, we need to look for
                         * physical device (when called from ipv4) */
                        if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
                                         htonl(indev->ifindex)))
                                goto nla_put_failure;
                        if (entskb->nf_bridge && entskb->nf_bridge->physindev &&
                            nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
                                         htonl(entskb->nf_bridge->physindev->ifindex)))
                                goto nla_put_failure;
                }
#endif
        }

        if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
                if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
                        goto nla_put_failure;
#else
                if (entry->pf == PF_BRIDGE) {
                        /* Case 1: outdev is physical output device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                         htonl(outdev->ifindex)) ||
                        /* this is the bridge group "brX" */
                        /* rcu_read_lock()ed by __nf_queue */
                            nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
                                         htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
                                goto nla_put_failure;
                } else {
                        /* Case 2: outdev is bridge group, we need to look for
                         * physical output device (when called from ipv4) */
                        if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
                                         htonl(outdev->ifindex)))
                                goto nla_put_failure;
                        if (entskb->nf_bridge && entskb->nf_bridge->physoutdev &&
                            nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                         htonl(entskb->nf_bridge->physoutdev->ifindex)))
                                goto nla_put_failure;
                }
#endif
        }

        if (entskb->mark &&
            nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
                goto nla_put_failure;

        if (indev && entskb->dev &&
            entskb->mac_header != entskb->network_header) {
                struct nfqnl_msg_packet_hw phw;
                int len = dev_parse_header(entskb, phw.hw_addr);
                if (len) {
                        phw.hw_addrlen = htons(len);
                        if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
                                goto nla_put_failure;
                }
        }

        if (entskb->tstamp.tv64) {
                struct nfqnl_msg_packet_timestamp ts;
                struct timeval tv = ktime_to_timeval(entskb->tstamp);
                ts.sec = cpu_to_be64(tv.tv_sec);
                ts.usec = cpu_to_be64(tv.tv_usec);

                if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
                        goto nla_put_failure;
        }

        if (data_len) {
                struct nlattr *nla;
                int sz = nla_attr_size(data_len);

                if (skb_tailroom(skb) < nla_total_size(data_len)) {
                        printk(KERN_WARNING "nf_queue: no tailroom!\n");
                        kfree_skb(skb);
                        return NULL;
                }

                nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
                nla->nla_type = NFQA_PAYLOAD;
                nla->nla_len = sz;

                if (skb_copy_bits(entskb, 0, nla_data(nla), data_len))
                        BUG();
        }

        if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0)
                goto nla_put_failure;

        if (cap_len > 0 && nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
                goto nla_put_failure;

        nlh->nlmsg_len = skb->tail - old_tail;
        return skb;

nla_put_failure:
        kfree_skb(skb);
        net_err_ratelimited("nf_queue: error creating packet message\n");
        return NULL;
}

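/* Queue handler callback, invoked by the nf_queue core for each packet
 * that hits an NFQUEUE verdict.  It builds the netlink message, assigns
 * the packet id and unicasts the message to the peer socket; when the
 * queue is full the packet is dropped, or reinjected as NF_ACCEPT if
 * NFQA_CFG_F_FAIL_OPEN is set.
 */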
static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
        struct sk_buff *nskb;
        struct nfqnl_instance *queue;
        int err = -ENOBUFS;
        __be32 *packet_id_ptr;
        int failopen = 0;

        /* rcu_read_lock()ed by nf_hook_slow() */
        queue = instance_lookup(queuenum);
        if (!queue) {
                err = -ESRCH;
                goto err_out;
        }

        if (queue->copy_mode == NFQNL_COPY_NONE) {
                err = -EINVAL;
                goto err_out;
        }

        nskb = nfqnl_build_packet_message(queue, entry, &packet_id_ptr);
        if (nskb == NULL) {
                err = -ENOMEM;
                goto err_out;
        }
        spin_lock_bh(&queue->lock);

        if (!queue->peer_portid) {
                err = -EINVAL;
                goto err_out_free_nskb;
        }
        if (queue->queue_total >= queue->queue_maxlen) {
                if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
                        failopen = 1;
                        err = 0;
                } else {
                        queue->queue_dropped++;
                        net_warn_ratelimited("nf_queue: full at %d entries, dropping packet(s)\n",
                                             queue->queue_total);
                }
                goto err_out_free_nskb;
        }
        entry->id = ++queue->id_sequence;
        *packet_id_ptr = htonl(entry->id);

        /* nfnetlink_unicast will either free the nskb or add it to a socket */
        err = nfnetlink_unicast(nskb, &init_net, queue->peer_portid, MSG_DONTWAIT);
        if (err < 0) {
                queue->queue_user_dropped++;
                goto err_out_unlock;
        }

        __enqueue_entry(queue, entry);

        spin_unlock_bh(&queue->lock);
        return 0;

err_out_free_nskb:
        kfree_skb(nskb);
err_out_unlock:
        spin_unlock_bh(&queue->lock);
        if (failopen)
                nf_reinject(entry, NF_ACCEPT);
err_out:
        return err;
}

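/* Replace the payload of a queued packet with the data userspace supplied
 * in NFQA_PAYLOAD, shrinking or growing the skb as needed.  The checksum
 * is invalidated because the packet contents have changed.
 */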
static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
{
        struct sk_buff *nskb;

        if (diff < 0) {
                if (pskb_trim(e->skb, data_len))
                        return -ENOMEM;
        } else if (diff > 0) {
                if (data_len > 0xFFFF)
                        return -EINVAL;
                if (diff > skb_tailroom(e->skb)) {
                        nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
                                               diff, GFP_ATOMIC);
                        if (!nskb) {
                                printk(KERN_WARNING "nf_queue: OOM "
                                      "in mangle, dropping packet\n");
                                return -ENOMEM;
                        }
                        kfree_skb(e->skb);
                        e->skb = nskb;
                }
                skb_put(e->skb, diff);
        }
        if (!skb_make_writable(e->skb, data_len))
                return -ENOMEM;
        skb_copy_to_linear_data(e->skb, data, data_len);
        e->skb->ip_summed = CHECKSUM_NONE;
        return 0;
}

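/* Apply an NFQA_CFG_PARAMS request: switch the copy mode and clamp the
 * requested copy range to the largest payload that fits into a single
 * netlink attribute.
 */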
static int
nfqnl_set_mode(struct nfqnl_instance *queue,
               unsigned char mode, unsigned int range)
{
        int status = 0;

        spin_lock_bh(&queue->lock);
        switch (mode) {
        case NFQNL_COPY_NONE:
        case NFQNL_COPY_META:
                queue->copy_mode = mode;
                queue->copy_range = 0;
                break;

        case NFQNL_COPY_PACKET:
                queue->copy_mode = mode;
                /* We're using struct nlattr which has 16bit nla_len. Note that
                 * nla_len includes the header length. Thus, the maximum packet
                 * length that we support is 65531 bytes. We send truncated
                 * packets if the specified length is larger than that.
                 */
                if (range > 0xffff - NLA_HDRLEN)
                        queue->copy_range = 0xffff - NLA_HDRLEN;
                else
                        queue->copy_range = range;
                break;

        default:
                status = -EINVAL;

        }
        spin_unlock_bh(&queue->lock);

        return status;
}

static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
        if (entry->indev)
                if (entry->indev->ifindex == ifindex)
                        return 1;
        if (entry->outdev)
                if (entry->outdev->ifindex == ifindex)
                        return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
        if (entry->skb->nf_bridge) {
                if (entry->skb->nf_bridge->physindev &&
                    entry->skb->nf_bridge->physindev->ifindex == ifindex)
                        return 1;
                if (entry->skb->nf_bridge->physoutdev &&
                    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
                        return 1;
        }
#endif
        return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(int ifindex)
{
        int i;

        rcu_read_lock();

        for (i = 0; i < INSTANCE_BUCKETS; i++) {
                struct nfqnl_instance *inst;
                struct hlist_head *head = &instance_table[i];

                hlist_for_each_entry_rcu(inst, head, hlist)
                        nfqnl_flush(inst, dev_cmp, ifindex);
        }

        rcu_read_unlock();
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
                    unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;

        /* Drop any packets associated with the downed device */
        if (event == NETDEV_DOWN)
                nfqnl_dev_drop(dev->ifindex);
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
        .notifier_call  = nfqnl_rcv_dev_event,
};

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
                   unsigned long event, void *ptr)
{
        struct netlink_notify *n = ptr;

        if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
                int i;

                /* destroy all instances for this portid */
                spin_lock(&instances_lock);
                for (i = 0; i < INSTANCE_BUCKETS; i++) {
                        struct hlist_node *t2;
                        struct nfqnl_instance *inst;
                        struct hlist_head *head = &instance_table[i];

                        hlist_for_each_entry_safe(inst, t2, head, hlist) {
                                if ((n->net == &init_net) &&
                                    (n->portid == inst->peer_portid))
                                        __instance_destroy(inst);
                        }
                }
                spin_unlock(&instances_lock);
        }
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
        .notifier_call  = nfqnl_rcv_nl_event,
};

static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
        [NFQA_VERDICT_HDR]      = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
        [NFQA_MARK]             = { .type = NLA_U32 },
        [NFQA_PAYLOAD]          = { .type = NLA_UNSPEC },
        [NFQA_CT]               = { .type = NLA_UNSPEC },
};

static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
        [NFQA_VERDICT_HDR]      = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
        [NFQA_MARK]             = { .type = NLA_U32 },
};

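/* Like instance_lookup(), but also verifies that the message comes from
 * the netlink portid that owns the queue.
 */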
static struct nfqnl_instance *verdict_instance_lookup(u16 queue_num, int nlportid)
{
        struct nfqnl_instance *queue;

        queue = instance_lookup(queue_num);
        if (!queue)
                return ERR_PTR(-ENODEV);

        if (queue->peer_portid != nlportid)
                return ERR_PTR(-EPERM);

        return queue;
}

static struct nfqnl_msg_verdict_hdr*
verdicthdr_get(const struct nlattr * const nfqa[])
{
        struct nfqnl_msg_verdict_hdr *vhdr;
        unsigned int verdict;

        if (!nfqa[NFQA_VERDICT_HDR])
                return NULL;

        vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
        verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
        if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
                return NULL;
        return vhdr;
}

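/* Wraparound-safe "is id after max?" comparison for packet ids, analogous
 * to the time_after() helpers for jiffies.
 */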
static int nfq_id_after(unsigned int id, unsigned int max)
{
        return (int)(id - max) > 0;
}

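/* NFQNL_MSG_VERDICT_BATCH: apply a single verdict to every queued packet
 * whose id is not after the id in the verdict header, letting userspace
 * acknowledge a whole range of packets with one message.
 */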
static int
nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb,
                   const struct nlmsghdr *nlh,
                   const struct nlattr * const nfqa[])
{
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        struct nf_queue_entry *entry, *tmp;
        unsigned int verdict, maxid;
        struct nfqnl_msg_verdict_hdr *vhdr;
        struct nfqnl_instance *queue;
        LIST_HEAD(batch_list);
        u16 queue_num = ntohs(nfmsg->res_id);

        queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).portid);
        if (IS_ERR(queue))
                return PTR_ERR(queue);

        vhdr = verdicthdr_get(nfqa);
        if (!vhdr)
                return -EINVAL;

        verdict = ntohl(vhdr->verdict);
        maxid = ntohl(vhdr->id);

        spin_lock_bh(&queue->lock);

        list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
                if (nfq_id_after(entry->id, maxid))
                        break;
                __dequeue_entry(queue, entry);
                list_add_tail(&entry->list, &batch_list);
        }

        spin_unlock_bh(&queue->lock);

        if (list_empty(&batch_list))
                return -ENOENT;

        list_for_each_entry_safe(entry, tmp, &batch_list, list) {
                if (nfqa[NFQA_MARK])
                        entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
                nf_reinject(entry, verdict);
        }
        return 0;
}

static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
                   const struct nlmsghdr *nlh,
                   const struct nlattr * const nfqa[])
{
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);

        struct nfqnl_msg_verdict_hdr *vhdr;
        struct nfqnl_instance *queue;
        unsigned int verdict;
        struct nf_queue_entry *entry;
        enum ip_conntrack_info uninitialized_var(ctinfo);
        struct nf_conn *ct = NULL;

        queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).portid);
        if (IS_ERR(queue))
                return PTR_ERR(queue);

        vhdr = verdicthdr_get(nfqa);
        if (!vhdr)
                return -EINVAL;

        verdict = ntohl(vhdr->verdict);

        entry = find_dequeue_entry(queue, ntohl(vhdr->id));
        if (entry == NULL)
                return -ENOENT;

        rcu_read_lock();
        if (nfqa[NFQA_CT] && (queue->flags & NFQA_CFG_F_CONNTRACK))
                ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo);

        if (nfqa[NFQA_PAYLOAD]) {
                u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
                int diff = payload_len - entry->skb->len;

                if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
                                 payload_len, entry, diff) < 0)
                        verdict = NF_DROP;

                if (ct)
                        nfqnl_ct_seq_adjust(entry->skb, ct, ctinfo, diff);
        }
        rcu_read_unlock();

        if (nfqa[NFQA_MARK])
                entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

        nf_reinject(entry, verdict);
        return 0;
}

static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
                  const struct nlmsghdr *nlh,
                  const struct nlattr * const nfqa[])
{
        return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
        [NFQA_CFG_CMD]          = { .len = sizeof(struct nfqnl_msg_config_cmd) },
        [NFQA_CFG_PARAMS]       = { .len = sizeof(struct nfqnl_msg_config_params) },
        [NFQA_CFG_QUEUE_MAXLEN] = { .type = NLA_U32 },
        [NFQA_CFG_MASK]         = { .type = NLA_U32 },
        [NFQA_CFG_FLAGS]        = { .type = NLA_U32 },
};

static const struct nf_queue_handler nfqh = {
        .outfn  = &nfqnl_enqueue_packet,
};

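/* NFQNL_MSG_CONFIG: bind or unbind a queue instance to the sending socket
 * and update its parameters (copy mode/range, maximum queue length,
 * flags).  Requests against a queue owned by another portid are refused
 * with -EPERM.
 */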
static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
                  const struct nlmsghdr *nlh,
                  const struct nlattr * const nfqa[])
{
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);
        struct nfqnl_instance *queue;
        struct nfqnl_msg_config_cmd *cmd = NULL;
        int ret = 0;

        if (nfqa[NFQA_CFG_CMD]) {
                cmd = nla_data(nfqa[NFQA_CFG_CMD]);

                /* Obsolete commands without queue context */
                switch (cmd->command) {
                case NFQNL_CFG_CMD_PF_BIND: return 0;
                case NFQNL_CFG_CMD_PF_UNBIND: return 0;
                }
        }

        rcu_read_lock();
        queue = instance_lookup(queue_num);
        if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
                ret = -EPERM;
                goto err_out_unlock;
        }

        if (cmd != NULL) {
                switch (cmd->command) {
                case NFQNL_CFG_CMD_BIND:
                        if (queue) {
                                ret = -EBUSY;
                                goto err_out_unlock;
                        }
                        queue = instance_create(queue_num, NETLINK_CB(skb).portid);
                        if (IS_ERR(queue)) {
                                ret = PTR_ERR(queue);
                                goto err_out_unlock;
                        }
                        break;
                case NFQNL_CFG_CMD_UNBIND:
                        if (!queue) {
                                ret = -ENODEV;
                                goto err_out_unlock;
                        }
                        instance_destroy(queue);
                        break;
                case NFQNL_CFG_CMD_PF_BIND:
                case NFQNL_CFG_CMD_PF_UNBIND:
                        break;
                default:
                        ret = -ENOTSUPP;
                        break;
                }
        }

        if (nfqa[NFQA_CFG_PARAMS]) {
                struct nfqnl_msg_config_params *params;

                if (!queue) {
                        ret = -ENODEV;
                        goto err_out_unlock;
                }
                params = nla_data(nfqa[NFQA_CFG_PARAMS]);
                nfqnl_set_mode(queue, params->copy_mode,
                                ntohl(params->copy_range));
        }

        if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
                __be32 *queue_maxlen;

                if (!queue) {
                        ret = -ENODEV;
                        goto err_out_unlock;
                }
                queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
                spin_lock_bh(&queue->lock);
                queue->queue_maxlen = ntohl(*queue_maxlen);
                spin_unlock_bh(&queue->lock);
        }

        if (nfqa[NFQA_CFG_FLAGS]) {
                __u32 flags, mask;

                if (!queue) {
                        ret = -ENODEV;
                        goto err_out_unlock;
                }

                if (!nfqa[NFQA_CFG_MASK]) {
                        /* A mask is needed to specify which flags are being
                         * changed.
                         */
                        ret = -EINVAL;
                        goto err_out_unlock;
                }

                flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
                mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));

                if (flags >= NFQA_CFG_F_MAX) {
                        ret = -EOPNOTSUPP;
                        goto err_out_unlock;
                }

                spin_lock_bh(&queue->lock);
                queue->flags &= ~mask;
                queue->flags |= flags & mask;
                spin_unlock_bh(&queue->lock);
        }

err_out_unlock:
        rcu_read_unlock();
        return ret;
}

static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
        [NFQNL_MSG_PACKET]      = { .call_rcu = nfqnl_recv_unsupp,
                                    .attr_count = NFQA_MAX, },
        [NFQNL_MSG_VERDICT]     = { .call_rcu = nfqnl_recv_verdict,
                                    .attr_count = NFQA_MAX,
                                    .policy = nfqa_verdict_policy },
        [NFQNL_MSG_CONFIG]      = { .call = nfqnl_recv_config,
                                    .attr_count = NFQA_CFG_MAX,
                                    .policy = nfqa_cfg_policy },
        [NFQNL_MSG_VERDICT_BATCH]={ .call_rcu = nfqnl_recv_verdict_batch,
                                    .attr_count = NFQA_MAX,
                                    .policy = nfqa_verdict_batch_policy },
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
        .name           = "nf_queue",
        .subsys_id      = NFNL_SUBSYS_QUEUE,
        .cb_count       = NFQNL_MSG_MAX,
        .cb             = nfqnl_cb,
};

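/* /proc/net/netfilter/nfnetlink_queue: one line per instance showing the
 * queue number, peer portid, queue length, copy mode/range, the two drop
 * counters, the last packet id handed out and a trailing constant 1.
 */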
#ifdef CONFIG_PROC_FS
struct iter_state {
        unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
        struct iter_state *st = seq->private;

        if (!st)
                return NULL;

        for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
                if (!hlist_empty(&instance_table[st->bucket]))
                        return instance_table[st->bucket].first;
        }
        return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
        struct iter_state *st = seq->private;

        h = h->next;
        while (!h) {
                if (++st->bucket >= INSTANCE_BUCKETS)
                        return NULL;

                h = instance_table[st->bucket].first;
        }
        return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head;
        head = get_first(seq);

        if (head)
                while (pos && (head = get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(instances_lock)
{
        spin_lock(&instances_lock);
        return get_idx(seq, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        (*pos)++;
        return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
        __releases(instances_lock)
{
        spin_unlock(&instances_lock);
}

static int seq_show(struct seq_file *s, void *v)
{
        const struct nfqnl_instance *inst = v;

        return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
                          inst->queue_num,
                          inst->peer_portid, inst->queue_total,
                          inst->copy_mode, inst->copy_range,
                          inst->queue_dropped, inst->queue_user_dropped,
                          inst->id_sequence, 1);
}

static const struct seq_operations nfqnl_seq_ops = {
        .start  = seq_start,
        .next   = seq_next,
        .stop   = seq_stop,
        .show   = seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
        return seq_open_private(file, &nfqnl_seq_ops,
                        sizeof(struct iter_state));
}

static const struct file_operations nfqnl_file_ops = {
        .owner   = THIS_MODULE,
        .open    = nfqnl_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

#endif /* PROC_FS */

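/* Register the netlink notifier before the nfnetlink subsystem so that
 * instances are cleaned up on socket release as soon as the subsystem is
 * reachable; the queue handler is registered last, once everything else
 * is in place.
 */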
static int __init nfnetlink_queue_init(void)
{
        int i, status = -ENOMEM;

        for (i = 0; i < INSTANCE_BUCKETS; i++)
                INIT_HLIST_HEAD(&instance_table[i]);

        netlink_register_notifier(&nfqnl_rtnl_notifier);
        status = nfnetlink_subsys_register(&nfqnl_subsys);
        if (status < 0) {
                printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
                goto cleanup_netlink_notifier;
        }

#ifdef CONFIG_PROC_FS
        if (!proc_create("nfnetlink_queue", 0440,
                         proc_net_netfilter, &nfqnl_file_ops)) {
                status = -ENOMEM;
                goto cleanup_subsys;
        }
#endif

        register_netdevice_notifier(&nfqnl_dev_notifier);
        nf_register_queue_handler(&nfqh);
        return status;

#ifdef CONFIG_PROC_FS
cleanup_subsys:
        nfnetlink_subsys_unregister(&nfqnl_subsys);
#endif
cleanup_netlink_notifier:
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
        return status;
}

static void __exit nfnetlink_queue_fini(void)
{
        nf_unregister_queue_handler();
        unregister_netdevice_notifier(&nfqnl_dev_notifier);
#ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
#endif
        nfnetlink_subsys_unregister(&nfqnl_subsys);
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);

        rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);