Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
[pandora-kernel.git] / net / ipv4 / netfilter / nf_nat_helper.c
1 /* ip_nat_helper.c - generic support functions for NAT helpers
2  *
3  * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
4  * (C) 2003-2006 Netfilter Core Team <coreteam@netfilter.org>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/module.h>
11 #include <linux/kmod.h>
12 #include <linux/types.h>
13 #include <linux/timer.h>
14 #include <linux/skbuff.h>
15 #include <linux/tcp.h>
16 #include <linux/udp.h>
17 #include <net/checksum.h>
18 #include <net/tcp.h>
19 #include <net/route.h>
20
21 #include <linux/netfilter_ipv4.h>
22 #include <net/netfilter/nf_conntrack.h>
23 #include <net/netfilter/nf_conntrack_helper.h>
24 #include <net/netfilter/nf_conntrack_ecache.h>
25 #include <net/netfilter/nf_conntrack_expect.h>
26 #include <net/netfilter/nf_nat.h>
27 #include <net/netfilter/nf_nat_protocol.h>
28 #include <net/netfilter/nf_nat_core.h>
29 #include <net/netfilter/nf_nat_helper.h>
30
/* Dump one struct nf_nat_seq for debugging.  Wrapped in do { } while (0)
 * so the expansion is a single statement and safe inside an unbraced
 * if/else; callers supply the terminating ';'.  (The old version ended
 * with a stray ';' inside the macro and left the argument
 * unparenthesized.) */
#define DUMP_OFFSET(x) \
	do { \
		pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
			 (x)->offset_before, (x)->offset_after, \
			 (x)->correction_pos); \
	} while (0)
34
35 static DEFINE_SPINLOCK(nf_nat_seqofs_lock);
36
37 /* Setup TCP sequence correction given this change at this sequence */
38 static inline void
39 adjust_tcp_sequence(u32 seq,
40                     int sizediff,
41                     struct nf_conn *ct,
42                     enum ip_conntrack_info ctinfo)
43 {
44         int dir;
45         struct nf_nat_seq *this_way, *other_way;
46         struct nf_conn_nat *nat = nfct_nat(ct);
47
48         pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n", seq, seq);
49
50         dir = CTINFO2DIR(ctinfo);
51
52         this_way = &nat->seq[dir];
53         other_way = &nat->seq[!dir];
54
55         pr_debug("nf_nat_resize_packet: Seq_offset before: ");
56         DUMP_OFFSET(this_way);
57
58         spin_lock_bh(&nf_nat_seqofs_lock);
59
60         /* SYN adjust. If it's uninitialized, or this is after last
61          * correction, record it: we don't handle more than one
62          * adjustment in the window, but do deal with common case of a
63          * retransmit */
64         if (this_way->offset_before == this_way->offset_after ||
65             before(this_way->correction_pos, seq)) {
66                    this_way->correction_pos = seq;
67                    this_way->offset_before = this_way->offset_after;
68                    this_way->offset_after += sizediff;
69         }
70         spin_unlock_bh(&nf_nat_seqofs_lock);
71
72         pr_debug("nf_nat_resize_packet: Seq_offset after: ");
73         DUMP_OFFSET(this_way);
74 }
75
/* Frobs data inside this packet, which is linear.
 *
 * Replaces the @match_len bytes at @match_offset (counted from @dataoff
 * bytes past the network header) with the @rep_len bytes in @rep_buffer,
 * shifting any trailing payload up or down, then fixes skb->len/tail and
 * the IP header total length + checksum.  Caller must have linearized the
 * skb (the BUG_ON enforces this) and, when growing, guaranteed enough
 * tailroom beforehand.
 */
static void mangle_contents(struct sk_buff *skb,
			    unsigned int dataoff,
			    unsigned int match_offset,
			    unsigned int match_len,
			    const char *rep_buffer,
			    unsigned int rep_len)
{
	unsigned char *data;

	BUG_ON(skb_is_nonlinear(skb));
	data = skb_network_header(skb) + dataoff;

	/* move post-replacement: everything between the end of the match
	 * and skb->tail slides by (rep_len - match_len) bytes */
	memmove(data + match_offset + rep_len,
		data + match_offset + match_len,
		skb->tail - (skb->network_header + dataoff +
			     match_offset + match_len));

	/* insert data from buffer */
	memcpy(data + match_offset, rep_buffer, rep_len);

	/* update skb info */
	if (rep_len > match_len) {
		pr_debug("nf_nat_mangle_packet: Extending packet by "
			 "%u from %u bytes\n", rep_len - match_len, skb->len);
		skb_put(skb, rep_len - match_len);
	} else {
		pr_debug("nf_nat_mangle_packet: Shrinking packet from "
			 "%u from %u bytes\n", match_len - rep_len, skb->len);
		__skb_trim(skb, skb->len + rep_len - match_len);
	}

	/* fix IP hdr checksum information */
	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));
}
113
114 /* Unusual, but possible case. */
115 static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
116 {
117         if (skb->len + extra > 65535)
118                 return 0;
119
120         if (pskb_expand_head(skb, 0, extra - skb_tailroom(skb), GFP_ATOMIC))
121                 return 0;
122
123         return 1;
124 }
125
/* Generic function for mangling variable-length address changes inside
 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
 * command in FTP).
 *
 * Takes care about all the nasty sequence number changes, checksumming,
 * skb enlargement, ...
 *
 * Returns 1 on success, 0 on failure (skb not writable or cannot grow).
 * */
int
nf_nat_mangle_tcp_packet(struct sk_buff *skb,
			 struct nf_conn *ct,
			 enum ip_conntrack_info ctinfo,
			 unsigned int match_offset,
			 unsigned int match_len,
			 const char *rep_buffer,
			 unsigned int rep_len)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;
	struct tcphdr *tcph;
	int oldlen, datalen;

	/* Linearize the whole packet so mangle_contents() can memmove it. */
	if (!skb_make_writable(skb, skb->len))
		return 0;

	/* Growing?  Ensure tailroom for the extra bytes first. */
	if (rep_len > match_len &&
	    rep_len - match_len > skb_tailroom(skb) &&
	    !enlarge_skb(skb, rep_len - match_len))
		return 0;

	SKB_LINEAR_ASSERT(skb);

	iph = ip_hdr(skb);
	tcph = (void *)iph + iph->ihl*4;

	/* TCP segment length (header + payload) before mangling. */
	oldlen = skb->len - iph->ihl*4;
	mangle_contents(skb, iph->ihl*4 + tcph->doff*4,
			match_offset, match_len, rep_buffer, rep_len);

	/* ...and the new segment length afterwards. */
	datalen = skb->len - iph->ihl*4;
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		if (!(rt->rt_flags & RTCF_LOCAL) &&
		    skb->dev->features & NETIF_F_V4_CSUM) {
			/* Device can finish the checksum: seed check with
			 * the pseudo-header sum and point csum_start/
			 * csum_offset at the TCP check field. */
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_headroom(skb) +
					  skb_network_offset(skb) +
					  iph->ihl * 4;
			skb->csum_offset = offsetof(struct tcphdr, check);
			tcph->check = ~tcp_v4_check(datalen,
						    iph->saddr, iph->daddr, 0);
		} else {
			/* No offload: recompute the full checksum in
			 * software over the mangled segment. */
			tcph->check = 0;
			tcph->check = tcp_v4_check(datalen,
						   iph->saddr, iph->daddr,
						   csum_partial(tcph,
								datalen, 0));
		}
	} else
		/* Checksum completed later by hw/stack; only the length
		 * component of the pseudo-header needs adjusting. */
		inet_proto_csum_replace2(&tcph->check, skb,
					 htons(oldlen), htons(datalen), 1);

	if (rep_len != match_len) {
		/* Size changed: record the offset so every later packet
		 * in this connection gets its seq/ack numbers fixed up. */
		set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
		adjust_tcp_sequence(ntohl(tcph->seq),
				    (int)rep_len - (int)match_len,
				    ct, ctinfo);
		/* Tell TCP window tracking about seq change */
		nf_conntrack_tcp_update(skb, ip_hdrlen(skb),
					ct, CTINFO2DIR(ctinfo),
					(int)rep_len - (int)match_len);

		nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
	}
	return 1;
}
EXPORT_SYMBOL(nf_nat_mangle_tcp_packet);
202
/* Generic function for mangling variable-length address changes inside
 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
 * command in the Amanda protocol)
 *
 * Takes care about all the nasty sequence number changes, checksumming,
 * skb enlargement, ...
 *
 * XXX - This function could be merged with nf_nat_mangle_tcp_packet which
 *       should be fairly easy to do.
 *
 * Returns 1 on success, 0 on failure.
 */
int
nf_nat_mangle_udp_packet(struct sk_buff *skb,
			 struct nf_conn *ct,
			 enum ip_conntrack_info ctinfo,
			 unsigned int match_offset,
			 unsigned int match_len,
			 const char *rep_buffer,
			 unsigned int rep_len)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;
	struct udphdr *udph;
	int datalen, oldlen;

	/* UDP helpers might accidentally mangle the wrong packet:
	 * reject if the match region lies beyond the packet end. */
	iph = ip_hdr(skb);
	if (skb->len < iph->ihl*4 + sizeof(*udph) +
			       match_offset + match_len)
		return 0;

	/* Linearize the whole packet so mangle_contents() can memmove it. */
	if (!skb_make_writable(skb, skb->len))
		return 0;

	/* Growing?  Ensure tailroom for the extra bytes first. */
	if (rep_len > match_len &&
	    rep_len - match_len > skb_tailroom(skb) &&
	    !enlarge_skb(skb, rep_len - match_len))
		return 0;

	/* Re-read: skb data may have been reallocated above. */
	iph = ip_hdr(skb);
	udph = (void *)iph + iph->ihl*4;

	oldlen = skb->len - iph->ihl*4;
	mangle_contents(skb, iph->ihl*4 + sizeof(*udph),
			match_offset, match_len, rep_buffer, rep_len);

	/* update the length of the UDP packet */
	datalen = skb->len - iph->ihl*4;
	udph->len = htons(datalen);

	/* fix udp checksum if udp checksum was previously calculated
	 * (a zero UDP checksum means the sender did not compute one) */
	if (!udph->check && skb->ip_summed != CHECKSUM_PARTIAL)
		return 1;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		if (!(rt->rt_flags & RTCF_LOCAL) &&
		    skb->dev->features & NETIF_F_V4_CSUM) {
			/* Device can finish the checksum: seed check with
			 * the pseudo-header sum and set csum_start/offset. */
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_headroom(skb) +
					  skb_network_offset(skb) +
					  iph->ihl * 4;
			skb->csum_offset = offsetof(struct udphdr, check);
			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 datalen, IPPROTO_UDP,
							 0);
		} else {
			/* No offload: recompute the full checksum, and
			 * substitute the 0 -> 0xffff mangling since a zero
			 * result would mean "no checksum" on the wire. */
			udph->check = 0;
			udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
							datalen, IPPROTO_UDP,
							csum_partial(udph,
								     datalen, 0));
			if (!udph->check)
				udph->check = CSUM_MANGLED_0;
		}
	} else
		/* Checksum completed later; only adjust the length
		 * component of the pseudo-header. */
		inet_proto_csum_replace2(&udph->check, skb,
					 htons(oldlen), htons(datalen), 1);

	return 1;
}
EXPORT_SYMBOL(nf_nat_mangle_udp_packet);
283
284 /* Adjust one found SACK option including checksum correction */
285 static void
286 sack_adjust(struct sk_buff *skb,
287             struct tcphdr *tcph,
288             unsigned int sackoff,
289             unsigned int sackend,
290             struct nf_nat_seq *natseq)
291 {
292         while (sackoff < sackend) {
293                 struct tcp_sack_block_wire *sack;
294                 __be32 new_start_seq, new_end_seq;
295
296                 sack = (void *)skb->data + sackoff;
297                 if (after(ntohl(sack->start_seq) - natseq->offset_before,
298                           natseq->correction_pos))
299                         new_start_seq = htonl(ntohl(sack->start_seq)
300                                         - natseq->offset_after);
301                 else
302                         new_start_seq = htonl(ntohl(sack->start_seq)
303                                         - natseq->offset_before);
304
305                 if (after(ntohl(sack->end_seq) - natseq->offset_before,
306                           natseq->correction_pos))
307                         new_end_seq = htonl(ntohl(sack->end_seq)
308                                       - natseq->offset_after);
309                 else
310                         new_end_seq = htonl(ntohl(sack->end_seq)
311                                       - natseq->offset_before);
312
313                 pr_debug("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
314                          ntohl(sack->start_seq), new_start_seq,
315                          ntohl(sack->end_seq), new_end_seq);
316
317                 inet_proto_csum_replace4(&tcph->check, skb,
318                                          sack->start_seq, new_start_seq, 0);
319                 inet_proto_csum_replace4(&tcph->check, skb,
320                                          sack->end_seq, new_end_seq, 0);
321                 sack->start_seq = new_start_seq;
322                 sack->end_seq = new_end_seq;
323                 sackoff += sizeof(*sack);
324         }
325 }
326
/* TCP SACK sequence number adjustment.
 *
 * Scans the TCP option area for SACK options and hands each well-formed
 * one to sack_adjust() with the *other* direction's offsets (SACKs echo
 * the peer's sequence space).  Returns 1 on success or clean end of
 * options, 0 if the skb cannot be made writable or an option is
 * malformed (truncated or bad length).
 */
static inline unsigned int
nf_nat_sack_adjust(struct sk_buff *skb,
		   struct tcphdr *tcph,
		   struct nf_conn *ct,
		   enum ip_conntrack_info ctinfo)
{
	unsigned int dir, optoff, optend;
	struct nf_conn_nat *nat = nfct_nat(ct);

	/* Options live between the fixed TCP header and doff*4. */
	optoff = ip_hdrlen(skb) + sizeof(struct tcphdr);
	optend = ip_hdrlen(skb) + tcph->doff * 4;

	if (!skb_make_writable(skb, optend))
		return 0;

	dir = CTINFO2DIR(ctinfo);

	while (optoff < optend) {
		/* Usually: option, length. */
		unsigned char *op = skb->data + optoff;

		switch (op[0]) {
		case TCPOPT_EOL:
			return 1;
		case TCPOPT_NOP:
			/* NOP is a lone padding byte, no length field. */
			optoff++;
			continue;
		default:
			/* no partial options */
			if (optoff + 1 == optend ||
			    optoff + op[1] > optend ||
			    op[1] < 2)
				return 0;
			/* SACK payload must be a whole number of 8-byte
			 * blocks after the 2-byte kind/length header. */
			if (op[0] == TCPOPT_SACK &&
			    op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
			    ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
				sack_adjust(skb, tcph, optoff+2,
					    optoff+op[1], &nat->seq[!dir]);
			optoff += op[1];
		}
	}
	return 1;
}
371
/* TCP sequence number adjustment.  Returns 1 on success, 0 on failure
 *
 * Applies the recorded per-direction offsets to this packet's seq
 * (this direction's offset, added) and ack_seq (other direction's
 * offset, subtracted), fixes the checksum incrementally, then adjusts
 * any SACK blocks and updates conntrack's TCP window state. */
int
nf_nat_seq_adjust(struct sk_buff *skb,
		  struct nf_conn *ct,
		  enum ip_conntrack_info ctinfo)
{
	struct tcphdr *tcph;
	int dir;
	__be32 newseq, newack;
	s16 seqoff, ackoff;
	struct nf_conn_nat *nat = nfct_nat(ct);
	struct nf_nat_seq *this_way, *other_way;

	dir = CTINFO2DIR(ctinfo);

	this_way = &nat->seq[dir];
	other_way = &nat->seq[!dir];

	if (!skb_make_writable(skb, ip_hdrlen(skb) + sizeof(*tcph)))
		return 0;

	tcph = (void *)skb->data + ip_hdrlen(skb);
	/* Past the correction point use the new offset, else the old one. */
	if (after(ntohl(tcph->seq), this_way->correction_pos))
		seqoff = this_way->offset_after;
	else
		seqoff = this_way->offset_before;

	/* ack_seq references the peer's space: un-NAT before comparing. */
	if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
		  other_way->correction_pos))
		ackoff = other_way->offset_after;
	else
		ackoff = other_way->offset_before;

	newseq = htonl(ntohl(tcph->seq) + seqoff);
	newack = htonl(ntohl(tcph->ack_seq) - ackoff);

	/* Incrementally patch the checksum for both 32-bit fields. */
	inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
	inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);

	pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
		 ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
		 ntohl(newack));

	tcph->seq = newseq;
	tcph->ack_seq = newack;

	if (!nf_nat_sack_adjust(skb, tcph, ct, ctinfo))
		return 0;

	nf_conntrack_tcp_update(skb, ip_hdrlen(skb), ct, dir, seqoff);

	return 1;
}
425
/* Setup NAT on this expected conntrack so it follows master. */
/* If we fail to get a free NAT slot, we'll get dropped on confirm */
void nf_nat_follow_master(struct nf_conn *ct,
			  struct nf_conntrack_expect *exp)
{
	struct nf_nat_range range;

	/* This must be a fresh one. */
	BUG_ON(ct->status & IPS_NAT_DONE_MASK);

	/* Change src to where master sends to */
	range.flags = IP_NAT_RANGE_MAP_IPS;
	range.min_ip = range.max_ip
		= ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
	nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);

	/* For DST manip, map port here to where it's expected.
	 * saved_proto carries the port the helper recorded when the
	 * expectation was set up. */
	range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
	range.min = range.max = exp->saved_proto;
	range.min_ip = range.max_ip
		= ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
	nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST);
}
EXPORT_SYMBOL(nf_nat_follow_master);