/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case the packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen:		Fix broken PMTU recovery and remove
 *					some redundant tests.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;

/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
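	/* The checksum field is itself covered by the header checksum,
	 * so it must be zeroed before the sum is computed. */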
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);

int __ip_local_out(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);
	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
		       skb_dst(skb)->dev, dst_output);
}

int ip_local_out(struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(skb);
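	/* nf_hook() returns 1 when the LOCAL_OUT hooks accepted the
	 * packet without queueing or consuming it; in that case we must
	 * hand it to dst_output() ourselves. */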
	if (likely(err == 1))
		err = dst_output(skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);

/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(newskb));
	netif_rx_ni(newskb);
	return 0;
}

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;
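	/* A negative uc_ttl means the socket has not set IP_TTL;
	 * fall back to the per-route default hop limit. */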

	if (ttl < 0)
		ttl = dst_metric(dst, RTAX_HOPLIMIT);
	return ttl;
}

/*
 *		Add an IP header to an skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	if (ip_dont_fragment(sk, &rt->dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->daddr    = rt->rt_dst;
	iph->saddr    = rt->rt_src;
	iph->protocol = sk->sk_protocol;
	ip_select_ident(iph, &rt->dst, sk);

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, daddr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	/* Send it out. */
	return ip_local_out(skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
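
/*
 * Usage note (sketch): this helper suits callers that already hold a
 * routed skb and only need a minimal IP header wrapped around it; in
 * this tree the TCP SYN-ACK transmit path is one such caller.
 */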

static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}

static inline int ip_skb_dst_mtu(struct sk_buff *skb)
{
	struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;

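	/* With IP_PMTUDISC_PROBE the socket probes the path MTU itself,
	 * so report the raw device MTU rather than the route's MTU. */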
	return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
}

static int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm != NULL) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}
#endif
	if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
		return ip_fragment(skb, ip_finish_output2);
	else
		return ip_finish_output2(skb);
}

int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that returned after forwarding; ip_mr_input will drop
		   them in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					newskb, NULL, newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
				NULL, newskb->dev, ip_dev_loopback_xmit);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
			    skb->dev, ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_output(struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_queue_xmit(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = inet->opt;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * e.g. by something like SCTP.
	 */
	rcu_read_lock();
	rt = skb_rtable(skb);
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		__be32 daddr;

		/* Use the correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (opt && opt->srr)
			daddr = opt->faddr;

		{
			struct flowi fl = { .oif = sk->sk_bound_dev_if,
					    .mark = sk->sk_mark,
					    .nl_u = { .ip4_u =
						      { .daddr = daddr,
							.saddr = inet->inet_saddr,
							.tos = RT_CONN_FLAGS(sk) } },
					    .proto = sk->sk_protocol,
					    .flags = inet_sk_flowi_flags(sk),
					    .uli_u = { .ports =
						       { .sport = inet->inet_sport,
							 .dport = inet->inet_dport } } };

			/* If this fails, the transport layer's retransmit
			 * mechanism will keep trying until a route appears
			 * or the connection times out.
			 */
			security_sk_classify_flow(sk, &fl);
			if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
				goto no_route;
		}
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
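	/* Fill version (4), header length (5 words) and TOS with a
	 * single 16-bit store covering the first two header octets. */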
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr    = rt->rt_src;
	iph->daddr    = rt->rt_dst;
	/* The transport layer has already set up its own header. */

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->inet_daddr, rt, 0);
	}

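	/* A GSO skb will be split into gso_segs on-wire packets, each of
	 * which consumes an IP ID, so advance the identification counter
	 * by that many instead of by one. */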
	ip_select_ident_more(iph, &rt->dst, sk,
			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	res = ip_local_out(skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(ip_queue_xmit);


static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	to->nf_trace = from->nf_trace;
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up
 *	into smaller pieces (each carrying the IP header plus a block of the
 *	original datagram's data) that still fit in a single device frame,
 *	and queue such frames for sending.
 */
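
/*
 * Worked example: with a 1500 byte device MTU and a 20 byte header,
 * each fragment can carry up to 1480 data bytes. Every fragment except
 * the last must carry a multiple of 8 bytes, because the fragment
 * offset field counts in 8-byte units.
 */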

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct iphdr *iph;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = skb_rtable(skb);
	int err = 0;

	dev = rt->dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(ip_skb_dst_mtu(skb)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->dst) - hlen;	/* Size of data space */
#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge)
		mtu -= nf_bridge_mtu_reduction(skb);
#endif
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When a frag_list is given, use it. First, check its validity:
	 * some transformers could create a wrong frag_list or break an
	 * existing one; that is not prohibited. In such cases fall back
	 * to copying.
	 *
	 * LATER: this step can be merged into the real generation of
	 * fragments; we can switch to copying when we see the first bad
	 * fragment.
	 */
	if (skb_has_frags(skb)) {
		struct sk_buff *frag, *frag2;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare the header of the next frame,
			 * before the previous one goes down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), iph, hlen);
				iph = ip_hdr(frag);
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (!err)
				IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/* For bridged IP traffic encapsulated inside e.g. a VLAN header,
	 * we need to make room for the encapsulating header.
	 */
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));

	/*
	 *	Fragment the datagram.
	 */

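	/* The fragment offset field counts in 8-byte units; convert the
	 * offset of an already-fragmented datagram back to bytes. */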
	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb_reset_network_header(skb2);
		skb2->transport_header = skb2->network_header + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = ip_hdr(skb2);
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and do it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC: If we are fragmenting a fragment that's not
		 *		  the last fragment, then keep the MF bit set
		 *		  on every piece.
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;

		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
	}
	kfree_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_fragment);

int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

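	/* When the device will checksum the packet (CHECKSUM_PARTIAL),
	 * a plain copy suffices; otherwise checksum while copying and
	 * fold the partial sum into skb->csum. */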
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);

static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

static inline int ip_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* The network device supports UDP fragmentation offload, so
	 * create a single skb containing the complete UDP datagram.
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (skb == NULL)
			return err;

		/* reserve space for the hardware header */
		skb_reserve(skb, hh_len);

		/* create space for the UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize the network header pointer */
		skb_reset_network_header(skb);

		/* initialize the protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		__skb_queue_tail(&sk->sk_write_queue, skb);
	}

	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data. Each piece is held on the socket until
 *	ip_push_pending_frames() is called. Each piece can be a page or
 *	non-page data.
 *
 *	Not only UDP but also other transport protocols, e.g. raw sockets,
 *	can potentially use this interface.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
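
/*
 * Typical corked-send sequence (a sketch with error handling elided;
 * this mirrors how the raw socket sendmsg path in this tree drives the
 * API):
 *
 *	err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov,
 *			     len, 0, &ipc, &rt, msg->msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!(msg->msg_flags & MSG_MORE))
 *		err = ip_push_pending_frames(sk);
 */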
int ip_append_data(struct sock *sk,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable **rtp,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = NULL;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking.
		 */
		opt = ipc->opt;
		if (opt) {
			if (inet->cork.opt == NULL) {
				inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
				if (unlikely(inet->cork.opt == NULL))
					return -ENOBUFS;
			}
			memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
			inet->cork.flags |= IPCORK_OPT;
			inet->cork.addr = ipc->addr;
		}
		rt = *rtp;
		if (unlikely(!rt))
			return -EFAULT;
		/*
		 * We steal a reference to this route; the caller should
		 * not release it.
		 */
		*rtp = NULL;
		inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
					    rt->dst.dev->mtu :
					    dst_mtu(rt->dst.path);
		inet->cork.dst = &rt->dst;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		if ((exthdrlen = rt->dst.header_len) != 0) {
			length += exthdrlen;
			transhdrlen += exthdrlen;
		}
	} else {
		rt = (struct rtable *)inet->cork.dst;
		if (inet->cork.flags & IPCORK_OPT)
			opt = inet->cork.opt;

		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}
	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
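	/* Round the per-fragment payload down to a multiple of 8 bytes,
	 * as required by the fragment offset encoding. */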
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
			       mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we
	 * wish it not to be fragmented later.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->dst.dev->features & NETIF_F_V4_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	skb = skb_peek_tail(&sk->sk_write_queue);

	inet->cork.length += length;
	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
					 fragheaderlen, transhdrlen, mtu,
					 flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chained skb;
	 * each segment is an IP fragment ready for sending to the network
	 * after adding the appropriate IP header.
	 */

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into the current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If the remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/* The last fragment gets additional space at the tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea which fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else
					/* only the initial fragment is
					   time stamped */
					ipc->shtx.flags = 0;
			}
			if (skb == NULL)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);
			*skb_tx(skb) = ipc->shtx;

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL)  {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

ssize_t ip_append_page(struct sock *sk, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	rt = (struct rtable *)inet->cork.dst;
	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (!(rt->dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
	mtu = inet->cork.fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	inet->cork.length += size;
	if ((size + skb->len > mtu) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}

	while (size > 0) {
		int i;

		if (skb_is_gso(skb))
			len = size;
		else {
			/* Check if the remaining data fits into the current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		atomic_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}
	return 0;

error:
	inet->cork.length -= size;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

static void ip_cork_release(struct inet_sock *inet)
{
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	dst_release(inet->cork.dst);
	inet->cork.dst = NULL;
}

/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push it out.
 */
int ip_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = (struct rtable *)inet->cork.dst;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to the IP header, past any ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
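	/* Chain every remaining queued skb onto the first one's frag_list
	 * so the pending pieces leave as a single IP datagram. */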
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we
	 * allow the frame generated here to be fragmented. No matter how
	 * transforms change the size of the packet, it will go out.
	 */
	if (inet->pmtudisc < IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* The DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow this frame to be
	 * fragmented locally. */
	if (inet->pmtudisc >= IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->dst) &&
	     ip_dont_fragment(sk, &rt->dst)))
		df = htons(IP_DF);

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
	}
	iph->tos = inet->tos;
	iph->frag_off = df;
	ip_select_ident(iph, &rt->dst, sk);
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on the dst refcount.
	 */
	inet->cork.dst = NULL;
	skb_dst_set(skb, &rt->dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	/* Netfilter gets the whole, not yet fragmented skb. */
	err = ip_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			goto error;
	}

out:
	ip_cork_release(inet);
	return err;

error:
	IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	goto out;
}

/*
 *	Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	ip_cork_release(inet_sk(sk));
}


/*
 *	Fetch data from kernel space and fill in the checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as a reply to another packet.
 *	Used to send TCP resets so far. ICMP should use this function too.
 *
 *	Should run single threaded per socket because it uses the sock
 *	structure to pass arguments.
 */
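/*
 * Usage note (sketch): in this tree the TCP code drives this from
 * tcp_v4_send_reset() and tcp_v4_send_ack() on a dedicated per-net
 * control socket, which satisfies the single-threaded-per-socket rule.
 */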
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct {
		struct ip_options	opt;
		char			data[40];
	} replyopts;
	struct ipcm_cookie ipc;
	__be32 daddr;
	struct rtable *rt = skb_rtable(skb);

	if (ip_options_echo(&replyopts.opt, skb))
		return;

	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;
	ipc.shtx.flags = 0;

	if (replyopts.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (ipc.opt->srr)
			daddr = replyopts.opt.faddr;
	}

	{
		struct flowi fl = { .oif = arg->bound_dev_if,
				    .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(ip_hdr(skb)->tos) } },
				    /* Not quite clean, but right. */
				    .uli_u = { .ports =
					       { .sport = tcp_hdr(skb)->dest,
						 .dport = tcp_hdr(skb)->source } },
				    .proto = sk->sk_protocol,
				    .flags = ip_reply_arg_flowi_flags(arg) };
		security_skb_classify_flow(skb, &fl);
		if (ip_route_output_key(sock_net(sk), &rt, &fl))
			return;
	}

	/* And let IP do all the hard work.
	 *
	 * This chunk is not reentrant, hence the spinlock. Note that it
	 * relies on the fact that this function is called with BHs
	 * locally disabled and that sk cannot already be locked.
	 */
	bh_lock_sock(sk);
	inet->tos = ip_hdr(skb)->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = ip_hdr(skb)->protocol;
	sk->sk_bound_dev_if = arg->bound_dev_if;
	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, &rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(skb) +
			  arg->csumoffset) = csum_fold(csum_add(skb->csum,
								arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}

void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}