[pandora-kernel.git] / net/ipv4/ip_output.c
1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              The Internet Protocol (IP) output module.
7  *
8  * Authors:     Ross Biro
9  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10  *              Donald Becker, <becker@super.org>
11  *              Alan Cox, <Alan.Cox@linux.org>
12  *              Richard Underwood
13  *              Stefan Becker, <stefanb@yello.ping.de>
14  *              Jorge Cwik, <jorge@laser.satlink.net>
15  *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
16  *              Hirokazu Takahashi, <taka@valinux.co.jp>
17  *
18  *      See ip_input.c for original log
19  *
20  *      Fixes:
21  *              Alan Cox        :       Missing nonblock feature in ip_build_xmit.
22  *              Mike Kilburn    :       htons() missing in ip_build_xmit.
23  *              Bradford Johnson:       Fix faulty handling of some frames when
24  *                                      no route is found.
25  *              Alexander Demenshin:    Missing sk/skb free in ip_queue_xmit
26  *                                      (in case the packet is not accepted
27  *                                      by output firewall rules)
28  *              Mike McLagan    :       Routing by source
29  *              Alexey Kuznetsov:       use new route cache
30  *              Andi Kleen:             Fix broken PMTU recovery and remove
31  *                                      some redundant tests.
32  *      Vitaly E. Lavrov        :       Transparent proxy revived after a year in a coma.
33  *              Andi Kleen      :       Replace ip_reply with ip_send_reply.
34  *              Andi Kleen      :       Split fast and slow ip_build_xmit path
35  *                                      for decreased register pressure on x86
36  *                                      and more readability.
37  *              Marc Boucher    :       When call_out_firewall returns FW_QUEUE,
38  *                                      silently drop skb instead of failing with -EPERM.
39  *              Detlev Wengorz  :       Copy protocol for fragments.
40  *              Hirokazu Takahashi:     HW checksumming for outgoing UDP
41  *                                      datagrams.
42  *              Hirokazu Takahashi:     sendfile() on UDP works now.
43  */
44
45 #include <asm/uaccess.h>
46 #include <asm/system.h>
47 #include <linux/module.h>
48 #include <linux/types.h>
49 #include <linux/kernel.h>
50 #include <linux/mm.h>
51 #include <linux/string.h>
52 #include <linux/errno.h>
53 #include <linux/highmem.h>
54 #include <linux/slab.h>
55
56 #include <linux/socket.h>
57 #include <linux/sockios.h>
58 #include <linux/in.h>
59 #include <linux/inet.h>
60 #include <linux/netdevice.h>
61 #include <linux/etherdevice.h>
62 #include <linux/proc_fs.h>
63 #include <linux/stat.h>
64 #include <linux/init.h>
65
66 #include <net/snmp.h>
67 #include <net/ip.h>
68 #include <net/protocol.h>
69 #include <net/route.h>
70 #include <net/xfrm.h>
71 #include <linux/skbuff.h>
72 #include <net/sock.h>
73 #include <net/arp.h>
74 #include <net/icmp.h>
75 #include <net/checksum.h>
76 #include <net/inetpeer.h>
77 #include <linux/igmp.h>
78 #include <linux/netfilter_ipv4.h>
79 #include <linux/netfilter_bridge.h>
80 #include <linux/mroute.h>
81 #include <linux/netlink.h>
82 #include <linux/tcp.h>
83
84 int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
85 EXPORT_SYMBOL(sysctl_ip_default_ttl);
86
87 /* Generate a checksum for an outgoing IP datagram. */
88 __inline__ void ip_send_check(struct iphdr *iph)
89 {
90         iph->check = 0;
91         iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
92 }
93 EXPORT_SYMBOL(ip_send_check);
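
/*
 * Illustrative sketch (not part of the original file): the RFC 1071
 * one's-complement checksum that ip_fast_csum() computes in optimized,
 * often per-architecture assembly, form. It assumes iph->check was
 * zeroed first, as ip_send_check() does. Kept under "#if 0".
 */
#if 0
static u16 ip_fast_csum_sketch(const void *iph, unsigned int ihl)
{
	const u16 *p = iph;
	u32 sum = 0;
	unsigned int words = ihl * 2;	/* ihl counts 32-bit words */

	while (words--)
		sum += *p++;			/* 16-bit one's-complement sum */
	sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries back in */
	sum = (sum & 0xffff) + (sum >> 16);
	return (u16)~sum;			/* complement gives the check field */
}
#endif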
94
95 int __ip_local_out(struct sk_buff *skb)
96 {
97         struct iphdr *iph = ip_hdr(skb);
98
99         iph->tot_len = htons(skb->len);
100         ip_send_check(iph);
101         return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
102                        skb_dst(skb)->dev, dst_output);
103 }
104
105 int ip_local_out(struct sk_buff *skb)
106 {
107         int err;
108
109         err = __ip_local_out(skb);
110         if (likely(err == 1))
111                 err = dst_output(skb);
112
113         return err;
114 }
115 EXPORT_SYMBOL_GPL(ip_local_out);
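
/*
 * Note: nf_hook() returns 1 when the packet traversed the NF_INET_LOCAL_OUT
 * hooks and may continue, which is why ip_local_out() only calls
 * dst_output() on a return value of 1; any other value means netfilter
 * consumed, queued or dropped the skb.
 */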
116
117 /* dev_loopback_xmit for use with netfilter. */
118 static int ip_dev_loopback_xmit(struct sk_buff *newskb)
119 {
120         skb_reset_mac_header(newskb);
121         __skb_pull(newskb, skb_network_offset(newskb));
122         newskb->pkt_type = PACKET_LOOPBACK;
123         newskb->ip_summed = CHECKSUM_UNNECESSARY;
124         WARN_ON(!skb_dst(newskb));
125         netif_rx_ni(newskb);
126         return 0;
127 }
128
129 static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
130 {
131         int ttl = inet->uc_ttl;
132
133         if (ttl < 0)
134                 ttl = ip4_dst_hoplimit(dst);
135         return ttl;
136 }
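
/*
 * Note: inet->uc_ttl is -1 unless the application set IP_TTL explicitly,
 * so by default the TTL comes from the route via ip4_dst_hoplimit(),
 * which ultimately falls back to sysctl_ip_default_ttl.
 */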
137
138 /*
139  *              Add an ip header to a skbuff and send it out.
140  *
141  */
142 int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
143                           __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
144 {
145         struct inet_sock *inet = inet_sk(sk);
146         struct rtable *rt = skb_rtable(skb);
147         struct iphdr *iph;
148
149         /* Build the IP header. */
150         skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
151         skb_reset_network_header(skb);
152         iph = ip_hdr(skb);
153         iph->version  = 4;
154         iph->ihl      = 5;
155         iph->tos      = inet->tos;
156         if (ip_dont_fragment(sk, &rt->dst))
157                 iph->frag_off = htons(IP_DF);
158         else
159                 iph->frag_off = 0;
160         iph->ttl      = ip_select_ttl(inet, &rt->dst);
161         iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
162         iph->saddr    = saddr;
163         iph->protocol = sk->sk_protocol;
164         ip_select_ident(iph, &rt->dst, sk);
165
166         if (opt && opt->opt.optlen) {
167                 iph->ihl += opt->opt.optlen>>2;
168                 ip_options_build(skb, &opt->opt, daddr, rt, 0);
169         }
170
171         skb->priority = sk->sk_priority;
172         skb->mark = sk->sk_mark;
173
174         /* Send it out. */
175         return ip_local_out(skb);
176 }
177 EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
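
/*
 * Usage note (not from the original file): this helper serves packets that
 * do not go through the connected-socket path in ip_queue_xmit(), e.g. TCP
 * SYN-ACKs built from a request sock in kernels of this era.
 */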
178
179 static inline int ip_finish_output2(struct sk_buff *skb)
180 {
181         struct dst_entry *dst = skb_dst(skb);
182         struct rtable *rt = (struct rtable *)dst;
183         struct net_device *dev = dst->dev;
184         unsigned int hh_len = LL_RESERVED_SPACE(dev);
185
186         if (rt->rt_type == RTN_MULTICAST) {
187                 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
188         } else if (rt->rt_type == RTN_BROADCAST)
189                 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);
190
191         /* Be paranoid, rather than too clever. */
192         if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
193                 struct sk_buff *skb2;
194
195                 skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
196                 if (skb2 == NULL) {
197                         kfree_skb(skb);
198                         return -ENOMEM;
199                 }
200                 if (skb->sk)
201                         skb_set_owner_w(skb2, skb->sk);
202                 kfree_skb(skb);
203                 skb = skb2;
204         }
205
206         if (dst->hh)
207                 return neigh_hh_output(dst->hh, skb);
208         else if (dst->neighbour)
209                 return dst->neighbour->output(skb);
210
211         if (net_ratelimit())
212                 printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
213         kfree_skb(skb);
214         return -EINVAL;
215 }
216
217 static inline int ip_skb_dst_mtu(struct sk_buff *skb)
218 {
219         struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
220
221         return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
222                skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
223 }
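
/*
 * Note: with IP_PMTUDISC_PROBE the device MTU is used instead of the
 * (possibly smaller) cached path MTU, so the stack can deliberately send
 * packets larger than the current path MTU estimate to probe it.
 */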
224
225 static int ip_finish_output(struct sk_buff *skb)
226 {
227 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
228         /* Policy lookup after SNAT yielded a new policy */
229         if (skb_dst(skb)->xfrm != NULL) {
230                 IPCB(skb)->flags |= IPSKB_REROUTED;
231                 return dst_output(skb);
232         }
233 #endif
234         if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
235                 return ip_fragment(skb, ip_finish_output2);
236         else
237                 return ip_finish_output2(skb);
238 }
239
240 int ip_mc_output(struct sk_buff *skb)
241 {
242         struct sock *sk = skb->sk;
243         struct rtable *rt = skb_rtable(skb);
244         struct net_device *dev = rt->dst.dev;
245
246         /*
247          *      If the indicated interface is up and running, send the packet.
248          */
249         IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
250
251         skb->dev = dev;
252         skb->protocol = htons(ETH_P_IP);
253
254         /*
255          *      Multicasts are looped back for other local users
256          */
257
258         if (rt->rt_flags&RTCF_MULTICAST) {
259                 if (sk_mc_loop(sk)
260 #ifdef CONFIG_IP_MROUTE
261                 /* Small optimization: do not loop back non-local frames
262                    that were returned after forwarding; they will be dropped
263                    by ip_mr_input in any case.
264                    Note that local frames are looped back to be delivered
265                    to local recipients.
266
267                    This check is duplicated in ip_mr_input at the moment.
268                  */
269                     &&
270                     ((rt->rt_flags & RTCF_LOCAL) ||
271                      !(IPCB(skb)->flags & IPSKB_FORWARDED))
272 #endif
273                    ) {
274                         struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
275                         if (newskb)
276                                 NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
277                                         newskb, NULL, newskb->dev,
278                                         ip_dev_loopback_xmit);
279                 }
280
281                 /* Multicasts with ttl 0 must not go beyond the host */
282
283                 if (ip_hdr(skb)->ttl == 0) {
284                         kfree_skb(skb);
285                         return 0;
286                 }
287         }
288
289         if (rt->rt_flags&RTCF_BROADCAST) {
290                 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
291                 if (newskb)
292                         NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
293                                 NULL, newskb->dev, ip_dev_loopback_xmit);
294         }
295
296         return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
297                             skb->dev, ip_finish_output,
298                             !(IPCB(skb)->flags & IPSKB_REROUTED));
299 }
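
/*
 * Note: the clone-and-loopback above implements IP_MULTICAST_LOOP (on by
 * default, tested via sk_mc_loop()), so local group members receive a copy
 * of their own multicast transmissions through netif_rx_ni().
 */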
300
301 int ip_output(struct sk_buff *skb)
302 {
303         struct net_device *dev = skb_dst(skb)->dev;
304
305         IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
306
307         skb->dev = dev;
308         skb->protocol = htons(ETH_P_IP);
309
310         return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
311                             ip_finish_output,
312                             !(IPCB(skb)->flags & IPSKB_REROUTED));
313 }
314
315 int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl)
316 {
317         struct sock *sk = skb->sk;
318         struct inet_sock *inet = inet_sk(sk);
319         struct ip_options_rcu *inet_opt;
320         struct flowi4 *fl4;
321         struct rtable *rt;
322         struct iphdr *iph;
323         int res;
324
325         /* Skip all of this if the packet is already routed,
326          * e.g. by something like SCTP.
327          */
328         rcu_read_lock();
329         inet_opt = rcu_dereference(inet->inet_opt);
330         fl4 = &fl->u.ip4;
331         rt = skb_rtable(skb);
332         if (rt != NULL)
333                 goto packet_routed;
334
335         /* Make sure we can route this packet. */
336         rt = (struct rtable *)__sk_dst_check(sk, 0);
337         if (rt == NULL) {
338                 __be32 daddr;
339
340                 /* Use correct destination address if we have options. */
341                 daddr = inet->inet_daddr;
342                 if (inet_opt && inet_opt->opt.srr)
343                         daddr = inet_opt->opt.faddr;
344
345                 /* If this fails, the transport layer's retransmit mechanism
346                  * will keep trying until a route appears or the connection
347                  * times itself out.
348                  */
349                 rt = ip_route_output_ports(sock_net(sk), fl4, sk,
350                                            daddr, inet->inet_saddr,
351                                            inet->inet_dport,
352                                            inet->inet_sport,
353                                            sk->sk_protocol,
354                                            RT_CONN_FLAGS(sk),
355                                            sk->sk_bound_dev_if);
356                 if (IS_ERR(rt))
357                         goto no_route;
358                 sk_setup_caps(sk, &rt->dst);
359         }
360         skb_dst_set_noref(skb, &rt->dst);
361
362 packet_routed:
363         if (inet_opt && inet_opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
364                 goto no_route;
365
366         /* OK, we know where to send it, allocate and build IP header. */
367         skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
368         skb_reset_network_header(skb);
369         iph = ip_hdr(skb);
370         *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
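        /* Note: the single 16-bit store above fills in version (4), ihl (5)
         * and TOS at once; it is equivalent to iph->version = 4;
         * iph->ihl = 5; iph->tos = inet->tos;
         */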
371         if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
372                 iph->frag_off = htons(IP_DF);
373         else
374                 iph->frag_off = 0;
375         iph->ttl      = ip_select_ttl(inet, &rt->dst);
376         iph->protocol = sk->sk_protocol;
377         iph->saddr    = fl4->saddr;
378         iph->daddr    = fl4->daddr;
379         /* The transport layer has already set the transport header itself. */
380
381         if (inet_opt && inet_opt->opt.optlen) {
382                 iph->ihl += inet_opt->opt.optlen >> 2;
383                 ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
384         }
385
386         ip_select_ident_more(iph, &rt->dst, sk,
387                              (skb_shinfo(skb)->gso_segs ?: 1) - 1);
388
389         skb->priority = sk->sk_priority;
390         skb->mark = sk->sk_mark;
391
392         res = ip_local_out(skb);
393         rcu_read_unlock();
394         return res;
395
396 no_route:
397         rcu_read_unlock();
398         IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
399         kfree_skb(skb);
400         return -EHOSTUNREACH;
401 }
402 EXPORT_SYMBOL(ip_queue_xmit);
403
404
405 static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
406 {
407         to->pkt_type = from->pkt_type;
408         to->priority = from->priority;
409         to->protocol = from->protocol;
410         skb_dst_drop(to);
411         skb_dst_copy(to, from);
412         to->dev = from->dev;
413         to->mark = from->mark;
414
415         /* Copy the flags to each fragment. */
416         IPCB(to)->flags = IPCB(from)->flags;
417
418 #ifdef CONFIG_NET_SCHED
419         to->tc_index = from->tc_index;
420 #endif
421         nf_copy(to, from);
422 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
423     defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
424         to->nf_trace = from->nf_trace;
425 #endif
426 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
427         to->ipvs_property = from->ipvs_property;
428 #endif
429         skb_copy_secmark(to, from);
430 }
431
432 /*
433  *      This IP datagram is too large to be sent in one piece.  Break it up
434  *      into smaller pieces (each the size of an IP header plus a block of
435  *      the original datagram's data) so that each piece fits in a
436  *      single device frame, and queue each such frame for sending.
437  */
438
439 int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
440 {
441         struct iphdr *iph;
442         int ptr;
443         struct net_device *dev;
444         struct sk_buff *skb2;
445         unsigned int mtu, hlen, left, len, ll_rs;
446         int offset;
447         __be16 not_last_frag;
448         struct rtable *rt = skb_rtable(skb);
449         int err = 0;
450
451         dev = rt->dst.dev;
452
453         /*
454          *      Point into the IP datagram header.
455          */
456
457         iph = ip_hdr(skb);
458
459         if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
460                 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
461                 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
462                           htonl(ip_skb_dst_mtu(skb)));
463                 kfree_skb(skb);
464                 return -EMSGSIZE;
465         }
466
467         /*
468          *      Setup starting values.
469          */
470
471         hlen = iph->ihl * 4;
472         mtu = dst_mtu(&rt->dst) - hlen; /* Size of data space */
473 #ifdef CONFIG_BRIDGE_NETFILTER
474         if (skb->nf_bridge)
475                 mtu -= nf_bridge_mtu_reduction(skb);
476 #endif
477         IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
478
479         /* When a frag_list is given, use it. First, check its validity:
480          * some transformers could create a wrong frag_list or break an
481          * existing one; that is not prohibited. In that case fall back to copying.
482          *
483          * LATER: this step can be merged with the real generation of fragments;
484          * we can switch to copying when we see the first bad fragment.
485          */
486         if (skb_has_frag_list(skb)) {
487                 struct sk_buff *frag, *frag2;
488                 int first_len = skb_pagelen(skb);
489
490                 if (first_len - hlen > mtu ||
491                     ((first_len - hlen) & 7) ||
492                     (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
493                     skb_cloned(skb))
494                         goto slow_path;
495
496                 skb_walk_frags(skb, frag) {
497                         /* Correct geometry. */
498                         if (frag->len > mtu ||
499                             ((frag->len & 7) && frag->next) ||
500                             skb_headroom(frag) < hlen)
501                                 goto slow_path_clean;
502
503                         /* Partially cloned skb? */
504                         if (skb_shared(frag))
505                                 goto slow_path_clean;
506
507                         BUG_ON(frag->sk);
508                         if (skb->sk) {
509                                 frag->sk = skb->sk;
510                                 frag->destructor = sock_wfree;
511                         }
512                         skb->truesize -= frag->truesize;
513                 }
514
515                 /* Everything is OK. Generate! */
516
517                 err = 0;
518                 offset = 0;
519                 frag = skb_shinfo(skb)->frag_list;
520                 skb_frag_list_init(skb);
521                 skb->data_len = first_len - skb_headlen(skb);
522                 skb->len = first_len;
523                 iph->tot_len = htons(first_len);
524                 iph->frag_off = htons(IP_MF);
525                 ip_send_check(iph);
526
527                 for (;;) {
528                         /* Prepare the header of the next frame
529                          * before the previous one goes down. */
530                         if (frag) {
531                                 frag->ip_summed = CHECKSUM_NONE;
532                                 skb_reset_transport_header(frag);
533                                 __skb_push(frag, hlen);
534                                 skb_reset_network_header(frag);
535                                 memcpy(skb_network_header(frag), iph, hlen);
536                                 iph = ip_hdr(frag);
537                                 iph->tot_len = htons(frag->len);
538                                 ip_copy_metadata(frag, skb);
539                                 if (offset == 0)
540                                         ip_options_fragment(frag);
541                                 offset += skb->len - hlen;
542                                 iph->frag_off = htons(offset>>3);
543                                 if (frag->next != NULL)
544                                         iph->frag_off |= htons(IP_MF);
545                                 /* Ready, complete checksum */
546                                 ip_send_check(iph);
547                         }
548
549                         err = output(skb);
550
551                         if (!err)
552                                 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
553                         if (err || !frag)
554                                 break;
555
556                         skb = frag;
557                         frag = skb->next;
558                         skb->next = NULL;
559                 }
560
561                 if (err == 0) {
562                         IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
563                         return 0;
564                 }
565
566                 while (frag) {
567                         skb = frag->next;
568                         kfree_skb(frag);
569                         frag = skb;
570                 }
571                 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
572                 return err;
573
574 slow_path_clean:
575                 skb_walk_frags(skb, frag2) {
576                         if (frag2 == frag)
577                                 break;
578                         frag2->sk = NULL;
579                         frag2->destructor = NULL;
580                         skb->truesize += frag2->truesize;
581                 }
582         }
583
584 slow_path:
585         left = skb->len - hlen;         /* Space per frame */
586         ptr = hlen;             /* Where to start from */
587
588         /* for bridged IP traffic encapsulated inside e.g. a vlan header,
589          * we need to make room for the encapsulating header
590          */
591         ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));
592
593         /*
594          *      Fragment the datagram.
595          */
596
597         offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
598         not_last_frag = iph->frag_off & htons(IP_MF);
599
600         /*
601          *      Keep copying data until we run out.
602          */
603
604         while (left > 0) {
605                 len = left;
606                 /* IF: it doesn't fit, use 'mtu' - the data space left */
607                 if (len > mtu)
608                         len = mtu;
609                 /* IF: we are not sending up to and including the end of the
610                    packet, align the next fragment start on an eight-byte boundary */
611                 if (len < left) {
612                         len &= ~7;
613                 }
614                 /*
615                  *      Allocate buffer.
616                  */
617
618                 if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
619                         NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
620                         err = -ENOMEM;
621                         goto fail;
622                 }
623
624                 /*
625                  *      Set up data on packet
626                  */
627
628                 ip_copy_metadata(skb2, skb);
629                 skb_reserve(skb2, ll_rs);
630                 skb_put(skb2, len + hlen);
631                 skb_reset_network_header(skb2);
632                 skb2->transport_header = skb2->network_header + hlen;
633
634                 /*
635                  *      Charge the memory for the fragment to any owner
636                  *      it might possess
637                  */
638
639                 if (skb->sk)
640                         skb_set_owner_w(skb2, skb->sk);
641
642                 /*
643                  *      Copy the packet header into the new buffer.
644                  */
645
646                 skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);
647
648                 /*
649                  *      Copy a block of the IP datagram.
650                  */
651                 if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
652                         BUG();
653                 left -= len;
654
655                 /*
656                  *      Fill in the new header fields.
657                  */
658                 iph = ip_hdr(skb2);
659                 iph->frag_off = htons((offset >> 3));
660
661                 /* ANK: dirty, but effective trick. Upgrade options only if
662                  * the segment to be fragmented was THE FIRST (otherwise,
663                  * options are already fixed) and make it ONCE
664                  * on the initial skb, so that all the following fragments
665                  * will inherit fixed options.
666                  */
667                 if (offset == 0)
668                         ip_options_fragment(skb);
669
670                 /*
671                  *      Added AC : If we are fragmenting a fragment that's not the
672                  *                 last fragment then keep the MF bit set on each fragment
673                  */
674                 if (left > 0 || not_last_frag)
675                         iph->frag_off |= htons(IP_MF);
676                 ptr += len;
677                 offset += len;
678
679                 /*
680                  *      Put this fragment into the sending queue.
681                  */
682                 iph->tot_len = htons(len + hlen);
683
684                 ip_send_check(iph);
685
686                 err = output(skb2);
687                 if (err)
688                         goto fail;
689
690                 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
691         }
692         kfree_skb(skb);
693         IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
694         return err;
695
696 fail:
697         kfree_skb(skb);
698         IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
699         return err;
700 }
701 EXPORT_SYMBOL(ip_fragment);
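
/*
 * Illustrative sketch (not part of the original file): a worked example of
 * the slow-path geometry above, assuming a 1500-byte MTU and a 20-byte IP
 * header with no options. Kept under "#if 0".
 */
#if 0
static void ip_fragment_geometry_example(void)
{
	unsigned int mtu = 1500, hlen = 20;
	unsigned int data_space = mtu - hlen;		/* 1480 bytes of payload room */
	unsigned int per_frag = data_space & ~7U;	/* 1480, already 8-byte aligned */

	/* A 4000-byte payload then yields three fragments carrying 1480,
	 * 1480 and 1040 bytes of data; the first two have IP_MF set, and
	 * frag_off holds 0, 185 and 370 in 8-byte units (1480 / 8 = 185).
	 */
	(void)per_frag;
}
#endif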
702
703 int
704 ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
705 {
706         struct iovec *iov = from;
707
708         if (skb->ip_summed == CHECKSUM_PARTIAL) {
709                 if (memcpy_fromiovecend(to, iov, offset, len) < 0)
710                         return -EFAULT;
711         } else {
712                 __wsum csum = 0;
713                 if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
714                         return -EFAULT;
715                 skb->csum = csum_block_add(skb->csum, csum, odd);
716         }
717         return 0;
718 }
719 EXPORT_SYMBOL(ip_generic_getfrag);
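
/*
 * Note on the getfrag contract: a callback copies "len" bytes starting at
 * "offset" of its opaque "from" cookie into "to", and when the skb cannot
 * be checksum-offloaded it must also fold the copied data into skb->csum
 * ("odd" is the byte position passed to csum_block_add()).
 * ip_reply_glue_bits() below is the kernel-space counterpart of this
 * iovec-based variant.
 */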
720
721 static inline __wsum
722 csum_page(struct page *page, int offset, int copy)
723 {
724         char *kaddr;
725         __wsum csum;
726         kaddr = kmap(page);
727         csum = csum_partial(kaddr + offset, copy, 0);
728         kunmap(page);
729         return csum;
730 }
731
732 static inline int ip_ufo_append_data(struct sock *sk,
733                         struct sk_buff_head *queue,
734                         int getfrag(void *from, char *to, int offset, int len,
735                                int odd, struct sk_buff *skb),
736                         void *from, int length, int hh_len, int fragheaderlen,
737                         int transhdrlen, int mtu, unsigned int flags)
738 {
739         struct sk_buff *skb;
740         int err;
741
742         /* The network device supports UDP fragmentation offload (UFO),
743          * so create one single skb containing the complete UDP
744          * datagram.
745          */
746         if ((skb = skb_peek_tail(queue)) == NULL) {
747                 skb = sock_alloc_send_skb(sk,
748                         hh_len + fragheaderlen + transhdrlen + 20,
749                         (flags & MSG_DONTWAIT), &err);
750
751                 if (skb == NULL)
752                         return err;
753
754                 /* reserve space for Hardware header */
755                 skb_reserve(skb, hh_len);
756
757                 /* create space for UDP/IP header */
758                 skb_put(skb, fragheaderlen + transhdrlen);
759
760                 /* initialize network header pointer */
761                 skb_reset_network_header(skb);
762
763                 /* initialize protocol header pointer */
764                 skb->transport_header = skb->network_header + fragheaderlen;
765
766                 skb->ip_summed = CHECKSUM_PARTIAL;
767                 skb->csum = 0;
768
769                 /* specify the length of each IP datagram fragment */
770                 skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
771                 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
772                 __skb_queue_tail(queue, skb);
773         }
774
775         return skb_append_datato_frags(sk, skb, getfrag, from,
776                                        (length - transhdrlen));
777 }
778
779 static int __ip_append_data(struct sock *sk,
780                             struct flowi4 *fl4,
781                             struct sk_buff_head *queue,
782                             struct inet_cork *cork,
783                             int getfrag(void *from, char *to, int offset,
784                                         int len, int odd, struct sk_buff *skb),
785                             void *from, int length, int transhdrlen,
786                             unsigned int flags)
787 {
788         struct inet_sock *inet = inet_sk(sk);
789         struct sk_buff *skb;
790
791         struct ip_options *opt = cork->opt;
792         int hh_len;
793         int exthdrlen;
794         int mtu;
795         int copy;
796         int err;
797         int offset = 0;
798         unsigned int maxfraglen, fragheaderlen;
799         int csummode = CHECKSUM_NONE;
800         struct rtable *rt = (struct rtable *)cork->dst;
801
802         skb = skb_peek_tail(queue);
803
804         exthdrlen = !skb ? rt->dst.header_len : 0;
805         length += exthdrlen;
806         transhdrlen += exthdrlen;
807         mtu = cork->fragsize;
808
809         hh_len = LL_RESERVED_SPACE(rt->dst.dev);
810
811         fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
812         maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
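        /* Note: maxfraglen rounds the per-fragment payload down to a multiple
         * of 8 and adds the header back. E.g. mtu = 1500 and no options:
         * ((1500 - 20) & ~7) + 20 = 1500; with 12 bytes of options:
         * ((1500 - 32) & ~7) + 32 = 1496.
         */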
813
814         if (cork->length + length > 0xFFFF - fragheaderlen) {
815                 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
816                                mtu-exthdrlen);
817                 return -EMSGSIZE;
818         }
819
820         /*
821          * transhdrlen > 0 means that this is the first fragment and we wish
822          * it not to be fragmented later.
823          */
824         if (transhdrlen &&
825             length + fragheaderlen <= mtu &&
826             rt->dst.dev->features & NETIF_F_V4_CSUM &&
827             !exthdrlen)
828                 csummode = CHECKSUM_PARTIAL;
829
830         cork->length += length;
831         if (((length > mtu) || (skb && skb_is_gso(skb))) &&
832             (sk->sk_protocol == IPPROTO_UDP) &&
833             (rt->dst.dev->features & NETIF_F_UFO)) {
834                 err = ip_ufo_append_data(sk, queue, getfrag, from, length,
835                                          hh_len, fragheaderlen, transhdrlen,
836                                          mtu, flags);
837                 if (err)
838                         goto error;
839                 return 0;
840         }
841
842         /* So, what's going on in the loop below?
843          *
844          * We use the calculated fragment length to generate a chain of skbs;
845          * each segment is an IP fragment ready to be sent to the network
846          * once an appropriate IP header has been added.
847          */
848
849         if (!skb)
850                 goto alloc_new_skb;
851
852         while (length > 0) {
853                 /* Check if the remaining data fits into current packet. */
854                 copy = mtu - skb->len;
855                 if (copy < length)
856                         copy = maxfraglen - skb->len;
857                 if (copy <= 0) {
858                         char *data;
859                         unsigned int datalen;
860                         unsigned int fraglen;
861                         unsigned int fraggap;
862                         unsigned int alloclen;
863                         struct sk_buff *skb_prev;
864 alloc_new_skb:
865                         skb_prev = skb;
866                         if (skb_prev)
867                                 fraggap = skb_prev->len - maxfraglen;
868                         else
869                                 fraggap = 0;
870
871                         /*
872                          * If remaining data exceeds the mtu,
873                          * we know we need more fragment(s).
874                          */
875                         datalen = length + fraggap;
876                         if (datalen > mtu - fragheaderlen)
877                                 datalen = maxfraglen - fragheaderlen;
878                         fraglen = datalen + fragheaderlen;
879
880                         if ((flags & MSG_MORE) &&
881                             !(rt->dst.dev->features&NETIF_F_SG))
882                                 alloclen = mtu;
883                         else
884                                 alloclen = fraglen;
885
886                         /* The last fragment gets additional space at the tail.
887                          * Note that with MSG_MORE we overallocate on fragments,
888                          * because we have no idea which fragment will be
889                          * the last.
890                          */
891                         if (datalen == length + fraggap) {
892                                 alloclen += rt->dst.trailer_len;
893                                 /* make sure mtu is not reached */
894                                 if (datalen > mtu - fragheaderlen - rt->dst.trailer_len)
895                                         datalen -= ALIGN(rt->dst.trailer_len, 8);
896                         }
897                         if (transhdrlen) {
898                                 skb = sock_alloc_send_skb(sk,
899                                                 alloclen + hh_len + 15,
900                                                 (flags & MSG_DONTWAIT), &err);
901                         } else {
902                                 skb = NULL;
903                                 if (atomic_read(&sk->sk_wmem_alloc) <=
904                                     2 * sk->sk_sndbuf)
905                                         skb = sock_wmalloc(sk,
906                                                            alloclen + hh_len + 15, 1,
907                                                            sk->sk_allocation);
908                                 if (unlikely(skb == NULL))
909                                         err = -ENOBUFS;
910                                 else
911                                         /* only the initial fragment is
912                                            time stamped */
913                                         cork->tx_flags = 0;
914                         }
915                         if (skb == NULL)
916                                 goto error;
917
918                         /*
919                          *      Fill in the control structures
920                          */
921                         skb->ip_summed = csummode;
922                         skb->csum = 0;
923                         skb_reserve(skb, hh_len);
924                         skb_shinfo(skb)->tx_flags = cork->tx_flags;
925
926                         /*
927                          *      Find where to start putting bytes.
928                          */
929                         data = skb_put(skb, fraglen);
930                         skb_set_network_header(skb, exthdrlen);
931                         skb->transport_header = (skb->network_header +
932                                                  fragheaderlen);
933                         data += fragheaderlen;
934
935                         if (fraggap) {
936                                 skb->csum = skb_copy_and_csum_bits(
937                                         skb_prev, maxfraglen,
938                                         data + transhdrlen, fraggap, 0);
939                                 skb_prev->csum = csum_sub(skb_prev->csum,
940                                                           skb->csum);
941                                 data += fraggap;
942                                 pskb_trim_unique(skb_prev, maxfraglen);
943                         }
944
945                         copy = datalen - transhdrlen - fraggap;
946                         if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
947                                 err = -EFAULT;
948                                 kfree_skb(skb);
949                                 goto error;
950                         }
951
952                         offset += copy;
953                         length -= datalen - fraggap;
954                         transhdrlen = 0;
955                         exthdrlen = 0;
956                         csummode = CHECKSUM_NONE;
957
958                         /*
959                          * Put the packet on the pending queue.
960                          */
961                         __skb_queue_tail(queue, skb);
962                         continue;
963                 }
964
965                 if (copy > length)
966                         copy = length;
967
968                 if (!(rt->dst.dev->features&NETIF_F_SG)) {
969                         unsigned int off;
970
971                         off = skb->len;
972                         if (getfrag(from, skb_put(skb, copy),
973                                         offset, copy, off, skb) < 0) {
974                                 __skb_trim(skb, off);
975                                 err = -EFAULT;
976                                 goto error;
977                         }
978                 } else {
979                         int i = skb_shinfo(skb)->nr_frags;
980                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
981                         struct page *page = cork->page;
982                         int off = cork->off;
983                         unsigned int left;
984
985                         if (page && (left = PAGE_SIZE - off) > 0) {
986                                 if (copy >= left)
987                                         copy = left;
988                                 if (page != frag->page) {
989                                         if (i == MAX_SKB_FRAGS) {
990                                                 err = -EMSGSIZE;
991                                                 goto error;
992                                         }
993                                         get_page(page);
994                                         skb_fill_page_desc(skb, i, page, off, 0);
995                                         frag = &skb_shinfo(skb)->frags[i];
996                                 }
997                         } else if (i < MAX_SKB_FRAGS) {
998                                 if (copy > PAGE_SIZE)
999                                         copy = PAGE_SIZE;
1000                                 page = alloc_pages(sk->sk_allocation, 0);
1001                                 if (page == NULL)  {
1002                                         err = -ENOMEM;
1003                                         goto error;
1004                                 }
1005                                 cork->page = page;
1006                                 cork->off = 0;
1007
1008                                 skb_fill_page_desc(skb, i, page, 0, 0);
1009                                 frag = &skb_shinfo(skb)->frags[i];
1010                         } else {
1011                                 err = -EMSGSIZE;
1012                                 goto error;
1013                         }
1014                         if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
1015                                 err = -EFAULT;
1016                                 goto error;
1017                         }
1018                         cork->off += copy;
1019                         frag->size += copy;
1020                         skb->len += copy;
1021                         skb->data_len += copy;
1022                         skb->truesize += copy;
1023                         atomic_add(copy, &sk->sk_wmem_alloc);
1024                 }
1025                 offset += copy;
1026                 length -= copy;
1027         }
1028
1029         return 0;
1030
1031 error:
1032         cork->length -= length;
1033         IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
1034         return err;
1035 }
1036
1037 static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
1038                          struct ipcm_cookie *ipc, struct rtable **rtp)
1039 {
1040         struct inet_sock *inet = inet_sk(sk);
1041         struct ip_options_rcu *opt;
1042         struct rtable *rt;
1043
1044         /*
1045          * setup for corking.
1046          */
1047         opt = ipc->opt;
1048         if (opt) {
1049                 if (cork->opt == NULL) {
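                        /* Note: 40 is the maximum size of IPv4 options
                         * (60-byte maximum header minus the fixed 20). */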
1050                         cork->opt = kmalloc(sizeof(struct ip_options) + 40,
1051                                             sk->sk_allocation);
1052                         if (unlikely(cork->opt == NULL))
1053                                 return -ENOBUFS;
1054                 }
1055                 memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
1056                 cork->flags |= IPCORK_OPT;
1057                 cork->addr = ipc->addr;
1058         }
1059         rt = *rtp;
1060         if (unlikely(!rt))
1061                 return -EFAULT;
1062         /*
1063          * We steal the reference to this route; the caller must not release it
1064          */
1065         *rtp = NULL;
1066         cork->fragsize = inet->pmtudisc == IP_PMTUDISC_PROBE ?
1067                          rt->dst.dev->mtu : dst_mtu(rt->dst.path);
1068         cork->dst = &rt->dst;
1069         cork->length = 0;
1070         cork->tx_flags = ipc->tx_flags;
1071         cork->page = NULL;
1072         cork->off = 0;
1073
1074         return 0;
1075 }
1076
1077 /*
1078  *      ip_append_data() and ip_append_page() can make one large IP datagram
1079  *      from many pieces of data. Each piece will be held on the socket
1080  *      until ip_push_pending_frames() is called. Each piece can be a page
1081  *      or non-page data.
1082  *
1083  *      Transport protocols other than UDP (e.g. raw sockets) can
1084  *      potentially use this interface as well.
1085  *
1086  *      LATER: length must be adjusted by the pad at the tail, when required.
1087  */
1088 int ip_append_data(struct sock *sk, struct flowi4 *fl4,
1089                    int getfrag(void *from, char *to, int offset, int len,
1090                                int odd, struct sk_buff *skb),
1091                    void *from, int length, int transhdrlen,
1092                    struct ipcm_cookie *ipc, struct rtable **rtp,
1093                    unsigned int flags)
1094 {
1095         struct inet_sock *inet = inet_sk(sk);
1096         int err;
1097
1098         if (flags&MSG_PROBE)
1099                 return 0;
1100
1101         if (skb_queue_empty(&sk->sk_write_queue)) {
1102                 err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
1103                 if (err)
1104                         return err;
1105         } else {
1106                 transhdrlen = 0;
1107         }
1108
1109         return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base, getfrag,
1110                                 from, length, transhdrlen, flags);
1111 }
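
/*
 * Illustrative sketch (not part of the original file): the corking pattern
 * this API supports, as used by e.g. the UDP sendmsg path: append one or
 * more chunks, then push a single datagram. Locking, route setup and
 * <linux/udp.h> are assumed; the function name is hypothetical. Kept under
 * "#if 0".
 */
#if 0
static int ip_append_usage_sketch(struct sock *sk, struct flowi4 *fl4,
				  struct ipcm_cookie *ipc, struct rtable *rt,
				  struct iovec *iov, int length)
{
	int err;

	err = ip_append_data(sk, fl4, ip_generic_getfrag, iov, length,
			     sizeof(struct udphdr), ipc, &rt, MSG_DONTWAIT);
	if (err)
		ip_flush_pending_frames(sk);	/* drop whatever was queued */
	else
		err = ip_push_pending_frames(sk, fl4);	/* build and send */
	return err;
}
#endif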
1112
1113 ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
1114                        int offset, size_t size, int flags)
1115 {
1116         struct inet_sock *inet = inet_sk(sk);
1117         struct sk_buff *skb;
1118         struct rtable *rt;
1119         struct ip_options *opt = NULL;
1120         struct inet_cork *cork;
1121         int hh_len;
1122         int mtu;
1123         int len;
1124         int err;
1125         unsigned int maxfraglen, fragheaderlen, fraggap;
1126
1127         if (inet->hdrincl)
1128                 return -EPERM;
1129
1130         if (flags&MSG_PROBE)
1131                 return 0;
1132
1133         if (skb_queue_empty(&sk->sk_write_queue))
1134                 return -EINVAL;
1135
1136         cork = &inet->cork.base;
1137         rt = (struct rtable *)cork->dst;
1138         if (cork->flags & IPCORK_OPT)
1139                 opt = cork->opt;
1140
1141         if (!(rt->dst.dev->features&NETIF_F_SG))
1142                 return -EOPNOTSUPP;
1143
1144         hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1145         mtu = cork->fragsize;
1146
1147         fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
1148         maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
1149
1150         if (cork->length + size > 0xFFFF - fragheaderlen) {
1151                 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu);
1152                 return -EMSGSIZE;
1153         }
1154
1155         if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
1156                 return -EINVAL;
1157
1158         cork->length += size;
1159         if ((size + skb->len > mtu) &&
1160             (sk->sk_protocol == IPPROTO_UDP) &&
1161             (rt->dst.dev->features & NETIF_F_UFO)) {
1162                 skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
1163                 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1164         }
1165
1166
1167         while (size > 0) {
1168                 int i;
1169
1170                 if (skb_is_gso(skb))
1171                         len = size;
1172                 else {
1173
1174                         /* Check if the remaining data fits into current packet. */
1175                         len = mtu - skb->len;
1176                         if (len < size)
1177                                 len = maxfraglen - skb->len;
1178                 }
1179                 if (len <= 0) {
1180                         struct sk_buff *skb_prev;
1181                         int alloclen;
1182
1183                         skb_prev = skb;
1184                         fraggap = skb_prev->len - maxfraglen;
1185
1186                         alloclen = fragheaderlen + hh_len + fraggap + 15;
1187                         skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
1188                         if (unlikely(!skb)) {
1189                                 err = -ENOBUFS;
1190                                 goto error;
1191                         }
1192
1193                         /*
1194                          *      Fill in the control structures
1195                          */
1196                         skb->ip_summed = CHECKSUM_NONE;
1197                         skb->csum = 0;
1198                         skb_reserve(skb, hh_len);
1199
1200                         /*
1201                          *      Find where to start putting bytes.
1202                          */
1203                         skb_put(skb, fragheaderlen + fraggap);
1204                         skb_reset_network_header(skb);
1205                         skb->transport_header = (skb->network_header +
1206                                                  fragheaderlen);
1207                         if (fraggap) {
1208                                 skb->csum = skb_copy_and_csum_bits(skb_prev,
1209                                                                    maxfraglen,
1210                                                     skb_transport_header(skb),
1211                                                                    fraggap, 0);
1212                                 skb_prev->csum = csum_sub(skb_prev->csum,
1213                                                           skb->csum);
1214                                 pskb_trim_unique(skb_prev, maxfraglen);
1215                         }
1216
1217                         /*
1218                          * Put the packet on the pending queue.
1219                          */
1220                         __skb_queue_tail(&sk->sk_write_queue, skb);
1221                         continue;
1222                 }
1223
1224                 i = skb_shinfo(skb)->nr_frags;
1225                 if (len > size)
1226                         len = size;
1227                 if (skb_can_coalesce(skb, i, page, offset)) {
1228                         skb_shinfo(skb)->frags[i-1].size += len;
1229                 } else if (i < MAX_SKB_FRAGS) {
1230                         get_page(page);
1231                         skb_fill_page_desc(skb, i, page, offset, len);
1232                 } else {
1233                         err = -EMSGSIZE;
1234                         goto error;
1235                 }
1236
1237                 if (skb->ip_summed == CHECKSUM_NONE) {
1238                         __wsum csum;
1239                         csum = csum_page(page, offset, len);
1240                         skb->csum = csum_block_add(skb->csum, csum, skb->len);
1241                 }
1242
1243                 skb->len += len;
1244                 skb->data_len += len;
1245                 skb->truesize += len;
1246                 atomic_add(len, &sk->sk_wmem_alloc);
1247                 offset += len;
1248                 size -= len;
1249         }
1250         return 0;
1251
1252 error:
1253         cork->length -= size;
1254         IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
1255         return err;
1256 }
1257
1258 static void ip_cork_release(struct inet_cork *cork)
1259 {
1260         cork->flags &= ~IPCORK_OPT;
1261         kfree(cork->opt);
1262         cork->opt = NULL;
1263         dst_release(cork->dst);
1264         cork->dst = NULL;
1265 }
1266
1267 /*
1268  *      Combine all pending IP fragments on the socket into one IP datagram
1269  *      and push them out.
1270  */
1271 struct sk_buff *__ip_make_skb(struct sock *sk,
1272                               struct flowi4 *fl4,
1273                               struct sk_buff_head *queue,
1274                               struct inet_cork *cork)
1275 {
1276         struct sk_buff *skb, *tmp_skb;
1277         struct sk_buff **tail_skb;
1278         struct inet_sock *inet = inet_sk(sk);
1279         struct net *net = sock_net(sk);
1280         struct ip_options *opt = NULL;
1281         struct rtable *rt = (struct rtable *)cork->dst;
1282         struct iphdr *iph;
1283         __be16 df = 0;
1284         __u8 ttl;
1285
1286         if ((skb = __skb_dequeue(queue)) == NULL)
1287                 goto out;
1288         tail_skb = &(skb_shinfo(skb)->frag_list);
1289
1290         /* move skb->data from the ext header up to the ip header */
1291         if (skb->data < skb_network_header(skb))
1292                 __skb_pull(skb, skb_network_offset(skb));
1293         while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
1294                 __skb_pull(tmp_skb, skb_network_header_len(skb));
1295                 *tail_skb = tmp_skb;
1296                 tail_skb = &(tmp_skb->next);
1297                 skb->len += tmp_skb->len;
1298                 skb->data_len += tmp_skb->len;
1299                 skb->truesize += tmp_skb->truesize;
1300                 tmp_skb->destructor = NULL;
1301                 tmp_skb->sk = NULL;
1302         }
1303
1304         /* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we
1305          * allow the frame generated here to be fragmented. No matter how
1306          * transforms change the size of the packet, it will come out.
1307          */
1308         if (inet->pmtudisc < IP_PMTUDISC_DO)
1309                 skb->local_df = 1;
1310
1311         /* DF bit is set when we want to see DF on outgoing frames.
1312          * If local_df is set too, we still allow this frame to be
1313          * fragmented locally. */
1314         if (inet->pmtudisc >= IP_PMTUDISC_DO ||
1315             (skb->len <= dst_mtu(&rt->dst) &&
1316              ip_dont_fragment(sk, &rt->dst)))
1317                 df = htons(IP_DF);
1318
1319         if (cork->flags & IPCORK_OPT)
1320                 opt = cork->opt;
1321
1322         if (rt->rt_type == RTN_MULTICAST)
1323                 ttl = inet->mc_ttl;
1324         else
1325                 ttl = ip_select_ttl(inet, &rt->dst);
1326
1327         iph = (struct iphdr *)skb->data;
1328         iph->version = 4;
1329         iph->ihl = 5;
1330         iph->tos = inet->tos;
1331         iph->frag_off = df;
1332         ip_select_ident(iph, &rt->dst, sk);
1333         iph->ttl = ttl;
1334         iph->protocol = sk->sk_protocol;
1335         iph->saddr = fl4->saddr;
1336         iph->daddr = fl4->daddr;
1337
1338         if (opt) {
1339                 iph->ihl += opt->optlen>>2;
1340                 ip_options_build(skb, opt, cork->addr, rt, 0);
1341         }
1342
1343         skb->priority = sk->sk_priority;
1344         skb->mark = sk->sk_mark;
1345         /*
1346          * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
1347          * on dst refcount
1348          */
1349         cork->dst = NULL;
1350         skb_dst_set(skb, &rt->dst);
1351
1352         if (iph->protocol == IPPROTO_ICMP)
1353                 icmp_out_count(net, ((struct icmphdr *)
1354                         skb_transport_header(skb))->type);
1355
1356         ip_cork_release(cork);
1357 out:
1358         return skb;
1359 }
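
/*
 * Note: the dequeue loop above chains all the queued skbs onto the first
 * one's frag_list; if the resulting datagram is too large for the route,
 * ip_fragment()'s frag_list fast path can later emit those pieces without
 * copying.
 */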
1360
1361 int ip_send_skb(struct sk_buff *skb)
1362 {
1363         struct net *net = sock_net(skb->sk);
1364         int err;
1365
1366         err = ip_local_out(skb);
1367         if (err) {
1368                 if (err > 0)
1369                         err = net_xmit_errno(err);
1370                 if (err)
1371                         IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
1372         }
1373
1374         return err;
1375 }
1376
1377 int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
1378 {
1379         struct sk_buff *skb;
1380
1381         skb = ip_finish_skb(sk, fl4);
1382         if (!skb)
1383                 return 0;
1384
1385         /* Netfilter gets the whole, unfragmented skb. */
1386         return ip_send_skb(skb);
1387 }
1388
1389 /*
1390  *      Throw away all pending data on the socket.
1391  */
1392 static void __ip_flush_pending_frames(struct sock *sk,
1393                                       struct sk_buff_head *queue,
1394                                       struct inet_cork *cork)
1395 {
1396         struct sk_buff *skb;
1397
1398         while ((skb = __skb_dequeue_tail(queue)) != NULL)
1399                 kfree_skb(skb);
1400
1401         ip_cork_release(cork);
1402 }
1403
1404 void ip_flush_pending_frames(struct sock *sk)
1405 {
1406         __ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
1407 }
1408
1409 struct sk_buff *ip_make_skb(struct sock *sk,
1410                             struct flowi4 *fl4,
1411                             int getfrag(void *from, char *to, int offset,
1412                                         int len, int odd, struct sk_buff *skb),
1413                             void *from, int length, int transhdrlen,
1414                             struct ipcm_cookie *ipc, struct rtable **rtp,
1415                             unsigned int flags)
1416 {
1417         struct inet_cork cork;
1418         struct sk_buff_head queue;
1419         int err;
1420
1421         if (flags & MSG_PROBE)
1422                 return NULL;
1423
1424         __skb_queue_head_init(&queue);
1425
1426         cork.flags = 0;
1427         cork.addr = 0;
1428         cork.opt = NULL;
1429         err = ip_setup_cork(sk, &cork, ipc, rtp);
1430         if (err)
1431                 return ERR_PTR(err);
1432
1433         err = __ip_append_data(sk, fl4, &queue, &cork, getfrag,
1434                                from, length, transhdrlen, flags);
1435         if (err) {
1436                 __ip_flush_pending_frames(sk, &queue, &cork);
1437                 return ERR_PTR(err);
1438         }
1439
1440         return __ip_make_skb(sk, fl4, &queue, &cork);
1441 }
1442
1443 /*
1444  *      Fetch data from kernel space and fill in checksum if needed.
1445  */
1446 static int ip_reply_glue_bits(void *dptr, char *to, int offset,
1447                               int len, int odd, struct sk_buff *skb)
1448 {
1449         __wsum csum;
1450
1451         csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
1452         skb->csum = csum_block_add(skb->csum, csum, odd);
1453         return 0;
1454 }
1455
1456 /*
1457  *      Generic function to send a packet in reply to another packet.
1458  *      Used to send TCP resets so far. ICMP should use this function too.
1459  *
1460  *      Should run single threaded per socket because it uses the sock
1461  *      structure to pass arguments.
1462  */
1463 void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
1464                    struct ip_reply_arg *arg, unsigned int len)
1465 {
1466         struct inet_sock *inet = inet_sk(sk);
1467         struct ip_options_data replyopts;
1468         struct ipcm_cookie ipc;
1469         struct flowi4 fl4;
1470         struct rtable *rt = skb_rtable(skb);
1471
1472         if (ip_options_echo(&replyopts.opt.opt, skb))
1473                 return;
1474
1475         ipc.addr = daddr;
1476         ipc.opt = NULL;
1477         ipc.tx_flags = 0;
1478
1479         if (replyopts.opt.opt.optlen) {
1480                 ipc.opt = &replyopts.opt;
1481
1482                 if (replyopts.opt.opt.srr)
1483                         daddr = replyopts.opt.opt.faddr;
1484         }
1485
1486         flowi4_init_output(&fl4, arg->bound_dev_if, 0,
1487                            RT_TOS(ip_hdr(skb)->tos),
1488                            RT_SCOPE_UNIVERSE, sk->sk_protocol,
1489                            ip_reply_arg_flowi_flags(arg),
1490                            daddr, rt->rt_spec_dst,
1491                            tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
1492         security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
1493         rt = ip_route_output_key(sock_net(sk), &fl4);
1494         if (IS_ERR(rt))
1495                 return;
1496
1497         /* And let IP do all the hard work.
1498
1499            This chunk is not reentrant, hence the spinlock.
1500            Note that it relies on the fact that this function is called
1501            with BH disabled locally and that sk cannot already be spinlocked.
1502          */
1503         bh_lock_sock(sk);
1504         inet->tos = ip_hdr(skb)->tos;
1505         sk->sk_priority = skb->priority;
1506         sk->sk_protocol = ip_hdr(skb)->protocol;
1507         sk->sk_bound_dev_if = arg->bound_dev_if;
1508         ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
1509                        &ipc, &rt, MSG_DONTWAIT);
1510         if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
1511                 if (arg->csumoffset >= 0)
1512                         *((__sum16 *)skb_transport_header(skb) +
1513                           arg->csumoffset) = csum_fold(csum_add(skb->csum,
1514                                                                 arg->csum));
1515                 skb->ip_summed = CHECKSUM_NONE;
1516                 ip_push_pending_frames(sk, &fl4);
1517         }
1518
1519         bh_unlock_sock(sk);
1520
1521         ip_rt_put(rt);
1522 }
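
/*
 * Usage note (not from the original file): in kernels of this era TCP calls
 * ip_send_reply() from its reset/ack paths on a control socket, which is why
 * the comment above requires single-threaded use per socket.
 */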
1523
1524 void __init ip_init(void)
1525 {
1526         ip_rt_init();
1527         inet_initpeers();
1528
1529 #if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
1530         igmp_mc_proc_init();
1531 #endif
1532 }