net/packet/af_packet.c
1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              PACKET - implements raw packet sockets.
7  *
8  * Authors:     Ross Biro
9  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
11  *
12  * Fixes:
13  *              Alan Cox        :       verify_area() now used correctly
14  *              Alan Cox        :       new skbuff lists, look ma no backlogs!
15  *              Alan Cox        :       tidied skbuff lists.
16  *              Alan Cox        :       Now uses generic datagram routines I
17  *                                      added. Also fixed the peek/read crash
18  *                                      from all old Linux datagram code.
19  *              Alan Cox        :       Uses the improved datagram code.
20  *              Alan Cox        :       Added NULL's for socket options.
21  *              Alan Cox        :       Re-commented the code.
22  *              Alan Cox        :       Use new kernel side addressing
23  *              Rob Janssen     :       Correct MTU usage.
24  *              Dave Platt      :       Counter leaks caused by incorrect
25  *                                      interrupt locking and some slightly
26  *                                      dubious gcc output. Can you read
27  *                                      compiler: it said _VOLATILE_
28  *      Richard Kooijman        :       Timestamp fixes.
29  *              Alan Cox        :       New buffers. Use sk->mac.raw.
30  *              Alan Cox        :       sendmsg/recvmsg support.
31  *              Alan Cox        :       Protocol setting support
32  *      Alexey Kuznetsov        :       Untied from IPv4 stack.
33  *      Cyrus Durgin            :       Fixed kerneld for kmod.
34  *      Michal Ostrowski        :       Module initialization cleanup.
35  *         Ulises Alonso        :       Frame number limit removal and
36  *                                      packet_set_ring memory leak.
37  *              Eric Biederman  :       Allow for > 8 byte hardware addresses.
38  *                                      The convention is that longer addresses
39  *                                      will simply extend the hardware address
40  *                                      byte arrays at the end of sockaddr_ll
41  *                                      and packet_mreq.
42  *              Johann Baudy    :       Added TX RING.
43  *              Chetan Loke     :       Implemented TPACKET_V3 block abstraction
44  *                                      layer.
45  *                                      Copyright (C) 2011, <lokec@ccs.neu.edu>
46  *
47  *
48  *              This program is free software; you can redistribute it and/or
49  *              modify it under the terms of the GNU General Public License
50  *              as published by the Free Software Foundation; either version
51  *              2 of the License, or (at your option) any later version.
52  *
53  */
54
55 #include <linux/types.h>
56 #include <linux/mm.h>
57 #include <linux/capability.h>
58 #include <linux/fcntl.h>
59 #include <linux/socket.h>
60 #include <linux/in.h>
61 #include <linux/inet.h>
62 #include <linux/netdevice.h>
63 #include <linux/if_packet.h>
64 #include <linux/wireless.h>
65 #include <linux/kernel.h>
66 #include <linux/kmod.h>
67 #include <linux/slab.h>
68 #include <linux/vmalloc.h>
69 #include <net/net_namespace.h>
70 #include <net/ip.h>
71 #include <net/protocol.h>
72 #include <linux/skbuff.h>
73 #include <net/sock.h>
74 #include <linux/errno.h>
75 #include <linux/timer.h>
76 #include <asm/system.h>
77 #include <asm/uaccess.h>
78 #include <asm/ioctls.h>
79 #include <asm/page.h>
80 #include <asm/cacheflush.h>
81 #include <asm/io.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
84 #include <linux/poll.h>
85 #include <linux/module.h>
86 #include <linux/init.h>
87 #include <linux/mutex.h>
88 #include <linux/if_vlan.h>
89 #include <linux/virtio_net.h>
90 #include <linux/errqueue.h>
91 #include <linux/net_tstamp.h>
92
93 #ifdef CONFIG_INET
94 #include <net/inet_common.h>
95 #endif
96
97 /*
98    Assumptions:
99    - if a device has no dev->hard_header routine, it adds and removes the ll
100      header itself. In this case the ll header is invisible outside the
101      device, but higher levels should still reserve dev->hard_header_len.
102      Some devices are clever enough to reallocate the skb when the header
103      does not fit in the reserved space (tunnels); others are not
104      (PPP).
105    - a packet socket receives packets with the ll header pulled off,
106      so SOCK_RAW must push it back.
107
108 On receive:
109 -----------
110
111 Incoming, dev->hard_header!=NULL
112    mac_header -> ll header
113    data       -> data
114
115 Outgoing, dev->hard_header!=NULL
116    mac_header -> ll header
117    data       -> ll header
118
119 Incoming, dev->hard_header==NULL
120    mac_header -> UNKNOWN position. It very likely points to the ll
121                  header.  PPP does this, which is wrong, because it
122                  introduces asymmetry between the rx and tx paths.
123    data       -> data
124
125 Outgoing, dev->hard_header==NULL
126    mac_header -> data. ll header is still not built!
127    data       -> data
128
129 Summary:
130   If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
131
132
133 On transmit:
134 ------------
135
136 dev->hard_header != NULL
137    mac_header -> ll header
138    data       -> ll header
139
140 dev->hard_header == NULL (ll header is added by device, we cannot control it)
141    mac_header -> data
142    data       -> data
143
144    We should set nh.raw on output to the correct position;
145    the packet classifier depends on it.
146  */
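
/* Illustrative user-space sketch (not part of this file) of the two receive
 * flavours described above; error handling omitted:
 *
 *   #include <arpa/inet.h>
 *   #include <linux/if_ether.h>
 *   #include <linux/if_packet.h>
 *   #include <sys/socket.h>
 *
 *   // SOCK_RAW: frames arrive with the ll header pushed back on.
 *   int raw = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *
 *   // SOCK_DGRAM: the ll header stays pulled; link-level addressing is
 *   // reported via the sockaddr_ll filled in by recvfrom() instead.
 *   int dg = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 */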
147
148 /* Private packet socket structures. */
149
150 struct packet_mclist {
151         struct packet_mclist    *next;
152         int                     ifindex;
153         int                     count;
154         unsigned short          type;
155         unsigned short          alen;
156         unsigned char           addr[MAX_ADDR_LEN];
157 };
158 /* identical to struct packet_mreq except it has
159  * a longer address field.
160  */
161 struct packet_mreq_max {
162         int             mr_ifindex;
163         unsigned short  mr_type;
164         unsigned short  mr_alen;
165         unsigned char   mr_address[MAX_ADDR_LEN];
166 };
167
168 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
169                 int closing, int tx_ring);
170
171
172 #define V3_ALIGNMENT    (8)
173
174 #define BLK_HDR_LEN     (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
175
176 #define BLK_PLUS_PRIV(sz_of_priv) \
177         (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
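
/* Resulting TPACKET_V3 block layout (every boundary V3_ALIGNMENT-aligned):
 *
 *   +--------------------+--------------+--------------------------------+
 *   | tpacket_block_desc | private area | packets: tpacket3_hdr + data,  |
 *   | (BLK_HDR_LEN)      | (sz_of_priv) | chained via tp_next_offset     |
 *   +--------------------+--------------+--------------------------------+
 */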
178
179 /* kbdq - kernel block descriptor queue */
180 struct tpacket_kbdq_core {
181         struct pgv      *pkbdq;
182         unsigned int    feature_req_word;
183         unsigned int    hdrlen;
184         unsigned char   reset_pending_on_curr_blk;
185         unsigned char   delete_blk_timer;
186         unsigned short  kactive_blk_num;
187         unsigned short  blk_sizeof_priv;
188
189         /* last_kactive_blk_num:
190          * a trick to see whether user-space has caught up,
191          * so we avoid refreshing the timer every time a single pkt arrives.
192          */
193         unsigned short  last_kactive_blk_num;
194
195         char            *pkblk_start;
196         char            *pkblk_end;
197         int             kblk_size;
198         unsigned int    knum_blocks;
199         uint64_t        knxt_seq_num;
200         char            *prev;
201         char            *nxt_offset;
202         struct sk_buff  *skb;
203
204         atomic_t        blk_fill_in_prog;
205
206         /* Default is set to 8ms */
207 #define DEFAULT_PRB_RETIRE_TOV  (8)
208
209         unsigned short  retire_blk_tov;
210         unsigned short  version;
211         unsigned long   tov_in_jiffies;
212
213         /* timer to retire an outstanding block */
214         struct timer_list retire_blk_timer;
215 };
216
217 #define PGV_FROM_VMALLOC 1
218 struct pgv {
219         char *buffer;
220 };
221
222 struct packet_ring_buffer {
223         struct pgv              *pg_vec;
224         unsigned int            head;
225         unsigned int            frames_per_block;
226         unsigned int            frame_size;
227         unsigned int            frame_max;
228
229         unsigned int            pg_vec_order;
230         unsigned int            pg_vec_pages;
231         unsigned int            pg_vec_len;
232
233         struct tpacket_kbdq_core        prb_bdqc;
234         atomic_t                pending;
235 };
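
/* Illustrative user-space sketch of the TPACKET_V3 request that
 * packet_set_ring() turns into the structures above (values are just an
 * example; error handling omitted):
 *
 *   int ver = TPACKET_V3;
 *   struct tpacket_req3 req = {
 *           .tp_block_size     = 1 << 20,            // 1MB per block
 *           .tp_block_nr       = 8,
 *           .tp_frame_size     = 2048,
 *           .tp_frame_nr       = (1 << 20) / 2048 * 8,
 *           .tp_retire_blk_tov = 60,                 // ms; 0 = kernel picks
 *   };
 *   setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *   setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */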
236
237 #define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
238 #define BLOCK_NUM_PKTS(x)       ((x)->hdr.bh1.num_pkts)
239 #define BLOCK_O2FP(x)           ((x)->hdr.bh1.offset_to_first_pkt)
240 #define BLOCK_LEN(x)            ((x)->hdr.bh1.blk_len)
241 #define BLOCK_SNUM(x)           ((x)->hdr.bh1.seq_num)
242 #define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
243 #define BLOCK_PRIV(x)           ((void *)((char *)(x) + BLOCK_O2PRIV(x)))
244
245 struct packet_sock;
246 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
247
248 static void *packet_previous_frame(struct packet_sock *po,
249                 struct packet_ring_buffer *rb,
250                 int status);
251 static void packet_increment_head(struct packet_ring_buffer *buff);
252 static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
253                         struct tpacket_block_desc *);
254 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
255                         struct packet_sock *);
256 static void prb_retire_current_block(struct tpacket_kbdq_core *,
257                 struct packet_sock *, unsigned int status);
258 static int prb_queue_frozen(struct tpacket_kbdq_core *);
259 static void prb_open_block(struct tpacket_kbdq_core *,
260                 struct tpacket_block_desc *);
261 static void prb_retire_rx_blk_timer_expired(unsigned long);
262 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
263 static void prb_init_blk_timer(struct packet_sock *,
264                 struct tpacket_kbdq_core *,
265                 void (*func) (unsigned long));
266 static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
267 static void prb_clear_rxhash(struct tpacket_kbdq_core *,
268                 struct tpacket3_hdr *);
269 static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
270                 struct tpacket3_hdr *);
271 static void packet_flush_mclist(struct sock *sk);
272
273 struct packet_fanout;
274 struct packet_sock {
275         /* struct sock has to be the first member of packet_sock */
276         struct sock             sk;
277         struct packet_fanout    *fanout;
278         struct tpacket_stats    stats;
279         union  tpacket_stats_u  stats_u;
280         struct packet_ring_buffer       rx_ring;
281         struct packet_ring_buffer       tx_ring;
282         int                     copy_thresh;
283         spinlock_t              bind_lock;
284         struct mutex            pg_vec_lock;
285         unsigned int            running:1,      /* prot_hook is attached*/
286                                 auxdata:1,
287                                 origdev:1,
288                                 has_vnet_hdr:1;
289         int                     ifindex;        /* bound device         */
290         __be16                  num;
291         struct packet_mclist    *mclist;
292         atomic_t                mapped;
293         enum tpacket_versions   tp_version;
294         unsigned int            tp_hdrlen;
295         unsigned int            tp_reserve;
296         unsigned int            tp_loss:1;
297         unsigned int            tp_tstamp;
298         struct net_device __rcu *cached_dev;
299         struct packet_type      prot_hook ____cacheline_aligned_in_smp;
300 };
301
302 #define PACKET_FANOUT_MAX       256
303
304 struct packet_fanout {
305 #ifdef CONFIG_NET_NS
306         struct net              *net;
307 #endif
308         unsigned int            num_members;
309         u16                     id;
310         u8                      type;
311         u8                      defrag;
312         atomic_t                rr_cur;
313         struct list_head        list;
314         struct sock             *arr[PACKET_FANOUT_MAX];
315         spinlock_t              lock;
316         atomic_t                sk_ref;
317         struct packet_type      prot_hook ____cacheline_aligned_in_smp;
318 };
319
320 struct packet_skb_cb {
321         unsigned int origlen;
322         union {
323                 struct sockaddr_pkt pkt;
324                 struct sockaddr_ll ll;
325         } sa;
326 };
327
328 #define PACKET_SKB_CB(__skb)    ((struct packet_skb_cb *)((__skb)->cb))
329
330 #define GET_PBDQC_FROM_RB(x)    ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
331 #define GET_PBLOCK_DESC(x, bid) \
332         ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
333 #define GET_CURR_PBLOCK_DESC_FROM_CORE(x)       \
334         ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
335 #define GET_NEXT_PRB_BLK_NUM(x) \
336         (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
337         ((x)->kactive_blk_num+1) : 0)
338
339 static struct packet_sock *pkt_sk(struct sock *sk)
340 {
341         return (struct packet_sock *)sk;
342 }
343
344 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
345 static void __fanout_link(struct sock *sk, struct packet_sock *po);
346
347 /* register_prot_hook must be invoked with the po->bind_lock held,
348  * or from a context in which asynchronous access to the packet
349  * socket is not possible (packet_create()).
350  */
351 static void register_prot_hook(struct sock *sk)
352 {
353         struct packet_sock *po = pkt_sk(sk);
354
355         if (!po->running) {
356                 if (po->fanout) {
357                         __fanout_link(sk, po);
358                 } else {
359                         dev_add_pack(&po->prot_hook);
360                         rcu_assign_pointer(po->cached_dev, po->prot_hook.dev);
361                 }
362
363                 sock_hold(sk);
364                 po->running = 1;
365         }
366 }
367
368 /* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
369  * held.   If the sync parameter is true, we will temporarily drop
370  * the po->bind_lock and do a synchronize_net to make sure no
371  * asynchronous packet processing paths still refer to the elements
372  * of po->prot_hook.  If the sync parameter is false, it is the
373  * caller's responsibility to take care of this.
374  */
375 static void __unregister_prot_hook(struct sock *sk, bool sync)
376 {
377         struct packet_sock *po = pkt_sk(sk);
378
379         po->running = 0;
380         if (po->fanout) {
381                 __fanout_unlink(sk, po);
382         } else {
383                 __dev_remove_pack(&po->prot_hook);
384                 RCU_INIT_POINTER(po->cached_dev, NULL);
385         }
386
387         __sock_put(sk);
388
389         if (sync) {
390                 spin_unlock(&po->bind_lock);
391                 synchronize_net();
392                 spin_lock(&po->bind_lock);
393         }
394 }
395
396 static void unregister_prot_hook(struct sock *sk, bool sync)
397 {
398         struct packet_sock *po = pkt_sk(sk);
399
400         if (po->running)
401                 __unregister_prot_hook(sk, sync);
402 }
403
404 static inline __pure struct page *pgv_to_page(void *addr)
405 {
406         if (is_vmalloc_addr(addr))
407                 return vmalloc_to_page(addr);
408         return virt_to_page(addr);
409 }
410
411 static void __packet_set_status(struct packet_sock *po, void *frame, int status)
412 {
413         union {
414                 struct tpacket_hdr *h1;
415                 struct tpacket2_hdr *h2;
416                 void *raw;
417         } h;
418
419         h.raw = frame;
420         switch (po->tp_version) {
421         case TPACKET_V1:
422                 h.h1->tp_status = status;
423                 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
424                 break;
425         case TPACKET_V2:
426                 h.h2->tp_status = status;
427                 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
428                 break;
429         case TPACKET_V3:
430         default:
431                 WARN(1, "TPACKET version not supported.\n");
432                 BUG();
433         }
434
435         smp_wmb();
436 }
437
438 static int __packet_get_status(struct packet_sock *po, void *frame)
439 {
440         union {
441                 struct tpacket_hdr *h1;
442                 struct tpacket2_hdr *h2;
443                 void *raw;
444         } h;
445
446         smp_rmb();
447
448         h.raw = frame;
449         switch (po->tp_version) {
450         case TPACKET_V1:
451                 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
452                 return h.h1->tp_status;
453         case TPACKET_V2:
454                 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
455                 return h.h2->tp_status;
456         case TPACKET_V3:
457         default:
458                 WARN(1, "TPACKET version not supported.\n");
459                 BUG();
460                 return 0;
461         }
462 }
463
464 static void *packet_lookup_frame(struct packet_sock *po,
465                 struct packet_ring_buffer *rb,
466                 unsigned int position,
467                 int status)
468 {
469         unsigned int pg_vec_pos, frame_offset;
470         union {
471                 struct tpacket_hdr *h1;
472                 struct tpacket2_hdr *h2;
473                 void *raw;
474         } h;
475
476         pg_vec_pos = position / rb->frames_per_block;
477         frame_offset = position % rb->frames_per_block;
478
479         h.raw = rb->pg_vec[pg_vec_pos].buffer +
480                 (frame_offset * rb->frame_size);
481
482         if (status != __packet_get_status(po, h.raw))
483                 return NULL;
484
485         return h.raw;
486 }
487
488 static void *packet_current_frame(struct packet_sock *po,
489                 struct packet_ring_buffer *rb,
490                 int status)
491 {
492         return packet_lookup_frame(po, rb, rb->head, status);
493 }
494
495 static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
496 {
497         del_timer_sync(&pkc->retire_blk_timer);
498 }
499
500 static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
501                 int tx_ring,
502                 struct sk_buff_head *rb_queue)
503 {
504         struct tpacket_kbdq_core *pkc;
505
506         pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
507
508         spin_lock_bh(&rb_queue->lock);
509         pkc->delete_blk_timer = 1;
510         spin_unlock_bh(&rb_queue->lock);
511
512         prb_del_retire_blk_timer(pkc);
513 }
514
515 static void prb_init_blk_timer(struct packet_sock *po,
516                 struct tpacket_kbdq_core *pkc,
517                 void (*func) (unsigned long))
518 {
519         init_timer(&pkc->retire_blk_timer);
520         pkc->retire_blk_timer.data = (long)po;
521         pkc->retire_blk_timer.function = func;
522         pkc->retire_blk_timer.expires = jiffies;
523 }
524
525 static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
526 {
527         struct tpacket_kbdq_core *pkc;
528
529         if (tx_ring)
530                 BUG();
531
532         pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
533         prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
534 }
535
536 static int prb_calc_retire_blk_tmo(struct packet_sock *po,
537                                 int blk_size_in_bytes)
538 {
539         struct net_device *dev;
540         unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
541         struct ethtool_cmd ecmd;
542         int err;
543
544         rtnl_lock();
545         dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
546         if (unlikely(!dev)) {
547                 rtnl_unlock();
548                 return DEFAULT_PRB_RETIRE_TOV;
549         }
550         err = __ethtool_get_settings(dev, &ecmd);
551         rtnl_unlock();
552         if (!err) {
553                 switch (ecmd.speed) {
554                 case SPEED_10000:
555                         msec = 1;
556                         div = 10000/1000;
557                         break;
558                 case SPEED_1000:
559                         msec = 1;
560                         div = 1000/1000;
561                         break;
562                 /*
563                  * If the link speed is so slow, you don't really
564                  * need to worry about perf anyway.
565                  */
566                 case SPEED_100:
567                 case SPEED_10:
568                 default:
569                         return DEFAULT_PRB_RETIRE_TOV;
570                 }
571         }
572
573         mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
574
575         if (div)
576                 mbits /= div;
577
578         tmo = mbits * msec;
579
580         if (div)
581                 return tmo+1;
582         return tmo;
583 }
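
/* Worked example: a 1MB block on a 1Gbps link hits the SPEED_1000 case
 * (msec = 1, div = 1), so mbits = (1MB * 8) / (1024 * 1024) = 8 and the
 * returned timeout is 8 * 1 + 1 = 9 ms, roughly the time needed to fill
 * one block at line rate.
 */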
584
585 static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
586                         union tpacket_req_u *req_u)
587 {
588         p1->feature_req_word = req_u->req3.tp_feature_req_word;
589 }
590
591 static void init_prb_bdqc(struct packet_sock *po,
592                         struct packet_ring_buffer *rb,
593                         struct pgv *pg_vec,
594                         union tpacket_req_u *req_u, int tx_ring)
595 {
596         struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
597         struct tpacket_block_desc *pbd;
598
599         memset(p1, 0x0, sizeof(*p1));
600
601         p1->knxt_seq_num = 1;
602         p1->pkbdq = pg_vec;
603         pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
604         p1->pkblk_start = (char *)pg_vec[0].buffer;
605         p1->kblk_size = req_u->req3.tp_block_size;
606         p1->knum_blocks = req_u->req3.tp_block_nr;
607         p1->hdrlen = po->tp_hdrlen;
608         p1->version = po->tp_version;
609         p1->last_kactive_blk_num = 0;
610         po->stats_u.stats3.tp_freeze_q_cnt = 0;
611         if (req_u->req3.tp_retire_blk_tov)
612                 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
613         else
614                 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
615                                                 req_u->req3.tp_block_size);
616         p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
617         p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
618
619         prb_init_ft_ops(p1, req_u);
620         prb_setup_retire_blk_timer(po, tx_ring);
621         prb_open_block(p1, pbd);
622 }
623
624 /*  Do NOT update the last_blk_num first.
625  *  Assumes sk_buff_head lock is held.
626  */
627 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
628 {
629         mod_timer(&pkc->retire_blk_timer,
630                         jiffies + pkc->tov_in_jiffies);
631         pkc->last_kactive_blk_num = pkc->kactive_blk_num;
632 }
633
634 /*
635  * Timer logic:
636  * 1) We refresh the timer only when we open a block.
637  *    By doing this we don't waste cycles refreshing the timer
638  *    on a packet-by-packet basis.
639  *
640  * With a 1MB block-size, on a 1Gbps line, it will take
641  * i) ~8 ms to fill a block + ii) memcpy etc.
642  * In this cut we are not accounting for the memcpy time.
643  *
644  * So, if the user sets the 'tmo' to 10ms then the timer
645  * will never fire while the block is still getting filled
646  * (which is what we want). However, the user could choose
647  * to close a block early and that's fine.
648  *
649  * But when the timer does fire, we check whether or not to refresh it.
650  * Since the tmo granularity is in msecs, it is not too expensive
651  * to refresh the timer, let's say every '8' msecs.
652  * Either the user can set the 'tmo' or we can derive it based on
653  * a) line-speed and b) block-size.
654  * prb_calc_retire_blk_tmo() calculates the tmo.
655  *
656  */
657 static void prb_retire_rx_blk_timer_expired(unsigned long data)
658 {
659         struct packet_sock *po = (struct packet_sock *)data;
660         struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
661         unsigned int frozen;
662         struct tpacket_block_desc *pbd;
663
664         spin_lock(&po->sk.sk_receive_queue.lock);
665
666         frozen = prb_queue_frozen(pkc);
667         pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
668
669         if (unlikely(pkc->delete_blk_timer))
670                 goto out;
671
672         /* We only need to plug the race when the block is partially filled.
673          * tpacket_rcv:
674          *              lock(); increment BLOCK_NUM_PKTS; unlock()
675          *              copy_bits() is in progress ...
676          *              timer fires on other cpu:
677          *              we can't retire the current block because copy_bits
678          *              is in progress.
679          *
680          */
681         if (BLOCK_NUM_PKTS(pbd)) {
682                 while (atomic_read(&pkc->blk_fill_in_prog)) {
683                         /* Waiting for skb_copy_bits to finish... */
684                         cpu_relax();
685                 }
686         }
687
688         if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
689                 if (!frozen) {
690                         prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
691                         if (!prb_dispatch_next_block(pkc, po))
692                                 goto refresh_timer;
693                         else
694                                 goto out;
695                 } else {
696                         /* Case 1. Queue was frozen because user-space was
697                          *         lagging behind.
698                          */
699                         if (prb_curr_blk_in_use(pkc, pbd)) {
700                                 /*
701                                  * Ok, user-space is still behind.
702                                  * So just refresh the timer.
703                                  */
704                                 goto refresh_timer;
705                         } else {
706                                /* Case 2. The queue was frozen, user-space
707                                 * caught up, now the link went idle && the
708                                 * timer fired. We don't have a block to
709                                 * close, so we open this block and restart
710                                 * the timer. Opening a block thaws the queue
711                                 * and refreshes the timer as side effects.
712                                 */
713                                 prb_open_block(pkc, pbd);
714                                 goto out;
715                         }
716                 }
717         }
718
719 refresh_timer:
720         _prb_refresh_rx_retire_blk_timer(pkc);
721
722 out:
723         spin_unlock(&po->sk.sk_receive_queue.lock);
724 }
725
726 static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
727                 struct tpacket_block_desc *pbd1, __u32 status)
728 {
729         /* Flush everything minus the block header */
730
731 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
732         u8 *start, *end;
733
734         start = (u8 *)pbd1;
735
736         /* Skip the block header (we know the header WILL fit in 4K) */
737         start += PAGE_SIZE;
738
739         end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
740         for (; start < end; start += PAGE_SIZE)
741                 flush_dcache_page(pgv_to_page(start));
742
743         smp_wmb();
744 #endif
745
746         /* Now update the block status. */
747
748         BLOCK_STATUS(pbd1) = status;
749
750         /* Flush the block header */
751
752 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
753         start = (u8 *)pbd1;
754         flush_dcache_page(pgv_to_page(start));
755
756         smp_wmb();
757 #endif
758 }
759
760 /*
761  * Side effect:
762  *
763  * 1) flush the block
764  * 2) Increment active_blk_num
765  *
766  * Note: we DON'T refresh the timer on purpose,
767  *      because the next block will almost always be opened.
768  */
769 static void prb_close_block(struct tpacket_kbdq_core *pkc1,
770                 struct tpacket_block_desc *pbd1,
771                 struct packet_sock *po, unsigned int stat)
772 {
773         __u32 status = TP_STATUS_USER | stat;
774
775         struct tpacket3_hdr *last_pkt;
776         struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
777
778         if (po->stats.tp_drops)
779                 status |= TP_STATUS_LOSING;
780
781         last_pkt = (struct tpacket3_hdr *)pkc1->prev;
782         last_pkt->tp_next_offset = 0;
783
784         /* Get the ts of the last pkt */
785         if (BLOCK_NUM_PKTS(pbd1)) {
786                 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
787                 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
788         } else {
789                 /* Ok, we tmo'd - so get the current time */
790                 struct timespec ts;
791                 getnstimeofday(&ts);
792                 h1->ts_last_pkt.ts_sec = ts.tv_sec;
793                 h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
794         }
795
796         smp_wmb();
797
798         /* Flush the block */
799         prb_flush_block(pkc1, pbd1, status);
800
801         pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
802 }
803
804 static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
805 {
806         pkc->reset_pending_on_curr_blk = 0;
807 }
808
809 /*
810  * Side effect of opening a block:
811  *
812  * 1) prb_queue is thawed.
813  * 2) retire_blk_timer is refreshed.
814  *
815  */
816 static void prb_open_block(struct tpacket_kbdq_core *pkc1,
817         struct tpacket_block_desc *pbd1)
818 {
819         struct timespec ts;
820         struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
821
822         smp_rmb();
823
824         /* We could have just memset this, but then we would lose the
825          * flexibility of making the priv area sticky
826          */
827         BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
828         BLOCK_NUM_PKTS(pbd1) = 0;
829         BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
830         getnstimeofday(&ts);
831         h1->ts_first_pkt.ts_sec = ts.tv_sec;
832         h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
833         pkc1->pkblk_start = (char *)pbd1;
834         pkc1->nxt_offset = (char *)(pkc1->pkblk_start +
835                                     BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
836         BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
837         BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
838         pbd1->version = pkc1->version;
839         pkc1->prev = pkc1->nxt_offset;
840         pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
841         prb_thaw_queue(pkc1);
842         _prb_refresh_rx_retire_blk_timer(pkc1);
843
844         smp_wmb();
845 }
846
847 /*
848  * Queue freeze logic:
849  * 1) Assume tp_block_nr = 8 blocks.
850  * 2) At time 't0', user opens Rx ring.
851  * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
852  * 4) user-space is either sleeping or processing block '0'.
853  * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
854  *    it will close block-7, loop around, and try to fill block '0'.
855  *    call-flow:
856  *    __packet_lookup_frame_in_block
857  *      prb_retire_current_block()
858  *      prb_dispatch_next_block()
859  *        |->(BLOCK_STATUS == USER) evaluates to true
860  *    5.1) Since block-0 is currently in-use, we just freeze the queue.
861  * 6) Now there are two cases:
862  *    6.1) Link goes idle right after the queue is frozen.
863  *         But remember, the last open_block() refreshed the timer.
864  *         When this timer expires, it will refresh itself so that we can
865  *         re-open block-0 in the near future.
866  *    6.2) Link is busy and keeps on receiving packets. This is a simple
867  *         case and __packet_lookup_frame_in_block will check if block-0
868  *         is free and can now be re-used.
869  */
870 static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
871                                   struct packet_sock *po)
872 {
873         pkc->reset_pending_on_curr_blk = 1;
874         po->stats_u.stats3.tp_freeze_q_cnt++;
875 }
876
877 #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
878
879 /*
880  * If the next block is free then we will dispatch it
881  * and return a good offset.
882  * Otherwise, we will freeze the queue;
883  * the caller must check the return value.
884  */
885 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
886                 struct packet_sock *po)
887 {
888         struct tpacket_block_desc *pbd;
889
890         smp_rmb();
891
892         /* 1. Get current block num */
893         pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
894
895         /* 2. If this block is currently in_use then freeze the queue */
896         if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
897                 prb_freeze_queue(pkc, po);
898                 return NULL;
899         }
900
901         /*
902          * 3.
903          * open this block and return the offset where the first packet
904          * needs to get stored.
905          */
906         prb_open_block(pkc, pbd);
907         return (void *)pkc->nxt_offset;
908 }
909
910 static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
911                 struct packet_sock *po, unsigned int status)
912 {
913         struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
914
915         /* retire/close the current block */
916         if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
917                 /*
918                  * Plug the case where copy_bits() is in progress on
919                  * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
920                  * have space to copy the pkt in the current block and
921                  * called prb_retire_current_block()
922                  *
923                  * We don't need to worry about the TMO case because
924                  * the timer-handler already handled this case.
925                  */
926                 if (!(status & TP_STATUS_BLK_TMO)) {
927                         while (atomic_read(&pkc->blk_fill_in_prog)) {
928                                 /* Waiting for skb_copy_bits to finish... */
929                                 cpu_relax();
930                         }
931                 }
932                 prb_close_block(pkc, pbd, po, status);
933                 return;
934         }
935 }
936
937 static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
938                                       struct tpacket_block_desc *pbd)
939 {
940         return TP_STATUS_USER & BLOCK_STATUS(pbd);
941 }
942
943 static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
944 {
945         return pkc->reset_pending_on_curr_blk;
946 }
947
948 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
949 {
950         struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
951         atomic_dec(&pkc->blk_fill_in_prog);
952 }
953
954 static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
955                         struct tpacket3_hdr *ppd)
956 {
957         ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
958 }
959
960 static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
961                         struct tpacket3_hdr *ppd)
962 {
963         ppd->hv1.tp_rxhash = 0;
964 }
965
966 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
967                         struct tpacket3_hdr *ppd)
968 {
969         if (vlan_tx_tag_present(pkc->skb)) {
970                 ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
971                 ppd->tp_status = TP_STATUS_VLAN_VALID;
972         } else {
973                 ppd->hv1.tp_vlan_tci = ppd->tp_status = 0;
974         }
975 }
976
977 static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
978                         struct tpacket3_hdr *ppd)
979 {
980         prb_fill_vlan_info(pkc, ppd);
981
982         if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
983                 prb_fill_rxhash(pkc, ppd);
984         else
985                 prb_clear_rxhash(pkc, ppd);
986 }
987
988 static void prb_fill_curr_block(char *curr,
989                                 struct tpacket_kbdq_core *pkc,
990                                 struct tpacket_block_desc *pbd,
991                                 unsigned int len)
992 {
993         struct tpacket3_hdr *ppd;
994
995         ppd  = (struct tpacket3_hdr *)curr;
996         ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
997         pkc->prev = curr;
998         pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
999         BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1000         BLOCK_NUM_PKTS(pbd) += 1;
1001         atomic_inc(&pkc->blk_fill_in_prog);
1002         prb_run_all_ft_ops(pkc, ppd);
1003 }
1004
1005 /* Assumes caller has the sk->rx_queue.lock */
1006 static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1007                                             struct sk_buff *skb,
1008                                                 int status,
1009                                             unsigned int len
1010                                             )
1011 {
1012         struct tpacket_kbdq_core *pkc;
1013         struct tpacket_block_desc *pbd;
1014         char *curr, *end;
1015
1016         pkc = GET_PBDQC_FROM_RB(((struct packet_ring_buffer *)&po->rx_ring));
1017         pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1018
1019         /* Queue is frozen when user space is lagging behind */
1020         if (prb_queue_frozen(pkc)) {
1021                 /*
1022                  * Check if the last block, which caused the queue to freeze,
1023                  * is still in use by user-space.
1024                  */
1025                 if (prb_curr_blk_in_use(pkc, pbd)) {
1026                         /* Can't record this packet */
1027                         return NULL;
1028                 } else {
1029                         /*
1030                          * Ok, the block was released by user-space.
1031                          * Now let's open that block.
1032                          * opening a block also thaws the queue.
1033                          * Thawing is a side effect.
1034                          */
1035                         prb_open_block(pkc, pbd);
1036                 }
1037         }
1038
1039         smp_mb();
1040         curr = pkc->nxt_offset;
1041         pkc->skb = skb;
1042         end = (char *) ((char *)pbd + pkc->kblk_size);
1043
1044         /* first try the current block */
1045         if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1046                 prb_fill_curr_block(curr, pkc, pbd, len);
1047                 return (void *)curr;
1048         }
1049
1050         /* Ok, close the current block */
1051         prb_retire_current_block(pkc, po, 0);
1052
1053         /* Now, try to dispatch the next block */
1054         curr = (char *)prb_dispatch_next_block(pkc, po);
1055         if (curr) {
1056                 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1057                 prb_fill_curr_block(curr, pkc, pbd, len);
1058                 return (void *)curr;
1059         }
1060
1061         /*
1062          * No free blocks are available. User-space hasn't caught up yet.
1063          * Queue was just frozen and now this packet will get dropped.
1064          */
1065         return NULL;
1066 }
1067
1068 static void *packet_current_rx_frame(struct packet_sock *po,
1069                                             struct sk_buff *skb,
1070                                             int status, unsigned int len)
1071 {
1072         char *curr = NULL;
1073         switch (po->tp_version) {
1074         case TPACKET_V1:
1075         case TPACKET_V2:
1076                 curr = packet_lookup_frame(po, &po->rx_ring,
1077                                         po->rx_ring.head, status);
1078                 return curr;
1079         case TPACKET_V3:
1080                 return __packet_lookup_frame_in_block(po, skb, status, len);
1081         default:
1082                 WARN(1, "TPACKET version not supported\n");
1083                 BUG();
1084                 return 0;
1085         }
1086 }
1087
1088 static void *prb_lookup_block(struct packet_sock *po,
1089                                      struct packet_ring_buffer *rb,
1090                                      unsigned int previous,
1091                                      int status)
1092 {
1093         struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
1094         struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, previous);
1095
1096         if (status != BLOCK_STATUS(pbd))
1097                 return NULL;
1098         return pbd;
1099 }
1100
1101 static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1102 {
1103         unsigned int prev;
1104         if (rb->prb_bdqc.kactive_blk_num)
1105                 prev = rb->prb_bdqc.kactive_blk_num-1;
1106         else
1107                 prev = rb->prb_bdqc.knum_blocks-1;
1108         return prev;
1109 }
1110
1111 /* Assumes caller has held the rx_queue.lock */
1112 static void *__prb_previous_block(struct packet_sock *po,
1113                                          struct packet_ring_buffer *rb,
1114                                          int status)
1115 {
1116         unsigned int previous = prb_previous_blk_num(rb);
1117         return prb_lookup_block(po, rb, previous, status);
1118 }
1119
1120 static void *packet_previous_rx_frame(struct packet_sock *po,
1121                                              struct packet_ring_buffer *rb,
1122                                              int status)
1123 {
1124         if (po->tp_version <= TPACKET_V2)
1125                 return packet_previous_frame(po, rb, status);
1126
1127         return __prb_previous_block(po, rb, status);
1128 }
1129
1130 static void packet_increment_rx_head(struct packet_sock *po,
1131                                             struct packet_ring_buffer *rb)
1132 {
1133         switch (po->tp_version) {
1134         case TPACKET_V1:
1135         case TPACKET_V2:
1136                 return packet_increment_head(rb);
1137         case TPACKET_V3:
1138         default:
1139                 WARN(1, "TPACKET version not supported.\n");
1140                 BUG();
1141                 return;
1142         }
1143 }
1144
1145 static void *packet_previous_frame(struct packet_sock *po,
1146                 struct packet_ring_buffer *rb,
1147                 int status)
1148 {
1149         unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1150         return packet_lookup_frame(po, rb, previous, status);
1151 }
1152
1153 static void packet_increment_head(struct packet_ring_buffer *buff)
1154 {
1155         buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1156 }
1157
1158 static void packet_sock_destruct(struct sock *sk)
1159 {
1160         skb_queue_purge(&sk->sk_error_queue);
1161
1162         WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1163         WARN_ON(atomic_read(&sk->sk_wmem_alloc));
1164
1165         if (!sock_flag(sk, SOCK_DEAD)) {
1166                 pr_err("Attempt to release alive packet socket: %p\n", sk);
1167                 return;
1168         }
1169
1170         sk_refcnt_debug_dec(sk);
1171 }
1172
1173 static struct sock *fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
1174 {
1175         u32 idx, hash = skb->rxhash;
1176
1177         idx = ((u64)hash * num) >> 32;
1178
1179         return f->arr[idx];
1180 }
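
/* The ((u64)hash * num) >> 32 above maps a 32-bit rxhash uniformly onto
 * [0, num) without a modulo, e.g. hash = 0x80000000 with num = 4 yields
 * idx = 2.
 */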
1181
1182 static struct sock *fanout_demux_lb(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
1183 {
1184         unsigned int val = atomic_inc_return(&f->rr_cur);
1185
1186         return f->arr[val % num];
1187 }
1188
1189 static struct sock *fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
1190 {
1191         unsigned int cpu = smp_processor_id();
1192
1193         return f->arr[cpu % num];
1194 }
1195
1196 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1197                              struct packet_type *pt, struct net_device *orig_dev)
1198 {
1199         struct packet_fanout *f = pt->af_packet_priv;
1200         unsigned int num = ACCESS_ONCE(f->num_members);
1201         struct packet_sock *po;
1202         struct sock *sk;
1203
1204         if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
1205             !num) {
1206                 kfree_skb(skb);
1207                 return 0;
1208         }
1209
1210         switch (f->type) {
1211         case PACKET_FANOUT_HASH:
1212         default:
1213                 if (f->defrag) {
1214                         skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
1215                         if (!skb)
1216                                 return 0;
1217                 }
1218                 skb_get_rxhash(skb);
1219                 sk = fanout_demux_hash(f, skb, num);
1220                 break;
1221         case PACKET_FANOUT_LB:
1222                 sk = fanout_demux_lb(f, skb, num);
1223                 break;
1224         case PACKET_FANOUT_CPU:
1225                 sk = fanout_demux_cpu(f, skb, num);
1226                 break;
1227         }
1228
1229         po = pkt_sk(sk);
1230
1231         return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1232 }
1233
1234 static DEFINE_MUTEX(fanout_mutex);
1235 static LIST_HEAD(fanout_list);
1236
1237 static void __fanout_link(struct sock *sk, struct packet_sock *po)
1238 {
1239         struct packet_fanout *f = po->fanout;
1240
1241         spin_lock(&f->lock);
1242         f->arr[f->num_members] = sk;
1243         smp_wmb();
1244         f->num_members++;
1245         spin_unlock(&f->lock);
1246 }
1247
1248 static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1249 {
1250         struct packet_fanout *f = po->fanout;
1251         int i;
1252
1253         spin_lock(&f->lock);
1254         for (i = 0; i < f->num_members; i++) {
1255                 if (f->arr[i] == sk)
1256                         break;
1257         }
1258         BUG_ON(i >= f->num_members);
1259         f->arr[i] = f->arr[f->num_members - 1];
1260         f->num_members--;
1261         spin_unlock(&f->lock);
1262 }
1263
1264 bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
1265 {
1266         if (sk->sk_family != PF_PACKET)
1267                 return false;
1268
1269         return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1270 }
1271
1272 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1273 {
1274         struct packet_sock *po = pkt_sk(sk);
1275         struct packet_fanout *f, *match;
1276         u8 type = type_flags & 0xff;
1277         u8 defrag = (type_flags & PACKET_FANOUT_FLAG_DEFRAG) ? 1 : 0;
1278         int err;
1279
1280         switch (type) {
1281         case PACKET_FANOUT_HASH:
1282         case PACKET_FANOUT_LB:
1283         case PACKET_FANOUT_CPU:
1284                 break;
1285         default:
1286                 return -EINVAL;
1287         }
1288
1289         if (!po->running)
1290                 return -EINVAL;
1291
1292         if (po->fanout)
1293                 return -EALREADY;
1294
1295         mutex_lock(&fanout_mutex);
1296         match = NULL;
1297         list_for_each_entry(f, &fanout_list, list) {
1298                 if (f->id == id &&
1299                     read_pnet(&f->net) == sock_net(sk)) {
1300                         match = f;
1301                         break;
1302                 }
1303         }
1304         err = -EINVAL;
1305         if (match && match->defrag != defrag)
1306                 goto out;
1307         if (!match) {
1308                 err = -ENOMEM;
1309                 match = kzalloc(sizeof(*match), GFP_KERNEL);
1310                 if (!match)
1311                         goto out;
1312                 write_pnet(&match->net, sock_net(sk));
1313                 match->id = id;
1314                 match->type = type;
1315                 match->defrag = defrag;
1316                 atomic_set(&match->rr_cur, 0);
1317                 INIT_LIST_HEAD(&match->list);
1318                 spin_lock_init(&match->lock);
1319                 atomic_set(&match->sk_ref, 0);
1320                 match->prot_hook.type = po->prot_hook.type;
1321                 match->prot_hook.dev = po->prot_hook.dev;
1322                 match->prot_hook.func = packet_rcv_fanout;
1323                 match->prot_hook.af_packet_priv = match;
1324                 match->prot_hook.id_match = match_fanout_group;
1325                 dev_add_pack(&match->prot_hook);
1326                 list_add(&match->list, &fanout_list);
1327         }
1328         err = -EINVAL;
1329         if (match->type == type &&
1330             match->prot_hook.type == po->prot_hook.type &&
1331             match->prot_hook.dev == po->prot_hook.dev) {
1332                 err = -ENOSPC;
1333                 if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1334                         __dev_remove_pack(&po->prot_hook);
1335                         po->fanout = match;
1336                         atomic_inc(&match->sk_ref);
1337                         __fanout_link(sk, po);
1338                         err = 0;
1339                 }
1340         }
1341 out:
1342         mutex_unlock(&fanout_mutex);
1343         return err;
1344 }
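
/* Illustrative user-space sketch: a bound packet socket joins a fanout
 * group by encoding the group id in the low 16 bits and the mode (plus
 * flags) in the high 16 bits of the option value:
 *
 *   int arg = (PACKET_FANOUT_LB << 16) | 42;     // load-balance, group 42
 *   setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 */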
1345
1346 static void fanout_release(struct sock *sk)
1347 {
1348         struct packet_sock *po = pkt_sk(sk);
1349         struct packet_fanout *f;
1350
1351         f = po->fanout;
1352         if (!f)
1353                 return;
1354
1355         po->fanout = NULL;
1356
1357         mutex_lock(&fanout_mutex);
1358         if (atomic_dec_and_test(&f->sk_ref)) {
1359                 list_del(&f->list);
1360                 dev_remove_pack(&f->prot_hook);
1361                 kfree(f);
1362         }
1363         mutex_unlock(&fanout_mutex);
1364 }
1365
1366 static const struct proto_ops packet_ops;
1367
1368 static const struct proto_ops packet_ops_spkt;
1369
1370 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1371                            struct packet_type *pt, struct net_device *orig_dev)
1372 {
1373         struct sock *sk;
1374         struct sockaddr_pkt *spkt;
1375
1376         /*
1377          *      When we registered the protocol we saved the socket in the data
1378          *      field for just this event.
1379          */
1380
1381         sk = pt->af_packet_priv;
1382
1383         /*
1384          *      Yank back the headers [hope the device set this
1385          *      right or kerboom...]
1386          *
1387          *      Incoming packets have ll header pulled,
1388          *      push it back.
1389          *
1390          *      For outgoing ones skb->data == skb_mac_header(skb)
1391          *      so that this procedure is a noop.
1392          */
1393
1394         if (skb->pkt_type == PACKET_LOOPBACK)
1395                 goto out;
1396
1397         if (!net_eq(dev_net(dev), sock_net(sk)))
1398                 goto out;
1399
1400         skb = skb_share_check(skb, GFP_ATOMIC);
1401         if (skb == NULL)
1402                 goto oom;
1403
1404         /* drop any routing info */
1405         skb_dst_drop(skb);
1406
1407         /* drop conntrack reference */
1408         nf_reset(skb);
1409
1410         spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1411
1412         skb_push(skb, skb->data - skb_mac_header(skb));
1413
1414         /*
1415          *      The SOCK_PACKET socket receives _all_ frames.
1416          */
1417
1418         spkt->spkt_family = dev->type;
1419         strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1420         spkt->spkt_protocol = skb->protocol;
1421
1422         /*
1423          *      Charge the memory to the socket. This is done specifically
1424          *      to prevent sockets from using up all the memory.
1425          */
1426
1427         if (sock_queue_rcv_skb(sk, skb) == 0)
1428                 return 0;
1429
1430 out:
1431         kfree_skb(skb);
1432 oom:
1433         return 0;
1434 }
1435
1436
1437 /*
1438  *      Output a raw packet to a device layer. This bypasses all the other
1439  *      protocol layers and you must therefore supply it with a complete frame
1440  */
1441
1442 static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
1443                                struct msghdr *msg, size_t len)
1444 {
1445         struct sock *sk = sock->sk;
1446         struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
1447         struct sk_buff *skb = NULL;
1448         struct net_device *dev;
1449         __be16 proto = 0;
1450         int err;
1451
1452         /*
1453          *      Get and verify the address.
1454          */
1455
1456         if (saddr) {
1457                 if (msg->msg_namelen < sizeof(struct sockaddr))
1458                         return -EINVAL;
1459                 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1460                         proto = saddr->spkt_protocol;
1461         } else
1462                 return -ENOTCONN;       /* SOCK_PACKET must be sent giving an address */
1463
1464         /*
1465          *      Find the device first to size check it
1466          */
1467
1468         saddr->spkt_device[13] = 0;
1469 retry:
1470         rcu_read_lock();
1471         dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1472         err = -ENODEV;
1473         if (dev == NULL)
1474                 goto out_unlock;
1475
1476         err = -ENETDOWN;
1477         if (!(dev->flags & IFF_UP))
1478                 goto out_unlock;
1479
1480         /*
1481          * You may not queue a frame bigger than the mtu. This is the lowest level
1482          * raw protocol and you must do your own fragmentation at this level.
1483          */
1484
1485         err = -EMSGSIZE;
1486         if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN)
1487                 goto out_unlock;
1488
1489         if (!skb) {
1490                 size_t reserved = LL_RESERVED_SPACE(dev);
1491                 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1492
1493                 rcu_read_unlock();
1494                 skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
1495                 if (skb == NULL)
1496                         return -ENOBUFS;
1497                 /* FIXME: Save some space for broken drivers that write a hard
1498                  * header at transmission time by themselves. PPP is the notable
1499                  * one here. This should really be fixed at the driver level.
1500                  */
1501                 skb_reserve(skb, reserved);
1502                 skb_reset_network_header(skb);
1503
1504                 /* Try to align data part correctly */
1505                 if (hhlen) {
1506                         skb->data -= hhlen;
1507                         skb->tail -= hhlen;
1508                         if (len < hhlen)
1509                                 skb_reset_network_header(skb);
1510                 }
1511                 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1512                 if (err)
1513                         goto out_free;
1514                 goto retry;
1515         }
1516
1517         if (len > (dev->mtu + dev->hard_header_len)) {
1518                 /* Earlier code assumed this would be a VLAN pkt,
1519                  * double-check this now that we have the actual
1520                  * packet in hand.
1521                  */
1522                 struct ethhdr *ehdr;
1523                 skb_reset_mac_header(skb);
1524                 ehdr = eth_hdr(skb);
1525                 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
1526                         err = -EMSGSIZE;
1527                         goto out_unlock;
1528                 }
1529         }
1530
1531         skb->protocol = proto;
1532         skb->dev = dev;
1533         skb->priority = sk->sk_priority;
1534         skb->mark = sk->sk_mark;
1535         err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
1536         if (err < 0)
1537                 goto out_unlock;
1538
1539         dev_queue_xmit(skb);
1540         rcu_read_unlock();
1541         return len;
1542
1543 out_unlock:
1544         rcu_read_unlock();
1545 out_free:
1546         kfree_skb(skb);
1547         return err;
1548 }
1549
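/*
 * Run the socket's attached BPF filter, if any, under the RCU read lock.
 * Returns the number of bytes of the packet to keep (the snap length);
 * zero means drop.
 */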
1550 static unsigned int run_filter(const struct sk_buff *skb,
1551                                       const struct sock *sk,
1552                                       unsigned int res)
1553 {
1554         struct sk_filter *filter;
1555
1556         rcu_read_lock();
1557         filter = rcu_dereference(sk->sk_filter);
1558         if (filter != NULL)
1559                 res = SK_RUN_FILTER(filter, skb);
1560         rcu_read_unlock();
1561
1562         return res;
1563 }
1564
1565 /*
1566  * This function clones the skb lazily, in the hope that most packets
1567  * are discarded by BPF.
1568  *
1569  * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
1570  * and skb->cb are mangled. This works because (and until) packets
1571  * falling here are owned by the current CPU. Output packets are cloned
1572  * by dev_queue_xmit_nit(), and input packets are processed by net_bh
1573  * sequentially, so if we restore the skb to its original state on exit,
1574  * we will not harm anyone.
1575  */
1576
1577 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1578                       struct packet_type *pt, struct net_device *orig_dev)
1579 {
1580         struct sock *sk;
1581         struct sockaddr_ll *sll;
1582         struct packet_sock *po;
1583         u8 *skb_head = skb->data;
1584         int skb_len = skb->len;
1585         unsigned int snaplen, res;
1586
1587         if (skb->pkt_type == PACKET_LOOPBACK)
1588                 goto drop;
1589
1590         sk = pt->af_packet_priv;
1591         po = pkt_sk(sk);
1592
1593         if (!net_eq(dev_net(dev), sock_net(sk)))
1594                 goto drop;
1595
1596         skb->dev = dev;
1597
1598         if (dev->header_ops) {
1599                 /* The device has an explicit notion of ll header,
1600                  * exported to higher levels.
1601                  *
1602                  * Otherwise, the device hides the details of its frame
1603                  * structure, so that the corresponding packet head is
1604                  * never delivered to the user.
1605                  */
1606                 if (sk->sk_type != SOCK_DGRAM)
1607                         skb_push(skb, skb->data - skb_mac_header(skb));
1608                 else if (skb->pkt_type == PACKET_OUTGOING) {
1609                         /* Special case: outgoing packets have ll header at head */
1610                         skb_pull(skb, skb_network_offset(skb));
1611                 }
1612         }
1613
1614         snaplen = skb->len;
1615
1616         res = run_filter(skb, sk, snaplen);
1617         if (!res)
1618                 goto drop_n_restore;
1619         if (snaplen > res)
1620                 snaplen = res;
1621
1622         if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
1623                 goto drop_n_acct;
1624
1625         if (skb_shared(skb)) {
1626                 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1627                 if (nskb == NULL)
1628                         goto drop_n_acct;
1629
1630                 if (skb_head != skb->data) {
1631                         skb->data = skb_head;
1632                         skb->len = skb_len;
1633                 }
1634                 kfree_skb(skb);
1635                 skb = nskb;
1636         }
1637
1638         BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
1639                      sizeof(skb->cb));
1640
1641         sll = &PACKET_SKB_CB(skb)->sa.ll;
1642         sll->sll_family = AF_PACKET;
1643         sll->sll_hatype = dev->type;
1644         sll->sll_protocol = skb->protocol;
1645         sll->sll_pkttype = skb->pkt_type;
1646         if (unlikely(po->origdev))
1647                 sll->sll_ifindex = orig_dev->ifindex;
1648         else
1649                 sll->sll_ifindex = dev->ifindex;
1650
1651         sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
1652
1653         PACKET_SKB_CB(skb)->origlen = skb->len;
1654
1655         if (pskb_trim(skb, snaplen))
1656                 goto drop_n_acct;
1657
1658         skb_set_owner_r(skb, sk);
1659         skb->dev = NULL;
1660         skb_dst_drop(skb);
1661
1662         /* drop conntrack reference */
1663         nf_reset(skb);
1664
1665         spin_lock(&sk->sk_receive_queue.lock);
1666         po->stats.tp_packets++;
1667         skb->dropcount = atomic_read(&sk->sk_drops);
1668         __skb_queue_tail(&sk->sk_receive_queue, skb);
1669         spin_unlock(&sk->sk_receive_queue.lock);
1670         sk->sk_data_ready(sk, skb->len);
1671         return 0;
1672
1673 drop_n_acct:
1674         spin_lock(&sk->sk_receive_queue.lock);
1675         po->stats.tp_drops++;
1676         atomic_inc(&sk->sk_drops);
1677         spin_unlock(&sk->sk_receive_queue.lock);
1678
1679 drop_n_restore:
1680         if (skb_head != skb->data && skb_shared(skb)) {
1681                 skb->data = skb_head;
1682                 skb->len = skb_len;
1683         }
1684 drop:
1685         consume_skb(skb);
1686         return 0;
1687 }
1688
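/*
 * Ring-buffer counterpart of packet_rcv(): instead of queueing the skb,
 * copy up to snaplen bytes into the next free frame of the mmap()ed RX
 * ring and fill in the tpacket header matching po->tp_version. If the
 * frame is too small and copy_thresh permits, the skb is also queued in
 * full (TP_STATUS_COPY) so recvmsg() can deliver the rest; if the ring
 * is full, the packet is dropped and accounted in tp_drops.
 */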
1689 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1690                        struct packet_type *pt, struct net_device *orig_dev)
1691 {
1692         struct sock *sk;
1693         struct packet_sock *po;
1694         struct sockaddr_ll *sll;
1695         union {
1696                 struct tpacket_hdr *h1;
1697                 struct tpacket2_hdr *h2;
1698                 struct tpacket3_hdr *h3;
1699                 void *raw;
1700         } h;
1701         u8 *skb_head = skb->data;
1702         int skb_len = skb->len;
1703         unsigned int snaplen, res;
1704         unsigned long status = TP_STATUS_USER;
1705         unsigned short macoff, netoff, hdrlen;
1706         struct sk_buff *copy_skb = NULL;
1707         struct timeval tv;
1708         struct timespec ts;
1709         struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
1710
1711         if (skb->pkt_type == PACKET_LOOPBACK)
1712                 goto drop;
1713
1714         sk = pt->af_packet_priv;
1715         po = pkt_sk(sk);
1716
1717         if (!net_eq(dev_net(dev), sock_net(sk)))
1718                 goto drop;
1719
1720         if (dev->header_ops) {
1721                 if (sk->sk_type != SOCK_DGRAM)
1722                         skb_push(skb, skb->data - skb_mac_header(skb));
1723                 else if (skb->pkt_type == PACKET_OUTGOING) {
1724                         /* Special case: outgoing packets have ll header at head */
1725                         skb_pull(skb, skb_network_offset(skb));
1726                 }
1727         }
1728
1729         if (skb->ip_summed == CHECKSUM_PARTIAL)
1730                 status |= TP_STATUS_CSUMNOTREADY;
1731
1732         snaplen = skb->len;
1733
1734         res = run_filter(skb, sk, snaplen);
1735         if (!res)
1736                 goto drop_n_restore;
1737         if (snaplen > res)
1738                 snaplen = res;
1739
1740         if (sk->sk_type == SOCK_DGRAM) {
1741                 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
1742                                   po->tp_reserve;
1743         } else {
1744                 unsigned maclen = skb_network_offset(skb);
1745                 netoff = TPACKET_ALIGN(po->tp_hdrlen +
1746                                        (maclen < 16 ? 16 : maclen)) +
1747                         po->tp_reserve;
1748                 macoff = netoff - maclen;
1749         }
1750         if (po->tp_version <= TPACKET_V2) {
1751                 if (macoff + snaplen > po->rx_ring.frame_size) {
1752                         if (po->copy_thresh &&
1753                             atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1754                                 if (skb_shared(skb)) {
1755                                         copy_skb = skb_clone(skb, GFP_ATOMIC);
1756                                 } else {
1757                                         copy_skb = skb_get(skb);
1758                                         skb_head = skb->data;
1759                                 }
1760                                 if (copy_skb)
1761                                         skb_set_owner_r(copy_skb, sk);
1762                         }
1763                         snaplen = po->rx_ring.frame_size - macoff;
1764                         if ((int)snaplen < 0)
1765                                 snaplen = 0;
1766                 }
1767         }
1768         spin_lock(&sk->sk_receive_queue.lock);
1769         h.raw = packet_current_rx_frame(po, skb,
1770                                         TP_STATUS_KERNEL, (macoff+snaplen));
1771         if (!h.raw)
1772                 goto ring_is_full;
1773         if (po->tp_version <= TPACKET_V2) {
1774                 packet_increment_rx_head(po, &po->rx_ring);
1775                 /*
1776                  * LOSING will be reported until you read the stats,
1777                  * because it's COR - Clear On Read.
1778                  * Anyway, this is done for V1/V2 only, as V3 doesn't
1779                  * need it at the packet level.
1780                  */
1781                 if (po->stats.tp_drops)
1782                         status |= TP_STATUS_LOSING;
1783         }
1784         po->stats.tp_packets++;
1785         if (copy_skb) {
1786                 status |= TP_STATUS_COPY;
1787                 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
1788         }
1789         spin_unlock(&sk->sk_receive_queue.lock);
1790
1791         skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
1792
1793         switch (po->tp_version) {
1794         case TPACKET_V1:
1795                 h.h1->tp_len = skb->len;
1796                 h.h1->tp_snaplen = snaplen;
1797                 h.h1->tp_mac = macoff;
1798                 h.h1->tp_net = netoff;
1799                 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1800                                 && shhwtstamps->syststamp.tv64)
1801                         tv = ktime_to_timeval(shhwtstamps->syststamp);
1802                 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1803                                 && shhwtstamps->hwtstamp.tv64)
1804                         tv = ktime_to_timeval(shhwtstamps->hwtstamp);
1805                 else if (skb->tstamp.tv64)
1806                         tv = ktime_to_timeval(skb->tstamp);
1807                 else
1808                         do_gettimeofday(&tv);
1809                 h.h1->tp_sec = tv.tv_sec;
1810                 h.h1->tp_usec = tv.tv_usec;
1811                 hdrlen = sizeof(*h.h1);
1812                 break;
1813         case TPACKET_V2:
1814                 h.h2->tp_len = skb->len;
1815                 h.h2->tp_snaplen = snaplen;
1816                 h.h2->tp_mac = macoff;
1817                 h.h2->tp_net = netoff;
1818                 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1819                                 && shhwtstamps->syststamp.tv64)
1820                         ts = ktime_to_timespec(shhwtstamps->syststamp);
1821                 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1822                                 && shhwtstamps->hwtstamp.tv64)
1823                         ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1824                 else if (skb->tstamp.tv64)
1825                         ts = ktime_to_timespec(skb->tstamp);
1826                 else
1827                         getnstimeofday(&ts);
1828                 h.h2->tp_sec = ts.tv_sec;
1829                 h.h2->tp_nsec = ts.tv_nsec;
1830                 if (vlan_tx_tag_present(skb)) {
1831                         h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
1832                         status |= TP_STATUS_VLAN_VALID;
1833                 } else {
1834                         h.h2->tp_vlan_tci = 0;
1835                 }
1836                 h.h2->tp_padding = 0;
1837                 hdrlen = sizeof(*h.h2);
1838                 break;
1839         case TPACKET_V3:
1840                 /* tp_next_offset and the vlan fields are already
1841                  * populated above, so don't clear them here.
1842                  */
1843                 h.h3->tp_status |= status;
1844                 h.h3->tp_len = skb->len;
1845                 h.h3->tp_snaplen = snaplen;
1846                 h.h3->tp_mac = macoff;
1847                 h.h3->tp_net = netoff;
1848                 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1849                                 && shhwtstamps->syststamp.tv64)
1850                         ts = ktime_to_timespec(shhwtstamps->syststamp);
1851                 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1852                                 && shhwtstamps->hwtstamp.tv64)
1853                         ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1854                 else if (skb->tstamp.tv64)
1855                         ts = ktime_to_timespec(skb->tstamp);
1856                 else
1857                         getnstimeofday(&ts);
1858                 h.h3->tp_sec  = ts.tv_sec;
1859                 h.h3->tp_nsec = ts.tv_nsec;
1860                 hdrlen = sizeof(*h.h3);
1861                 break;
1862         default:
1863                 BUG();
1864         }
1865
1866         sll = h.raw + TPACKET_ALIGN(hdrlen);
1867         sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
1868         sll->sll_family = AF_PACKET;
1869         sll->sll_hatype = dev->type;
1870         sll->sll_protocol = skb->protocol;
1871         sll->sll_pkttype = skb->pkt_type;
1872         if (unlikely(po->origdev))
1873                 sll->sll_ifindex = orig_dev->ifindex;
1874         else
1875                 sll->sll_ifindex = dev->ifindex;
1876
1877         smp_mb();
1878 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
1879         {
1880                 u8 *start, *end;
1881
1882                 if (po->tp_version <= TPACKET_V2) {
1883                         end = (u8 *)PAGE_ALIGN((unsigned long)h.raw
1884                                 + macoff + snaplen);
1885                         for (start = h.raw; start < end; start += PAGE_SIZE)
1886                                 flush_dcache_page(pgv_to_page(start));
1887                 }
1888                 smp_wmb();
1889         }
1890 #endif
1891         if (po->tp_version <= TPACKET_V2)
1892                 __packet_set_status(po, h.raw, status);
1893         else
1894                 prb_clear_blk_fill_status(&po->rx_ring);
1895
1896         sk->sk_data_ready(sk, 0);
1897
1898 drop_n_restore:
1899         if (skb_head != skb->data && skb_shared(skb)) {
1900                 skb->data = skb_head;
1901                 skb->len = skb_len;
1902         }
1903 drop:
1904         kfree_skb(skb);
1905         return 0;
1906
1907 ring_is_full:
1908         po->stats.tp_drops++;
1909         spin_unlock(&sk->sk_receive_queue.lock);
1910
1911         sk->sk_data_ready(sk, 0);
1912         kfree_skb(copy_skb);
1913         goto drop_n_restore;
1914 }
1915
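/*
 * skb destructor for TX ring frames: once the device has consumed the
 * skb, hand the frame back to userspace by marking it TP_STATUS_AVAILABLE.
 */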
1916 static void tpacket_destruct_skb(struct sk_buff *skb)
1917 {
1918         struct packet_sock *po = pkt_sk(skb->sk);
1919         void *ph;
1920
1921         if (likely(po->tx_ring.pg_vec)) {
1922                 ph = skb_shinfo(skb)->destructor_arg;
1923                 BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
1924                 atomic_dec(&po->tx_ring.pending);
1925                 __packet_set_status(po, ph, TP_STATUS_AVAILABLE);
1926         }
1927
1928         sock_wfree(skb);
1929 }
1930
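/*
 * Build an skb around one TX ring frame without copying the payload: the
 * hard header (if any) goes into the linear area, and the rest of the
 * frame is attached page by page as paged fragments. Returns the frame's
 * tp_len on success or a negative error.
 */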
1931 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
1932                 void *frame, struct net_device *dev, int size_max,
1933                 __be16 proto, unsigned char *addr)
1934 {
1935         union {
1936                 struct tpacket_hdr *h1;
1937                 struct tpacket2_hdr *h2;
1938                 void *raw;
1939         } ph;
1940         int to_write, offset, len, tp_len, nr_frags, len_max;
1941         struct socket *sock = po->sk.sk_socket;
1942         struct page *page;
1943         void *data;
1944         int err;
1945
1946         ph.raw = frame;
1947
1948         skb->protocol = proto;
1949         skb->dev = dev;
1950         skb->priority = po->sk.sk_priority;
1951         skb->mark = po->sk.sk_mark;
1952         skb_shinfo(skb)->destructor_arg = ph.raw;
1953
1954         switch (po->tp_version) {
1955         case TPACKET_V2:
1956                 tp_len = ph.h2->tp_len;
1957                 break;
1958         default:
1959                 tp_len = ph.h1->tp_len;
1960                 break;
1961         }
1962         if (unlikely(tp_len > size_max)) {
1963                 pr_err("packet size is too big (%d > %d)\n", tp_len, size_max);
1964                 return -EMSGSIZE;
1965         }
1966
1967         skb_reserve(skb, LL_RESERVED_SPACE(dev));
1968         skb_reset_network_header(skb);
1969
1970         data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
1971         to_write = tp_len;
1972
1973         if (sock->type == SOCK_DGRAM) {
1974                 err = dev_hard_header(skb, dev, ntohs(proto), addr,
1975                                 NULL, tp_len);
1976                 if (unlikely(err < 0))
1977                         return -EINVAL;
1978         } else if (dev->hard_header_len) {
1979                 /* net devices don't like an empty head */
1980                 if (unlikely(tp_len <= dev->hard_header_len)) {
1981                         pr_err("packet size is too short (%d <= %d)\n",
1982                                tp_len, dev->hard_header_len);
1983                         return -EINVAL;
1984                 }
1985
1986                 skb_push(skb, dev->hard_header_len);
1987                 err = skb_store_bits(skb, 0, data,
1988                                 dev->hard_header_len);
1989                 if (unlikely(err))
1990                         return err;
1991
1992                 data += dev->hard_header_len;
1993                 to_write -= dev->hard_header_len;
1994         }
1995
1996         err = -EFAULT;
1997         offset = offset_in_page(data);
1998         len_max = PAGE_SIZE - offset;
1999         len = ((to_write > len_max) ? len_max : to_write);
2000
2001         skb->data_len = to_write;
2002         skb->len += to_write;
2003         skb->truesize += to_write;
2004         atomic_add(to_write, &po->sk.sk_wmem_alloc);
2005
2006         while (likely(to_write)) {
2007                 nr_frags = skb_shinfo(skb)->nr_frags;
2008
2009                 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2010                         pr_err("packet exceeds the number of skb frags (%lu)\n",
2011                                MAX_SKB_FRAGS);
2012                         return -EFAULT;
2013                 }
2014
2015                 page = pgv_to_page(data);
2016                 data += len;
2017                 flush_dcache_page(page);
2018                 get_page(page);
2019                 skb_fill_page_desc(skb, nr_frags, page, offset, len);
2020                 to_write -= len;
2021                 offset = 0;
2022                 len_max = PAGE_SIZE;
2023                 len = ((to_write > len_max) ? len_max : to_write);
2024         }
2025
2026         return tp_len;
2027 }
2028
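/*
 * Take a reference on the socket's cached default device, used by sends
 * that do not name a device; returns NULL if none is set.
 */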
2029 static struct net_device *packet_cached_dev_get(struct packet_sock *po)
2030 {
2031         struct net_device *dev;
2032
2033         rcu_read_lock();
2034         dev = rcu_dereference(po->cached_dev);
2035         if (dev)
2036                 dev_hold(dev);
2037         rcu_read_unlock();
2038
2039         return dev;
2040 }
2041
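/*
 * Transmit every TX ring frame marked TP_STATUS_SEND_REQUEST. From
 * userspace the protocol looks roughly like this (an illustrative
 * sketch only; "ring", "frame_nr" and "frame_size" are placeholders):
 *
 *	struct tpacket_hdr *hdr = ring + frame_nr * frame_size;
 *
 *	if (hdr->tp_status == TP_STATUS_AVAILABLE) {
 *		... fill the payload and set hdr->tp_len ...
 *		hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *		send(fd, NULL, 0, 0);
 *	}
 *
 * Frames return to TP_STATUS_AVAILABLE via tpacket_destruct_skb(); a
 * malformed frame is marked TP_STATUS_WRONG_FORMAT and aborts the send
 * unless tp_loss is set, in which case it is skipped.
 */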
2042 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2043 {
2044         struct sk_buff *skb;
2045         struct net_device *dev;
2046         __be16 proto;
2047         int err, reserve = 0;
2048         void *ph;
2049         struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
2050         int tp_len, size_max;
2051         unsigned char *addr;
2052         int len_sum = 0;
2053         int status = 0;
2054
2055         mutex_lock(&po->pg_vec_lock);
2056
2057         err = -EBUSY;
2058         if (saddr == NULL) {
2059                 dev     = packet_cached_dev_get(po);
2060                 proto   = po->num;
2061                 addr    = NULL;
2062         } else {
2063                 err = -EINVAL;
2064                 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2065                         goto out;
2066                 if (msg->msg_namelen < (saddr->sll_halen
2067                                         + offsetof(struct sockaddr_ll,
2068                                                 sll_addr)))
2069                         goto out;
2070                 proto   = saddr->sll_protocol;
2071                 addr    = saddr->sll_addr;
2072                 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2073         }
2074
2075         err = -ENXIO;
2076         if (unlikely(dev == NULL))
2077                 goto out;
2078         err = -ENETDOWN;
2079         if (unlikely(!(dev->flags & IFF_UP)))
2080                 goto out_put;
2081
2082         reserve = dev->hard_header_len;
2083
2084         size_max = po->tx_ring.frame_size
2085                 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2086
2087         if (size_max > dev->mtu + reserve)
2088                 size_max = dev->mtu + reserve;
2089
2090         do {
2091                 ph = packet_current_frame(po, &po->tx_ring,
2092                                 TP_STATUS_SEND_REQUEST);
2093
2094                 if (unlikely(ph == NULL)) {
2095                         schedule();
2096                         continue;
2097                 }
2098
2099                 status = TP_STATUS_SEND_REQUEST;
2100                 skb = sock_alloc_send_skb(&po->sk,
2101                                 LL_ALLOCATED_SPACE(dev)
2102                                 + sizeof(struct sockaddr_ll),
2103                                 0, &err);
2104
2105                 if (unlikely(skb == NULL))
2106                         goto out_status;
2107
2108                 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
2109                                 addr);
2110
2111                 if (unlikely(tp_len < 0)) {
2112                         if (po->tp_loss) {
2113                                 __packet_set_status(po, ph,
2114                                                 TP_STATUS_AVAILABLE);
2115                                 packet_increment_head(&po->tx_ring);
2116                                 kfree_skb(skb);
2117                                 continue;
2118                         } else {
2119                                 status = TP_STATUS_WRONG_FORMAT;
2120                                 err = tp_len;
2121                                 goto out_status;
2122                         }
2123                 }
2124
2125                 skb->destructor = tpacket_destruct_skb;
2126                 __packet_set_status(po, ph, TP_STATUS_SENDING);
2127                 atomic_inc(&po->tx_ring.pending);
2128
2129                 status = TP_STATUS_SEND_REQUEST;
2130                 err = dev_queue_xmit(skb);
2131                 if (unlikely(err > 0)) {
2132                         err = net_xmit_errno(err);
2133                         if (err && __packet_get_status(po, ph) ==
2134                                    TP_STATUS_AVAILABLE) {
2135                                 /* skb was destructed already */
2136                                 skb = NULL;
2137                                 goto out_status;
2138                         }
2139                         /*
2140                          * skb was dropped but not destructed yet;
2141                          * let's treat it like congestion or err < 0
2142                          */
2143                         err = 0;
2144                 }
2145                 packet_increment_head(&po->tx_ring);
2146                 len_sum += tp_len;
2147         } while (likely((ph != NULL) ||
2148                         ((!(msg->msg_flags & MSG_DONTWAIT)) &&
2149                          (atomic_read(&po->tx_ring.pending))))
2150                 );
2151
2152         err = len_sum;
2153         goto out_put;
2154
2155 out_status:
2156         __packet_set_status(po, ph, status);
2157         kfree_skb(skb);
2158 out_put:
2159         dev_put(dev);
2160 out:
2161         mutex_unlock(&po->pg_vec_lock);
2162         return err;
2163 }
2164
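/*
 * Allocate an skb for packet_snd(): "linear" bytes (a GSO header-length
 * hint) go into the linear area and the remainder into paged data; small
 * packets are made entirely linear.
 */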
2165 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2166                                         size_t reserve, size_t len,
2167                                         size_t linear, int noblock,
2168                                         int *err)
2169 {
2170         struct sk_buff *skb;
2171
2172         /* Under a page?  Don't bother with paged skb. */
2173         if (prepad + len < PAGE_SIZE || !linear)
2174                 linear = len;
2175
2176         skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2177                                    err);
2178         if (!skb)
2179                 return NULL;
2180
2181         skb_reserve(skb, reserve);
2182         skb_put(skb, linear);
2183         skb->data_len = len - linear;
2184         skb->len += len - linear;
2185
2186         return skb;
2187 }
2188
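/*
 * The non-ring sendmsg() path: copy the frame from the user's iovec into
 * a fresh skb and hand it to dev_queue_xmit(). With PACKET_VNET_HDR
 * enabled, the data is preceded by a virtio_net_hdr describing its
 * checksum and GSO state.
 */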
2189 static int packet_snd(struct socket *sock,
2190                           struct msghdr *msg, size_t len)
2191 {
2192         struct sock *sk = sock->sk;
2193         struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
2194         struct sk_buff *skb;
2195         struct net_device *dev;
2196         __be16 proto;
2197         unsigned char *addr;
2198         int err, reserve = 0;
2199         struct virtio_net_hdr vnet_hdr = { 0 };
2200         int offset = 0;
2201         int vnet_hdr_len;
2202         struct packet_sock *po = pkt_sk(sk);
2203         unsigned short gso_type = 0;
2204
2205         /*
2206          *      Get and verify the address.
2207          */
2208
2209         if (saddr == NULL) {
2210                 dev     = packet_cached_dev_get(po);
2211                 proto   = po->num;
2212                 addr    = NULL;
2213         } else {
2214                 err = -EINVAL;
2215                 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2216                         goto out;
2217                 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2218                         goto out;
2219                 proto   = saddr->sll_protocol;
2220                 addr    = saddr->sll_addr;
2221                 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2222         }
2223
2224         err = -ENXIO;
2225         if (unlikely(dev == NULL))
2226                 goto out_unlock;
2227         err = -ENETDOWN;
2228         if (unlikely(!(dev->flags & IFF_UP)))
2229                 goto out_unlock;
2230
2231         if (sock->type == SOCK_RAW)
2232                 reserve = dev->hard_header_len;
2233         if (po->has_vnet_hdr) {
2234                 vnet_hdr_len = sizeof(vnet_hdr);
2235
2236                 err = -EINVAL;
2237                 if (len < vnet_hdr_len)
2238                         goto out_unlock;
2239
2240                 len -= vnet_hdr_len;
2241
2242                 err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
2243                                        vnet_hdr_len);
2244                 if (err < 0)
2245                         goto out_unlock;
2246
2247                 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2248                     (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
2249                       vnet_hdr.hdr_len))
2250                         vnet_hdr.hdr_len = vnet_hdr.csum_start +
2251                                                  vnet_hdr.csum_offset + 2;
2252
2253                 err = -EINVAL;
2254                 if (vnet_hdr.hdr_len > len)
2255                         goto out_unlock;
2256
2257                 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2258                         switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2259                         case VIRTIO_NET_HDR_GSO_TCPV4:
2260                                 gso_type = SKB_GSO_TCPV4;
2261                                 break;
2262                         case VIRTIO_NET_HDR_GSO_TCPV6:
2263                                 gso_type = SKB_GSO_TCPV6;
2264                                 break;
2265                         case VIRTIO_NET_HDR_GSO_UDP:
2266                                 gso_type = SKB_GSO_UDP;
2267                                 break;
2268                         default:
2269                                 goto out_unlock;
2270                         }
2271
2272                         if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
2273                                 gso_type |= SKB_GSO_TCP_ECN;
2274
2275                         if (vnet_hdr.gso_size == 0)
2276                                 goto out_unlock;
2277
2278                 }
2279         }
2280
2281         err = -EMSGSIZE;
2282         if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN))
2283                 goto out_unlock;
2284
2285         err = -ENOBUFS;
2286         skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
2287                                LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
2288                                msg->msg_flags & MSG_DONTWAIT, &err);
2289         if (skb == NULL)
2290                 goto out_unlock;
2291
2292         skb_set_network_header(skb, reserve);
2293
2294         err = -EINVAL;
2295         if (sock->type == SOCK_DGRAM &&
2296             (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
2297                 goto out_free;
2298
2299         /* Returns -EFAULT on error */
2300         err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
2301         if (err)
2302                 goto out_free;
2303         err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
2304         if (err < 0)
2305                 goto out_free;
2306
2307         if (!gso_type && (len > dev->mtu + reserve)) {
2308                 /* Earlier code assumed this would be a VLAN pkt,
2309                  * double-check this now that we have the actual
2310                  * packet in hand.
2311                  */
2312                 struct ethhdr *ehdr;
2313                 skb_reset_mac_header(skb);
2314                 ehdr = eth_hdr(skb);
2315                 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
2316                         err = -EMSGSIZE;
2317                         goto out_free;
2318                 }
2319         }
2320
2321         skb->protocol = proto;
2322         skb->dev = dev;
2323         skb->priority = sk->sk_priority;
2324         skb->mark = sk->sk_mark;
2325
2326         if (po->has_vnet_hdr) {
2327                 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2328                         if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
2329                                                   vnet_hdr.csum_offset)) {
2330                                 err = -EINVAL;
2331                                 goto out_free;
2332                         }
2333                 }
2334
2335                 skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
2336                 skb_shinfo(skb)->gso_type = gso_type;
2337
2338                 /* Header must be checked, and gso_segs computed. */
2339                 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2340                 skb_shinfo(skb)->gso_segs = 0;
2341
2342                 len += vnet_hdr_len;
2343         }
2344
2345         /*
2346          *      Now send it
2347          */
2348
2349         err = dev_queue_xmit(skb);
2350         if (err > 0 && (err = net_xmit_errno(err)) != 0)
2351                 goto out_unlock;
2352
2353         dev_put(dev);
2354
2355         return len;
2356
2357 out_free:
2358         kfree_skb(skb);
2359 out_unlock:
2360         if (dev)
2361                 dev_put(dev);
2362 out:
2363         return err;
2364 }
2365
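/* Dispatch: use tpacket_snd() if a TX ring is attached, else packet_snd(). */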
2366 static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
2367                 struct msghdr *msg, size_t len)
2368 {
2369         struct sock *sk = sock->sk;
2370         struct packet_sock *po = pkt_sk(sk);
2371         if (po->tx_ring.pg_vec)
2372                 return tpacket_snd(po, msg);
2373         else
2374                 return packet_snd(sock, msg, len);
2375 }
2376
2377 /*
2378  *      Close a PACKET socket. This is fairly simple. We immediately go
2379  *      to 'closed' state and remove our protocol entry in the device list.
2380  */
2381
2382 static int packet_release(struct socket *sock)
2383 {
2384         struct sock *sk = sock->sk;
2385         struct packet_sock *po;
2386         struct net *net;
2387         union tpacket_req_u req_u;
2388
2389         if (!sk)
2390                 return 0;
2391
2392         net = sock_net(sk);
2393         po = pkt_sk(sk);
2394
2395         spin_lock_bh(&net->packet.sklist_lock);
2396         sk_del_node_init_rcu(sk);
2397         sock_prot_inuse_add(net, sk->sk_prot, -1);
2398         spin_unlock_bh(&net->packet.sklist_lock);
2399
2400         spin_lock(&po->bind_lock);
2401         unregister_prot_hook(sk, false);
2402         if (po->prot_hook.dev) {
2403                 dev_put(po->prot_hook.dev);
2404                 po->prot_hook.dev = NULL;
2405         }
2406         spin_unlock(&po->bind_lock);
2407
2408         packet_flush_mclist(sk);
2409
2410         if (po->rx_ring.pg_vec) {
2411                 memset(&req_u, 0, sizeof(req_u));
2412                 packet_set_ring(sk, &req_u, 1, 0);
2413         }
2414
2415         if (po->tx_ring.pg_vec) {
2416                 memset(&req_u, 0, sizeof(req_u));
2417                 packet_set_ring(sk, &req_u, 1, 1);
2418         }
2419
2420         fanout_release(sk);
2421
2422         synchronize_net();
2423         /*
2424          *      Now the socket is dead. No more input will appear.
2425          */
2426         sock_orphan(sk);
2427         sock->sk = NULL;
2428
2429         /* Purge queues */
2430
2431         skb_queue_purge(&sk->sk_receive_queue);
2432         sk_refcnt_debug_release(sk);
2433
2434         sock_put(sk);
2435         return 0;
2436 }
2437
2438 /*
2439  *      Attach a packet hook.
2440  */
2441
2442 static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
2443 {
2444         struct packet_sock *po = pkt_sk(sk);
2445
2446         if (po->fanout) {
2447                 if (dev)
2448                         dev_put(dev);
2449
2450                 return -EINVAL;
2451         }
2452
2453         lock_sock(sk);
2454
2455         spin_lock(&po->bind_lock);
2456         unregister_prot_hook(sk, true);
2457         po->num = protocol;
2458         po->prot_hook.type = protocol;
2459         if (po->prot_hook.dev)
2460                 dev_put(po->prot_hook.dev);
2461         po->prot_hook.dev = dev;
2462
2463         po->ifindex = dev ? dev->ifindex : 0;
2464
2465         if (protocol == 0)
2466                 goto out_unlock;
2467
2468         if (!dev || (dev->flags & IFF_UP)) {
2469                 register_prot_hook(sk);
2470         } else {
2471                 sk->sk_err = ENETDOWN;
2472                 if (!sock_flag(sk, SOCK_DEAD))
2473                         sk->sk_error_report(sk);
2474         }
2475
2476 out_unlock:
2477         spin_unlock(&po->bind_lock);
2478         release_sock(sk);
2479         return 0;
2480 }
2481
2482 /*
2483  *      Bind a packet socket to a device
2484  */
2485
2486 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
2487                             int addr_len)
2488 {
2489         struct sock *sk = sock->sk;
2490         char name[15];
2491         struct net_device *dev;
2492         int err = -ENODEV;
2493
2494         /*
2495          *      Check legality
2496          */
2497
2498         if (addr_len != sizeof(struct sockaddr))
2499                 return -EINVAL;
2500         strlcpy(name, uaddr->sa_data, sizeof(name));
2501
2502         dev = dev_get_by_name(sock_net(sk), name);
2503         if (dev)
2504                 err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
2505         return err;
2506 }
2507
2508 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2509 {
2510         struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
2511         struct sock *sk = sock->sk;
2512         struct net_device *dev = NULL;
2513         int err;
2514
2515
2516         /*
2517          *      Check legality
2518          */
2519
2520         if (addr_len < sizeof(struct sockaddr_ll))
2521                 return -EINVAL;
2522         if (sll->sll_family != AF_PACKET)
2523                 return -EINVAL;
2524
2525         if (sll->sll_ifindex) {
2526                 err = -ENODEV;
2527                 dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
2528                 if (dev == NULL)
2529                         goto out;
2530         }
2531         err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
2532
2533 out:
2534         return err;
2535 }
2536
2537 static struct proto packet_proto = {
2538         .name     = "PACKET",
2539         .owner    = THIS_MODULE,
2540         .obj_size = sizeof(struct packet_sock),
2541 };
2542
2543 /*
2544  *      Create a packet socket (SOCK_RAW, SOCK_DGRAM or the obsolete SOCK_PACKET).
2545  */
2546
2547 static int packet_create(struct net *net, struct socket *sock, int protocol,
2548                          int kern)
2549 {
2550         struct sock *sk;
2551         struct packet_sock *po;
2552         __be16 proto = (__force __be16)protocol; /* weird, but documented */
2553         int err;
2554
2555         if (!capable(CAP_NET_RAW))
2556                 return -EPERM;
2557         if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
2558             sock->type != SOCK_PACKET)
2559                 return -ESOCKTNOSUPPORT;
2560
2561         sock->state = SS_UNCONNECTED;
2562
2563         err = -ENOBUFS;
2564         sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
2565         if (sk == NULL)
2566                 goto out;
2567
2568         sock->ops = &packet_ops;
2569         if (sock->type == SOCK_PACKET)
2570                 sock->ops = &packet_ops_spkt;
2571
2572         sock_init_data(sock, sk);
2573
2574         po = pkt_sk(sk);
2575         sk->sk_family = PF_PACKET;
2576         po->num = proto;
2577         RCU_INIT_POINTER(po->cached_dev, NULL);
2578
2579         sk->sk_destruct = packet_sock_destruct;
2580         sk_refcnt_debug_inc(sk);
2581
2582         /*
2583          *      Attach a protocol block
2584          */
2585
2586         spin_lock_init(&po->bind_lock);
2587         mutex_init(&po->pg_vec_lock);
2588         po->prot_hook.func = packet_rcv;
2589
2590         if (sock->type == SOCK_PACKET)
2591                 po->prot_hook.func = packet_rcv_spkt;
2592
2593         po->prot_hook.af_packet_priv = sk;
2594
2595         if (proto) {
2596                 po->prot_hook.type = proto;
2597                 register_prot_hook(sk);
2598         }
2599
2600         spin_lock_bh(&net->packet.sklist_lock);
2601         sk_add_node_rcu(sk, &net->packet.sklist);
2602         sock_prot_inuse_add(net, &packet_proto, 1);
2603         spin_unlock_bh(&net->packet.sklist_lock);
2604
2605         return 0;
2606 out:
2607         return err;
2608 }
2609
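/*
 * MSG_ERRQUEUE receive path: dequeue one skb from the error queue (TX
 * timestamps land here) and deliver its details to the user as a
 * PACKET_TX_TIMESTAMP control message.
 */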
2610 static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
2611 {
2612         struct sock_exterr_skb *serr;
2613         struct sk_buff *skb, *skb2;
2614         int copied, err;
2615
2616         err = -EAGAIN;
2617         skb = skb_dequeue(&sk->sk_error_queue);
2618         if (skb == NULL)
2619                 goto out;
2620
2621         copied = skb->len;
2622         if (copied > len) {
2623                 msg->msg_flags |= MSG_TRUNC;
2624                 copied = len;
2625         }
2626         err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2627         if (err)
2628                 goto out_free_skb;
2629
2630         sock_recv_timestamp(msg, sk, skb);
2631
2632         serr = SKB_EXT_ERR(skb);
2633         put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
2634                  sizeof(serr->ee), &serr->ee);
2635
2636         msg->msg_flags |= MSG_ERRQUEUE;
2637         err = copied;
2638
2639         /* Reset and regenerate socket error */
2640         spin_lock_bh(&sk->sk_error_queue.lock);
2641         sk->sk_err = 0;
2642         if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
2643                 sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
2644                 spin_unlock_bh(&sk->sk_error_queue.lock);
2645                 sk->sk_error_report(sk);
2646         } else
2647                 spin_unlock_bh(&sk->sk_error_queue.lock);
2648
2649 out_free_skb:
2650         kfree_skb(skb);
2651 out:
2652         return err;
2653 }
2654
2655 /*
2656  *      Pull a packet from our receive queue and hand it to the user.
2657  *      If necessary we block.
2658  */
2659
2660 static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
2661                           struct msghdr *msg, size_t len, int flags)
2662 {
2663         struct sock *sk = sock->sk;
2664         struct sk_buff *skb;
2665         int copied, err;
2666         int vnet_hdr_len = 0;
2667
2668         err = -EINVAL;
2669         if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
2670                 goto out;
2671
2672 #if 0
2673         /* What error should we return now? EUNATTACH? */
2674         if (pkt_sk(sk)->ifindex < 0)
2675                 return -ENODEV;
2676 #endif
2677
2678         if (flags & MSG_ERRQUEUE) {
2679                 err = packet_recv_error(sk, msg, len);
2680                 goto out;
2681         }
2682
2683         /*
2684          *      Call the generic datagram receiver. This handles all sorts
2685          *      of horrible races and re-entrancy, so we can forget about
2686          *      them in the protocol layers.
2687          *
2688          *      Now it will return ENETDOWN if the device has just gone
2689          *      down, but then it will block.
2690          */
2691
2692         skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
2693
2694         /*
2695          *      An error occurred, so return it. Because skb_recv_datagram()
2696          *      handles the blocking, we don't need to see or worry about
2697          *      blocking retries.
2698          */
2699
2700         if (skb == NULL)
2701                 goto out;
2702
2703         if (pkt_sk(sk)->has_vnet_hdr) {
2704                 struct virtio_net_hdr vnet_hdr = { 0 };
2705
2706                 err = -EINVAL;
2707                 vnet_hdr_len = sizeof(vnet_hdr);
2708                 if (len < vnet_hdr_len)
2709                         goto out_free;
2710
2711                 len -= vnet_hdr_len;
2712
2713                 if (skb_is_gso(skb)) {
2714                         struct skb_shared_info *sinfo = skb_shinfo(skb);
2715
2716                         /* This is a hint as to how much should be linear. */
2717                         vnet_hdr.hdr_len = skb_headlen(skb);
2718                         vnet_hdr.gso_size = sinfo->gso_size;
2719                         if (sinfo->gso_type & SKB_GSO_TCPV4)
2720                                 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2721                         else if (sinfo->gso_type & SKB_GSO_TCPV6)
2722                                 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2723                         else if (sinfo->gso_type & SKB_GSO_UDP)
2724                                 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
2725                         else if (sinfo->gso_type & SKB_GSO_FCOE)
2726                                 goto out_free;
2727                         else
2728                                 BUG();
2729                         if (sinfo->gso_type & SKB_GSO_TCP_ECN)
2730                                 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
2731                 } else
2732                         vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
2733
2734                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2735                         vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
2736                         vnet_hdr.csum_start = skb_checksum_start_offset(skb);
2737                         vnet_hdr.csum_offset = skb->csum_offset;
2738                 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2739                         vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
2740                 } /* else everything is zero */
2741
2742                 err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
2743                                      vnet_hdr_len);
2744                 if (err < 0)
2745                         goto out_free;
2746         }
2747
2748         /* You lose any data beyond the buffer you gave. If this worries
2749          * a user program, it can ask the device for its MTU
2750          * anyway.
2751          */
2752         copied = skb->len;
2753         if (copied > len) {
2754                 copied = len;
2755                 msg->msg_flags |= MSG_TRUNC;
2756         }
2757
2758         err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2759         if (err)
2760                 goto out_free;
2761
2762         sock_recv_ts_and_drops(msg, sk, skb);
2763
2764         if (msg->msg_name) {
2765                 /* If the address length field is there to be filled
2766                  * in, we fill it in now.
2767                  */
2768                 if (sock->type == SOCK_PACKET) {
2769                         msg->msg_namelen = sizeof(struct sockaddr_pkt);
2770                 } else {
2771                         struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
2772                         msg->msg_namelen = sll->sll_halen +
2773                                 offsetof(struct sockaddr_ll, sll_addr);
2774                 }
2775                 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
2776                        msg->msg_namelen);
2777         }
2778
2779         if (pkt_sk(sk)->auxdata) {
2780                 struct tpacket_auxdata aux;
2781
2782                 aux.tp_status = TP_STATUS_USER;
2783                 if (skb->ip_summed == CHECKSUM_PARTIAL)
2784                         aux.tp_status |= TP_STATUS_CSUMNOTREADY;
2785                 aux.tp_len = PACKET_SKB_CB(skb)->origlen;
2786                 aux.tp_snaplen = skb->len;
2787                 aux.tp_mac = 0;
2788                 aux.tp_net = skb_network_offset(skb);
2789                 if (vlan_tx_tag_present(skb)) {
2790                         aux.tp_vlan_tci = vlan_tx_tag_get(skb);
2791                         aux.tp_status |= TP_STATUS_VLAN_VALID;
2792                 } else {
2793                         aux.tp_vlan_tci = 0;
2794                 }
2795                 aux.tp_padding = 0;
2796                 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
2797         }
2798
2799         /*
2800          *      Free or return the buffer as appropriate. Again this
2801          *      hides all the races and re-entrancy issues from us.
2802          */
2803         err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
2804
2805 out_free:
2806         skb_free_datagram(sk, skb);
2807 out:
2808         return err;
2809 }
2810
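/* getsockname() for SOCK_PACKET sockets: report the bound device's name. */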
2811 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
2812                                int *uaddr_len, int peer)
2813 {
2814         struct net_device *dev;
2815         struct sock *sk = sock->sk;
2816
2817         if (peer)
2818                 return -EOPNOTSUPP;
2819
2820         uaddr->sa_family = AF_PACKET;
2821         memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
2822         rcu_read_lock();
2823         dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
2824         if (dev)
2825                 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
2826         rcu_read_unlock();
2827         *uaddr_len = sizeof(*uaddr);
2828
2829         return 0;
2830 }
2831
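/*
 * getsockname() for AF_PACKET sockets: report the bound ifindex and
 * protocol plus the device's hardware address, if any.
 */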
2832 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
2833                           int *uaddr_len, int peer)
2834 {
2835         struct net_device *dev;
2836         struct sock *sk = sock->sk;
2837         struct packet_sock *po = pkt_sk(sk);
2838         DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
2839
2840         if (peer)
2841                 return -EOPNOTSUPP;
2842
2843         sll->sll_family = AF_PACKET;
2844         sll->sll_ifindex = po->ifindex;
2845         sll->sll_protocol = po->num;
2846         sll->sll_pkttype = 0;
2847         rcu_read_lock();
2848         dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
2849         if (dev) {
2850                 sll->sll_hatype = dev->type;
2851                 sll->sll_halen = dev->addr_len;
2852                 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
2853         } else {
2854                 sll->sll_hatype = 0;    /* Bad: we have no ARPHRD_UNSPEC */
2855                 sll->sll_halen = 0;
2856         }
2857         rcu_read_unlock();
2858         *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
2859
2860         return 0;
2861 }
2862
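/*
 * Apply one packet_mclist entry to a device: "what" is 1 to add the
 * address or mode and -1 to remove it (it doubles as the promiscuity
 * and allmulti increment).
 */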
2863 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
2864                          int what)
2865 {
2866         switch (i->type) {
2867         case PACKET_MR_MULTICAST:
2868                 if (i->alen != dev->addr_len)
2869                         return -EINVAL;
2870                 if (what > 0)
2871                         return dev_mc_add(dev, i->addr);
2872                 else
2873                         return dev_mc_del(dev, i->addr);
2875         case PACKET_MR_PROMISC:
2876                 return dev_set_promiscuity(dev, what);
2878         case PACKET_MR_ALLMULTI:
2879                 return dev_set_allmulti(dev, what);
2881         case PACKET_MR_UNICAST:
2882                 if (i->alen != dev->addr_len)
2883                         return -EINVAL;
2884                 if (what > 0)
2885                         return dev_uc_add(dev, i->addr);
2886                 else
2887                         return dev_uc_del(dev, i->addr);
2889         default:
2890                 break;
2891         }
2892         return 0;
2893 }
2894
2895 static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
2896 {
2897         for ( ; i; i = i->next) {
2898                 if (i->ifindex == dev->ifindex)
2899                         packet_dev_mc(dev, i, what);
2900         }
2901 }
2902
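/*
 * PACKET_ADD_MEMBERSHIP: add a multicast/promisc/allmulti entry, or bump
 * the refcount of an identical existing one.
 */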
2903 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
2904 {
2905         struct packet_sock *po = pkt_sk(sk);
2906         struct packet_mclist *ml, *i;
2907         struct net_device *dev;
2908         int err;
2909
2910         rtnl_lock();
2911
2912         err = -ENODEV;
2913         dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
2914         if (!dev)
2915                 goto done;
2916
2917         err = -EINVAL;
2918         if (mreq->mr_alen > dev->addr_len)
2919                 goto done;
2920
2921         err = -ENOBUFS;
2922         i = kmalloc(sizeof(*i), GFP_KERNEL);
2923         if (i == NULL)
2924                 goto done;
2925
2926         err = 0;
2927         for (ml = po->mclist; ml; ml = ml->next) {
2928                 if (ml->ifindex == mreq->mr_ifindex &&
2929                     ml->type == mreq->mr_type &&
2930                     ml->alen == mreq->mr_alen &&
2931                     memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
2932                         ml->count++;
2933                         /* Free the new element ... */
2934                         kfree(i);
2935                         goto done;
2936                 }
2937         }
2938
2939         i->type = mreq->mr_type;
2940         i->ifindex = mreq->mr_ifindex;
2941         i->alen = mreq->mr_alen;
2942         memcpy(i->addr, mreq->mr_address, i->alen);
2943         i->count = 1;
2944         i->next = po->mclist;
2945         po->mclist = i;
2946         err = packet_dev_mc(dev, i, 1);
2947         if (err) {
2948                 po->mclist = i->next;
2949                 kfree(i);
2950         }
2951
2952 done:
2953         rtnl_unlock();
2954         return err;
2955 }
2956
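/*
 * PACKET_DROP_MEMBERSHIP: drop one reference to a membership entry and
 * remove it from the device once the count reaches zero.
 */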
2957 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
2958 {
2959         struct packet_mclist *ml, **mlp;
2960
2961         rtnl_lock();
2962
2963         for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
2964                 if (ml->ifindex == mreq->mr_ifindex &&
2965                     ml->type == mreq->mr_type &&
2966                     ml->alen == mreq->mr_alen &&
2967                     memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
2968                         if (--ml->count == 0) {
2969                                 struct net_device *dev;
2970                                 *mlp = ml->next;
2971                                 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
2972                                 if (dev)
2973                                         packet_dev_mc(dev, ml, -1);
2974                                 kfree(ml);
2975                         }
2976                         rtnl_unlock();
2977                         return 0;
2978                 }
2979         }
2980         rtnl_unlock();
2981         return -EADDRNOTAVAIL;
2982 }
2983
2984 static void packet_flush_mclist(struct sock *sk)
2985 {
2986         struct packet_sock *po = pkt_sk(sk);
2987         struct packet_mclist *ml;
2988
2989         if (!po->mclist)
2990                 return;
2991
2992         rtnl_lock();
2993         while ((ml = po->mclist) != NULL) {
2994                 struct net_device *dev;
2995
2996                 po->mclist = ml->next;
2997                 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
2998                 if (dev != NULL)
2999                         packet_dev_mc(dev, ml, -1);
3000                 kfree(ml);
3001         }
3002         rtnl_unlock();
3003 }
3004
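/* Handle SOL_PACKET socket options; see packet(7) for the user-visible API. */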
3005 static int
3006 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
3007 {
3008         struct sock *sk = sock->sk;
3009         struct packet_sock *po = pkt_sk(sk);
3010         int ret;
3011
3012         if (level != SOL_PACKET)
3013                 return -ENOPROTOOPT;
3014
3015         switch (optname) {
3016         case PACKET_ADD_MEMBERSHIP:
3017         case PACKET_DROP_MEMBERSHIP:
3018         {
3019                 struct packet_mreq_max mreq;
3020                 int len = optlen;
3021                 memset(&mreq, 0, sizeof(mreq));
3022                 if (len < sizeof(struct packet_mreq))
3023                         return -EINVAL;
3024                 if (len > sizeof(mreq))
3025                         len = sizeof(mreq);
3026                 if (copy_from_user(&mreq, optval, len))
3027                         return -EFAULT;
3028                 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3029                         return -EINVAL;
3030                 if (optname == PACKET_ADD_MEMBERSHIP)
3031                         ret = packet_mc_add(sk, &mreq);
3032                 else
3033                         ret = packet_mc_drop(sk, &mreq);
3034                 return ret;
3035         }
3036
3037         case PACKET_RX_RING:
3038         case PACKET_TX_RING:
3039         {
3040                 union tpacket_req_u req_u;
3041                 int len;
3042
3043                 switch (po->tp_version) {
3044                 case TPACKET_V1:
3045                 case TPACKET_V2:
3046                         len = sizeof(req_u.req);
3047                         break;
3048                 case TPACKET_V3:
3049                 default:
3050                         len = sizeof(req_u.req3);
3051                         break;
3052                 }
3053                 if (optlen < len)
3054                         return -EINVAL;
3055                 if (pkt_sk(sk)->has_vnet_hdr)
3056                         return -EINVAL;
3057                 if (copy_from_user(&req_u.req, optval, len))
3058                         return -EFAULT;
3059                 return packet_set_ring(sk, &req_u, 0,
3060                         optname == PACKET_TX_RING);
3061         }
3062         case PACKET_COPY_THRESH:
3063         {
3064                 int val;
3065
3066                 if (optlen != sizeof(val))
3067                         return -EINVAL;
3068                 if (copy_from_user(&val, optval, sizeof(val)))
3069                         return -EFAULT;
3070
3071                 pkt_sk(sk)->copy_thresh = val;
3072                 return 0;
3073         }
3074         case PACKET_VERSION:
3075         {
3076                 int val;
3077
3078                 if (optlen != sizeof(val))
3079                         return -EINVAL;
3080                 if (copy_from_user(&val, optval, sizeof(val)))
3081                         return -EFAULT;
3082                 switch (val) {
3083                 case TPACKET_V1:
3084                 case TPACKET_V2:
3085                 case TPACKET_V3:
3086                         break;
3087                 default:
3088                         return -EINVAL;
3089                 }
3090                 lock_sock(sk);
3091                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3092                         ret = -EBUSY;
3093                 } else {
3094                         po->tp_version = val;
3095                         ret = 0;
3096                 }
3097                 release_sock(sk);
3098                 return ret;
3099         }
3100         case PACKET_RESERVE:
3101         {
3102                 unsigned int val;
3103
3104                 if (optlen != sizeof(val))
3105                         return -EINVAL;
3106                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3107                         return -EBUSY;
3108                 if (copy_from_user(&val, optval, sizeof(val)))
3109                         return -EFAULT;
3110                 po->tp_reserve = val;
3111                 return 0;
3112         }
3113         case PACKET_LOSS:
3114         {
3115                 unsigned int val;
3116
3117                 if (optlen != sizeof(val))
3118                         return -EINVAL;
3119                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3120                         return -EBUSY;
3121                 if (copy_from_user(&val, optval, sizeof(val)))
3122                         return -EFAULT;
3123                 po->tp_loss = !!val;
3124                 return 0;
3125         }
3126         case PACKET_AUXDATA:
3127         {
3128                 int val;
3129
3130                 if (optlen < sizeof(val))
3131                         return -EINVAL;
3132                 if (copy_from_user(&val, optval, sizeof(val)))
3133                         return -EFAULT;
3134
3135                 po->auxdata = !!val;
3136                 return 0;
3137         }
3138         case PACKET_ORIGDEV:
3139         {
3140                 int val;
3141
3142                 if (optlen < sizeof(val))
3143                         return -EINVAL;
3144                 if (copy_from_user(&val, optval, sizeof(val)))
3145                         return -EFAULT;
3146
3147                 po->origdev = !!val;
3148                 return 0;
3149         }
3150         case PACKET_VNET_HDR:
3151         {
3152                 int val;
3153
3154                 if (sock->type != SOCK_RAW)
3155                         return -EINVAL;
3156                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3157                         return -EBUSY;
3158                 if (optlen < sizeof(val))
3159                         return -EINVAL;
3160                 if (copy_from_user(&val, optval, sizeof(val)))
3161                         return -EFAULT;
3162
3163                 po->has_vnet_hdr = !!val;
3164                 return 0;
3165         }
3166         case PACKET_TIMESTAMP:
3167         {
3168                 int val;
3169
3170                 if (optlen != sizeof(val))
3171                         return -EINVAL;
3172                 if (copy_from_user(&val, optval, sizeof(val)))
3173                         return -EFAULT;
3174
3175                 po->tp_tstamp = val;
3176                 return 0;
3177         }
3178         case PACKET_FANOUT:
3179         {
3180                 int val;
3181
3182                 if (optlen != sizeof(val))
3183                         return -EINVAL;
3184                 if (copy_from_user(&val, optval, sizeof(val)))
3185                         return -EFAULT;
3186
3187                 return fanout_add(sk, val & 0xffff, val >> 16);
3188         }
3189         default:
3190                 return -ENOPROTOOPT;
3191         }
3192 }
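     /* Typical option ordering for a memory-mapped RX ring -- a minimal
      * userspace sketch; the helper name and the ring geometry below are
      * arbitrary but valid examples.  PACKET_VERSION must be set before
      * the ring exists, since the handler above returns -EBUSY once
      * pg_vec is allocated:
      *
      *	#include <sys/socket.h>
      *	#include <linux/if_packet.h>
      *
      *	static int setup_rx_ring(int fd)
      *	{
      *		int ver = TPACKET_V2;
      *		struct tpacket_req req = {
      *			.tp_block_size = 4096,	// multiple of PAGE_SIZE
      *			.tp_block_nr   = 64,
      *			.tp_frame_size = 2048,	// TPACKET_ALIGNMENT-aligned
      *			.tp_frame_nr   = 128,	// 64 blocks * 2 frames each
      *		};
      *
      *		if (setsockopt(fd, SOL_PACKET, PACKET_VERSION,
      *			       &ver, sizeof(ver)) < 0)
      *			return -1;
      *		return setsockopt(fd, SOL_PACKET, PACKET_RX_RING,
      *				  &req, sizeof(req));
      *	}
      *
      * The ring is then mapped with a single mmap() of
      * tp_block_size * tp_block_nr bytes on the socket.
      */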
3193
3194 static int packet_getsockopt(struct socket *sock, int level, int optname,
3195                              char __user *optval, int __user *optlen)
3196 {
3197         int len;
3198         int val;
3199         struct sock *sk = sock->sk;
3200         struct packet_sock *po = pkt_sk(sk);
3201         void *data;
3202         struct tpacket_stats st;
3203         union tpacket_stats_u st_u;
3204
3205         if (level != SOL_PACKET)
3206                 return -ENOPROTOOPT;
3207
3208         if (get_user(len, optlen))
3209                 return -EFAULT;
3210
3211         if (len < 0)
3212                 return -EINVAL;
3213
3214         switch (optname) {
3215         case PACKET_STATISTICS:
3216                 if (po->tp_version == TPACKET_V3) {
3217                         if (len > sizeof(struct tpacket_stats_v3))
3218                                 len = sizeof(struct tpacket_stats_v3);
3219                 } else if (len > sizeof(struct tpacket_stats)) {
3220                         len = sizeof(struct tpacket_stats);
3221                 }
3222                 spin_lock_bh(&sk->sk_receive_queue.lock);
3223                 if (po->tp_version == TPACKET_V3) {
3224                         memcpy(&st_u.stats3, &po->stats,
3225                                sizeof(struct tpacket_stats));
3226                         st_u.stats3.tp_freeze_q_cnt =
3227                                 po->stats_u.stats3.tp_freeze_q_cnt;
3228                         st_u.stats3.tp_packets += po->stats.tp_drops;
3229                         data = &st_u.stats3;
3230                 } else {
3231                         st = po->stats;
3232                         st.tp_packets += st.tp_drops;
3233                         data = &st;
3234                 }
3235                 memset(&po->stats, 0, sizeof(st));
3236                 spin_unlock_bh(&sk->sk_receive_queue.lock);
3237                 break;
3238         case PACKET_AUXDATA:
3239                 if (len > sizeof(int))
3240                         len = sizeof(int);
3241                 val = po->auxdata;
3242
3243                 data = &val;
3244                 break;
3245         case PACKET_ORIGDEV:
3246                 if (len > sizeof(int))
3247                         len = sizeof(int);
3248                 val = po->origdev;
3249
3250                 data = &val;
3251                 break;
3252         case PACKET_VNET_HDR:
3253                 if (len > sizeof(int))
3254                         len = sizeof(int);
3255                 val = po->has_vnet_hdr;
3256
3257                 data = &val;
3258                 break;
3259         case PACKET_VERSION:
3260                 if (len > sizeof(int))
3261                         len = sizeof(int);
3262                 val = po->tp_version;
3263                 data = &val;
3264                 break;
3265         case PACKET_HDRLEN:
3266                 if (len > sizeof(int))
3267                         len = sizeof(int);
                     if (len < sizeof(int))
                             return -EINVAL;
3268                 if (copy_from_user(&val, optval, len))
3269                         return -EFAULT;
3270                 switch (val) {
3271                 case TPACKET_V1:
3272                         val = sizeof(struct tpacket_hdr);
3273                         break;
3274                 case TPACKET_V2:
3275                         val = sizeof(struct tpacket2_hdr);
3276                         break;
3277                 case TPACKET_V3:
3278                         val = sizeof(struct tpacket3_hdr);
3279                         break;
3280                 default:
3281                         return -EINVAL;
3282                 }
3283                 data = &val;
3284                 break;
3285         case PACKET_RESERVE:
3286                 if (len > sizeof(unsigned int))
3287                         len = sizeof(unsigned int);
3288                 val = po->tp_reserve;
3289                 data = &val;
3290                 break;
3291         case PACKET_LOSS:
3292                 if (len > sizeof(unsigned int))
3293                         len = sizeof(unsigned int);
3294                 val = po->tp_loss;
3295                 data = &val;
3296                 break;
3297         case PACKET_TIMESTAMP:
3298                 if (len > sizeof(int))
3299                         len = sizeof(int);
3300                 val = po->tp_tstamp;
3301                 data = &val;
3302                 break;
3303         case PACKET_FANOUT:
3304                 if (len > sizeof(int))
3305                         len = sizeof(int);
3306                 val = (po->fanout ?
3307                        ((u32)po->fanout->id |
3308                         ((u32)po->fanout->type << 16)) :
3309                        0);
3310                 data = &val;
3311                 break;
3312         default:
3313                 return -ENOPROTOOPT;
3314         }
3315
3316         if (put_user(len, optlen))
3317                 return -EFAULT;
3318         if (copy_to_user(optval, data, len))
3319                 return -EFAULT;
3320         return 0;
3321 }
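     /* Reading the counters maintained above -- a minimal userspace sketch.
      * Note that PACKET_STATISTICS is destructive: the handler zeroes
      * po->stats after copying, so each call reports activity since the
      * previous call, and tp_packets already includes tp_drops.
      *
      *	struct tpacket_stats st;
      *	socklen_t len = sizeof(st);
      *
      *	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
      *		printf("%u pkts (%u dropped)\n", st.tp_packets, st.tp_drops);
      */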
3322
3323
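     /* Netdevice notifier: keeps packet sockets in step with device state.
      * NETDEV_UNREGISTER flushes the device from each socket's multicast
      * list and, where the socket was bound to it, unhooks it, flags
      * ENETDOWN and forgets the device; NETDEV_DOWN unhooks and reports
      * ENETDOWN; NETDEV_UP re-registers the protocol hook for sockets that
      * are still bound by protocol number.
      */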
3324 static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
3325 {
3326         struct sock *sk;
3327         struct hlist_node *node;
3328         struct net_device *dev = data;
3329         struct net *net = dev_net(dev);
3330
3331         rcu_read_lock();
3332         sk_for_each_rcu(sk, node, &net->packet.sklist) {
3333                 struct packet_sock *po = pkt_sk(sk);
3334
3335                 switch (msg) {
3336                 case NETDEV_UNREGISTER:
3337                         if (po->mclist)
3338                                 packet_dev_mclist(dev, po->mclist, -1);
3339                         /* fallthrough */
3340
3341                 case NETDEV_DOWN:
3342                         if (dev->ifindex == po->ifindex) {
3343                                 spin_lock(&po->bind_lock);
3344                                 if (po->running) {
3345                                         __unregister_prot_hook(sk, false);
3346                                         sk->sk_err = ENETDOWN;
3347                                         if (!sock_flag(sk, SOCK_DEAD))
3348                                                 sk->sk_error_report(sk);
3349                                 }
3350                                 if (msg == NETDEV_UNREGISTER) {
3351                                         po->ifindex = -1;
3352                                         if (po->prot_hook.dev)
3353                                                 dev_put(po->prot_hook.dev);
3354                                         po->prot_hook.dev = NULL;
3355                                 }
3356                                 spin_unlock(&po->bind_lock);
3357                         }
3358                         break;
3359                 case NETDEV_UP:
3360                         if (dev->ifindex == po->ifindex) {
3361                                 spin_lock(&po->bind_lock);
3362                                 if (po->num)
3363                                         register_prot_hook(sk);
3364                                 spin_unlock(&po->bind_lock);
3365                         }
3366                         break;
3367                 }
3368         }
3369         rcu_read_unlock();
3370         return NOTIFY_DONE;
3371 }
3372
3373
3374 static int packet_ioctl(struct socket *sock, unsigned int cmd,
3375                         unsigned long arg)
3376 {
3377         struct sock *sk = sock->sk;
3378
3379         switch (cmd) {
3380         case SIOCOUTQ:
3381         {
3382                 int amount = sk_wmem_alloc_get(sk);
3383
3384                 return put_user(amount, (int __user *)arg);
3385         }
3386         case SIOCINQ:
3387         {
3388                 struct sk_buff *skb;
3389                 int amount = 0;
3390
3391                 spin_lock_bh(&sk->sk_receive_queue.lock);
3392                 skb = skb_peek(&sk->sk_receive_queue);
3393                 if (skb)
3394                         amount = skb->len;
3395                 spin_unlock_bh(&sk->sk_receive_queue.lock);
3396                 return put_user(amount, (int __user *)arg);
3397         }
3398         case SIOCGSTAMP:
3399                 return sock_get_timestamp(sk, (struct timeval __user *)arg);
3400         case SIOCGSTAMPNS:
3401                 return sock_get_timestampns(sk, (struct timespec __user *)arg);
3402
3403 #ifdef CONFIG_INET
3404         case SIOCADDRT:
3405         case SIOCDELRT:
3406         case SIOCDARP:
3407         case SIOCGARP:
3408         case SIOCSARP:
3409         case SIOCGIFADDR:
3410         case SIOCSIFADDR:
3411         case SIOCGIFBRDADDR:
3412         case SIOCSIFBRDADDR:
3413         case SIOCGIFNETMASK:
3414         case SIOCSIFNETMASK:
3415         case SIOCGIFDSTADDR:
3416         case SIOCSIFDSTADDR:
3417         case SIOCSIFFLAGS:
3418                 return inet_dgram_ops.ioctl(sock, cmd, arg);
3419 #endif
3420
3421         default:
3422                 return -ENOIOCTLCMD;
3423         }
3424         return 0;
3425 }
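     /* For AF_PACKET, SIOCOUTQ reports the bytes queued for transmit and
      * SIOCINQ the length of the *next* queued packet, not the total --
      * a minimal sketch:
      *
      *	int next_len;
      *
      *	if (ioctl(fd, SIOCINQ, &next_len) == 0 && next_len > 0)
      *		... a packet of next_len bytes is ready to be read ...
      */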
3426
3427 static unsigned int packet_poll(struct file *file, struct socket *sock,
3428                                 poll_table *wait)
3429 {
3430         struct sock *sk = sock->sk;
3431         struct packet_sock *po = pkt_sk(sk);
3432         unsigned int mask = datagram_poll(file, sock, wait);
3433
3434         spin_lock_bh(&sk->sk_receive_queue.lock);
3435         if (po->rx_ring.pg_vec) {
3436                 if (!packet_previous_rx_frame(po, &po->rx_ring,
3437                         TP_STATUS_KERNEL))
3438                         mask |= POLLIN | POLLRDNORM;
3439         }
3440         spin_unlock_bh(&sk->sk_receive_queue.lock);
3441         spin_lock_bh(&sk->sk_write_queue.lock);
3442         if (po->tx_ring.pg_vec) {
3443                 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
3444                         mask |= POLLOUT | POLLWRNORM;
3445         }
3446         spin_unlock_bh(&sk->sk_write_queue.lock);
3447         return mask;
3448 }
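     /* With a ring mapped, a consumer typically blocks in poll() and then
      * walks frames until ownership returns to the kernel -- a hedged
      * sketch, where "hdr" points at the current frame's tpacket2_hdr in
      * the mmap()ed area and next_frame() is a hypothetical helper that
      * advances it:
      *
      *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
      *
      *	poll(&pfd, 1, -1);
      *	while (hdr->tp_status & TP_STATUS_USER) {
      *		// ... consume the frame ...
      *		hdr->tp_status = TP_STATUS_KERNEL;	// hand the slot back
      *		hdr = next_frame(hdr);			// hypothetical
      *	}
      */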
3449
3450
3451 /* Dirty? Well, I still have not found a better way to account
3452  * for user mmaps.
3453  */
3454
3455 static void packet_mm_open(struct vm_area_struct *vma)
3456 {
3457         struct file *file = vma->vm_file;
3458         struct socket *sock = file->private_data;
3459         struct sock *sk = sock->sk;
3460
3461         if (sk)
3462                 atomic_inc(&pkt_sk(sk)->mapped);
3463 }
3464
3465 static void packet_mm_close(struct vm_area_struct *vma)
3466 {
3467         struct file *file = vma->vm_file;
3468         struct socket *sock = file->private_data;
3469         struct sock *sk = sock->sk;
3470
3471         if (sk)
3472                 atomic_dec(&pkt_sk(sk)->mapped);
3473 }
3474
3475 static const struct vm_operations_struct packet_mmap_ops = {
3476         .open   =       packet_mm_open,
3477         .close  =       packet_mm_close,
3478 };
3479
3480 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
3481                         unsigned int len)
3482 {
3483         int i;
3484
3485         for (i = 0; i < len; i++) {
3486                 if (likely(pg_vec[i].buffer)) {
3487                         if (is_vmalloc_addr(pg_vec[i].buffer))
3488                                 vfree(pg_vec[i].buffer);
3489                         else
3490                                 free_pages((unsigned long)pg_vec[i].buffer,
3491                                            order);
3492                         pg_vec[i].buffer = NULL;
3493                 }
3494         }
3495         kfree(pg_vec);
3496 }
3497
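     /* Allocate one ring block, trying progressively more aggressive
      * strategies: first a cheap page allocation (no retry, no warning),
      * then vzalloc() for physically fragmented memory, and finally the
      * page allocator again with __GFP_NORETRY cleared so reclaim and
      * swap may be used.
      */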
3498 static char *alloc_one_pg_vec_page(unsigned long order)
3499 {
3500         char *buffer = NULL;
3501         gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
3502                           __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
3503
3504         buffer = (char *) __get_free_pages(gfp_flags, order);
3505
3506         if (buffer)
3507                 return buffer;
3508
3509         /*
3510          * __get_free_pages failed, fall back to vmalloc
3511          */
3512         buffer = vzalloc((1 << order) * PAGE_SIZE);
3513
3514         if (buffer)
3515                 return buffer;
3516
3517         /*
3518          * vmalloc failed too, let's dig into swap here
3519          */
3520         gfp_flags &= ~__GFP_NORETRY;
3521         buffer = (char *)__get_free_pages(gfp_flags, order);
3522         if (buffer)
3523                 return buffer;
3524
3525         /*
3526          * complete and utter failure
3527          */
3528         return NULL;
3529 }
3530
3531 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
3532 {
3533         unsigned int block_nr = req->tp_block_nr;
3534         struct pgv *pg_vec;
3535         int i;
3536
3537         pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
3538         if (unlikely(!pg_vec))
3539                 goto out;
3540
3541         for (i = 0; i < block_nr; i++) {
3542                 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
3543                 if (unlikely(!pg_vec[i].buffer))
3544                         goto out_free_pgvec;
3545         }
3546
3547 out:
3548         return pg_vec;
3549
3550 out_free_pgvec:
3551         free_pg_vec(pg_vec, order, block_nr);
3552         pg_vec = NULL;
3553         goto out;
3554 }
3555
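     /* Ring geometry invariants enforced below, with a worked example:
      * tp_block_size = 8192 and tp_frame_size = 2048 on a 4 KiB-page
      * system give order = 1 blocks holding frames_per_block = 4 frames
      * each, so a request with tp_block_nr = 32 must set tp_frame_nr to
      * exactly 128; any mismatch is rejected with -EINVAL.
      */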
3556 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
3557                 int closing, int tx_ring)
3558 {
3559         struct pgv *pg_vec = NULL;
3560         struct packet_sock *po = pkt_sk(sk);
3561         int was_running, order = 0;
3562         struct packet_ring_buffer *rb;
3563         struct sk_buff_head *rb_queue;
3564         __be16 num;
3565         int err = -EINVAL;
3566         /* Alias the V1/V2 request to minimize code churn */
3567         struct tpacket_req *req = &req_u->req;
3568
3569         lock_sock(sk);
3570         /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
3571         if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
3572                 WARN(1, "Tx-ring is not supported.\n");
3573                 goto out;
3574         }
3575
3576         rb = tx_ring ? &po->tx_ring : &po->rx_ring;
3577         rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
3578
3579         err = -EBUSY;
3580         if (!closing) {
3581                 if (atomic_read(&po->mapped))
3582                         goto out;
3583                 if (atomic_read(&rb->pending))
3584                         goto out;
3585         }
3586
3587         if (req->tp_block_nr) {
3588                 /* Sanity tests and some calculations */
3589                 err = -EBUSY;
3590                 if (unlikely(rb->pg_vec))
3591                         goto out;
3592
3593                 switch (po->tp_version) {
3594                 case TPACKET_V1:
3595                         po->tp_hdrlen = TPACKET_HDRLEN;
3596                         break;
3597                 case TPACKET_V2:
3598                         po->tp_hdrlen = TPACKET2_HDRLEN;
3599                         break;
3600                 case TPACKET_V3:
3601                         po->tp_hdrlen = TPACKET3_HDRLEN;
3602                         break;
3603                 }
3604
3605                 err = -EINVAL;
3606                 if (unlikely((int)req->tp_block_size <= 0))
3607                         goto out;
3608                 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
3609                         goto out;
3610                 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
3611                                         po->tp_reserve))
3612                         goto out;
3613                 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
3614                         goto out;
3615
3616                 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
3617                 if (unlikely(rb->frames_per_block == 0))
3618                         goto out;
3619                 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
3620                                         req->tp_frame_nr))
3621                         goto out;
3622
3623                 err = -ENOMEM;
3624                 order = get_order(req->tp_block_size);
3625                 pg_vec = alloc_pg_vec(req, order);
3626                 if (unlikely(!pg_vec))
3627                         goto out;
3628                 switch (po->tp_version) {
3629                 case TPACKET_V3:
3630                         /* Transmit path is not supported. We checked
3631                          * it above, but be paranoid.
3632                          */
3633                         if (!tx_ring)
3634                                 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
3635                         break;
3636                 default:
3637                         break;
3638                 }
3639         } else {
3640                 /* tp_block_nr == 0 releases the ring;
3641                  * tp_frame_nr must then be 0 as well. */
3642                 err = -EINVAL;
3643                 if (unlikely(req->tp_frame_nr))
3644                         goto out;
3645         }
3646
3647
3648         /* Detach socket from network */
3649         spin_lock(&po->bind_lock);
3650         was_running = po->running;
3651         num = po->num;
3652         if (was_running) {
3653                 po->num = 0;
3654                 __unregister_prot_hook(sk, false);
3655         }
3656         spin_unlock(&po->bind_lock);
3657
3658         synchronize_net();
3659
3660         err = -EBUSY;
3661         mutex_lock(&po->pg_vec_lock);
3662         if (closing || atomic_read(&po->mapped) == 0) {
3663                 err = 0;
3664                 spin_lock_bh(&rb_queue->lock);
3665                 swap(rb->pg_vec, pg_vec);
3666