/* net/packet/af_packet.c (pandora-kernel, commit 2d44726d8a00e5ebc874f7c6c3b887ae51ede517) */
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              PACKET - implements raw packet sockets.
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *              Alan Cox        :       verify_area() now used correctly
 *              Alan Cox        :       new skbuff lists, look ma no backlogs!
 *              Alan Cox        :       tidied skbuff lists.
 *              Alan Cox        :       Now uses generic datagram routines I
 *                                      added. Also fixed the peek/read crash
 *                                      from all old Linux datagram code.
 *              Alan Cox        :       Uses the improved datagram code.
 *              Alan Cox        :       Added NULL's for socket options.
 *              Alan Cox        :       Re-commented the code.
 *              Alan Cox        :       Use new kernel side addressing
 *              Rob Janssen     :       Correct MTU usage.
 *              Dave Platt      :       Counter leaks caused by incorrect
 *                                      interrupt locking and some slightly
 *                                      dubious gcc output. Can you read
 *                                      compiler: it said _VOLATILE_
 *      Richard Kooijman        :       Timestamp fixes.
 *              Alan Cox        :       New buffers. Use sk->mac.raw.
 *              Alan Cox        :       sendmsg/recvmsg support.
 *              Alan Cox        :       Protocol setting support
 *      Alexey Kuznetsov        :       Untied from IPv4 stack.
 *      Cyrus Durgin            :       Fixed kerneld for kmod.
 *      Michal Ostrowski        :       Module initialization cleanup.
 *         Ulises Alonso        :       Frame number limit removal and
 *                                      packet_set_ring memory leak.
 *              Eric Biederman  :       Allow for > 8 byte hardware addresses.
 *                                      The convention is that longer addresses
 *                                      will simply extend the hardware address
 *                                      byte arrays at the end of sockaddr_ll
 *                                      and packet_mreq.
 *              Johann Baudy    :       Added TX RING.
 *              Chetan Loke     :       Implemented TPACKET_V3 block abstraction
 *                                      layer.
 *                                      Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>

#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif

/*
   Assumptions:
   - if the device has no dev->hard_header routine, it adds and removes the ll
     header inside itself. In this case the ll header is invisible outside of
     the device, but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit in the reserved space (tunnels); others are not (PPP).
   - the packet socket receives packets with the ll header pulled,
     so SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header==NULL
   mac_header -> UNKNOWN position. Very likely it points to the ll header.
                 PPP does this, which is wrong, because it introduces
                 asymmetry between the rx and tx paths.
   data       -> data

Outgoing, dev->hard_header==NULL
   mac_header -> data. ll header is still not built!
   data       -> data

Summary:
  If dev->hard_header==NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by the device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */
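
/*
 * Illustration (editorial, not part of this file): a minimal user-space
 * sketch of the two receive flavours described above.  SOCK_RAW hands the
 * application frames with the ll header pushed back on; SOCK_DGRAM delivers
 * the payload only, with link-level information summarised in the
 * struct sockaddr_ll returned by recvfrom().
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>
 *	#include <arpa/inet.h>
 *
 *	// Frames including the link-layer (e.g. ethernet) header:
 *	int raw   = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));
 *	// Payload only; ll header details arrive via sockaddr_ll instead:
 *	int dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 */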

/* Private packet socket structures. */

struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};
/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);


#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
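
/*
 * Worked example (editorial, numbers hypothetical): with V3_ALIGNMENT == 8,
 * a private area of, say, 13 bytes is padded to ALIGN(13, 8) == 16, so the
 * first frame of a block starts BLK_HDR_LEN + 16 bytes past the block base.
 * Only the alignment rule is fixed by the macros above.
 */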

/* kbdq - kernel block descriptor queue */
struct tpacket_kbdq_core {
	struct pgv	*pkbdq;
	unsigned int	feature_req_word;
	unsigned int	hdrlen;
	unsigned char	reset_pending_on_curr_blk;
	unsigned char   delete_blk_timer;
	unsigned short	kactive_blk_num;
	unsigned short	blk_sizeof_priv;

	/* last_kactive_blk_num:
	 * trick to see if user-space has caught up
	 * in order to avoid refreshing timer when every single pkt arrives.
	 */
	unsigned short	last_kactive_blk_num;

	char		*pkblk_start;
	char		*pkblk_end;
	int		kblk_size;
	unsigned int	max_frame_len;
	unsigned int	knum_blocks;
	uint64_t	knxt_seq_num;
	char		*prev;
	char		*nxt_offset;
	struct sk_buff	*skb;

	atomic_t	blk_fill_in_prog;

	/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV	(8)

	unsigned short  retire_blk_tov;
	unsigned short  version;
	unsigned long	tov_in_jiffies;

	/* timer to retire an outstanding block */
	struct timer_list retire_blk_timer;
};

#define PGV_FROM_VMALLOC 1
struct pgv {
	char *buffer;
};

struct packet_ring_buffer {
	struct pgv		*pg_vec;
	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	struct tpacket_kbdq_core	prb_bdqc;
	atomic_t		pending;
};

#define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)	((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
			struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(unsigned long);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_init_blk_timer(struct packet_sock *,
		struct tpacket_kbdq_core *,
		void (*func) (unsigned long));
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);

struct packet_fanout;
struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct packet_fanout	*fanout;
	struct tpacket_stats	stats;
	union  tpacket_stats_u	stats_u;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running:1,	/* prot_hook is attached */
				auxdata:1,
				origdev:1,
				has_vnet_hdr:1;
	int			ifindex;	/* bound device		*/
	__be16			num;
	struct packet_mclist	*mclist;
	atomic_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_loss:1;
	unsigned int		tp_tstamp;
	struct net_device __rcu	*cached_dev;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

#define PACKET_FANOUT_MAX	256

struct packet_fanout {
#ifdef CONFIG_NET_NS
	struct net		*net;
#endif
	unsigned int		num_members;
	u16			id;
	u8			type;
	u8			defrag;
	atomic_t		rr_cur;
	struct list_head	list;
	struct sock		*arr[PACKET_FANOUT_MAX];
	spinlock_t		lock;
	atomic_t		sk_ref;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

/* register_prot_hook must be invoked with the po->bind_lock held,
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (!po->running) {
		if (po->fanout) {
			__fanout_link(sk, po);
		} else {
			dev_add_pack(&po->prot_hook);
			rcu_assign_pointer(po->cached_dev, po->prot_hook.dev);
		}

		sock_hold(sk);
		po->running = 1;
	}
}

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held.  If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	po->running = 0;
	if (po->fanout) {
		__fanout_unlink(sk, po);
	} else {
		__dev_remove_pack(&po->prot_hook);
		RCU_INIT_POINTER(po->cached_dev, NULL);
	}

	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

static inline __pure struct page *pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}
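
/*
 * Editorial note: tp_status is the kernel/user hand-off word for V1/V2
 * rings; the accessors above bracket it with memory barriers so the status
 * never becomes visible before the frame contents.  A minimal user-space
 * consumer sketch, assuming a V1 ring already mmap()ed and hypothetical
 * helpers ring_frame() and consume():
 *
 *	struct tpacket_hdr *hdr = ring_frame(ring, i);	// hypothetical
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);			// wait for the kernel
 *	consume(hdr);					// hypothetical
 *	hdr->tp_status = TP_STATUS_KERNEL;		// return the frame
 */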

static void *packet_lookup_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int position,
		int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		int tx_ring,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;

	spin_lock_bh(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock_bh(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_init_blk_timer(struct packet_sock *po,
		struct tpacket_kbdq_core *pkc,
		void (*func) (unsigned long))
{
	init_timer(&pkc->retire_blk_timer);
	pkc->retire_blk_timer.data = (long)po;
	pkc->retire_blk_timer.function = func;
	pkc->retire_blk_timer.expires = jiffies;
}

static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
{
	struct tpacket_kbdq_core *pkc;

	if (tx_ring)
		BUG();

	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
	struct ethtool_cmd ecmd;
	int err;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_settings(dev, &ecmd);
	rtnl_unlock();
	if (!err) {
		switch (ecmd.speed) {
		case SPEED_10000:
			msec = 1;
			div = 10000/1000;
			break;
		case SPEED_1000:
			msec = 1;
			div = 1000/1000;
			break;
		/*
		 * If the link speed is so slow you don't really
		 * need to worry about perf anyways
		 */
		case SPEED_100:
		case SPEED_10:
		default:
			return DEFAULT_PRB_RETIRE_TOV;
		}
	}

	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	tmo = mbits * msec;

	if (div)
		return tmo+1;
	return tmo;
}
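
/*
 * Worked example (editorial) of the math above: a 1 MB block on a 1 Gbps
 * link gives mbits = (1048576 * 8) / (1024 * 1024) = 8, div = 1 and
 * msec = 1, so tmo = 8 and the function returns 9 ms -- roughly the time
 * the link needs to fill one block, as the timer-logic comment further
 * below also assumes.
 */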

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u, int tx_ring)
{
	struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start	= (char *)pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks	= req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats_u.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po, tx_ring);
	prb_open_block(p1, pbd);
}

/*  Do NOT update the last_blk_num first.
 *  Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 */
static void prb_retire_rx_blk_timer_expired(unsigned long data)
{
	struct packet_sock *po = (struct packet_sock *)data;
	struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		while (atomic_read(&pkc->blk_fill_in_prog)) {
			/* Waiting for skb_copy_bits to finish... */
			cpu_relax();
		}
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 *	   lagging behind.
			 */
			if (prb_curr_blk_in_use(pkc, pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. Queue was frozen, user-space caught
				 * up, now the link went idle && the timer
				 * fired. We don't have a block to close, so we
				 * open this block and restart the timer.
				 * Opening a block thaws the queue and restarts
				 * the timer; thawing/timer-refresh is a side
				 * effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effects:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose,
 *	 because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	if (po->stats.tp_drops)
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time */
		struct timespec ts;
		getnstimeofday(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effects of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we would lose the
	 * flexibility of making the priv area sticky
	 */
	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	getnstimeofday(&ts);
	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = (char *)(pkc1->pkblk_start +
				    BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);

	smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
				  struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats_u.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
				cpu_relax();
			}
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}

static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
				      struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
	atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (vlan_tx_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
		ppd->tp_status = TP_STATUS_VLAN_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = ppd->tp_status = 0;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
{
	struct tpacket3_hdr *ppd;

	ppd  = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status,
					    unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(((struct packet_ring_buffer *)&po->rx_ring));
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pkc, pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *) ((char *)pbd + pkc->kblk_size);

	/* first try the current block */
	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available. User-space hasn't caught up yet.
	 * Queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, status, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return 0;
	}
}

static void *prb_lookup_block(struct packet_sock *po,
				     struct packet_ring_buffer *rb,
				     unsigned int previous,
				     int status)
{
	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, previous);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
					    struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

static struct sock *fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	u32 idx, hash = skb->rxhash;

	idx = ((u64)hash * num) >> 32;

	return f->arr[idx];
}
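
/*
 * Editorial note: ((u64)hash * num) >> 32 maps a 32-bit hash uniformly onto
 * [0, num) without a division -- e.g. hash = 0x80000000 with num = 4 gives
 * (0x80000000ULL * 4) >> 32 == 2.  It is the usual multiply-shift
 * alternative to `hash % num`.
 */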

static struct sock *fanout_demux_lb(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	unsigned int val = atomic_inc_return(&f->rr_cur);

	return f->arr[val % num];
}

static struct sock *fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	unsigned int cpu = smp_processor_id();

	return f->arr[cpu % num];
}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = ACCESS_ONCE(f->num_members);
	struct packet_sock *po;
	struct sock *sk;

	if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
	    !num) {
		kfree_skb(skb);
		return 0;
	}

	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		if (f->defrag) {
			skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
			if (!skb)
				return 0;
		}
		skb_get_rxhash(skb);
		sk = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		sk = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		sk = fanout_demux_cpu(f, skb, num);
		break;
	}

	po = pkt_sk(sk);

	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}

static DEFINE_MUTEX(fanout_mutex);
static LIST_HEAD(fanout_list);

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	if (f->num_members == 1)
		dev_add_pack(&f->prot_hook);
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	if (f->num_members == 0)
		__dev_remove_pack(&f->prot_hook);
	spin_unlock(&f->lock);
}

bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (sk->sk_family != PF_PACKET)
		return false;

	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
}

static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 defrag = (type_flags & PACKET_FANOUT_FLAG_DEFRAG) ? 1 : 0;
	int err;

	switch (type) {
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&fanout_mutex);

	err = -EALREADY;
	if (po->fanout)
		goto out;

	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match && match->defrag != defrag)
		goto out;
	if (!match) {
		err = -ENOMEM;
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->defrag = defrag;
		atomic_set(&match->rr_cur, 0);
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		atomic_set(&match->sk_ref, 0);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		match->prot_hook.id_match = match_fanout_group;
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;

	spin_lock(&po->bind_lock);
	if (po->running &&
	    match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			atomic_inc(&match->sk_ref);
			__fanout_link(sk, po);
			err = 0;
		}
	}
	spin_unlock(&po->bind_lock);

	if (err && !atomic_read(&match->sk_ref)) {
		list_del(&match->list);
		kfree(match);
	}

out:
	mutex_unlock(&fanout_mutex);
	return err;
}
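
/*
 * User-space view (editorial, illustrative): each of N packet sockets joins
 * the same fanout group via setsockopt(); fanout_add() above decodes the
 * argument as (id | (type_flags << 16)) and links the socket into the
 * matching struct packet_fanout, creating it on first use.
 *
 *	int id  = 42;				// group id, chosen by the app
 *	int arg = id | (PACKET_FANOUT_CPU << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 */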

/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
 * It is the responsibility of the caller to call fanout_release_data() and
 * free the returned packet_fanout (after synchronize_net())
 */
static struct packet_fanout *fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	mutex_lock(&fanout_mutex);
	f = po->fanout;
	if (f) {
		po->fanout = NULL;

		if (atomic_dec_and_test(&f->sk_ref))
			list_del(&f->list);
		else
			f = NULL;
	}
	mutex_unlock(&fanout_mutex);

	return f;
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have the ll header pulled,
	 *	push it back.
	 *
	 *	For outgoing ones skb->data == skb_mac_header(skb)
	 *	so this procedure is a noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets from using up all the memory.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}


/*
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame.
 */

static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	__be16 proto = 0;
	int err;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[13] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (len > (dev->mtu + dev->hard_header_len)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
			err = -EMSGSIZE;
			goto out_unlock;
		}
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
	if (err < 0)
		goto out_unlock;

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}
1568
1569 static unsigned int run_filter(const struct sk_buff *skb,
1570                                       const struct sock *sk,
1571                                       unsigned int res)
1572 {
1573         struct sk_filter *filter;
1574
1575         rcu_read_lock();
1576         filter = rcu_dereference(sk->sk_filter);
1577         if (filter != NULL)
1578                 res = SK_RUN_FILTER(filter, skb);
1579         rcu_read_unlock();
1580
1581         return res;
1582 }
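
/*
 * The filter consulted in run_filter() is the classic BPF program a user
 * attaches with SO_ATTACH_FILTER. A sketch (userspace; the program below
 * keeps the first 96 bytes of IPv4 frames and is an illustration, not
 * taken from this file):
 *
 *	struct sock_filter code[] = {
 *		{ 0x28, 0, 0, 0x0000000c },	// ldh [12]  (EtherType)
 *		{ 0x15, 0, 1, 0x00000800 },	// jeq #ETH_P_IP ? next : drop
 *		{ 0x06, 0, 0, 0x00000060 },	// ret #96   -> snaplen = 96
 *		{ 0x06, 0, 0, 0x00000000 },	// ret #0    -> drop
 *	};
 *	struct sock_fprog prog = { .len = 4, .filter = code };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 *
 * The value the program returns becomes "res" here, which the callers
 * below use to clamp snaplen.
 */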
1583
1584 /*
1585  * This function does lazy skb cloning in the hope that most packets
1586  * are discarded by BPF.
1587  *
1588  * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
1589  * and skb->cb are mangled. It works because (and until) packets
1590  * arriving here are owned by the current CPU. Output packets are cloned
1591  * by dev_queue_xmit_nit(), input packets are processed by net_bh
1592  * sequentially, so if we return the skb to its original state on exit,
1593  * we will not harm anyone.
1594  */
1595
1596 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1597                       struct packet_type *pt, struct net_device *orig_dev)
1598 {
1599         struct sock *sk;
1600         struct sockaddr_ll *sll;
1601         struct packet_sock *po;
1602         u8 *skb_head = skb->data;
1603         int skb_len = skb->len;
1604         unsigned int snaplen, res;
1605
1606         if (skb->pkt_type == PACKET_LOOPBACK)
1607                 goto drop;
1608
1609         sk = pt->af_packet_priv;
1610         po = pkt_sk(sk);
1611
1612         if (!net_eq(dev_net(dev), sock_net(sk)))
1613                 goto drop;
1614
1615         skb->dev = dev;
1616
1617         if (dev->header_ops) {
1618                 /* The device has an explicit notion of an ll header,
1619                  * exported to higher levels.
1620                  *
1621                  * Otherwise, the device hides the details of its frame
1622                  * structure, so the corresponding packet header is
1623                  * never delivered to the user.
1624                  */
1625                 if (sk->sk_type != SOCK_DGRAM)
1626                         skb_push(skb, skb->data - skb_mac_header(skb));
1627                 else if (skb->pkt_type == PACKET_OUTGOING) {
1628                         /* Special case: outgoing packets have ll header at head */
1629                         skb_pull(skb, skb_network_offset(skb));
1630                 }
1631         }
1632
1633         snaplen = skb->len;
1634
1635         res = run_filter(skb, sk, snaplen);
1636         if (!res)
1637                 goto drop_n_restore;
1638         if (snaplen > res)
1639                 snaplen = res;
1640
1641         if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
1642                 goto drop_n_acct;
1643
1644         if (skb_shared(skb)) {
1645                 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1646                 if (nskb == NULL)
1647                         goto drop_n_acct;
1648
1649                 if (skb_head != skb->data) {
1650                         skb->data = skb_head;
1651                         skb->len = skb_len;
1652                 }
1653                 kfree_skb(skb);
1654                 skb = nskb;
1655         }
1656
1657         BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
1658                      sizeof(skb->cb));
1659
1660         sll = &PACKET_SKB_CB(skb)->sa.ll;
1661         sll->sll_family = AF_PACKET;
1662         sll->sll_hatype = dev->type;
1663         sll->sll_protocol = skb->protocol;
1664         sll->sll_pkttype = skb->pkt_type;
1665         if (unlikely(po->origdev))
1666                 sll->sll_ifindex = orig_dev->ifindex;
1667         else
1668                 sll->sll_ifindex = dev->ifindex;
1669
1670         sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
1671
1672         PACKET_SKB_CB(skb)->origlen = skb->len;
1673
1674         if (pskb_trim(skb, snaplen))
1675                 goto drop_n_acct;
1676
1677         skb_set_owner_r(skb, sk);
1678         skb->dev = NULL;
1679         skb_dst_drop(skb);
1680
1681         /* drop conntrack reference */
1682         nf_reset(skb);
1683
1684         spin_lock(&sk->sk_receive_queue.lock);
1685         po->stats.tp_packets++;
1686         skb->dropcount = atomic_read(&sk->sk_drops);
1687         __skb_queue_tail(&sk->sk_receive_queue, skb);
1688         spin_unlock(&sk->sk_receive_queue.lock);
1689         sk->sk_data_ready(sk, skb->len);
1690         return 0;
1691
1692 drop_n_acct:
1693         spin_lock(&sk->sk_receive_queue.lock);
1694         po->stats.tp_drops++;
1695         atomic_inc(&sk->sk_drops);
1696         spin_unlock(&sk->sk_receive_queue.lock);
1697
1698 drop_n_restore:
1699         if (skb_head != skb->data && skb_shared(skb)) {
1700                 skb->data = skb_head;
1701                 skb->len = skb_len;
1702         }
1703 drop:
1704         consume_skb(skb);
1705         return 0;
1706 }
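
/*
 * A minimal userspace counterpart (illustrative only): frames queued by
 * packet_rcv() are read with the ordinary datagram calls, and the
 * sockaddr_ll built above comes back as the source address.
 *
 *	unsigned char buf[2048];
 *	struct sockaddr_ll from;
 *	socklen_t fromlen = sizeof(from);
 *	ssize_t n;
 *
 *	n = recvfrom(fd, buf, sizeof(buf), 0,
 *		     (struct sockaddr *)&from, &fromlen);
 *	// from.sll_ifindex, sll_pkttype and sll_addr mirror the fields
 *	// filled in just before __skb_queue_tail() above
 */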
1707
1708 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1709                        struct packet_type *pt, struct net_device *orig_dev)
1710 {
1711         struct sock *sk;
1712         struct packet_sock *po;
1713         struct sockaddr_ll *sll;
1714         union {
1715                 struct tpacket_hdr *h1;
1716                 struct tpacket2_hdr *h2;
1717                 struct tpacket3_hdr *h3;
1718                 void *raw;
1719         } h;
1720         u8 *skb_head = skb->data;
1721         int skb_len = skb->len;
1722         unsigned int snaplen, res;
1723         unsigned long status = TP_STATUS_USER;
1724         unsigned short macoff, netoff, hdrlen;
1725         struct sk_buff *copy_skb = NULL;
1726         struct timeval tv;
1727         struct timespec ts;
1728         struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
1729
1730         if (skb->pkt_type == PACKET_LOOPBACK)
1731                 goto drop;
1732
1733         sk = pt->af_packet_priv;
1734         po = pkt_sk(sk);
1735
1736         if (!net_eq(dev_net(dev), sock_net(sk)))
1737                 goto drop;
1738
1739         if (dev->header_ops) {
1740                 if (sk->sk_type != SOCK_DGRAM)
1741                         skb_push(skb, skb->data - skb_mac_header(skb));
1742                 else if (skb->pkt_type == PACKET_OUTGOING) {
1743                         /* Special case: outgoing packets have ll header at head */
1744                         skb_pull(skb, skb_network_offset(skb));
1745                 }
1746         }
1747
1748         if (skb->ip_summed == CHECKSUM_PARTIAL)
1749                 status |= TP_STATUS_CSUMNOTREADY;
1750
1751         snaplen = skb->len;
1752
1753         res = run_filter(skb, sk, snaplen);
1754         if (!res)
1755                 goto drop_n_restore;
1756         if (snaplen > res)
1757                 snaplen = res;
1758
1759         if (sk->sk_type == SOCK_DGRAM) {
1760                 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
1761                                   po->tp_reserve;
1762         } else {
1763                 unsigned maclen = skb_network_offset(skb);
1764                 netoff = TPACKET_ALIGN(po->tp_hdrlen +
1765                                        (maclen < 16 ? 16 : maclen)) +
1766                         po->tp_reserve;
1767                 macoff = netoff - maclen;
1768         }
1769         if (po->tp_version <= TPACKET_V2) {
1770                 if (macoff + snaplen > po->rx_ring.frame_size) {
1771                         if (po->copy_thresh &&
1772                             atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1773                                 if (skb_shared(skb)) {
1774                                         copy_skb = skb_clone(skb, GFP_ATOMIC);
1775                                 } else {
1776                                         copy_skb = skb_get(skb);
1777                                         skb_head = skb->data;
1778                                 }
1779                                 if (copy_skb)
1780                                         skb_set_owner_r(copy_skb, sk);
1781                         }
1782                         snaplen = po->rx_ring.frame_size - macoff;
1783                         if ((int)snaplen < 0)
1784                                 snaplen = 0;
1785                 }
1786         } else if (unlikely(macoff + snaplen >
1787                             GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
1788                 u32 nval;
1789
1790                 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
1791                 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
1792                             snaplen, nval, macoff);
1793                 snaplen = nval;
1794                 if (unlikely((int)snaplen < 0)) {
1795                         snaplen = 0;
1796                         macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
1797                 }
1798         }
1799         spin_lock(&sk->sk_receive_queue.lock);
1800         h.raw = packet_current_rx_frame(po, skb,
1801                                         TP_STATUS_KERNEL, (macoff+snaplen));
1802         if (!h.raw)
1803                 goto ring_is_full;
1804         if (po->tp_version <= TPACKET_V2) {
1805                 packet_increment_rx_head(po, &po->rx_ring);
1806                 /*
1807                  * LOSING will be reported until you read the stats,
1808                  * because it's COR - Clear On Read.
1809                  * Anyway, this is done for V1/V2 only, as V3 doesn't need it
1810                  * at the packet level.
1811                  */
1812                 if (po->stats.tp_drops)
1813                         status |= TP_STATUS_LOSING;
1814         }
1815         po->stats.tp_packets++;
1816         if (copy_skb) {
1817                 status |= TP_STATUS_COPY;
1818                 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
1819         }
1820         spin_unlock(&sk->sk_receive_queue.lock);
1821
1822         skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
1823
1824         switch (po->tp_version) {
1825         case TPACKET_V1:
1826                 h.h1->tp_len = skb->len;
1827                 h.h1->tp_snaplen = snaplen;
1828                 h.h1->tp_mac = macoff;
1829                 h.h1->tp_net = netoff;
1830                 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1831                                 && shhwtstamps->syststamp.tv64)
1832                         tv = ktime_to_timeval(shhwtstamps->syststamp);
1833                 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1834                                 && shhwtstamps->hwtstamp.tv64)
1835                         tv = ktime_to_timeval(shhwtstamps->hwtstamp);
1836                 else if (skb->tstamp.tv64)
1837                         tv = ktime_to_timeval(skb->tstamp);
1838                 else
1839                         do_gettimeofday(&tv);
1840                 h.h1->tp_sec = tv.tv_sec;
1841                 h.h1->tp_usec = tv.tv_usec;
1842                 hdrlen = sizeof(*h.h1);
1843                 break;
1844         case TPACKET_V2:
1845                 h.h2->tp_len = skb->len;
1846                 h.h2->tp_snaplen = snaplen;
1847                 h.h2->tp_mac = macoff;
1848                 h.h2->tp_net = netoff;
1849                 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1850                                 && shhwtstamps->syststamp.tv64)
1851                         ts = ktime_to_timespec(shhwtstamps->syststamp);
1852                 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1853                                 && shhwtstamps->hwtstamp.tv64)
1854                         ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1855                 else if (skb->tstamp.tv64)
1856                         ts = ktime_to_timespec(skb->tstamp);
1857                 else
1858                         getnstimeofday(&ts);
1859                 h.h2->tp_sec = ts.tv_sec;
1860                 h.h2->tp_nsec = ts.tv_nsec;
1861                 if (vlan_tx_tag_present(skb)) {
1862                         h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
1863                         status |= TP_STATUS_VLAN_VALID;
1864                 } else {
1865                         h.h2->tp_vlan_tci = 0;
1866                 }
1867                 h.h2->tp_padding = 0;
1868                 hdrlen = sizeof(*h.h2);
1869                 break;
1870         case TPACKET_V3:
1871                 /* tp_next_offset and the vlan fields are already populated
1872                  * above, so DON'T clear them here.
1873                  */
1874                 h.h3->tp_status |= status;
1875                 h.h3->tp_len = skb->len;
1876                 h.h3->tp_snaplen = snaplen;
1877                 h.h3->tp_mac = macoff;
1878                 h.h3->tp_net = netoff;
1879                 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1880                                 && shhwtstamps->syststamp.tv64)
1881                         ts = ktime_to_timespec(shhwtstamps->syststamp);
1882                 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1883                                 && shhwtstamps->hwtstamp.tv64)
1884                         ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1885                 else if (skb->tstamp.tv64)
1886                         ts = ktime_to_timespec(skb->tstamp);
1887                 else
1888                         getnstimeofday(&ts);
1889                 h.h3->tp_sec  = ts.tv_sec;
1890                 h.h3->tp_nsec = ts.tv_nsec;
1891                 hdrlen = sizeof(*h.h3);
1892                 break;
1893         default:
1894                 BUG();
1895         }
1896
1897         sll = h.raw + TPACKET_ALIGN(hdrlen);
1898         sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
1899         sll->sll_family = AF_PACKET;
1900         sll->sll_hatype = dev->type;
1901         sll->sll_protocol = skb->protocol;
1902         sll->sll_pkttype = skb->pkt_type;
1903         if (unlikely(po->origdev))
1904                 sll->sll_ifindex = orig_dev->ifindex;
1905         else
1906                 sll->sll_ifindex = dev->ifindex;
1907
1908         smp_mb();
1909 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
1910         {
1911                 u8 *start, *end;
1912
1913                 if (po->tp_version <= TPACKET_V2) {
1914                         end = (u8 *)PAGE_ALIGN((unsigned long)h.raw
1915                                 + macoff + snaplen);
1916                         for (start = h.raw; start < end; start += PAGE_SIZE)
1917                                 flush_dcache_page(pgv_to_page(start));
1918                 }
1919                 smp_wmb();
1920         }
1921 #endif
1922         if (po->tp_version <= TPACKET_V2)
1923                 __packet_set_status(po, h.raw, status);
1924         else
1925                 prb_clear_blk_fill_status(&po->rx_ring);
1926
1927         sk->sk_data_ready(sk, 0);
1928
1929 drop_n_restore:
1930         if (skb_head != skb->data && skb_shared(skb)) {
1931                 skb->data = skb_head;
1932                 skb->len = skb_len;
1933         }
1934 drop:
1935         kfree_skb(skb);
1936         return 0;
1937
1938 ring_is_full:
1939         po->stats.tp_drops++;
1940         spin_unlock(&sk->sk_receive_queue.lock);
1941
1942         sk->sk_data_ready(sk, 0);
1943         kfree_skb(copy_skb);
1944         goto drop_n_restore;
1945 }
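
/*
 * Userspace view of the receive ring filled by tpacket_rcv() (a sketch
 * for TPACKET_V1, the default tp_version; the sizes are arbitrary but
 * self-consistent, and error handling is omitted):
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_frame_size = 2048,
 *		.tp_block_nr   = 64,
 *		.tp_frame_nr   = 128,	// (block_size / frame_size) * block_nr
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct tpacket_hdr *hdr = ring;		// first frame
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);
 *	// packet data starts at (char *)hdr + hdr->tp_mac, tp_snaplen bytes
 *	hdr->tp_status = TP_STATUS_KERNEL;	// return the frame to the ring
 */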
1946
1947 static void tpacket_destruct_skb(struct sk_buff *skb)
1948 {
1949         struct packet_sock *po = pkt_sk(skb->sk);
1950         void *ph;
1951
1952         if (likely(po->tx_ring.pg_vec)) {
1953                 ph = skb_shinfo(skb)->destructor_arg;
1954                 BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
1955                 atomic_dec(&po->tx_ring.pending);
1956                 __packet_set_status(po, ph, TP_STATUS_AVAILABLE);
1957         }
1958
1959         sock_wfree(skb);
1960 }
1961
1962 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
1963                 void *frame, struct net_device *dev, int size_max,
1964                 __be16 proto, unsigned char *addr)
1965 {
1966         union {
1967                 struct tpacket_hdr *h1;
1968                 struct tpacket2_hdr *h2;
1969                 void *raw;
1970         } ph;
1971         int to_write, offset, len, tp_len, nr_frags, len_max;
1972         struct socket *sock = po->sk.sk_socket;
1973         struct page *page;
1974         void *data;
1975         int err;
1976
1977         ph.raw = frame;
1978
1979         skb->protocol = proto;
1980         skb->dev = dev;
1981         skb->priority = po->sk.sk_priority;
1982         skb->mark = po->sk.sk_mark;
1983         skb_shinfo(skb)->destructor_arg = ph.raw;
1984
1985         switch (po->tp_version) {
1986         case TPACKET_V2:
1987                 tp_len = ph.h2->tp_len;
1988                 break;
1989         default:
1990                 tp_len = ph.h1->tp_len;
1991                 break;
1992         }
1993         if (unlikely(tp_len > size_max)) {
1994                 pr_err("packet size is too big (%d > %d)\n", tp_len, size_max);
1995                 return -EMSGSIZE;
1996         }
1997
1998         skb_reserve(skb, LL_RESERVED_SPACE(dev));
1999         skb_reset_network_header(skb);
2000
2001         data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
2002         to_write = tp_len;
2003
2004         if (sock->type == SOCK_DGRAM) {
2005                 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2006                                 NULL, tp_len);
2007                 if (unlikely(err < 0))
2008                         return -EINVAL;
2009         } else if (dev->hard_header_len) {
2010                 /* net device doesn't like empty head */
2011                 if (unlikely(tp_len <= dev->hard_header_len)) {
2012                         pr_err("packet size is too short (%d <= %d)\n",
2013                                tp_len, dev->hard_header_len);
2014                         return -EINVAL;
2015                 }
2016
2017                 skb_push(skb, dev->hard_header_len);
2018                 err = skb_store_bits(skb, 0, data,
2019                                 dev->hard_header_len);
2020                 if (unlikely(err))
2021                         return err;
2022
2023                 data += dev->hard_header_len;
2024                 to_write -= dev->hard_header_len;
2025         }
2026
2027         err = -EFAULT;
2028         offset = offset_in_page(data);
2029         len_max = PAGE_SIZE - offset;
2030         len = ((to_write > len_max) ? len_max : to_write);
2031
2032         skb->data_len = to_write;
2033         skb->len += to_write;
2034         skb->truesize += to_write;
2035         atomic_add(to_write, &po->sk.sk_wmem_alloc);
2036
2037         while (likely(to_write)) {
2038                 nr_frags = skb_shinfo(skb)->nr_frags;
2039
2040                 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2041                         pr_err("packet exceeds the max number of skb frags (%lu)\n",
2042                                MAX_SKB_FRAGS);
2043                         return -EFAULT;
2044                 }
2045
2046                 page = pgv_to_page(data);
2047                 data += len;
2048                 flush_dcache_page(page);
2049                 get_page(page);
2050                 skb_fill_page_desc(skb, nr_frags, page, offset, len);
2051                 to_write -= len;
2052                 offset = 0;
2053                 len_max = PAGE_SIZE;
2054                 len = ((to_write > len_max) ? len_max : to_write);
2055         }
2056
2057         return tp_len;
2058 }
2059
2060 static struct net_device *packet_cached_dev_get(struct packet_sock *po)
2061 {
2062         struct net_device *dev;
2063
2064         rcu_read_lock();
2065         dev = rcu_dereference(po->cached_dev);
2066         if (dev)
2067                 dev_hold(dev);
2068         rcu_read_unlock();
2069
2070         return dev;
2071 }
2072
2073 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2074 {
2075         struct sk_buff *skb;
2076         struct net_device *dev;
2077         __be16 proto;
2078         int err, reserve = 0;
2079         void *ph;
2080         struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
2081         int tp_len, size_max;
2082         unsigned char *addr;
2083         int len_sum = 0;
2084         int status = 0;
2085
2086         mutex_lock(&po->pg_vec_lock);
2087
2088         err = -EBUSY;
2089         if (saddr == NULL) {
2090                 dev     = packet_cached_dev_get(po);
2091                 proto   = po->num;
2092                 addr    = NULL;
2093         } else {
2094                 err = -EINVAL;
2095                 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2096                         goto out;
2097                 if (msg->msg_namelen < (saddr->sll_halen
2098                                         + offsetof(struct sockaddr_ll,
2099                                                 sll_addr)))
2100                         goto out;
2101                 proto   = saddr->sll_protocol;
2102                 addr    = saddr->sll_addr;
2103                 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2104         }
2105
2106         err = -ENXIO;
2107         if (unlikely(dev == NULL))
2108                 goto out;
2109         err = -ENETDOWN;
2110         if (unlikely(!(dev->flags & IFF_UP)))
2111                 goto out_put;
2112
2113         reserve = dev->hard_header_len;
2114
2115         size_max = po->tx_ring.frame_size
2116                 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2117
2118         if (size_max > dev->mtu + reserve)
2119                 size_max = dev->mtu + reserve;
2120
2121         do {
2122                 ph = packet_current_frame(po, &po->tx_ring,
2123                                 TP_STATUS_SEND_REQUEST);
2124
2125                 if (unlikely(ph == NULL)) {
2126                         schedule();
2127                         continue;
2128                 }
2129
2130                 status = TP_STATUS_SEND_REQUEST;
2131                 skb = sock_alloc_send_skb(&po->sk,
2132                                 LL_ALLOCATED_SPACE(dev)
2133                                 + sizeof(struct sockaddr_ll),
2134                                 0, &err);
2135
2136                 if (unlikely(skb == NULL))
2137                         goto out_status;
2138
2139                 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
2140                                 addr);
2141
2142                 if (unlikely(tp_len < 0)) {
2143                         if (po->tp_loss) {
2144                                 __packet_set_status(po, ph,
2145                                                 TP_STATUS_AVAILABLE);
2146                                 packet_increment_head(&po->tx_ring);
2147                                 kfree_skb(skb);
2148                                 continue;
2149                         } else {
2150                                 status = TP_STATUS_WRONG_FORMAT;
2151                                 err = tp_len;
2152                                 goto out_status;
2153                         }
2154                 }
2155
2156                 skb->destructor = tpacket_destruct_skb;
2157                 __packet_set_status(po, ph, TP_STATUS_SENDING);
2158                 atomic_inc(&po->tx_ring.pending);
2159
2160                 status = TP_STATUS_SEND_REQUEST;
2161                 err = dev_queue_xmit(skb);
2162                 if (unlikely(err > 0)) {
2163                         err = net_xmit_errno(err);
2164                         if (err && __packet_get_status(po, ph) ==
2165                                    TP_STATUS_AVAILABLE) {
2166                                 /* skb was destructed already */
2167                                 skb = NULL;
2168                                 goto out_status;
2169                         }
2170                         /*
2171                          * skb was dropped but not destructed yet;
2172                          * let's treat it like congestion or err < 0
2173                          */
2174                         err = 0;
2175                 }
2176                 packet_increment_head(&po->tx_ring);
2177                 len_sum += tp_len;
2178         } while (likely((ph != NULL) ||
2179                         ((!(msg->msg_flags & MSG_DONTWAIT)) &&
2180                          (atomic_read(&po->tx_ring.pending))))
2181                 );
2182
2183         err = len_sum;
2184         goto out_put;
2185
2186 out_status:
2187         __packet_set_status(po, ph, status);
2188         kfree_skb(skb);
2189 out_put:
2190         dev_put(dev);
2191 out:
2192         mutex_unlock(&po->pg_vec_lock);
2193         return err;
2194 }
2195
2196 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2197                                         size_t reserve, size_t len,
2198                                         size_t linear, int noblock,
2199                                         int *err)
2200 {
2201         struct sk_buff *skb;
2202
2203         /* Under a page?  Don't bother with paged skb. */
2204         if (prepad + len < PAGE_SIZE || !linear)
2205                 linear = len;
2206
2207         skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2208                                    err);
2209         if (!skb)
2210                 return NULL;
2211
2212         skb_reserve(skb, reserve);
2213         skb_put(skb, linear);
2214         skb->data_len = len - linear;
2215         skb->len += len - linear;
2216
2217         return skb;
2218 }
2219
2220 static int packet_snd(struct socket *sock,
2221                           struct msghdr *msg, size_t len)
2222 {
2223         struct sock *sk = sock->sk;
2224         struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
2225         struct sk_buff *skb;
2226         struct net_device *dev;
2227         __be16 proto;
2228         unsigned char *addr;
2229         int err, reserve = 0;
2230         struct virtio_net_hdr vnet_hdr = { 0 };
2231         int offset = 0;
2232         int vnet_hdr_len;
2233         struct packet_sock *po = pkt_sk(sk);
2234         unsigned short gso_type = 0;
2235
2236         /*
2237          *      Get and verify the address.
2238          */
2239
2240         if (saddr == NULL) {
2241                 dev     = packet_cached_dev_get(po);
2242                 proto   = po->num;
2243                 addr    = NULL;
2244         } else {
2245                 err = -EINVAL;
2246                 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2247                         goto out;
2248                 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2249                         goto out;
2250                 proto   = saddr->sll_protocol;
2251                 addr    = saddr->sll_addr;
2252                 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2253         }
2254
2255         err = -ENXIO;
2256         if (unlikely(dev == NULL))
2257                 goto out_unlock;
2258         err = -ENETDOWN;
2259         if (unlikely(!(dev->flags & IFF_UP)))
2260                 goto out_unlock;
2261
2262         if (sock->type == SOCK_RAW)
2263                 reserve = dev->hard_header_len;
2264         if (po->has_vnet_hdr) {
2265                 vnet_hdr_len = sizeof(vnet_hdr);
2266
2267                 err = -EINVAL;
2268                 if (len < vnet_hdr_len)
2269                         goto out_unlock;
2270
2271                 len -= vnet_hdr_len;
2272
2273                 err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
2274                                        vnet_hdr_len);
2275                 if (err < 0)
2276                         goto out_unlock;
2277
2278                 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2279                     (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
2280                       vnet_hdr.hdr_len))
2281                         vnet_hdr.hdr_len = vnet_hdr.csum_start +
2282                                                  vnet_hdr.csum_offset + 2;
2283
2284                 err = -EINVAL;
2285                 if (vnet_hdr.hdr_len > len)
2286                         goto out_unlock;
2287
2288                 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2289                         switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2290                         case VIRTIO_NET_HDR_GSO_TCPV4:
2291                                 gso_type = SKB_GSO_TCPV4;
2292                                 break;
2293                         case VIRTIO_NET_HDR_GSO_TCPV6:
2294                                 gso_type = SKB_GSO_TCPV6;
2295                                 break;
2296                         case VIRTIO_NET_HDR_GSO_UDP:
2297                                 gso_type = SKB_GSO_UDP;
2298                                 break;
2299                         default:
2300                                 goto out_unlock;
2301                         }
2302
2303                         if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
2304                                 gso_type |= SKB_GSO_TCP_ECN;
2305
2306                         if (vnet_hdr.gso_size == 0)
2307                                 goto out_unlock;
2308
2309                 }
2310         }
2311
2312         err = -EMSGSIZE;
2313         if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN))
2314                 goto out_unlock;
2315
2316         err = -ENOBUFS;
2317         skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
2318                                LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
2319                                msg->msg_flags & MSG_DONTWAIT, &err);
2320         if (skb == NULL)
2321                 goto out_unlock;
2322
2323         skb_set_network_header(skb, reserve);
2324
2325         err = -EINVAL;
2326         if (sock->type == SOCK_DGRAM &&
2327             (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
2328                 goto out_free;
2329
2330         /* Returns -EFAULT on error */
2331         err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
2332         if (err)
2333                 goto out_free;
2334         err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
2335         if (err < 0)
2336                 goto out_free;
2337
2338         if (!gso_type && (len > dev->mtu + reserve)) {
2339                 /* Earlier code assumed this would be a VLAN pkt,
2340                  * double-check this now that we have the actual
2341                  * packet in hand.
2342                  */
2343                 struct ethhdr *ehdr;
2344                 skb_reset_mac_header(skb);
2345                 ehdr = eth_hdr(skb);
2346                 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
2347                         err = -EMSGSIZE;
2348                         goto out_free;
2349                 }
2350         }
2351
2352         skb->protocol = proto;
2353         skb->dev = dev;
2354         skb->priority = sk->sk_priority;
2355         skb->mark = sk->sk_mark;
2356
2357         if (po->has_vnet_hdr) {
2358                 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2359                         if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
2360                                                   vnet_hdr.csum_offset)) {
2361                                 err = -EINVAL;
2362                                 goto out_free;
2363                         }
2364                 }
2365
2366                 skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
2367                 skb_shinfo(skb)->gso_type = gso_type;
2368
2369                 /* Header must be checked, and gso_segs computed. */
2370                 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2371                 skb_shinfo(skb)->gso_segs = 0;
2372
2373                 len += vnet_hdr_len;
2374         }
2375
2376         /*
2377          *      Now send it
2378          */
2379
2380         err = dev_queue_xmit(skb);
2381         if (err > 0 && (err = net_xmit_errno(err)) != 0)
2382                 goto out_unlock;
2383
2384         dev_put(dev);
2385
2386         return len;
2387
2388 out_free:
2389         kfree_skb(skb);
2390 out_unlock:
2391         if (dev)
2392                 dev_put(dev);
2393 out:
2394         return err;
2395 }
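
/*
 * For the SOCK_DGRAM branch above, the caller supplies only the payload
 * and the kernel builds the link-layer header via dev_hard_header().
 * A sketch ("eth0", dest_mac and payload are placeholders):
 *
 *	struct sockaddr_ll dst;
 *
 *	memset(&dst, 0, sizeof(dst));
 *	dst.sll_family   = AF_PACKET;
 *	dst.sll_protocol = htons(ETH_P_IP);
 *	dst.sll_ifindex  = if_nametoindex("eth0");
 *	dst.sll_halen    = ETH_ALEN;
 *	memcpy(dst.sll_addr, dest_mac, ETH_ALEN);
 *	sendto(fd, payload, payload_len, 0,
 *	       (struct sockaddr *)&dst, sizeof(dst));
 */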
2396
2397 static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
2398                 struct msghdr *msg, size_t len)
2399 {
2400         struct sock *sk = sock->sk;
2401         struct packet_sock *po = pkt_sk(sk);
2402         if (po->tx_ring.pg_vec)
2403                 return tpacket_snd(po, msg);
2404         else
2405                 return packet_snd(sock, msg, len);
2406 }
2407
2408 /*
2409  *      Close a PACKET socket. This is fairly simple. We immediately go
2410  *      to 'closed' state and remove our protocol entry in the device list.
2411  */
2412
2413 static int packet_release(struct socket *sock)
2414 {
2415         struct sock *sk = sock->sk;
2416         struct packet_sock *po;
2417         struct packet_fanout *f;
2418         struct net *net;
2419         union tpacket_req_u req_u;
2420
2421         if (!sk)
2422                 return 0;
2423
2424         net = sock_net(sk);
2425         po = pkt_sk(sk);
2426
2427         spin_lock_bh(&net->packet.sklist_lock);
2428         sk_del_node_init_rcu(sk);
2429         sock_prot_inuse_add(net, sk->sk_prot, -1);
2430         spin_unlock_bh(&net->packet.sklist_lock);
2431
2432         spin_lock(&po->bind_lock);
2433         unregister_prot_hook(sk, false);
2434         if (po->prot_hook.dev) {
2435                 dev_put(po->prot_hook.dev);
2436                 po->prot_hook.dev = NULL;
2437         }
2438         spin_unlock(&po->bind_lock);
2439
2440         packet_flush_mclist(sk);
2441
2442         if (po->rx_ring.pg_vec) {
2443                 memset(&req_u, 0, sizeof(req_u));
2444                 packet_set_ring(sk, &req_u, 1, 0);
2445         }
2446
2447         if (po->tx_ring.pg_vec) {
2448                 memset(&req_u, 0, sizeof(req_u));
2449                 packet_set_ring(sk, &req_u, 1, 1);
2450         }
2451
2452         f = fanout_release(sk);
2453
2454         synchronize_net();
2455
2456         kfree(f);
2457
2458         /*
2459          *      Now the socket is dead. No more input will appear.
2460          */
2461         sock_orphan(sk);
2462         sock->sk = NULL;
2463
2464         /* Purge queues */
2465
2466         skb_queue_purge(&sk->sk_receive_queue);
2467         sk_refcnt_debug_release(sk);
2468
2469         sock_put(sk);
2470         return 0;
2471 }
2472
2473 /*
2474  *      Attach a packet hook.
2475  */
2476
2477 static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
2478                           __be16 protocol)
2479 {
2480         struct packet_sock *po = pkt_sk(sk);
2481         struct net_device *dev_curr;
2482         struct net_device *dev = NULL;
2483         int ret = 0;
2484         bool unlisted = false;
2485
2486         lock_sock(sk);
2487
2488         spin_lock(&po->bind_lock);
2489         rcu_read_lock();
2490
2491         if (po->fanout) {
2492                 ret = -EINVAL;
2493                 goto out_unlock;
2494         }
2495
2496         if (name) {
2497                 dev = dev_get_by_name_rcu(sock_net(sk), name);
2498                 if (!dev) {
2499                         ret = -ENODEV;
2500                         goto out_unlock;
2501                 }
2502         } else if (ifindex) {
2503                 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
2504                 if (!dev) {
2505                         ret = -ENODEV;
2506                         goto out_unlock;
2507                 }
2508         }
2509
2510         if (dev)
2511                 dev_hold(dev);
2512
2513         dev_curr = po->prot_hook.dev;
2514
2515         if (po->running) {
2516                 rcu_read_unlock();
2517                 __unregister_prot_hook(sk, true);
2518                 rcu_read_lock();
2519                 dev_curr = po->prot_hook.dev;
2520                 if (dev)
2521                         unlisted = !dev_get_by_index_rcu(sock_net(sk),
2522                                                          dev->ifindex);
2523         }
2524         po->num = protocol;
2525         po->prot_hook.type = protocol;
2526
2527         if (unlikely(unlisted)) {
2528                 dev_put(dev);
2529                 po->prot_hook.dev = NULL;
2530                 po->ifindex = -1;
2531         } else {
2532                 po->prot_hook.dev = dev;
2533                 po->ifindex = dev ? dev->ifindex : 0;
2534         }
2535
2536         if (dev_curr)
2537                 dev_put(dev_curr);
2538
2539         if (protocol == 0)
2540                 goto out_unlock;
2541
2542         if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
2543                 register_prot_hook(sk);
2544         } else {
2545                 sk->sk_err = ENETDOWN;
2546                 if (!sock_flag(sk, SOCK_DEAD))
2547                         sk->sk_error_report(sk);
2548         }
2549
2550 out_unlock:
2551         rcu_read_unlock();
2552         spin_unlock(&po->bind_lock);
2553         release_sock(sk);
2554         return ret;
2555 }
2556
2557 /*
2558  *      Bind a packet socket to a device
2559  */
2560
2561 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
2562                             int addr_len)
2563 {
2564         struct sock *sk = sock->sk;
2565         char name[15];
2566
2567         /*
2568          *      Check legality
2569          */
2570
2571         if (addr_len != sizeof(struct sockaddr))
2572                 return -EINVAL;
2573         strlcpy(name, uaddr->sa_data, sizeof(name));
2574
2575         return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
2576 }
2577
2578 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2579 {
2580         struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
2581         struct sock *sk = sock->sk;
2582
2583         /*
2584          *      Check legality
2585          */
2586
2587         if (addr_len < sizeof(struct sockaddr_ll))
2588                 return -EINVAL;
2589         if (sll->sll_family != AF_PACKET)
2590                 return -EINVAL;
2591
2592         return packet_do_bind(sk, NULL, sll->sll_ifindex,
2593                               sll->sll_protocol ? : pkt_sk(sk)->num);
2594 }
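
/*
 * A typical caller of packet_bind() (userspace sketch; "eth0" is a
 * placeholder):
 *
 *	struct sockaddr_ll sll;
 *
 *	memset(&sll, 0, sizeof(sll));
 *	sll.sll_family   = AF_PACKET;
 *	sll.sll_protocol = htons(ETH_P_ALL);
 *	sll.sll_ifindex  = if_nametoindex("eth0");
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */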
2595
2596 static struct proto packet_proto = {
2597         .name     = "PACKET",
2598         .owner    = THIS_MODULE,
2599         .obj_size = sizeof(struct packet_sock),
2600 };
2601
2602 /*
2603  *      Create a packet socket (SOCK_RAW, SOCK_DGRAM or SOCK_PACKET).
2604  */
2605
2606 static int packet_create(struct net *net, struct socket *sock, int protocol,
2607                          int kern)
2608 {
2609         struct sock *sk;
2610         struct packet_sock *po;
2611         __be16 proto = (__force __be16)protocol; /* weird, but documented */
2612         int err;
2613
2614         if (!capable(CAP_NET_RAW))
2615                 return -EPERM;
2616         if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
2617             sock->type != SOCK_PACKET)
2618                 return -ESOCKTNOSUPPORT;
2619
2620         sock->state = SS_UNCONNECTED;
2621
2622         err = -ENOBUFS;
2623         sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
2624         if (sk == NULL)
2625                 goto out;
2626
2627         sock->ops = &packet_ops;
2628         if (sock->type == SOCK_PACKET)
2629                 sock->ops = &packet_ops_spkt;
2630
2631         sock_init_data(sock, sk);
2632
2633         po = pkt_sk(sk);
2634         sk->sk_family = PF_PACKET;
2635         po->num = proto;
2636         RCU_INIT_POINTER(po->cached_dev, NULL);
2637
2638         sk->sk_destruct = packet_sock_destruct;
2639         sk_refcnt_debug_inc(sk);
2640
2641         /*
2642          *      Attach a protocol block
2643          */
2644
2645         spin_lock_init(&po->bind_lock);
2646         mutex_init(&po->pg_vec_lock);
2647         po->prot_hook.func = packet_rcv;
2648
2649         if (sock->type == SOCK_PACKET)
2650                 po->prot_hook.func = packet_rcv_spkt;
2651
2652         po->prot_hook.af_packet_priv = sk;
2653
2654         if (proto) {
2655                 po->prot_hook.type = proto;
2656                 register_prot_hook(sk);
2657         }
2658
2659         spin_lock_bh(&net->packet.sklist_lock);
2660         sk_add_node_rcu(sk, &net->packet.sklist);
2661         sock_prot_inuse_add(net, &packet_proto, 1);
2662         spin_unlock_bh(&net->packet.sklist_lock);
2663
2664         return 0;
2665 out:
2666         return err;
2667 }
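
/*
 * The two common ways into packet_create() (userspace; both require
 * CAP_NET_RAW):
 *
 *	int raw  = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));
 *	int dgrm = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
 *	// SOCK_RAW delivers frames including the link-layer header,
 *	// SOCK_DGRAM delivers them "cooked", with the header removed
 */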
2668
2669 static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
2670 {
2671         struct sock_exterr_skb *serr;
2672         struct sk_buff *skb, *skb2;
2673         int copied, err;
2674
2675         err = -EAGAIN;
2676         skb = skb_dequeue(&sk->sk_error_queue);
2677         if (skb == NULL)
2678                 goto out;
2679
2680         copied = skb->len;
2681         if (copied > len) {
2682                 msg->msg_flags |= MSG_TRUNC;
2683                 copied = len;
2684         }
2685         err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2686         if (err)
2687                 goto out_free_skb;
2688
2689         sock_recv_timestamp(msg, sk, skb);
2690
2691         serr = SKB_EXT_ERR(skb);
2692         put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
2693                  sizeof(serr->ee), &serr->ee);
2694
2695         msg->msg_flags |= MSG_ERRQUEUE;
2696         err = copied;
2697
2698         /* Reset and regenerate socket error */
2699         spin_lock_bh(&sk->sk_error_queue.lock);
2700         sk->sk_err = 0;
2701         if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
2702                 sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
2703                 spin_unlock_bh(&sk->sk_error_queue.lock);
2704                 sk->sk_error_report(sk);
2705         } else
2706                 spin_unlock_bh(&sk->sk_error_queue.lock);
2707
2708 out_free_skb:
2709         kfree_skb(skb);
2710 out:
2711         return err;
2712 }
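
/*
 * packet_recv_error() is what a MSG_ERRQUEUE read lands in when TX
 * timestamping is enabled. A sketch (software timestamps assumed;
 * error handling omitted):
 *
 *	int val = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;
 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
 *	// ... transmit, then poll() for POLLERR and drain the queue:
 *	char data[2048], ctrl[256];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr msg = {
 *		.msg_iov        = &iov,
 *		.msg_iovlen     = 1,
 *		.msg_control    = ctrl,
 *		.msg_controllen = sizeof(ctrl),
 *	};
 *	recvmsg(fd, &msg, MSG_ERRQUEUE);
 *	// the control data carries the PACKET_TX_TIMESTAMP cmsg built above
 */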
2713
2714 /*
2715  *      Pull a packet from our receive queue and hand it to the user.
2716  *      If necessary we block.
2717  */
2718
2719 static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
2720                           struct msghdr *msg, size_t len, int flags)
2721 {
2722         struct sock *sk = sock->sk;
2723         struct sk_buff *skb;
2724         int copied, err;
2725         int vnet_hdr_len = 0;
2726
2727         err = -EINVAL;
2728         if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
2729                 goto out;
2730
2731 #if 0
2732         /* What error should we return now? EUNATTACH? */
2733         if (pkt_sk(sk)->ifindex < 0)
2734                 return -ENODEV;
2735 #endif
2736
2737         if (flags & MSG_ERRQUEUE) {
2738                 err = packet_recv_error(sk, msg, len);
2739                 goto out;
2740         }
2741
2742         /*
2743          *      Call the generic datagram receiver. This handles all sorts
2744          *      of horrible races and re-entrancy so we can forget about it
2745          *      in the protocol layers.
2746          *
2747          *      Now it will return ENETDOWN if the device has just gone down,
2748          *      but then it will block.
2749          */
2750
2751         skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
2752
2753         /*
2754          *      An error occurred, so return it. Because skb_recv_datagram()
2755          *      handles the blocking, we don't need to see or worry about
2756          *      blocking retries.
2757          */
2758
2759         if (skb == NULL)
2760                 goto out;
2761
2762         if (pkt_sk(sk)->has_vnet_hdr) {
2763                 struct virtio_net_hdr vnet_hdr = { 0 };
2764
2765                 err = -EINVAL;
2766                 vnet_hdr_len = sizeof(vnet_hdr);
2767                 if (len < vnet_hdr_len)
2768                         goto out_free;
2769
2770                 len -= vnet_hdr_len;
2771
2772                 if (skb_is_gso(skb)) {
2773                         struct skb_shared_info *sinfo = skb_shinfo(skb);
2774
2775                         /* This is a hint as to how much should be linear. */
2776                         vnet_hdr.hdr_len = skb_headlen(skb);
2777                         vnet_hdr.gso_size = sinfo->gso_size;
2778                         if (sinfo->gso_type & SKB_GSO_TCPV4)
2779                                 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2780                         else if (sinfo->gso_type & SKB_GSO_TCPV6)
2781                                 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2782                         else if (sinfo->gso_type & SKB_GSO_UDP)
2783                                 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
2784                         else if (sinfo->gso_type & SKB_GSO_FCOE)
2785                                 goto out_free;
2786                         else
2787                                 BUG();
2788                         if (sinfo->gso_type & SKB_GSO_TCP_ECN)
2789                                 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
2790                 } else
2791                         vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
2792
2793                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2794                         vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
2795                         vnet_hdr.csum_start = skb_checksum_start_offset(skb);
2796                         vnet_hdr.csum_offset = skb->csum_offset;
2797                 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2798                         vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
2799                 } /* else everything is zero */
2800
2801                 err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
2802                                      vnet_hdr_len);
2803                 if (err < 0)
2804                         goto out_free;
2805         }
2806
2807         /* You lose any data beyond the buffer you gave. If this worries
2808          * a user program, it can always ask the device for its MTU
2809          * anyway.
2810          */
2811         copied = skb->len;
2812         if (copied > len) {
2813                 copied = len;
2814                 msg->msg_flags |= MSG_TRUNC;
2815         }
2816
2817         err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2818         if (err)
2819                 goto out_free;
2820
2821         sock_recv_ts_and_drops(msg, sk, skb);
2822
2823         if (msg->msg_name) {
2824                 /* If the address length field is there to be filled
2825                  * in, we fill it in now.
2826                  */
2827                 if (sock->type == SOCK_PACKET) {
2828                         msg->msg_namelen = sizeof(struct sockaddr_pkt);
2829                 } else {
2830                         struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
2831                         msg->msg_namelen = sll->sll_halen +
2832                                 offsetof(struct sockaddr_ll, sll_addr);
2833                 }
2834                 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
2835                        msg->msg_namelen);
2836         }
2837
2838         if (pkt_sk(sk)->auxdata) {
2839                 struct tpacket_auxdata aux;
2840
2841                 aux.tp_status = TP_STATUS_USER;
2842                 if (skb->ip_summed == CHECKSUM_PARTIAL)
2843                         aux.tp_status |= TP_STATUS_CSUMNOTREADY;
2844                 aux.tp_len = PACKET_SKB_CB(skb)->origlen;
2845                 aux.tp_snaplen = skb->len;
2846                 aux.tp_mac = 0;
2847                 aux.tp_net = skb_network_offset(skb);
2848                 if (vlan_tx_tag_present(skb)) {
2849                         aux.tp_vlan_tci = vlan_tx_tag_get(skb);
2850                         aux.tp_status |= TP_STATUS_VLAN_VALID;
2851                 } else {
2852                         aux.tp_vlan_tci = 0;
2853                 }
2854                 aux.tp_padding = 0;
2855                 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
2856         }
2857
2858         /*
2859          *      Free or return the buffer as appropriate. Again this
2860          *      hides all the races and re-entrancy issues from us.
2861          */
2862         err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
2863
2864 out_free:
2865         skb_free_datagram(sk, skb);
2866 out:
2867         return err;
2868 }
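
/*
 * Reading the auxdata emitted above (userspace sketch; assumes a
 * recvmsg() set up with a control buffer as in the earlier example):
 *
 *	int one = 1;
 *	struct cmsghdr *cmsg;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
 *	     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level == SOL_PACKET &&
 *		    cmsg->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux =
 *				(struct tpacket_auxdata *)CMSG_DATA(cmsg);
 *			// aux->tp_len is the original (untruncated) length,
 *			// aux->tp_vlan_tci the VLAN tag when present
 *		}
 *	}
 */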
2869
2870 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
2871                                int *uaddr_len, int peer)
2872 {
2873         struct net_device *dev;
2874         struct sock *sk = sock->sk;
2875
2876         if (peer)
2877                 return -EOPNOTSUPP;
2878
2879         uaddr->sa_family = AF_PACKET;
2880         memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
2881         rcu_read_lock();
2882         dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
2883         if (dev)
2884                 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
2885         rcu_read_unlock();
2886         *uaddr_len = sizeof(*uaddr);
2887
2888         return 0;
2889 }
2890
2891 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
2892                           int *uaddr_len, int peer)
2893 {
2894         struct net_device *dev;
2895         struct sock *sk = sock->sk;
2896         struct packet_sock *po = pkt_sk(sk);
2897         DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
2898
2899         if (peer)
2900                 return -EOPNOTSUPP;
2901
2902         sll->sll_family = AF_PACKET;
2903         sll->sll_ifindex = po->ifindex;
2904         sll->sll_protocol = po->num;
2905         sll->sll_pkttype = 0;
2906         rcu_read_lock();
2907         dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
2908         if (dev) {
2909                 sll->sll_hatype = dev->type;
2910                 sll->sll_halen = dev->addr_len;
2911                 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
2912         } else {
2913                 sll->sll_hatype = 0;    /* Bad: we have no ARPHRD_UNSPEC */
2914                 sll->sll_halen = 0;
2915         }
2916         rcu_read_unlock();
2917         *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
2918
2919         return 0;
2920 }
2921
2922 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
2923                          int what)
2924 {
2925         switch (i->type) {
2926         case PACKET_MR_MULTICAST:
2927                 if (i->alen != dev->addr_len)
2928                         return -EINVAL;
2929                 if (what > 0)
2930                         return dev_mc_add(dev, i->addr);
2931                 else
2932                         return dev_mc_del(dev, i->addr);
2933                 break;
2934         case PACKET_MR_PROMISC:
2935                 return dev_set_promiscuity(dev, what);
2936                 break;
2937         case PACKET_MR_ALLMULTI:
2938                 return dev_set_allmulti(dev, what);
2939                 break;
2940         case PACKET_MR_UNICAST:
2941                 if (i->alen != dev->addr_len)
2942                         return -EINVAL;
2943                 if (what > 0)
2944                         return dev_uc_add(dev, i->addr);
2945                 else
2946                         return dev_uc_del(dev, i->addr);
2947                 break;
2948         default:
2949                 break;
2950         }
2951         return 0;
2952 }
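
/*
 * The mr_type values dispatched above come from PACKET_ADD_MEMBERSHIP.
 * Putting an interface into promiscuous mode, for example ("eth0" is a
 * placeholder):
 *
 *	struct packet_mreq mr;
 *
 *	memset(&mr, 0, sizeof(mr));
 *	mr.mr_ifindex = if_nametoindex("eth0");
 *	mr.mr_type    = PACKET_MR_PROMISC;
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mr, sizeof(mr));
 */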
2953
2954 static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
2955 {
2956         for ( ; i; i = i->next) {
2957                 if (i->ifindex == dev->ifindex)
2958                         packet_dev_mc(dev, i, what);
2959         }
2960 }
2961
2962 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
2963 {
2964         struct packet_sock *po = pkt_sk(sk);
2965         struct packet_mclist *ml, *i;
2966         struct net_device *dev;
2967         int err;
2968
2969         rtnl_lock();
2970
2971         err = -ENODEV;
2972         dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
2973         if (!dev)
2974                 goto done;
2975
2976         err = -EINVAL;
2977         if (mreq->mr_alen > dev->addr_len)
2978                 goto done;
2979
2980         err = -ENOBUFS;
2981         i = kmalloc(sizeof(*i), GFP_KERNEL);
2982         if (i == NULL)
2983                 goto done;
2984
2985         err = 0;
2986         for (ml = po->mclist; ml; ml = ml->next) {
2987                 if (ml->ifindex == mreq->mr_ifindex &&
2988                     ml->type == mreq->mr_type &&
2989                     ml->alen == mreq->mr_alen &&
2990                     memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
2991                         ml->count++;
2992                         /* Free the new element ... */
2993                         kfree(i);
2994                         goto done;
2995                 }
2996         }
2997
2998         i->type = mreq->mr_type;
2999         i->ifindex = mreq->mr_ifindex;
3000         i->alen = mreq->mr_alen;
3001         memcpy(i->addr, mreq->mr_address, i->alen);
3002         i->count = 1;
3003         i->next = po->mclist;
3004         po->mclist = i;
3005         err = packet_dev_mc(dev, i, 1);
3006         if (err) {
3007                 po->mclist = i->next;
3008                 kfree(i);
3009         }
3010
3011 done:
3012         rtnl_unlock();
3013         return err;
3014 }
3015
3016 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3017 {
3018         struct packet_mclist *ml, **mlp;
3019
3020         rtnl_lock();
3021
3022         for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3023                 if (ml->ifindex == mreq->mr_ifindex &&
3024                     ml->type == mreq->mr_type &&
3025                     ml->alen == mreq->mr_alen &&
3026                     memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3027                         if (--ml->count == 0) {
3028                                 struct net_device *dev;
3029                                 *mlp = ml->next;
3030                                 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3031                                 if (dev)
3032                                         packet_dev_mc(dev, ml, -1);
3033                                 kfree(ml);
3034                         }
3035                         rtnl_unlock();
3036                         return 0;
3037                 }
3038         }
3039         rtnl_unlock();
3040         return -EADDRNOTAVAIL;
3041 }
3042
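/* Release all remaining memberships when the socket is being torn down */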
3043 static void packet_flush_mclist(struct sock *sk)
3044 {
3045         struct packet_sock *po = pkt_sk(sk);
3046         struct packet_mclist *ml;
3047
3048         if (!po->mclist)
3049                 return;
3050
3051         rtnl_lock();
3052         while ((ml = po->mclist) != NULL) {
3053                 struct net_device *dev;
3054
3055                 po->mclist = ml->next;
3056                 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3057                 if (dev != NULL)
3058                         packet_dev_mc(dev, ml, -1);
3059                 kfree(ml);
3060         }
3061         rtnl_unlock();
3062 }
3063
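/* setsockopt() for SOL_PACKET: ring setup, memberships, fanout and the
 * per-socket flags.  Options that change ring version, reserve or loss
 * behaviour return -EBUSY while a ring is already in place.
 */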
3064 static int
3065 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
3066 {
3067         struct sock *sk = sock->sk;
3068         struct packet_sock *po = pkt_sk(sk);
3069         int ret;
3070
3071         if (level != SOL_PACKET)
3072                 return -ENOPROTOOPT;
3073
3074         switch (optname) {
3075         case PACKET_ADD_MEMBERSHIP:
3076         case PACKET_DROP_MEMBERSHIP:
3077         {
3078                 struct packet_mreq_max mreq;
3079                 int len = optlen;
3080                 memset(&mreq, 0, sizeof(mreq));
3081                 if (len < sizeof(struct packet_mreq))
3082                         return -EINVAL;
3083                 if (len > sizeof(mreq))
3084                         len = sizeof(mreq);
3085                 if (copy_from_user(&mreq, optval, len))
3086                         return -EFAULT;
3087                 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3088                         return -EINVAL;
3089                 if (optname == PACKET_ADD_MEMBERSHIP)
3090                         ret = packet_mc_add(sk, &mreq);
3091                 else
3092                         ret = packet_mc_drop(sk, &mreq);
3093                 return ret;
3094         }
3095
3096         case PACKET_RX_RING:
3097         case PACKET_TX_RING:
3098         {
3099                 union tpacket_req_u req_u;
3100                 int len;
3101
3102                 switch (po->tp_version) {
3103                 case TPACKET_V1:
3104                 case TPACKET_V2:
3105                         len = sizeof(req_u.req);
3106                         break;
3107                 case TPACKET_V3:
3108                 default:
3109                         len = sizeof(req_u.req3);
3110                         break;
3111                 }
3112                 if (optlen < len)
3113                         return -EINVAL;
3114                 if (pkt_sk(sk)->has_vnet_hdr)
3115                         return -EINVAL;
3116                 if (copy_from_user(&req_u.req, optval, len))
3117                         return -EFAULT;
3118                 return packet_set_ring(sk, &req_u, 0,
3119                         optname == PACKET_TX_RING);
3120         }
3121         case PACKET_COPY_THRESH:
3122         {
3123                 int val;
3124
3125                 if (optlen != sizeof(val))
3126                         return -EINVAL;
3127                 if (copy_from_user(&val, optval, sizeof(val)))
3128                         return -EFAULT;
3129
3130                 pkt_sk(sk)->copy_thresh = val;
3131                 return 0;
3132         }
3133         case PACKET_VERSION:
3134         {
3135                 int val;
3136
3137                 if (optlen != sizeof(val))
3138                         return -EINVAL;
3139                 if (copy_from_user(&val, optval, sizeof(val)))
3140                         return -EFAULT;
3141                 switch (val) {
3142                 case TPACKET_V1:
3143                 case TPACKET_V2:
3144                 case TPACKET_V3:
3145                         break;
3146                 default:
3147                         return -EINVAL;
3148                 }
3149                 lock_sock(sk);
3150                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3151                         ret = -EBUSY;
3152                 } else {
3153                         po->tp_version = val;
3154                         ret = 0;
3155                 }
3156                 release_sock(sk);
3157                 return ret;
3158         }
3159         case PACKET_RESERVE:
3160         {
3161                 unsigned int val;
3162
3163                 if (optlen != sizeof(val))
3164                         return -EINVAL;
3165                 if (copy_from_user(&val, optval, sizeof(val)))
3166                         return -EFAULT;
3167                 if (val > INT_MAX)
3168                         return -EINVAL;
3169                 lock_sock(sk);
3170                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3171                         ret = -EBUSY;
3172                 } else {
3173                         po->tp_reserve = val;
3174                         ret = 0;
3175                 }
3176                 release_sock(sk);
3177                 return ret;
3178         }
3179         case PACKET_LOSS:
3180         {
3181                 unsigned int val;
3182
3183                 if (optlen != sizeof(val))
3184                         return -EINVAL;
3185                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3186                         return -EBUSY;
3187                 if (copy_from_user(&val, optval, sizeof(val)))
3188                         return -EFAULT;
3189                 po->tp_loss = !!val;
3190                 return 0;
3191         }
3192         case PACKET_AUXDATA:
3193         {
3194                 int val;
3195
3196                 if (optlen < sizeof(val))
3197                         return -EINVAL;
3198                 if (copy_from_user(&val, optval, sizeof(val)))
3199                         return -EFAULT;
3200
3201                 po->auxdata = !!val;
3202                 return 0;
3203         }
3204         case PACKET_ORIGDEV:
3205         {
3206                 int val;
3207
3208                 if (optlen < sizeof(val))
3209                         return -EINVAL;
3210                 if (copy_from_user(&val, optval, sizeof(val)))
3211                         return -EFAULT;
3212
3213                 po->origdev = !!val;
3214                 return 0;
3215         }
3216         case PACKET_VNET_HDR:
3217         {
3218                 int val;
3219
3220                 if (sock->type != SOCK_RAW)
3221                         return -EINVAL;
3222                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3223                         return -EBUSY;
3224                 if (optlen < sizeof(val))
3225                         return -EINVAL;
3226                 if (copy_from_user(&val, optval, sizeof(val)))
3227                         return -EFAULT;
3228
3229                 po->has_vnet_hdr = !!val;
3230                 return 0;
3231         }
3232         case PACKET_TIMESTAMP:
3233         {
3234                 int val;
3235
3236                 if (optlen != sizeof(val))
3237                         return -EINVAL;
3238                 if (copy_from_user(&val, optval, sizeof(val)))
3239                         return -EFAULT;
3240
3241                 po->tp_tstamp = val;
3242                 return 0;
3243         }
3244         case PACKET_FANOUT:
3245         {
3246                 int val;
3247
3248                 if (optlen != sizeof(val))
3249                         return -EINVAL;
3250                 if (copy_from_user(&val, optval, sizeof(val)))
3251                         return -EFAULT;
3252
3253                 return fanout_add(sk, val & 0xffff, val >> 16);
3254         }
3255         default:
3256                 return -ENOPROTOOPT;
3257         }
3258 }
3259
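/* getsockopt() for SOL_PACKET.  Note that PACKET_STATISTICS is
 * destructive: the counters are zeroed after being copied out.
 */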
3260 static int packet_getsockopt(struct socket *sock, int level, int optname,
3261                              char __user *optval, int __user *optlen)
3262 {
3263         int len;
3264         int val;
3265         struct sock *sk = sock->sk;
3266         struct packet_sock *po = pkt_sk(sk);
3267         void *data;
3268         struct tpacket_stats st;
3269         union tpacket_stats_u st_u;
3270
3271         if (level != SOL_PACKET)
3272                 return -ENOPROTOOPT;
3273
3274         if (get_user(len, optlen))
3275                 return -EFAULT;
3276
3277         if (len < 0)
3278                 return -EINVAL;
3279
3280         switch (optname) {
3281         case PACKET_STATISTICS:
3282                 if (po->tp_version == TPACKET_V3) {
3283                         len = sizeof(struct tpacket_stats_v3);
3284                 } else {
3285                         if (len > sizeof(struct tpacket_stats))
3286                                 len = sizeof(struct tpacket_stats);
3287                 }
3288                 spin_lock_bh(&sk->sk_receive_queue.lock);
3289                 if (po->tp_version == TPACKET_V3) {
3290                         memcpy(&st_u.stats3, &po->stats,
3291                                sizeof(struct tpacket_stats));
3292                         st_u.stats3.tp_freeze_q_cnt =
3293                                 po->stats_u.stats3.tp_freeze_q_cnt;
3294                         st_u.stats3.tp_packets += po->stats.tp_drops;
3295                         data = &st_u.stats3;
3296                 } else {
3297                         st = po->stats;
3298                         st.tp_packets += st.tp_drops;
3299                         data = &st;
3300                 }
3301                 memset(&po->stats, 0, sizeof(st));
3302                 spin_unlock_bh(&sk->sk_receive_queue.lock);
3303                 break;
3304         case PACKET_AUXDATA:
3305                 if (len > sizeof(int))
3306                         len = sizeof(int);
3307                 val = po->auxdata;
3308
3309                 data = &val;
3310                 break;
3311         case PACKET_ORIGDEV:
3312                 if (len > sizeof(int))
3313                         len = sizeof(int);
3314                 val = po->origdev;
3315
3316                 data = &val;
3317                 break;
3318         case PACKET_VNET_HDR:
3319                 if (len > sizeof(int))
3320                         len = sizeof(int);
3321                 val = po->has_vnet_hdr;
3322
3323                 data = &val;
3324                 break;
3325         case PACKET_VERSION:
3326                 if (len > sizeof(int))
3327                         len = sizeof(int);
3328                 val = po->tp_version;
3329                 data = &val;
3330                 break;
3331         case PACKET_HDRLEN:
3332                 if (len > sizeof(int))
3333                         len = sizeof(int);
3334                 if (copy_from_user(&val, optval, len))
3335                         return -EFAULT;
3336                 switch (val) {
3337                 case TPACKET_V1:
3338                         val = sizeof(struct tpacket_hdr);
3339                         break;
3340                 case TPACKET_V2:
3341                         val = sizeof(struct tpacket2_hdr);
3342                         break;
3343                 case TPACKET_V3:
3344                         val = sizeof(struct tpacket3_hdr);
3345                         break;
3346                 default:
3347                         return -EINVAL;
3348                 }
3349                 data = &val;
3350                 break;
3351         case PACKET_RESERVE:
3352                 if (len > sizeof(unsigned int))
3353                         len = sizeof(unsigned int);
3354                 val = po->tp_reserve;
3355                 data = &val;
3356                 break;
3357         case PACKET_LOSS:
3358                 if (len > sizeof(unsigned int))
3359                         len = sizeof(unsigned int);
3360                 val = po->tp_loss;
3361                 data = &val;
3362                 break;
3363         case PACKET_TIMESTAMP:
3364                 if (len > sizeof(int))
3365                         len = sizeof(int);
3366                 val = po->tp_tstamp;
3367                 data = &val;
3368                 break;
3369         case PACKET_FANOUT:
3370                 if (len > sizeof(int))
3371                         len = sizeof(int);
3372                 val = (po->fanout ?
3373                        ((u32)po->fanout->id |
3374                         ((u32)po->fanout->type << 16)) :
3375                        0);
3376                 data = &val;
3377                 break;
3378         default:
3379                 return -ENOPROTOOPT;
3380         }
3381
3382         if (put_user(len, optlen))
3383                 return -EFAULT;
3384         if (copy_to_user(optval, data, len))
3385                 return -EFAULT;
3386         return 0;
3387 }
3388
3389
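/* Netdevice notifier: when a bound device goes down or unregisters,
 * unhook the socket and raise ENETDOWN; NETDEV_UNREGISTER additionally
 * drops the device's multicast entries and the device reference.
 * NETDEV_UP re-registers the protocol hook.
 */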
3390 static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
3391 {
3392         struct sock *sk;
3393         struct hlist_node *node;
3394         struct net_device *dev = data;
3395         struct net *net = dev_net(dev);
3396
3397         rcu_read_lock();
3398         sk_for_each_rcu(sk, node, &net->packet.sklist) {
3399                 struct packet_sock *po = pkt_sk(sk);
3400
3401                 switch (msg) {
3402                 case NETDEV_UNREGISTER:
3403                         if (po->mclist)
3404                                 packet_dev_mclist(dev, po->mclist, -1);
3405                         /* fallthrough */
3406
3407                 case NETDEV_DOWN:
3408                         if (dev->ifindex == po->ifindex) {
3409                                 spin_lock(&po->bind_lock);
3410                                 if (po->running) {
3411                                         __unregister_prot_hook(sk, false);
3412                                         sk->sk_err = ENETDOWN;
3413                                         if (!sock_flag(sk, SOCK_DEAD))
3414                                                 sk->sk_error_report(sk);
3415                                 }
3416                                 if (msg == NETDEV_UNREGISTER) {
3417                                         po->ifindex = -1;
3418                                         if (po->prot_hook.dev)
3419                                                 dev_put(po->prot_hook.dev);
3420                                         po->prot_hook.dev = NULL;
3421                                 }
3422                                 spin_unlock(&po->bind_lock);
3423                         }
3424                         break;
3425                 case NETDEV_UP:
3426                         if (dev->ifindex == po->ifindex) {
3427                                 spin_lock(&po->bind_lock);
3428                                 if (po->num)
3429                                         register_prot_hook(sk);
3430                                 spin_unlock(&po->bind_lock);
3431                         }
3432                         break;
3433                 }
3434         }
3435         rcu_read_unlock();
3436         return NOTIFY_DONE;
3437 }
3438
3439
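/* ioctl(): send/receive queue occupancy, socket timestamps, and a
 * pass-through to the inet ioctls for the classic interface requests.
 */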
3440 static int packet_ioctl(struct socket *sock, unsigned int cmd,
3441                         unsigned long arg)
3442 {
3443         struct sock *sk = sock->sk;
3444
3445         switch (cmd) {
3446         case SIOCOUTQ:
3447         {
3448                 int amount = sk_wmem_alloc_get(sk);
3449
3450                 return put_user(amount, (int __user *)arg);
3451         }
3452         case SIOCINQ:
3453         {
3454                 struct sk_buff *skb;
3455                 int amount = 0;
3456
3457                 spin_lock_bh(&sk->sk_receive_queue.lock);
3458                 skb = skb_peek(&sk->sk_receive_queue);
3459                 if (skb)
3460                         amount = skb->len;
3461                 spin_unlock_bh(&sk->sk_receive_queue.lock);
3462                 return put_user(amount, (int __user *)arg);
3463         }
3464         case SIOCGSTAMP:
3465                 return sock_get_timestamp(sk, (struct timeval __user *)arg);
3466         case SIOCGSTAMPNS:
3467                 return sock_get_timestampns(sk, (struct timespec __user *)arg);
3468
3469 #ifdef CONFIG_INET
3470         case SIOCADDRT:
3471         case SIOCDELRT:
3472         case SIOCDARP:
3473         case SIOCGARP:
3474         case SIOCSARP:
3475         case SIOCGIFADDR:
3476         case SIOCSIFADDR:
3477         case SIOCGIFBRDADDR:
3478         case SIOCSIFBRDADDR:
3479         case SIOCGIFNETMASK:
3480         case SIOCSIFNETMASK:
3481         case SIOCGIFDSTADDR:
3482         case SIOCSIFDSTADDR:
3483         case SIOCSIFFLAGS:
3484                 return inet_dgram_ops.ioctl(sock, cmd, arg);
3485 #endif
3486
3487         default:
3488                 return -ENOIOCTLCMD;
3489         }
3490         return 0;
3491 }
3492
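/* Ring-aware poll(), layered on datagram_poll(): POLLIN when the most
 * recently filled rx frame has been handed to user space, POLLOUT when
 * the current tx frame is TP_STATUS_AVAILABLE.
 */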
3493 static unsigned int packet_poll(struct file *file, struct socket *sock,
3494                                 poll_table *wait)
3495 {
3496         struct sock *sk = sock->sk;
3497         struct packet_sock *po = pkt_sk(sk);
3498         unsigned int mask = datagram_poll(file, sock, wait);
3499
3500         spin_lock_bh(&sk->sk_receive_queue.lock);
3501         if (po->rx_ring.pg_vec) {
3502                 if (!packet_previous_rx_frame(po, &po->rx_ring,
3503                         TP_STATUS_KERNEL))
3504                         mask |= POLLIN | POLLRDNORM;
3505         }
3506         spin_unlock_bh(&sk->sk_receive_queue.lock);
3507         spin_lock_bh(&sk->sk_write_queue.lock);
3508         if (po->tx_ring.pg_vec) {
3509                 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
3510                         mask |= POLLOUT | POLLWRNORM;
3511         }
3512         spin_unlock_bh(&sk->sk_write_queue.lock);
3513         return mask;
3514 }
3515
3516
3517 /* Dirty? Well, I still have not found a better way to account
3518  * for user mmaps.
3519  */
3520
3521 static void packet_mm_open(struct vm_area_struct *vma)
3522 {
3523         struct file *file = vma->vm_file;
3524         struct socket *sock = file->private_data;
3525         struct sock *sk = sock->sk;
3526
3527         if (sk)
3528                 atomic_inc(&pkt_sk(sk)->mapped);
3529 }
3530
3531 static void packet_mm_close(struct vm_area_struct *vma)
3532 {
3533         struct file *file = vma->vm_file;
3534         struct socket *sock = file->private_data;
3535         struct sock *sk = sock->sk;
3536
3537         if (sk)
3538                 atomic_dec(&pkt_sk(sk)->mapped);
3539 }
3540
3541 static const struct vm_operations_struct packet_mmap_ops = {
3542         .open   =       packet_mm_open,
3543         .close  =       packet_mm_close,
3544 };
3545
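/* Free every ring block, picking vfree() or free_pages() to match how
 * the block was allocated, then free the pointer vector itself.
 */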
3546 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
3547                         unsigned int len)
3548 {
3549         int i;
3550
3551         for (i = 0; i < len; i++) {
3552                 if (likely(pg_vec[i].buffer)) {
3553                         if (is_vmalloc_addr(pg_vec[i].buffer))
3554                                 vfree(pg_vec[i].buffer);
3555                         else
3556                                 free_pages((unsigned long)pg_vec[i].buffer,
3557                                            order);
3558                         pg_vec[i].buffer = NULL;
3559                 }
3560         }
3561         kfree(pg_vec);
3562 }
3563
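/* Allocate one ring block.  Strategy: try high-order pages cheaply
 * (no retry, no warning), fall back to vmalloc, and finally retry the
 * page allocator with __GFP_NORETRY cleared.
 */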
3564 static char *alloc_one_pg_vec_page(unsigned long order)
3565 {
3566         char *buffer = NULL;
3567         gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
3568                           __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
3569
3570         buffer = (char *) __get_free_pages(gfp_flags, order);
3571
3572         if (buffer)
3573                 return buffer;
3574
3575         /*
3576          * __get_free_pages failed, fall back to vmalloc
3577          */
3578         buffer = vzalloc((1 << order) * PAGE_SIZE);
3579
3580         if (buffer)
3581                 return buffer;
3582
3583         /*
3584          * vmalloc failed, let's dig into swap here
3585          */
3586         gfp_flags &= ~__GFP_NORETRY;
3587         buffer = (char *)__get_free_pages(gfp_flags, order);
3588         if (buffer)
3589                 return buffer;
3590
3591         /*
3592          * complete and utter failure
3593          */
3594         return NULL;
3595 }
3596
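/* Allocate the block pointer vector plus one block per tp_block_nr;
 * any partial allocation is rolled back on failure.
 */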
3597 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
3598 {
3599         unsigned int block_nr = req->tp_block_nr;
3600         struct pgv *pg_vec;
3601         int i;
3602
3603         pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
3604         if (unlikely(!pg_vec))
3605                 goto out;
3606
3607         for (i = 0; i < block_nr; i++) {
3608                 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
3609                 if (unlikely(!pg_vec[i].buffer))
3610                         goto out_free_pgvec;
3611         }
3612
3613 out:
3614         return pg_vec;
3615
3616 out_free_pgvec:
3617         free_pg_vec(pg_vec, order, block_nr);
3618         pg_vec = NULL;
3619         goto out;
3620 }
3621
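/* Create or tear down an rx/tx ring: validate the requested geometry,
 * allocate the blocks, quiesce the protocol hook, swap the new ring in
 * under pg_vec_lock (refused while the old one is still mmapped), and
 * re-register the hook afterwards.
 */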
3622 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
3623                 int closing, int tx_ring)
3624 {
3625         struct pgv *pg_vec = NULL;
3626         struct packet_sock *po = pkt_sk(sk);
3627         int was_running, order = 0;
3628         struct packet_ring_buffer *rb;
3629         struct sk_buff_head *rb_queue;
3630         __be16 num;
3631         int err = -EINVAL;
3632         /* Local alias kept to minimize code churn */
3633         struct tpacket_req *req = &req_u->req;
3634
3635         lock_sock(sk);
3636         /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
3637         if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
3638                 WARN(1, "Tx-ring is not supported.\n");
3639                 goto out;
3640         }
3641
3642         rb = tx_ring ? &po->tx_ring : &po->rx_ring;
3643         rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
3644
3645         err = -EBUSY;
3646         if (!closing) {
3647                 if (atomic_read(&po->mapped))
3648                         goto out;
3649                 if (atomic_read(&rb->pending))
3650                         goto out;
3651         }
3652
3653         if (req->tp_block_nr) {
3654                 /* Sanity tests and some calculations */
3655                 err = -EBUSY;
3656                 if (unlikely(rb->pg_vec))
3657                         goto out;
3658
3659                 switch (po->tp_version) {
3660                 case TPACKET_V1:
3661                         po->tp_hdrlen = TPACKET_HDRLEN;
3662                         break;
3663                 case TPACKET_V2:
3664                         po->tp_hdrlen = TPACKET2_HDRLEN;
3665                         break;
3666                 case TPACKET_V3:
3667                         po->tp_hdrlen = TPACKET3_HDRLEN;
3668                         break;
3669                 }
3670
3671                 err = -EINVAL;
3672                 if (unlikely((int)req->tp_block_size <= 0))
3673                         goto out;
3674                 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
3675                         goto out;
3676                 if (po->tp_version >= TPACKET_V3 &&
3677                     req->tp_block_size <=
3678                           BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
3679                         goto out;
3680                 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
3681                                         po->tp_reserve))
3682                         goto out;
3683                 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
3684                         goto out;
3685
3686                 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
3687                 if (unlikely(rb->frames_per_block <= 0))
3688                         goto out;
3689                 if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
3690                         goto out;
3691                 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
3692                                         req->tp_frame_nr))
3693                         goto out;
3694
3695                 err = -ENOMEM;
3696                 order = get_order(req->tp_block_size);
3697                 pg_vec = alloc_pg_vec(req, order);
3698                 if (unlikely(!pg_vec))
3699                         goto out;
3700                 switch (po->tp_version) {
3701                 case TPACKET_V3:
3702                         /* The transmit path is not supported for
3703                          * TPACKET_V3; checked above, but stay paranoid.
3704                          */
3705                         if (!tx_ring)
3706                                 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
3707                         break;
3708                 default:
3709                         break;
3710                 }
3711         } else {
3712                 /* tp_block_nr == 0 tears the ring down; frames must be 0 too */
3714                 err = -EINVAL;
3715                 if (unlikely(req->tp_frame_nr))
3716                         goto out;
3717         }
3718
3719
3720         /* Detach socket from network */
3721         spin_lock(&po->bind_lock);
3722         was_running = po->running;
3723         num = po->num;
3724         if (was_running) {
3725                 po->num = 0;
3726                 __unregister_prot_hook(sk, false);
3727         }
3728         spin_unlock(&po->bind_lock);
3729
3730         synchronize_net();
3731
3732         err = -EBUSY;
3733         mutex_lock(&po->pg_vec_lock);
3734         if (closing || atomic_read(&po->mapped) == 0) {
3735                 err = 0;
3736                 spin_lock_bh(&rb_queue->lock);
3737                 swap(rb->pg_vec, pg_vec);
3738                 rb->frame_max = (req->tp_frame_nr - 1);
3739                 rb->head = 0;
3740                 rb->frame_size = req->tp_frame_size;
3741                 spin_unlock_bh(&rb_queue->lock);
3742
3743                 swap(rb->pg_vec_order, order);
3744                 swap(rb->pg_vec_len, req->tp_block_nr);
3745
3746                 rb->pg_vec_pages = req->tp_block_size / PAGE_SIZE;
3747                 po->prot_hook.func = (po->rx_ring.pg_vec) ?
3748                                                 tpacket_rcv : packet_rcv;
3749                 skb_queue_purge(rb_queue);
3750                 if (atomic_read(&po->mapped))
3751                         pr_err("packet_mmap: vma is busy: %d\n",
3752                                atomic_read(&po->mapped));
3753         }
3754         mutex_unlock(&po->pg_vec_lock);
3755
3756         spin_lock(&po->bind_lock);
3757         if (was_running) {
3758                 po->num = num;
3759                 register_prot_hook(sk);
3760         }
3761         spin_unlock(&po->bind_lock);
3762         if (closing && (po->tp_version > TPACKET_V2)) {
3763                 /* Block-based V3 has no tx ring, so only rx has a retire timer */
3764                 if (!tx_ring)
3765                         prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
3766         }
3767
3768         if (pg_vec)
3769                 free_pg_vec(pg_vec, order, req->tp_block_nr);
3770 out:
3771         release_sock(sk);
3772         return err;
3773 }
3774
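/* mmap() the rings into one contiguous VMA: the offset must be zero
 * and the size must exactly match the rx plus tx ring sizes; every
 * block page is then inserted in order with vm_insert_page().
 */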
3775 static int packet_mmap(struct file *file, struct socket *sock,
3776                 struct vm_area_struct *vma)
3777 {
3778         struct sock *sk = sock->sk;
3779         struct packet_sock *po = pkt_sk(sk);
3780         unsigned long size, expected_size;
3781         struct packet_ring_buffer *rb;
3782         unsigned long start;
3783         int err = -EINVAL;
3784         int i;
3785
3786         if (vma->vm_pgoff)
3787                 return -EINVAL;
3788
3789         mutex_lock(&po->pg_vec_lock);
3790
3791         expected_size = 0;
3792         for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3793                 if (rb->pg_vec) {
3794                         expected_size += rb->pg_vec_len
3795                                                 * rb->pg_vec_pages
3796                                                 * PAGE_SIZE;
3797                 }
3798         }
3799
3800         if (expected_size == 0)
3801                 goto out;
3802
3803         size = vma->vm_end - vma->vm_start;
3804         if (size != expected_size)
3805                 goto out;
3806
3807         start = vma->vm_start;
3808         for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3809                 if (rb->pg_vec == NULL)
3810                         continue;
3811
3812                 for (i = 0; i < rb->pg_vec_len; i++) {
3813                         struct page *page;
3814                         void *kaddr = rb->pg_vec[i].buffer;
3815                         int pg_num;
3816
3817                         for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
3818                                 page = pgv_to_page(kaddr);
3819                                 err = vm_insert_page(vma, start, page);
3820                                 if (unlikely(err))
3821                                         goto out;
3822                                 start += PAGE_SIZE;
3823                                 kaddr += PAGE_SIZE;
3824                         }
3825                 }
3826         }
3827
3828         atomic_inc(&po->mapped);
3829         vma->vm_ops = &packet_mmap_ops;
3830         err = 0;
3831
3832 out:
3833         mutex_unlock(&po->pg_vec_lock);
3834         return err;
3835 }
3836
3837 static const struct proto_ops packet_ops_spkt = {
3838         .family =       PF_PACKET,
3839         .owner =        THIS_MODULE,
3840         .release =      packet_release,
3841         .bind =         packet_bind_spkt,
3842         .connect =      sock_no_connect,
3843         .socketpair =   sock_no_socketpair,
3844         .accept =       sock_no_accept,
3845         .getname =      packet_getname_spkt,
3846         .poll =         datagram_poll,
3847         .ioctl =        packet_ioctl,
3848         .listen =       sock_no_listen,
3849         .shutdown =     sock_no_shutdown,
3850         .setsockopt =   sock_no_setsockopt,
3851         .getsockopt =   sock_no_getsockopt,
3852         .sendmsg =      packet_sendmsg_spkt,
3853         .recvmsg =      packet_recvmsg,
3854         .mmap =         sock_no_mmap,
3855         .sendpage =     sock_no_sendpage,
3856 };
3857
3858 static const struct proto_ops packet_ops = {
3859         .family =       PF_PACKET,
3860         .owner =        THIS_MODULE,
3861         .release =      packet_release,
3862         .bind =         packet_bind,
3863         .connect =      sock_no_connect,
3864         .socketpair =   sock_no_socketpair,
3865         .accept =       sock_no_accept,
3866         .getname =      packet_getname,
3867         .poll =         packet_poll,
3868         .ioctl =        packet_ioctl,
3869         .listen =       sock_no_listen,
3870         .shutdown =     sock_no_shutdown,
3871         .setsockopt =   packet_setsockopt,
3872         .getsockopt =   packet_getsockopt,
3873         .sendmsg =      packet_sendmsg,
3874         .recvmsg =      packet_recvmsg,
3875         .mmap =         packet_mmap,
3876         .sendpage =     sock_no_sendpage,
3877 };
3878
3879 static const struct net_proto_family packet_family_ops = {
3880         .family =       PF_PACKET,
3881         .create =       packet_create,
3882         .owner  =       THIS_MODULE,
3883 };
3884
3885 static struct notifier_block packet_netdev_notifier = {
3886         .notifier_call =        packet_notifier,
3887 };
3888
3889 #ifdef CONFIG_PROC_FS
3890
3891 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
3892         __acquires(RCU)
3893 {
3894         struct net *net = seq_file_net(seq);
3895
3896         rcu_read_lock();
3897         return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
3898 }
3899
3900 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3901 {
3902         struct net *net = seq_file_net(seq);
3903         return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
3904 }
3905
3906 static void packet_seq_stop(struct seq_file *seq, void *v)
3907         __releases(RCU)
3908 {
3909         rcu_read_unlock();
3910 }
3911
3912 static int packet_seq_show(struct seq_file *seq, void *v)
3913 {
3914         if (v == SEQ_START_TOKEN)
3915                 seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
3916         else {
3917                 struct sock *s = sk_entry(v);
3918                 const struct packet_sock *po = pkt_sk(s);
3919
3920                 seq_printf(seq,
3921                            "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
3922                            s,
3923                            atomic_read(&s->sk_refcnt),
3924                            s->sk_type,
3925                            ntohs(po->num),
3926                            po->ifindex,
3927                            po->running,
3928                            atomic_read(&s->sk_rmem_alloc),
3929                            sock_i_uid(s),
3930                            sock_i_ino(s));
3931         }
3932
3933         return 0;
3934 }
3935
3936 static const struct seq_operations packet_seq_ops = {
3937         .start  = packet_seq_start,
3938         .next   = packet_seq_next,
3939         .stop   = packet_seq_stop,
3940         .show   = packet_seq_show,
3941 };
3942
3943 static int packet_seq_open(struct inode *inode, struct file *file)
3944 {
3945         return seq_open_net(inode, file, &packet_seq_ops,
3946                             sizeof(struct seq_net_private));
3947 }
3948
3949 static const struct file_operations packet_seq_fops = {
3950         .owner          = THIS_MODULE,
3951         .open           = packet_seq_open,
3952         .read           = seq_read,
3953         .llseek         = seq_lseek,
3954         .release        = seq_release_net,
3955 };
3956
3957 #endif
3958
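/* Per-namespace setup: the packet socket list and /proc/net/packet */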
3959 static int __net_init packet_net_init(struct net *net)
3960 {
3961         spin_lock_init(&net->packet.sklist_lock);
3962         INIT_HLIST_HEAD(&net->packet.sklist);
3963
3964         if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
3965                 return -ENOMEM;
3966
3967         return 0;
3968 }
3969
3970 static void __net_exit packet_net_exit(struct net *net)
3971 {
3972         proc_net_remove(net, "packet");
3973 }
3974
3975 static struct pernet_operations packet_net_ops = {
3976         .init = packet_net_init,
3977         .exit = packet_net_exit,
3978 };
3979
3980
3981 static void __exit packet_exit(void)
3982 {
3983         unregister_netdevice_notifier(&packet_netdev_notifier);
3984         unregister_pernet_subsys(&packet_net_ops);
3985         sock_unregister(PF_PACKET);
3986         proto_unregister(&packet_proto);
3987 }
3988
3989 static int __init packet_init(void)
3990 {
3991         int rc = proto_register(&packet_proto, 0);
3992
3993         if (rc != 0)
3994                 goto out;
3995
3996         sock_register(&packet_family_ops);
3997         register_pernet_subsys(&packet_net_ops);
3998         register_netdevice_notifier(&packet_netdev_notifier);
3999 out:
4000         return rc;
4001 }
4002
4003 module_init(packet_init);
4004 module_exit(packet_exit);
4005 MODULE_LICENSE("GPL");
4006 MODULE_ALIAS_NETPROTO(PF_PACKET);