 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/cacheflush.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <net/inet_common.h>
   - if a device has no dev->hard_header routine, it adds and removes the ll
     header inside itself. In this case the ll header is invisible outside of
     the device, but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit into the reserved space (tunnels); others are silly
   - a packet socket receives packets with the ll header pulled,
     so SOCK_RAW should push it back.

Incoming, dev->hard_header != NULL
   mac_header -> ll header

Outgoing, dev->hard_header != NULL
   mac_header -> ll header

Incoming, dev->hard_header == NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the ll
		 header. PPP does this, which is wrong, because it introduces
		 asymmetry between the rx and tx paths.

Outgoing, dev->hard_header == NULL
   mac_header -> data. The ll header is still not built!

If dev->hard_header == NULL we are unlikely to restore a sensible ll header.

dev->hard_header != NULL
   mac_header -> ll header

dev->hard_header == NULL (the ll header is added by the device, we cannot
			  control it)

We should set nh.raw on output to the correct position,
the packet classifier depends on it.
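/*
 * Illustrative user-space sketch of the asymmetry above (assuming an
 * Ethernet device; values are examples only):
 *
 *	int raw   = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));
 *	int dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *
 * On 'raw' the received buffer starts with the Ethernet (ll) header,
 * because SOCK_RAW pushes the pulled header back; on 'dgram' it starts
 * with the network header and the ll information is only reported via
 * the sockaddr_ll address.
 */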
/* Private packet socket structures. */

struct packet_mclist {
	struct packet_mclist	*next;
	unsigned char		addr[MAX_ADDR_LEN];

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);
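/*
 * Illustrative user-space sketch (the sizes and timeout below are
 * example values only): packet_set_ring() is reached via setsockopt()
 * once TPACKET_V3 has been selected:
 *
 *	int ver = TPACKET_V3;
 *	struct tpacket_req3 req = {
 *		.tp_block_size     = 1 << 20,
 *		.tp_block_nr       = 8,
 *		.tp_frame_size     = 2048,
 *		.tp_frame_nr       = (1 << 20) / 2048 * 8,
 *		.tp_retire_blk_tov = 10,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * A tp_retire_blk_tov of 0 asks the kernel to derive the timeout from
 * the link speed (see prb_calc_retire_blk_tmo() below).
 */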
#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
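/*
 * E.g. with a hypothetical sizeof(struct tpacket_block_desc) of 48,
 * BLK_HDR_LEN is ALIGN(48, 8) == 48 and BLK_PLUS_PRIV(13) is
 * 48 + ALIGN(13, 8) == 64. Only the 8-byte V3_ALIGNMENT rounding is
 * assumed here, not the exact descriptor size.
 */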
/* kbdq - kernel block descriptor queue */
struct tpacket_kbdq_core {
	unsigned int	feature_req_word;
	unsigned char	reset_pending_on_curr_blk;
	unsigned char	delete_blk_timer;
	unsigned short	kactive_blk_num;
	unsigned short	blk_sizeof_priv;

	/* last_kactive_blk_num:
	 * trick to see if user-space has caught up
	 * in order to avoid refreshing timer when every single pkt arrives.
	 */
	unsigned short	last_kactive_blk_num;

	unsigned int	knum_blocks;
	uint64_t	knxt_seq_num;

	atomic_t	blk_fill_in_prog;

	/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV	(8)

	unsigned short	retire_blk_tov;
	unsigned short	version;
	unsigned long	tov_in_jiffies;

	/* timer to retire an outstanding block */
	struct timer_list retire_blk_timer;

#define PGV_FROM_VMALLOC 1

struct packet_ring_buffer {
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	struct tpacket_kbdq_core prb_bdqc;

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
			struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(unsigned long);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_init_blk_timer(struct packet_sock *,
		struct tpacket_kbdq_core *,
		void (*func) (unsigned long));
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);

struct packet_fanout;

/* struct sock has to be the first member of packet_sock */
	struct packet_fanout	*fanout;
	struct tpacket_stats	stats;
	union  tpacket_stats_u	stats_u;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running:1,	/* prot_hook is attached */
	int			ifindex;	/* bound device */
	struct packet_mclist	*mclist;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_loss:1;
	unsigned int		tp_tstamp;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;

#define PACKET_FANOUT_MAX	256

struct packet_fanout {
	unsigned int		num_members;
	struct list_head	list;
	struct sock		*arr[PACKET_FANOUT_MAX];
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;

struct packet_skb_cb {
	unsigned int origlen;
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)
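/* E.g. with knum_blocks == 8 the active block advances
 * 0 -> 1 -> ... -> 7 -> 0; the block descriptor ring simply wraps.
 */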
static struct packet_sock *pkt_sk(struct sock *sk)
	return (struct packet_sock *)sk;

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

/* register_prot_hook must be invoked with the po->bind_lock held,
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void register_prot_hook(struct sock *sk)
	struct packet_sock *po = pkt_sk(sk);
		__fanout_link(sk, po);
		dev_add_pack(&po->prot_hook);

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held. If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook. If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
	struct packet_sock *po = pkt_sk(sk);
		__fanout_unlink(sk, po);
		__dev_remove_pack(&po->prot_hook);
		spin_unlock(&po->bind_lock);
		spin_lock(&po->bind_lock);

static void unregister_prot_hook(struct sock *sk, bool sync)
	struct packet_sock *po = pkt_sk(sk);
		__unregister_prot_hook(sk, sync);
static inline __pure struct page *pgv_to_page(void *addr)
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;

	switch (po->tp_version) {
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		WARN(1, "TPACKET version not supported.\n");

static int __packet_get_status(struct packet_sock *po, void *frame)
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;

	switch (po->tp_version) {
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
		WARN(1, "TPACKET version not supported.\n");

static void *packet_lookup_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int position,
	unsigned int pg_vec_pos, frame_offset;
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))

static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
	return packet_lookup_frame(po, rb, rb->head, status);

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
	del_timer_sync(&pkc->retire_blk_timer);

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		struct sk_buff_head *rb_queue)
	struct tpacket_kbdq_core *pkc;

	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;

	spin_lock(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);

static void prb_init_blk_timer(struct packet_sock *po,
		struct tpacket_kbdq_core *pkc,
		void (*func) (unsigned long))
	init_timer(&pkc->retire_blk_timer);
	pkc->retire_blk_timer.data = (long)po;
	pkc->retire_blk_timer.function = func;
	pkc->retire_blk_timer.expires = jiffies;

static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
	struct tpacket_kbdq_core *pkc;

	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
	struct net_device *dev;
	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
	struct ethtool_cmd ecmd;

	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		return DEFAULT_PRB_RETIRE_TOV;
	err = __ethtool_get_settings(dev, &ecmd);
	switch (ecmd.speed) {
			/*
			 * If the link speed is this slow you don't really
			 * need to worry about perf anyway
			 */
			return DEFAULT_PRB_RETIRE_TOV;

	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
	p1->feature_req_word = req_u->req3.tp_feature_req_word;

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			union tpacket_req_u *req_u, int tx_ring)
	struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;

	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start	= (char *)pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks	= req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats_u.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po, tx_ring);
	prb_open_block(p1, pbd);

/* Do NOT update the last_blk_num first.
 * Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
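/*
 * Worked example for the numbers above (illustrative only): a 1 MB
 * block holds 8 * 2^20 bits == 8 Mbit, which takes ~8 ms to arrive on
 * a 1000 Mbit/s link, hence the ~8 ms figure and the 8 ms
 * DEFAULT_PRB_RETIRE_TOV. Doubling the block size or halving the line
 * rate doubles the fill time accordingly.
 */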
static void prb_retire_rx_blk_timer_expired(unsigned long data)
	struct packet_sock *po = (struct packet_sock *)data;
	struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))

	/* We only need to plug the race when the block is partially filled.
	 * lock(); increment BLOCK_NUM_PKTS; unlock()
	 * copy_bits() is in progress ...
	 * timer fires on other cpu:
	 * we can't retire the current block because copy_bits
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		while (atomic_read(&pkc->blk_fill_in_prog)) {
			/* Waiting for skb_copy_bits to finish... */

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))

			/* Case 1. Queue was frozen because user-space was
			 */
			if (prb_curr_blk_in_use(pkc, pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				/* Case 2. Queue was frozen, user-space caught up,
				 * now the link went idle && the timer fired.
				 * We don't have a block to close. So we open this
				 * block and restart the timer.
				 * Opening a block thaws the queue and restarts the
				 * timer; thawing/timer-refresh is a side effect.
				 */
				prb_open_block(pkc, pbd);

	_prb_refresh_rx_retire_blk_timer(pkc);

	spin_unlock(&po->sk.sk_receive_queue.lock);
719 struct tpacket_block_desc *pbd1, __u32 status)
721 /* Flush everything minus the block header */
723 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
728 /* Skip the block header(we know header WILL fit in 4K) */
731 end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
732 for (; start < end; start += PAGE_SIZE)
733 flush_dcache_page(pgv_to_page(start));
738 /* Now update the block status. */
740 BLOCK_STATUS(pbd1) = status;
742 /* Flush the block header */
744 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
746 flush_dcache_page(pgv_to_page(start));
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose.
 * Because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	if (po->stats.tp_drops)
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
		/* Ok, we tmo'd - so get the current time */
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec = ts.tv_nsec;

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
	pkc->reset_pending_on_curr_blk = 0;

/*
 * Side effect of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	/* We could have just memset this but we will lose the
	 * flexibility of making the priv area sticky
	 */
	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = (char *)(pkc1->pkblk_start +
	BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *
 *        __packet_lookup_frame_in_block
 *          prb_retire_current_block()
 *          prb_dispatch_next_block()
 *            |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
	pkc->reset_pending_on_curr_blk = 1;
	po->stats_u.stats3.tp_freeze_q_cnt++;

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
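/* E.g. TOTAL_PKT_LEN_INCL_ALIGN(61) == 64: each packet copied into a
 * block is padded out to the next 8-byte (V3_ALIGNMENT) boundary so the
 * following tpacket3_hdr stays naturally aligned.
 */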
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, the caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
	struct tpacket_block_desc *pbd;

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);

	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
		prb_close_block(pkc, pbd, po, status);

static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd)
	return TP_STATUS_USER & BLOCK_STATUS(pbd);

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
	return pkc->reset_pending_on_curr_blk;

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	atomic_dec(&pkc->blk_fill_in_prog);

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
	ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
	ppd->hv1.tp_rxhash = 0;

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
	if (vlan_tx_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
		ppd->tp_status = TP_STATUS_VLAN_VALID;
		ppd->hv1.tp_vlan_tci = ppd->tp_status = 0;

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
		prb_clear_rxhash(pkc, ppd);

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;

	pkc = GET_PBDQC_FROM_RB(((struct packet_ring_buffer *)&po->rx_ring));
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pkc, pbd)) {
			/* Can't record this packet */
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue;
			 * thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);

	curr = pkc->nxt_offset;

	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr + TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;

	/*
	 * No free blocks are available. User-space hasn't caught up yet.
	 * Queue was just frozen and now this packet will get dropped.
	 */
static void *packet_current_rx_frame(struct packet_sock *po,
					struct sk_buff *skb,
					int status, unsigned int len)
	switch (po->tp_version) {
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return __packet_lookup_frame_in_block(po, skb, status, len);
		WARN(1, "TPACKET version not supported\n");

static void *prb_lookup_block(struct packet_sock *po,
				struct packet_ring_buffer *rb,
				unsigned int previous,
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, previous);

	if (status != BLOCK_STATUS(pbd))

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num - 1;
		prev = rb->prb_bdqc.knum_blocks - 1;

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					struct packet_ring_buffer *rb,
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);

static void *packet_previous_rx_frame(struct packet_sock *po,
					struct packet_ring_buffer *rb,
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);

static void packet_increment_rx_head(struct packet_sock *po,
					struct packet_ring_buffer *rb)
	switch (po->tp_version) {
		return packet_increment_head(rb);
		WARN(1, "TPACKET version not supported.\n");

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);

static void packet_increment_head(struct packet_ring_buffer *buff)
	buff->head = buff->head != buff->frame_max ? buff->head + 1 : 0;

static void packet_sock_destruct(struct sock *sk)
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);

	sk_refcnt_debug_dec(sk);

static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
	int x = atomic_read(&f->rr_cur) + 1;

static struct sock *fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
	u32 idx, hash = skb->rxhash;
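	/* Multiply-shift maps the 32-bit hash uniformly onto [0, num)
	 * without a division; e.g. hash == 0x80000000 and num == 4
	 * gives idx == 2.
	 */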
	idx = ((u64)hash * num) >> 32;

static struct sock *fanout_demux_lb(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
	cur = atomic_read(&f->rr_cur);
	while ((old = atomic_cmpxchg(&f->rr_cur, cur,
				     fanout_rr_next(f, num))) != cur)

static struct sock *fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
	unsigned int cpu = smp_processor_id();

	return f->arr[cpu % num];

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = f->num_members;
	struct packet_sock *po;

	if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
	case PACKET_FANOUT_HASH:
			skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
		skb_get_rxhash(skb);
		sk = fanout_demux_hash(f, skb, num);
	case PACKET_FANOUT_LB:
		sk = fanout_demux_lb(f, skb, num);
	case PACKET_FANOUT_CPU:
		sk = fanout_demux_cpu(f, skb, num);

	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);

static DEFINE_MUTEX(fanout_mutex);
static LIST_HEAD(fanout_list);

static void __fanout_link(struct sock *sk, struct packet_sock *po)
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	spin_unlock(&f->lock);

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	spin_unlock(&f->lock);
bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
	if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout)

static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 defrag = (type_flags & PACKET_FANOUT_FLAG_DEFRAG) ? 1 : 0;

	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:

	mutex_lock(&fanout_mutex);
	list_for_each_entry(f, &fanout_list, list) {
		    read_pnet(&f->net) == sock_net(sk)) {
	if (match && match->defrag != defrag)
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		write_pnet(&match->net, sock_net(sk));
		match->defrag = defrag;
		atomic_set(&match->rr_cur, 0);
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		atomic_set(&match->sk_ref, 0);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		match->prot_hook.id_match = match_fanout_group;
		dev_add_pack(&match->prot_hook);
		list_add(&match->list, &fanout_list);
	if (match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			__dev_remove_pack(&po->prot_hook);
			atomic_inc(&match->sk_ref);
			__fanout_link(sk, po);
	mutex_unlock(&fanout_mutex);
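/*
 * Illustrative user-space counterpart (the group id and type below are
 * example values): sockets join a fanout group with
 *
 *	int val = fanout_id | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
 *
 * All sockets sharing the same id, packet type and bound device then
 * split the load as dispatched by packet_rcv_fanout() above.
 */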
static void fanout_release(struct sock *sk)
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	mutex_lock(&fanout_mutex);
	if (atomic_dec_and_test(&f->sk_ref)) {
		dev_remove_pack(&f->prot_hook);
	mutex_unlock(&fanout_mutex);
static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
	struct sockaddr_pkt *spkt;

	/*
	 * When we registered the protocol we saved the socket in the data
	 * field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 * Yank back the headers [hope the device set this
	 * right or kerboom...]
	 *
	 * Incoming packets have the ll header pulled,
	 *
	 * For outgoing ones skb->data == skb_mac_header(skb)
	 * so that this procedure is a no-op.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)

	if (!net_eq(dev_net(dev), sock_net(sk)))

	skb = skb_share_check(skb, GFP_ATOMIC);

	/* drop any routing info */

	/* drop conntrack reference */

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 * The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 * Charge the memory to the socket. This is done specifically
	 * to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)

/*
 * Output a raw packet to a device layer. This bypasses all the other
 * protocol layers and you must therefore supply it with a complete frame
 */

static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb = NULL;
	struct net_device *dev;

	/*
	 * Get and verify the address.
	 */

	if (msg->msg_namelen < sizeof(struct sockaddr))
	if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
		proto = saddr->spkt_protocol;
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 * Find the device first to size check it
	 */

	saddr->spkt_device[13] = 0;
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	if (!(dev->flags & IFF_UP))

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN)

		size_t reserved = LL_RESERVED_SPACE(dev);
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
			skb_reset_network_header(skb);
		err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);

	if (len > (dev->mtu + dev->hard_header_len)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {

	skb->protocol = proto;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);

	dev_queue_xmit(skb);

static unsigned int run_filter(const struct sk_buff *skb,
			       const struct sock *sk,
	struct sk_filter *filter;

	filter = rcu_dereference(sk->sk_filter);
		res = SK_RUN_FILTER(filter, skb);

/*
 * This function does lazy skb cloning in the hope that most packets
 * are discarded by BPF.
 *
 * Note the tricky part: we DO mangle a shared skb! skb->data, skb->len
 * and skb->cb are mangled. It works because (and until) packets
 * falling here are owned by the current CPU. Output packets are cloned
 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 * sequentially, so that if we return the skb to its original state on
 * exit, we will not harm anyone.
 */
static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;

	if (skb->pkt_type == PACKET_LOOPBACK)

	sk = pt->af_packet_priv;

	if (!net_eq(dev_net(dev), sock_net(sk)))

	if (dev->header_ops) {
		/* The device has an explicit notion of ll header,
		 * exported to higher levels.
		 *
		 * Otherwise, the device hides details of its frame
		 * structure, so that corresponding packet head is
		 * never delivered to user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));

	res = run_filter(skb, sk, snaplen);
		goto drop_n_restore;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (skb_head != skb->data) {
			skb->data = skb_head;

	BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

	PACKET_SKB_CB(skb)->origlen = skb->len;

	if (pskb_trim(skb, snaplen))

	skb_set_owner_r(skb, sk);

	/* drop conntrack reference */

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_packets++;
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk, skb->len);

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_drops++;
	atomic_inc(&sk->sk_drops);
	spin_unlock(&sk->sk_receive_queue.lock);

	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
	struct packet_sock *po;
	struct sockaddr_ll *sll;
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		struct tpacket3_hdr *h3;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	unsigned long status = TP_STATUS_USER;
	unsigned short macoff, netoff, hdrlen;
	struct sk_buff *copy_skb = NULL;
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (skb->pkt_type == PACKET_LOOPBACK)

	sk = pt->af_packet_priv;

	if (!net_eq(dev_net(dev), sock_net(sk)))

	if (dev->header_ops) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= TP_STATUS_CSUMNOTREADY;

	res = run_filter(skb, sk, snaplen);
		goto drop_n_restore;

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
		unsigned int maclen = skb_network_offset(skb);
		netoff = TPACKET_ALIGN(po->tp_hdrlen +
				       (maclen < 16 ? 16 : maclen)) +
		macoff = netoff - maclen;
	if (po->tp_version <= TPACKET_V2) {
		if (macoff + snaplen > po->rx_ring.frame_size) {
			if (po->copy_thresh &&
			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
				if (skb_shared(skb)) {
					copy_skb = skb_clone(skb, GFP_ATOMIC);
					copy_skb = skb_get(skb);
					skb_head = skb->data;
					skb_set_owner_r(copy_skb, sk);
			snaplen = po->rx_ring.frame_size - macoff;
			if ((int)snaplen < 0)
	spin_lock(&sk->sk_receive_queue.lock);
	h.raw = packet_current_rx_frame(po, skb,
					TP_STATUS_KERNEL, (macoff+snaplen));
	if (po->tp_version <= TPACKET_V2) {
		packet_increment_rx_head(po, &po->rx_ring);
		/*
		 * LOSING will be reported until you read the stats,
		 * because it's COR - Clear On Read.
		 * Anyway, moving it for V1/V2 only as V3 doesn't need this
		 */
		if (po->stats.tp_drops)
			status |= TP_STATUS_LOSING;
	po->stats.tp_packets++;
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	spin_unlock(&sk->sk_receive_queue.lock);

	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);

	switch (po->tp_version) {
		h.h1->tp_len = skb->len;
		h.h1->tp_snaplen = snaplen;
		h.h1->tp_mac = macoff;
		h.h1->tp_net = netoff;
		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
				&& shhwtstamps->syststamp.tv64)
			tv = ktime_to_timeval(shhwtstamps->syststamp);
		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
				&& shhwtstamps->hwtstamp.tv64)
			tv = ktime_to_timeval(shhwtstamps->hwtstamp);
		else if (skb->tstamp.tv64)
			tv = ktime_to_timeval(skb->tstamp);
			do_gettimeofday(&tv);
		h.h1->tp_sec = tv.tv_sec;
		h.h1->tp_usec = tv.tv_usec;
		hdrlen = sizeof(*h.h1);
		h.h2->tp_len = skb->len;
		h.h2->tp_snaplen = snaplen;
		h.h2->tp_mac = macoff;
		h.h2->tp_net = netoff;
		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
				&& shhwtstamps->syststamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->syststamp);
		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
				&& shhwtstamps->hwtstamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->hwtstamp);
		else if (skb->tstamp.tv64)
			ts = ktime_to_timespec(skb->tstamp);
			getnstimeofday(&ts);
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		if (vlan_tx_tag_present(skb)) {
			h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
			status |= TP_STATUS_VLAN_VALID;
			h.h2->tp_vlan_tci = 0;
		h.h2->tp_padding = 0;
		hdrlen = sizeof(*h.h2);
		/* tp_next_offset and vlan are already populated above,
		 * so DON'T clear those fields here
		 */
		h.h3->tp_status |= status;
		h.h3->tp_len = skb->len;
		h.h3->tp_snaplen = snaplen;
		h.h3->tp_mac = macoff;
		h.h3->tp_net = netoff;
		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
				&& shhwtstamps->syststamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->syststamp);
		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
				&& shhwtstamps->hwtstamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->hwtstamp);
		else if (skb->tstamp.tv64)
			ts = ktime_to_timespec(skb->tstamp);
			getnstimeofday(&ts);
		h.h3->tp_sec  = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		hdrlen = sizeof(*h.h3);

	sll = h.raw + TPACKET_ALIGN(hdrlen);
	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
		sll->sll_ifindex = dev->ifindex;

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	if (po->tp_version <= TPACKET_V2) {
		end = (u8 *)PAGE_ALIGN((unsigned long)h.raw
				+ macoff + snaplen);
		for (start = h.raw; start < end; start += PAGE_SIZE)
			flush_dcache_page(pgv_to_page(start));

	if (po->tp_version <= TPACKET_V2)
		__packet_set_status(po, h.raw, status);
		prb_clear_blk_fill_status(&po->rx_ring);

	sk->sk_data_ready(sk, 0);

	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;

	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

	sk->sk_data_ready(sk, 0);
	kfree_skb(copy_skb);
	goto drop_n_restore;
static void tpacket_destruct_skb(struct sk_buff *skb)
	struct packet_sock *po = pkt_sk(skb->sk);

	if (likely(po->tx_ring.pg_vec)) {
		ph = skb_shinfo(skb)->destructor_arg;
		BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
		atomic_dec(&po->tx_ring.pending);
		__packet_set_status(po, ph, TP_STATUS_AVAILABLE);

static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
		void *frame, struct net_device *dev, int size_max,
		__be16 proto, unsigned char *addr)
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
	int to_write, offset, len, tp_len, nr_frags, len_max;
	struct socket *sock = po->sk.sk_socket;

	skb->protocol = proto;
	skb->priority = po->sk.sk_priority;
	skb->mark = po->sk.sk_mark;
	skb_shinfo(skb)->destructor_arg = ph.raw;

	switch (po->tp_version) {
		tp_len = ph.h2->tp_len;
		tp_len = ph.h1->tp_len;
	if (unlikely(tp_len > size_max)) {
		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);

	data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);

	if (sock->type == SOCK_DGRAM) {
		err = dev_hard_header(skb, dev, ntohs(proto), addr,
		if (unlikely(err < 0))
	} else if (dev->hard_header_len) {
		/* net device doesn't like empty head */
		if (unlikely(tp_len <= dev->hard_header_len)) {
			pr_err("packet size is too short (%d < %d)\n",
			       tp_len, dev->hard_header_len);

		skb_push(skb, dev->hard_header_len);
		err = skb_store_bits(skb, 0, data,
				     dev->hard_header_len);

		data += dev->hard_header_len;
		to_write -= dev->hard_header_len;

	offset = offset_in_page(data);
	len_max = PAGE_SIZE - offset;
	len = ((to_write > len_max) ? len_max : to_write);

	skb->data_len = to_write;
	skb->len += to_write;
	skb->truesize += to_write;
	atomic_add(to_write, &po->sk.sk_wmem_alloc);

	while (likely(to_write)) {
		nr_frags = skb_shinfo(skb)->nr_frags;

		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
			pr_err("Packet exceeds the number of skb frags (%lu)\n",

		page = pgv_to_page(data);
		flush_dcache_page(page);
		skb_fill_page_desc(skb, nr_frags, page, offset, len);
		len_max = PAGE_SIZE;
		len = ((to_write > len_max) ? len_max : to_write);
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
	struct sk_buff *skb;
	struct net_device *dev;
	bool need_rls_dev = false;
	int err, reserve = 0;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	int tp_len, size_max;
	unsigned char *addr;

	mutex_lock(&po->pg_vec_lock);

	if (saddr == NULL) {
		dev = po->prot_hook.dev;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
		if (msg->msg_namelen < (saddr->sll_halen
					+ offsetof(struct sockaddr_ll,
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
		need_rls_dev = true;

	if (unlikely(dev == NULL))

	reserve = dev->hard_header_len;

	if (unlikely(!(dev->flags & IFF_UP)))

	size_max = po->tx_ring.frame_size
		 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));

	if (size_max > dev->mtu + reserve)
		size_max = dev->mtu + reserve;

		ph = packet_current_frame(po, &po->tx_ring,
					  TP_STATUS_SEND_REQUEST);

		if (unlikely(ph == NULL)) {

		status = TP_STATUS_SEND_REQUEST;
		skb = sock_alloc_send_skb(&po->sk,
				LL_ALLOCATED_SPACE(dev)
				+ sizeof(struct sockaddr_ll),

		if (unlikely(skb == NULL))

		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,

		if (unlikely(tp_len < 0)) {
				__packet_set_status(po, ph,
						TP_STATUS_AVAILABLE);
				packet_increment_head(&po->tx_ring);
				status = TP_STATUS_WRONG_FORMAT;

		skb->destructor = tpacket_destruct_skb;
		__packet_set_status(po, ph, TP_STATUS_SENDING);
		atomic_inc(&po->tx_ring.pending);

		status = TP_STATUS_SEND_REQUEST;
		err = dev_queue_xmit(skb);
		if (unlikely(err > 0)) {
			err = net_xmit_errno(err);
			if (err && __packet_get_status(po, ph) ==
				   TP_STATUS_AVAILABLE) {
				/* skb was destructed already */
			/*
			 * skb was dropped but not destructed yet;
			 * let's treat it like congestion or err < 0
			 */
		packet_increment_head(&po->tx_ring);
	} while (likely((ph != NULL) ||
			((!(msg->msg_flags & MSG_DONTWAIT)) &&
			 (atomic_read(&po->tx_ring.pending))))

	__packet_set_status(po, ph, status);

	mutex_unlock(&po->pg_vec_lock);
static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
					size_t reserve, size_t len,
					size_t linear, int noblock,
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,

	skb_reserve(skb, reserve);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

static int packet_snd(struct socket *sock,
		      struct msghdr *msg, size_t len)
	struct sock *sk = sock->sk;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	bool need_rls_dev = false;
	unsigned char *addr;
	int err, reserve = 0;
	struct virtio_net_hdr vnet_hdr = { 0 };
	struct packet_sock *po = pkt_sk(sk);
	unsigned short gso_type = 0;

	/*
	 * Get and verify the address.
	 */

	if (saddr == NULL) {
		dev = po->prot_hook.dev;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
		need_rls_dev = true;

	if (sock->type == SOCK_RAW)
		reserve = dev->hard_header_len;

	if (!(dev->flags & IFF_UP))

	if (po->has_vnet_hdr) {
		vnet_hdr_len = sizeof(vnet_hdr);

		if (len < vnet_hdr_len)

		len -= vnet_hdr_len;

		err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,

		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
			vnet_hdr.hdr_len = vnet_hdr.csum_start +
						 vnet_hdr.csum_offset + 2;

		if (vnet_hdr.hdr_len > len)

		if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
			switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
			case VIRTIO_NET_HDR_GSO_TCPV4:
				gso_type = SKB_GSO_TCPV4;
			case VIRTIO_NET_HDR_GSO_TCPV6:
				gso_type = SKB_GSO_TCPV6;
			case VIRTIO_NET_HDR_GSO_UDP:
				gso_type = SKB_GSO_UDP;

			if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
				gso_type |= SKB_GSO_TCP_ECN;

			if (vnet_hdr.gso_size == 0)

	if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN))

	skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
			       LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
			       msg->msg_flags & MSG_DONTWAIT, &err);

	skb_set_network_header(skb, reserve);

	if (sock->type == SOCK_DGRAM &&
	    (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)

	/* Returns -EFAULT on error */
	err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);

	if (!gso_type && (len > dev->mtu + reserve)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {

	skb->protocol = proto;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	if (po->has_vnet_hdr) {
		if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
						  vnet_hdr.csum_offset)) {

		skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;

		len += vnet_hdr_len;

	err = dev_queue_xmit(skb);
	if (err > 0 && (err = net_xmit_errno(err)) != 0)

	if (dev && need_rls_dev)

static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len)
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);

	if (po->tx_ring.pg_vec)
		return tpacket_snd(po, msg);

	return packet_snd(sock, msg, len);
 *	Close a PACKET socket. This is fairly simple. We immediately go
 *	to 'closed' state and remove our protocol entry in the device list.
 */

static int packet_release(struct socket *sock)
	struct sock *sk = sock->sk;
	struct packet_sock *po;
	union tpacket_req_u req_u;

	spin_lock_bh(&net->packet.sklist_lock);
	sk_del_node_init_rcu(sk);
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	spin_unlock_bh(&net->packet.sklist_lock);

	spin_lock(&po->bind_lock);
	unregister_prot_hook(sk, false);
	if (po->prot_hook.dev) {
		dev_put(po->prot_hook.dev);
		po->prot_hook.dev = NULL;
	spin_unlock(&po->bind_lock);

	packet_flush_mclist(sk);

	if (po->rx_ring.pg_vec) {
		memset(&req_u, 0, sizeof(req_u));
		packet_set_ring(sk, &req_u, 1, 0);

	if (po->tx_ring.pg_vec) {
		memset(&req_u, 0, sizeof(req_u));
		packet_set_ring(sk, &req_u, 1, 1);

	/*
	 *	Now the socket is dead. No more input will appear.
	 */

	skb_queue_purge(&sk->sk_receive_queue);
	sk_refcnt_debug_release(sk);
 *	Attach a packet hook.
 */

static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
	struct packet_sock *po = pkt_sk(sk);

	spin_lock(&po->bind_lock);
	unregister_prot_hook(sk, true);

	po->prot_hook.type = protocol;
	if (po->prot_hook.dev)
		dev_put(po->prot_hook.dev);
	po->prot_hook.dev = dev;

	po->ifindex = dev ? dev->ifindex : 0;

	if (!dev || (dev->flags & IFF_UP)) {
		register_prot_hook(sk);
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);

	spin_unlock(&po->bind_lock);
 *	Bind a packet socket to a device
 */

static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
	struct sock *sk = sock->sk;
	struct net_device *dev;

	if (addr_len != sizeof(struct sockaddr))
	strlcpy(name, uaddr->sa_data, sizeof(name));

	dev = dev_get_by_name(sock_net(sk), name);
		err = packet_do_bind(sk, dev, pkt_sk(sk)->num);

static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
	struct sock *sk = sock->sk;
	struct net_device *dev = NULL;

	if (addr_len < sizeof(struct sockaddr_ll))
	if (sll->sll_family != AF_PACKET)

	if (sll->sll_ifindex) {
		dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
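/*
 * Illustrative user-space counterpart (the device name is an example):
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 *
 * An sll_ifindex of 0 binds to every device, the dev == NULL case in
 * packet_do_bind() above.
 */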
static struct proto packet_proto = {
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct packet_sock),

/*
 *	Create a packet of type SOCK_PACKET.
 */

static int packet_create(struct net *net, struct socket *sock, int protocol,
	struct packet_sock *po;
	__be16 proto = (__force __be16)protocol; /* weird, but documented */

	if (!capable(CAP_NET_RAW))
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
	    sock->type != SOCK_PACKET)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);

	sock->ops = &packet_ops;
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;

	sock_init_data(sock, sk);

	sk->sk_family = PF_PACKET;

	sk->sk_destruct = packet_sock_destruct;
	sk_refcnt_debug_inc(sk);

	/*
	 *	Attach a protocol block
	 */

	spin_lock_init(&po->bind_lock);
	mutex_init(&po->pg_vec_lock);
	po->prot_hook.func = packet_rcv;

	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;

	po->prot_hook.af_packet_priv = sk;

	po->prot_hook.type = proto;
	register_prot_hook(sk);

	spin_lock_bh(&net->packet.sklist_lock);
	sk_add_node_rcu(sk, &net->packet.sklist);
	sock_prot_inuse_add(net, &packet_proto, 1);
	spin_unlock_bh(&net->packet.sklist_lock);
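/*
 * Illustrative note (the protocol value is an example): the third
 * socket() argument is a network-byte-order ethertype, e.g.
 *
 *	socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *
 * so the prot_hook registered above only sees matching frames, and
 * CAP_NET_RAW is required in all cases.
 */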
2610 static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
2612 struct sock_exterr_skb *serr;
2613 struct sk_buff *skb, *skb2;
2617 skb = skb_dequeue(&sk->sk_error_queue);
2623 msg->msg_flags |= MSG_TRUNC;
2626 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2630 sock_recv_timestamp(msg, sk, skb);
2632 serr = SKB_EXT_ERR(skb);
2633 put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
2634 sizeof(serr->ee), &serr->ee);
2636 msg->msg_flags |= MSG_ERRQUEUE;
2639 /* Reset and regenerate socket error */
2640 spin_lock_bh(&sk->sk_error_queue.lock);
2641 sk->sk_err = 0;
2642 if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
2643 sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
2644 spin_unlock_bh(&sk->sk_error_queue.lock);
2645 sk->sk_error_report(sk);
2646 } else
2647 spin_unlock_bh(&sk->sk_error_queue.lock);
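/*
 * Hedged userspace sketch of draining the queue packet_recv_error()
 * serves: recvmsg(2) with MSG_ERRQUEUE returns the frame data plus the
 * PACKET_TX_TIMESTAMP control message assembled above.  Buffer sizes
 * are arbitrary examples and handle_tstamp() is a hypothetical helper.
 *
 *	char data[2048], control[512];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = control, .msg_controllen = sizeof(control),
 *	};
 *	struct cmsghdr *c;
 *
 *	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
 *		return;
 *	for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
 *		if (c->cmsg_level == SOL_PACKET &&
 *		    c->cmsg_type == PACKET_TX_TIMESTAMP)
 *			handle_tstamp(CMSG_DATA(c));
 */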
2656 * Pull a packet from our receive queue and hand it to the user.
2657 * If necessary we block.
2660 static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
2661 struct msghdr *msg, size_t len, int flags)
2663 struct sock *sk = sock->sk;
2664 struct sk_buff *skb;
2666 int vnet_hdr_len = 0;
2669 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
2673 /* What error should we return now? EUNATTACH? */
2674 if (pkt_sk(sk)->ifindex < 0)
2678 if (flags & MSG_ERRQUEUE) {
2679 err = packet_recv_error(sk, msg, len);
2684 * Call the generic datagram receiver. This handles all sorts
2685 * of horrible races and re-entrancy so we can forget about it
2686 * in the protocol layers.
2688 * Now it will return ENETDOWN, if the device has just gone down,
2689 * but then it will block.
2692 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
2695 * An error occurred so return it. Because skb_recv_datagram()
2696 * handles the blocking we don't see and worry about blocking
2697 * retries.
2703 if (pkt_sk(sk)->has_vnet_hdr) {
2704 struct virtio_net_hdr vnet_hdr = { 0 };
2707 vnet_hdr_len = sizeof(vnet_hdr);
2708 if (len < vnet_hdr_len)
2711 len -= vnet_hdr_len;
2713 if (skb_is_gso(skb)) {
2714 struct skb_shared_info *sinfo = skb_shinfo(skb);
2716 /* This is a hint as to how much should be linear. */
2717 vnet_hdr.hdr_len = skb_headlen(skb);
2718 vnet_hdr.gso_size = sinfo->gso_size;
2719 if (sinfo->gso_type & SKB_GSO_TCPV4)
2720 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2721 else if (sinfo->gso_type & SKB_GSO_TCPV6)
2722 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2723 else if (sinfo->gso_type & SKB_GSO_UDP)
2724 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
2725 else if (sinfo->gso_type & SKB_GSO_FCOE)
2729 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
2730 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
2732 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
2734 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2735 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
2736 vnet_hdr.csum_start = skb_checksum_start_offset(skb);
2737 vnet_hdr.csum_offset = skb->csum_offset;
2738 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2739 vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
2740 } /* else everything is zero */
2742 err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
2748 /* You lose any data beyond the buffer you gave. If it worries
2749 * a user program they can ask the device for its MTU anyway.
2755 msg->msg_flags |= MSG_TRUNC;
2758 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2762 sock_recv_ts_and_drops(msg, sk, skb);
2764 if (msg->msg_name) {
2765 /* If the address length field is there to be filled
2766 * in, we fill it in now.
2768 if (sock->type == SOCK_PACKET) {
2769 msg->msg_namelen = sizeof(struct sockaddr_pkt);
2771 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
2772 msg->msg_namelen = sll->sll_halen +
2773 offsetof(struct sockaddr_ll, sll_addr);
2775 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
2779 if (pkt_sk(sk)->auxdata) {
2780 struct tpacket_auxdata aux;
2782 aux.tp_status = TP_STATUS_USER;
2783 if (skb->ip_summed == CHECKSUM_PARTIAL)
2784 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
2785 aux.tp_len = PACKET_SKB_CB(skb)->origlen;
2786 aux.tp_snaplen = skb->len;
2788 aux.tp_net = skb_network_offset(skb);
2789 if (vlan_tx_tag_present(skb)) {
2790 aux.tp_vlan_tci = vlan_tx_tag_get(skb);
2791 aux.tp_status |= TP_STATUS_VLAN_VALID;
2793 aux.tp_vlan_tci = 0;
2796 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
2800 * Free or return the buffer as appropriate. Again this
2801 * hides all the races and re-entrancy issues from us.
2803 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
2806 skb_free_datagram(sk, skb);
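/*
 * Sketch of consuming the PACKET_AUXDATA control message built above
 * (userspace, illustrative): enable the option once, then every
 * recvmsg(2) carries a struct tpacket_auxdata with the original
 * length, snaplen, header offsets and VLAN tag.
 *
 *	int one = 1;
 *	struct tpacket_auxdata *aux;
 *	struct cmsghdr *c;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));
 *	recvmsg(fd, &msg, 0);   msg set up as in the previous sketch
 *	for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
 *		if (c->cmsg_level == SOL_PACKET &&
 *		    c->cmsg_type == PACKET_AUXDATA)
 *			aux = (struct tpacket_auxdata *)CMSG_DATA(c);
 */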
2811 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
2812 int *uaddr_len, int peer)
2814 struct net_device *dev;
2815 struct sock *sk = sock->sk;
2820 uaddr->sa_family = AF_PACKET;
2821 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
2823 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
2825 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
2827 *uaddr_len = sizeof(*uaddr);
2832 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
2833 int *uaddr_len, int peer)
2835 struct net_device *dev;
2836 struct sock *sk = sock->sk;
2837 struct packet_sock *po = pkt_sk(sk);
2838 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
2843 sll->sll_family = AF_PACKET;
2844 sll->sll_ifindex = po->ifindex;
2845 sll->sll_protocol = po->num;
2846 sll->sll_pkttype = 0;
2848 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
2850 sll->sll_hatype = dev->type;
2851 sll->sll_halen = dev->addr_len;
2852 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
2854 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
2858 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
2863 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
2867 case PACKET_MR_MULTICAST:
2868 if (i->alen != dev->addr_len)
2871 return dev_mc_add(dev, i->addr);
2873 return dev_mc_del(dev, i->addr);
2875 case PACKET_MR_PROMISC:
2876 return dev_set_promiscuity(dev, what);
2878 case PACKET_MR_ALLMULTI:
2879 return dev_set_allmulti(dev, what);
2881 case PACKET_MR_UNICAST:
2882 if (i->alen != dev->addr_len)
2885 return dev_uc_add(dev, i->addr);
2887 return dev_uc_del(dev, i->addr);
2895 static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
2897 for ( ; i; i = i->next) {
2898 if (i->ifindex == dev->ifindex)
2899 packet_dev_mc(dev, i, what);
2903 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
2905 struct packet_sock *po = pkt_sk(sk);
2906 struct packet_mclist *ml, *i;
2907 struct net_device *dev;
2913 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
2918 if (mreq->mr_alen > dev->addr_len)
2922 i = kmalloc(sizeof(*i), GFP_KERNEL);
2927 for (ml = po->mclist; ml; ml = ml->next) {
2928 if (ml->ifindex == mreq->mr_ifindex &&
2929 ml->type == mreq->mr_type &&
2930 ml->alen == mreq->mr_alen &&
2931 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
2933 /* Free the new element ... */
2939 i->type = mreq->mr_type;
2940 i->ifindex = mreq->mr_ifindex;
2941 i->alen = mreq->mr_alen;
2942 memcpy(i->addr, mreq->mr_address, i->alen);
2944 i->next = po->mclist;
2946 err = packet_dev_mc(dev, i, 1);
2948 po->mclist = i->next;
2957 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
2959 struct packet_mclist *ml, **mlp;
2963 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
2964 if (ml->ifindex == mreq->mr_ifindex &&
2965 ml->type == mreq->mr_type &&
2966 ml->alen == mreq->mr_alen &&
2967 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
2968 if (--ml->count == 0) {
2969 struct net_device *dev;
2971 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
2973 packet_dev_mc(dev, ml, -1);
2981 return -EADDRNOTAVAIL;
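/*
 * Illustrative use of the membership machinery above: putting the
 * interface into promiscuous mode through PACKET_ADD_MEMBERSHIP is
 * reference counted per socket and undone automatically on close,
 * unlike flipping IFF_PROMISC via SIOCSIFFLAGS.  The interface name
 * is an example value.
 *
 *	struct packet_mreq mreq;
 *
 *	memset(&mreq, 0, sizeof(mreq));
 *	mreq.mr_ifindex = if_nametoindex("eth0");
 *	mreq.mr_type    = PACKET_MR_PROMISC;
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */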
2984 static void packet_flush_mclist(struct sock *sk)
2986 struct packet_sock *po = pkt_sk(sk);
2987 struct packet_mclist *ml;
2993 while ((ml = po->mclist) != NULL) {
2994 struct net_device *dev;
2996 po->mclist = ml->next;
2997 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
2999 packet_dev_mc(dev, ml, -1);
3006 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
3008 struct sock *sk = sock->sk;
3009 struct packet_sock *po = pkt_sk(sk);
3012 if (level != SOL_PACKET)
3013 return -ENOPROTOOPT;
3016 case PACKET_ADD_MEMBERSHIP:
3017 case PACKET_DROP_MEMBERSHIP:
3019 struct packet_mreq_max mreq;
3021 memset(&mreq, 0, sizeof(mreq));
3022 if (len < sizeof(struct packet_mreq))
3024 if (len > sizeof(mreq))
3026 if (copy_from_user(&mreq, optval, len))
3028 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3030 if (optname == PACKET_ADD_MEMBERSHIP)
3031 ret = packet_mc_add(sk, &mreq);
3033 ret = packet_mc_drop(sk, &mreq);
3037 case PACKET_RX_RING:
3038 case PACKET_TX_RING:
3040 union tpacket_req_u req_u;
3043 switch (po->tp_version) {
3046 len = sizeof(req_u.req);
3050 len = sizeof(req_u.req3);
3055 if (pkt_sk(sk)->has_vnet_hdr)
3057 if (copy_from_user(&req_u.req, optval, len))
3059 return packet_set_ring(sk, &req_u, 0,
3060 optname == PACKET_TX_RING);
3062 case PACKET_COPY_THRESH:
3066 if (optlen != sizeof(val))
3068 if (copy_from_user(&val, optval, sizeof(val)))
3071 pkt_sk(sk)->copy_thresh = val;
3074 case PACKET_VERSION:
3078 if (optlen != sizeof(val))
3080 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3081 return -EBUSY;
3082 if (copy_from_user(&val, optval, sizeof(val)))
3088 po->tp_version = val;
3094 case PACKET_RESERVE:
3098 if (optlen != sizeof(val))
3100 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3101 return -EBUSY;
3102 if (copy_from_user(&val, optval, sizeof(val)))
3104 po->tp_reserve = val;
3111 if (optlen != sizeof(val))
3113 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3114 return -EBUSY;
3115 if (copy_from_user(&val, optval, sizeof(val)))
3117 po->tp_loss = !!val;
3120 case PACKET_AUXDATA:
3124 if (optlen < sizeof(val))
3126 if (copy_from_user(&val, optval, sizeof(val)))
3129 po->auxdata = !!val;
3132 case PACKET_ORIGDEV:
3136 if (optlen < sizeof(val))
3138 if (copy_from_user(&val, optval, sizeof(val)))
3141 po->origdev = !!val;
3144 case PACKET_VNET_HDR:
3148 if (sock->type != SOCK_RAW)
3150 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3151 return -EBUSY;
3152 if (optlen < sizeof(val))
3154 if (copy_from_user(&val, optval, sizeof(val)))
3157 po->has_vnet_hdr = !!val;
3160 case PACKET_TIMESTAMP:
3164 if (optlen != sizeof(val))
3166 if (copy_from_user(&val, optval, sizeof(val)))
3169 po->tp_tstamp = val;
3176 if (optlen != sizeof(val))
3178 if (copy_from_user(&val, optval, sizeof(val)))
3181 return fanout_add(sk, val & 0xffff, val >> 16);
3184 return -ENOPROTOOPT;
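/*
 * A minimal sketch of driving the PACKET_RX_RING case above from
 * userspace (TPACKET_V1/V2 layout; the sizes are arbitrary examples).
 * tp_block_size must be a multiple of PAGE_SIZE, tp_frame_size a
 * multiple of TPACKET_ALIGNMENT, and tp_frame_nr must equal
 * frames-per-block times tp_block_nr, mirroring the checks in
 * packet_set_ring() below.
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,
 *		.tp_frame_nr   = 128,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */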
3188 static int packet_getsockopt(struct socket *sock, int level, int optname,
3189 char __user *optval, int __user *optlen)
3193 struct sock *sk = sock->sk;
3194 struct packet_sock *po = pkt_sk(sk);
3196 struct tpacket_stats st;
3197 union tpacket_stats_u st_u;
3199 if (level != SOL_PACKET)
3200 return -ENOPROTOOPT;
3202 if (get_user(len, optlen))
3209 case PACKET_STATISTICS:
3210 if (po->tp_version == TPACKET_V3) {
3211 len = sizeof(struct tpacket_stats_v3);
3212 } else {
3213 if (len > sizeof(struct tpacket_stats))
3214 len = sizeof(struct tpacket_stats);
3215 }
3216 spin_lock_bh(&sk->sk_receive_queue.lock);
3217 if (po->tp_version == TPACKET_V3) {
3218 memcpy(&st_u.stats3, &po->stats,
3219 sizeof(struct tpacket_stats));
3220 st_u.stats3.tp_freeze_q_cnt =
3221 po->stats_u.stats3.tp_freeze_q_cnt;
3222 st_u.stats3.tp_packets += po->stats.tp_drops;
3223 data = &st_u.stats3;
3224 } else {
3225 st = po->stats;
3226 st.tp_packets += st.tp_drops;
3227 data = &st;
3228 }
3229 memset(&po->stats, 0, sizeof(st));
3230 spin_unlock_bh(&sk->sk_receive_queue.lock);
3232 case PACKET_AUXDATA:
3233 if (len > sizeof(int))
3239 case PACKET_ORIGDEV:
3240 if (len > sizeof(int))
3246 case PACKET_VNET_HDR:
3247 if (len > sizeof(int))
3249 val = po->has_vnet_hdr;
3253 case PACKET_VERSION:
3254 if (len > sizeof(int))
3256 val = po->tp_version;
3260 if (len > sizeof(int))
3262 if (copy_from_user(&val, optval, len))
3266 val = sizeof(struct tpacket_hdr);
3269 val = sizeof(struct tpacket2_hdr);
3272 val = sizeof(struct tpacket3_hdr);
3279 case PACKET_RESERVE:
3280 if (len > sizeof(unsigned int))
3281 len = sizeof(unsigned int);
3282 val = po->tp_reserve;
3286 if (len > sizeof(unsigned int))
3287 len = sizeof(unsigned int);
3291 case PACKET_TIMESTAMP:
3292 if (len > sizeof(int))
3294 val = po->tp_tstamp;
3298 if (len > sizeof(int))
3301 ((u32)po->fanout->id |
3302 ((u32)po->fanout->type << 16)) :
3307 return -ENOPROTOOPT;
3310 if (put_user(len, optlen))
3312 if (copy_to_user(optval, data, len))
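/*
 * Userspace reading of the PACKET_STATISTICS branch above (sketch):
 * note the memset() in the kernel path, so the counters are
 * read-and-reset and each call reports activity since the previous
 * one.
 *
 *	struct tpacket_stats stats;
 *	socklen_t slen = sizeof(stats);
 *
 *	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS,
 *		       &stats, &slen) == 0)
 *		printf("%u packets, %u dropped\n",
 *		       stats.tp_packets, stats.tp_drops);
 */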
3318 static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
3321 struct hlist_node *node;
3322 struct net_device *dev = data;
3323 struct net *net = dev_net(dev);
3326 sk_for_each_rcu(sk, node, &net->packet.sklist) {
3327 struct packet_sock *po = pkt_sk(sk);
3330 case NETDEV_UNREGISTER:
3332 packet_dev_mclist(dev, po->mclist, -1);
3336 if (dev->ifindex == po->ifindex) {
3337 spin_lock(&po->bind_lock);
3339 __unregister_prot_hook(sk, false);
3340 sk->sk_err = ENETDOWN;
3341 if (!sock_flag(sk, SOCK_DEAD))
3342 sk->sk_error_report(sk);
3344 if (msg == NETDEV_UNREGISTER) {
3346 if (po->prot_hook.dev)
3347 dev_put(po->prot_hook.dev);
3348 po->prot_hook.dev = NULL;
3350 spin_unlock(&po->bind_lock);
3354 if (dev->ifindex == po->ifindex) {
3355 spin_lock(&po->bind_lock);
3357 register_prot_hook(sk);
3358 spin_unlock(&po->bind_lock);
3368 static int packet_ioctl(struct socket *sock, unsigned int cmd,
3371 struct sock *sk = sock->sk;
3376 int amount = sk_wmem_alloc_get(sk);
3378 return put_user(amount, (int __user *)arg);
3382 struct sk_buff *skb;
3385 spin_lock_bh(&sk->sk_receive_queue.lock);
3386 skb = skb_peek(&sk->sk_receive_queue);
3389 spin_unlock_bh(&sk->sk_receive_queue.lock);
3390 return put_user(amount, (int __user *)arg);
3393 return sock_get_timestamp(sk, (struct timeval __user *)arg);
3395 return sock_get_timestampns(sk, (struct timespec __user *)arg);
3405 case SIOCGIFBRDADDR:
3406 case SIOCSIFBRDADDR:
3407 case SIOCGIFNETMASK:
3408 case SIOCSIFNETMASK:
3409 case SIOCGIFDSTADDR:
3410 case SIOCSIFDSTADDR:
3412 return inet_dgram_ops.ioctl(sock, cmd, arg);
3416 return -ENOIOCTLCMD;
3421 static unsigned int packet_poll(struct file *file, struct socket *sock,
3424 struct sock *sk = sock->sk;
3425 struct packet_sock *po = pkt_sk(sk);
3426 unsigned int mask = datagram_poll(file, sock, wait);
3428 spin_lock_bh(&sk->sk_receive_queue.lock);
3429 if (po->rx_ring.pg_vec) {
3430 if (!packet_previous_rx_frame(po, &po->rx_ring,
3432 mask |= POLLIN | POLLRDNORM;
3434 spin_unlock_bh(&sk->sk_receive_queue.lock);
3435 spin_lock_bh(&sk->sk_write_queue.lock);
3436 if (po->tx_ring.pg_vec) {
3437 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
3438 mask |= POLLOUT | POLLWRNORM;
3440 spin_unlock_bh(&sk->sk_write_queue.lock);
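/*
 * Matching userspace wait (sketch): with an RX ring mapped, the logic
 * above asserts POLLIN once the most recently filled slot is no longer
 * owned by the kernel, so a reader can block in poll(2) instead of
 * spinning on tp_status.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDNORM };
 *
 *	poll(&pfd, 1, -1);
 */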
3445 /* Dirty? Well, I still did not learn a better way to account
3446 * for user mmaps.
3447 */
3449 static void packet_mm_open(struct vm_area_struct *vma)
3451 struct file *file = vma->vm_file;
3452 struct socket *sock = file->private_data;
3453 struct sock *sk = sock->sk;
3456 atomic_inc(&pkt_sk(sk)->mapped);
3459 static void packet_mm_close(struct vm_area_struct *vma)
3461 struct file *file = vma->vm_file;
3462 struct socket *sock = file->private_data;
3463 struct sock *sk = sock->sk;
3466 atomic_dec(&pkt_sk(sk)->mapped);
3469 static const struct vm_operations_struct packet_mmap_ops = {
3470 .open = packet_mm_open,
3471 .close = packet_mm_close,
3474 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
3479 for (i = 0; i < len; i++) {
3480 if (likely(pg_vec[i].buffer)) {
3481 if (is_vmalloc_addr(pg_vec[i].buffer))
3482 vfree(pg_vec[i].buffer);
3484 free_pages((unsigned long)pg_vec[i].buffer,
3486 pg_vec[i].buffer = NULL;
3492 static char *alloc_one_pg_vec_page(unsigned long order)
3494 char *buffer = NULL;
3495 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
3496 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
3498 buffer = (char *) __get_free_pages(gfp_flags, order);
3504 * __get_free_pages failed, fall back to vmalloc
3506 buffer = vzalloc((1 << order) * PAGE_SIZE);
3512 * vmalloc failed, let's dig into swap here
3514 gfp_flags &= ~__GFP_NORETRY;
3515 buffer = (char *)__get_free_pages(gfp_flags, order);
3520 * complete and utter failure
3525 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
3527 unsigned int block_nr = req->tp_block_nr;
3531 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
3532 if (unlikely(!pg_vec))
3535 for (i = 0; i < block_nr; i++) {
3536 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
3537 if (unlikely(!pg_vec[i].buffer))
3538 goto out_free_pgvec;
3545 free_pg_vec(pg_vec, order, block_nr);
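/*
 * The ring is thus a two-level structure: pg_vec[] holds one pointer
 * per block, and each block is either a physically contiguous
 * high-order page allocation or, after the fallbacks above, a
 * vmalloc() area.  That is why free_pg_vec() and pgv_to_page() must
 * both distinguish the two cases with is_vmalloc_addr().
 */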
3550 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
3551 int closing, int tx_ring)
3553 struct pgv *pg_vec = NULL;
3554 struct packet_sock *po = pkt_sk(sk);
3555 int was_running, order = 0;
3556 struct packet_ring_buffer *rb;
3557 struct sk_buff_head *rb_queue;
3560 /* Alias added to keep code churn minimal */
3561 struct tpacket_req *req = &req_u->req;
3563 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
3564 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
3565 WARN(1, "Tx-ring is not supported.\n");
3569 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
3570 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
3574 if (atomic_read(&po->mapped))
3576 if (atomic_read(&rb->pending))
3580 if (req->tp_block_nr) {
3581 /* Sanity tests and some calculations */
3583 if (unlikely(rb->pg_vec))
3586 switch (po->tp_version) {
3588 po->tp_hdrlen = TPACKET_HDRLEN;
3591 po->tp_hdrlen = TPACKET2_HDRLEN;
3594 po->tp_hdrlen = TPACKET3_HDRLEN;
3599 if (unlikely((int)req->tp_block_size <= 0))
3601 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
3603 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
3606 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
3609 rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
3610 if (unlikely(rb->frames_per_block <= 0))
3612 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
3617 order = get_order(req->tp_block_size);
3618 pg_vec = alloc_pg_vec(req, order);
3619 if (unlikely(!pg_vec))
3621 switch (po->tp_version) {
3623 /* Transmit path is not supported. We checked
3624 * it above but just being paranoid
3627 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
3636 if (unlikely(req->tp_frame_nr))
3642 /* Detach socket from network */
3643 spin_lock(&po->bind_lock);
3644 was_running = po->running;
3648 __unregister_prot_hook(sk, false);
3650 spin_unlock(&po->bind_lock);
3655 mutex_lock(&po->pg_vec_lock);
3656 if (closing || atomic_read(&po->mapped) == 0) {
3658 spin_lock_bh(&rb_queue->lock);
3659 swap(rb->pg_vec, pg_vec);
3660 rb->frame_max = (req->tp_frame_nr - 1);
3662 rb->frame_size = req->tp_frame_size;
3663 spin_unlock_bh(&rb_queue->lock);
3665 swap(rb->pg_vec_order, order);
3666 swap(rb->pg_vec_len, req->tp_block_nr);
3668 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
3669 po->prot_hook.func = (po->rx_ring.pg_vec) ?
3670 tpacket_rcv : packet_rcv;
3671 skb_queue_purge(rb_queue);
3672 if (atomic_read(&po->mapped))
3673 pr_err("packet_mmap: vma is busy: %d\n",
3674 atomic_read(&po->mapped));
3676 mutex_unlock(&po->pg_vec_lock);
3678 spin_lock(&po->bind_lock);
3681 register_prot_hook(sk);
3683 spin_unlock(&po->bind_lock);
3684 if (closing && (po->tp_version > TPACKET_V2)) {
3685 /* Because we don't support block-based V3 on tx-ring */
3687 prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
3692 free_pg_vec(pg_vec, order, req->tp_block_nr);
3697 static int packet_mmap(struct file *file, struct socket *sock,
3698 struct vm_area_struct *vma)
3700 struct sock *sk = sock->sk;
3701 struct packet_sock *po = pkt_sk(sk);
3702 unsigned long size, expected_size;
3703 struct packet_ring_buffer *rb;
3704 unsigned long start;
3711 mutex_lock(&po->pg_vec_lock);
3714 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3716 expected_size += rb->pg_vec_len
3722 if (expected_size == 0)
3725 size = vma->vm_end - vma->vm_start;
3726 if (size != expected_size)
3729 start = vma->vm_start;
3730 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3731 if (rb->pg_vec == NULL)
3734 for (i = 0; i < rb->pg_vec_len; i++) {
3736 void *kaddr = rb->pg_vec[i].buffer;
3739 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
3740 page = pgv_to_page(kaddr);
3741 err = vm_insert_page(vma, start, page);
3750 atomic_inc(&po->mapped);
3751 vma->vm_ops = &packet_mmap_ops;
3755 mutex_unlock(&po->pg_vec_lock);
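/*
 * Userspace counterpart of packet_mmap() (illustrative): both rings
 * are mapped by a single mmap(2) whose length must equal the sum of
 * the configured ring sizes, RX first, then TX, matching the
 * expected_size computation above.  handle_frame() is a hypothetical
 * consumer.
 *
 *	size_t len = 64 * 4096;   the RX ring from the earlier sketch
 *	char *ring = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *	struct tpacket_hdr *h = (struct tpacket_hdr *)ring;
 *
 *	if (h->tp_status & TP_STATUS_USER) {
 *		handle_frame((char *)h + h->tp_mac, h->tp_snaplen);
 *		h->tp_status = TP_STATUS_KERNEL;
 *	}
 */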
3759 static const struct proto_ops packet_ops_spkt = {
3760 .family = PF_PACKET,
3761 .owner = THIS_MODULE,
3762 .release = packet_release,
3763 .bind = packet_bind_spkt,
3764 .connect = sock_no_connect,
3765 .socketpair = sock_no_socketpair,
3766 .accept = sock_no_accept,
3767 .getname = packet_getname_spkt,
3768 .poll = datagram_poll,
3769 .ioctl = packet_ioctl,
3770 .listen = sock_no_listen,
3771 .shutdown = sock_no_shutdown,
3772 .setsockopt = sock_no_setsockopt,
3773 .getsockopt = sock_no_getsockopt,
3774 .sendmsg = packet_sendmsg_spkt,
3775 .recvmsg = packet_recvmsg,
3776 .mmap = sock_no_mmap,
3777 .sendpage = sock_no_sendpage,
3780 static const struct proto_ops packet_ops = {
3781 .family = PF_PACKET,
3782 .owner = THIS_MODULE,
3783 .release = packet_release,
3784 .bind = packet_bind,
3785 .connect = sock_no_connect,
3786 .socketpair = sock_no_socketpair,
3787 .accept = sock_no_accept,
3788 .getname = packet_getname,
3789 .poll = packet_poll,
3790 .ioctl = packet_ioctl,
3791 .listen = sock_no_listen,
3792 .shutdown = sock_no_shutdown,
3793 .setsockopt = packet_setsockopt,
3794 .getsockopt = packet_getsockopt,
3795 .sendmsg = packet_sendmsg,
3796 .recvmsg = packet_recvmsg,
3797 .mmap = packet_mmap,
3798 .sendpage = sock_no_sendpage,
3801 static const struct net_proto_family packet_family_ops = {
3802 .family = PF_PACKET,
3803 .create = packet_create,
3804 .owner = THIS_MODULE,
3807 static struct notifier_block packet_netdev_notifier = {
3808 .notifier_call = packet_notifier,
3811 #ifdef CONFIG_PROC_FS
3813 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
3816 struct net *net = seq_file_net(seq);
3819 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
3822 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3824 struct net *net = seq_file_net(seq);
3825 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
3828 static void packet_seq_stop(struct seq_file *seq, void *v)
3834 static int packet_seq_show(struct seq_file *seq, void *v)
3836 if (v == SEQ_START_TOKEN)
3837 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
3839 struct sock *s = sk_entry(v);
3840 const struct packet_sock *po = pkt_sk(s);
3843 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
3845 atomic_read(&s->sk_refcnt),
3850 atomic_read(&s->sk_rmem_alloc),
3858 static const struct seq_operations packet_seq_ops = {
3859 .start = packet_seq_start,
3860 .next = packet_seq_next,
3861 .stop = packet_seq_stop,
3862 .show = packet_seq_show,
3865 static int packet_seq_open(struct inode *inode, struct file *file)
3867 return seq_open_net(inode, file, &packet_seq_ops,
3868 sizeof(struct seq_net_private));
3871 static const struct file_operations packet_seq_fops = {
3872 .owner = THIS_MODULE,
3873 .open = packet_seq_open,
3875 .llseek = seq_lseek,
3876 .release = seq_release_net,
3881 static int __net_init packet_net_init(struct net *net)
3883 spin_lock_init(&net->packet.sklist_lock);
3884 INIT_HLIST_HEAD(&net->packet.sklist);
3886 if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
3892 static void __net_exit packet_net_exit(struct net *net)
3894 proc_net_remove(net, "packet");
3897 static struct pernet_operations packet_net_ops = {
3898 .init = packet_net_init,
3899 .exit = packet_net_exit,
3903 static void __exit packet_exit(void)
3905 unregister_netdevice_notifier(&packet_netdev_notifier);
3906 unregister_pernet_subsys(&packet_net_ops);
3907 sock_unregister(PF_PACKET);
3908 proto_unregister(&packet_proto);
3911 static int __init packet_init(void)
3913 int rc = proto_register(&packet_proto, 0);
3918 sock_register(&packet_family_ops);
3919 register_pernet_subsys(&packet_net_ops);
3920 register_netdevice_notifier(&packet_netdev_notifier);
3925 module_init(packet_init);
3926 module_exit(packet_exit);
3927 MODULE_LICENSE("GPL");
3928 MODULE_ALIAS_NETPROTO(PF_PACKET);