/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/cacheflush.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <net/inet_common.h>
/*
   Assumptions:
   - if device has no dev->hard_header routine, it adds and removes ll header
     inside itself. In this case ll header is invisible outside of device,
     but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit into the reserved space (tunnel); others are silly
     (PPP).
   - packet socket receives packets with pulled ll header,
     so that SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header != NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header == NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the ll
		 header. PPP does this, which is wrong because it introduces
		 asymmetry between rx and tx paths.
   data       -> data

Outgoing, dev->hard_header == NULL
   mac_header -> data. ll header is still not built!
   data       -> data

Resume
  If dev->hard_header == NULL we are unlikely to restore a sensible ll header.

On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position,
   the packet classifier depends on it.
 */
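
/*
 * Illustrative user-space sketch (not part of this file; guarded out of
 * the build): the rules above determine what a packet socket delivers.
 * With SOCK_RAW the ll header is present at the head of the buffer; with
 * SOCK_DGRAM it has been pulled and is reported only via sockaddr_ll.
 * Assumes an Ethernet-style device and the ETH_P_ALL catch-all protocol.
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>

static int open_packet_socket(int type)
{
	/* type: SOCK_RAW keeps the ll header, SOCK_DGRAM strips it */
	return socket(AF_PACKET, type, htons(ETH_P_ALL));
}
#endif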
/* Private packet socket structures. */

struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};
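
/*
 * Illustrative user-space sketch (guarded out of the build): membership
 * requests arrive through setsockopt(PACKET_ADD_MEMBERSHIP) as a struct
 * packet_mreq; the kernel copies them into the wider packet_mreq_max
 * above. Assumes fd is an AF_PACKET socket and ifindex is valid.
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <string.h>

static int enable_promisc(int fd, int ifindex)
{
	struct packet_mreq mreq;

	memset(&mreq, 0, sizeof(mreq));
	mreq.mr_ifindex = ifindex;
	mreq.mr_type = PACKET_MR_PROMISC;
	return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
			  &mreq, sizeof(mreq));
}
#endif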
static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
		int closing, int tx_ring);

struct pgv {
	char *buffer;
};

struct packet_ring_buffer {
	struct pgv		*pg_vec;
	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	atomic_t		pending;
};

static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);

static void packet_flush_mclist(struct sock *sk);
/* struct sock has to be the first member of packet_sock */
struct packet_sock {
	struct sock		sk;
	struct tpacket_stats	stats;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running:1,	/* prot_hook is attached */
				auxdata:1,
				origdev:1,
				has_vnet_hdr:1;
	int			ifindex;	/* bound device */
	__be16			num;
	struct packet_mclist	*mclist;
	atomic_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_loss:1;
	unsigned int		tp_tstamp;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};
struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
static inline __pure struct page *pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}
static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	default:
		pr_err("TPACKET version not supported\n");
		BUG();
	}

	smp_wmb();
}
static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	default:
		pr_err("TPACKET version not supported\n");
		BUG();
		return 0;
	}
}
static void *packet_lookup_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int position,
		int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}
static inline void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static inline void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static inline void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static inline struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}
static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}
static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */
	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have ll header pulled,
	 *	push it back.
	 *
	 *	For outgoing ones skb->data == skb_mac_header(skb)
	 *	so that this procedure is noop.
	 */
	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */
	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.
	 */
	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}
/*
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame
 */

static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	__be16 proto = 0;
	int err;

	/*
	 *	Get and verify the address.
	 */
	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */
	saddr->spkt_device[13] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */
	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (len > (dev->mtu + dev->hard_header_len)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
			err = -EMSGSIZE;
			goto out_unlock;
		}
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
	if (err < 0)
		goto out_unlock;

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}
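
/*
 * Illustrative user-space sketch (guarded out of the build): the legacy
 * SOCK_PACKET path above takes a complete frame plus a sockaddr_pkt
 * naming the device; no fragmentation or header construction is done
 * for the caller. The protocol value here is just an example.
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>
#include <string.h>

static ssize_t spkt_send_frame(int fd, const char *ifname,
			       const void *frame, size_t len)
{
	struct sockaddr_pkt spkt;

	memset(&spkt, 0, sizeof(spkt));
	spkt.spkt_family = AF_PACKET;
	strncpy((char *)spkt.spkt_device, ifname, sizeof(spkt.spkt_device) - 1);
	spkt.spkt_protocol = htons(ETH_P_IP);	/* example protocol */

	/* frame must already contain the link-layer header */
	return sendto(fd, frame, len, 0,
		      (struct sockaddr *)&spkt, sizeof(spkt));
}
#endif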
static inline unsigned int run_filter(const struct sk_buff *skb,
				      const struct sock *sk,
				      unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = SK_RUN_FILTER(filter, skb);
	rcu_read_unlock();

	return res;
}

/*
 * This function does lazy skb cloning in the hope that most packets
 * are discarded by BPF.
 *
 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
 * and skb->cb are mangled. It works because (and until) packets
 * falling here are owned by current CPU. Output packets are cloned
 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 * sequentially, so that if we return skb to original state on exit,
 * we will not harm anyone.
 */
static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	skb->dev = dev;

	if (dev->header_ops) {
		/* The device has an explicit notion of ll header,
		 * exported to higher levels.
		 *
		 * Otherwise, the device hides details of its frame
		 * structure, so that corresponding packet head is
		 * never delivered to user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		kfree_skb(skb);
		skb = nskb;
	}

	BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
		     sizeof(skb->cb));

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

	PACKET_SKB_CB(skb)->origlen = skb->len;

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_packets++;
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk, skb->len);
	return 0;

drop_n_acct:
	po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	consume_skb(skb);
	return 0;
}
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
	unsigned short macoff, netoff, hdrlen;
	struct sk_buff *copy_skb = NULL;
	struct timeval tv;
	struct timespec ts;
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	if (dev->header_ops) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= TP_STATUS_CSUMNOTREADY;

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
				  po->tp_reserve;
	} else {
		unsigned maclen = skb_network_offset(skb);
		netoff = TPACKET_ALIGN(po->tp_hdrlen +
				       (maclen < 16 ? 16 : maclen)) +
			po->tp_reserve;
		macoff = netoff - maclen;
	}

	if (macoff + snaplen > po->rx_ring.frame_size) {
		if (po->copy_thresh &&
		    atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
		    (unsigned)sk->sk_rcvbuf) {
			if (skb_shared(skb)) {
				copy_skb = skb_clone(skb, GFP_ATOMIC);
			} else {
				copy_skb = skb_get(skb);
				skb_head = skb->data;
			}
			if (copy_skb)
				skb_set_owner_r(copy_skb, sk);
		}
		snaplen = po->rx_ring.frame_size - macoff;
		if ((int)snaplen < 0)
			snaplen = 0;
	}

	spin_lock(&sk->sk_receive_queue.lock);
	h.raw = packet_current_frame(po, &po->rx_ring, TP_STATUS_KERNEL);
	if (!h.raw)
		goto ring_is_full;
	packet_increment_head(&po->rx_ring);
	po->stats.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	}
	if (!po->stats.tp_drops)
		status &= ~TP_STATUS_LOSING;
	spin_unlock(&sk->sk_receive_queue.lock);

	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);

	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_len = skb->len;
		h.h1->tp_snaplen = snaplen;
		h.h1->tp_mac = macoff;
		h.h1->tp_net = netoff;
		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
				&& shhwtstamps->syststamp.tv64)
			tv = ktime_to_timeval(shhwtstamps->syststamp);
		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
				&& shhwtstamps->hwtstamp.tv64)
			tv = ktime_to_timeval(shhwtstamps->hwtstamp);
		else if (skb->tstamp.tv64)
			tv = ktime_to_timeval(skb->tstamp);
		else
			do_gettimeofday(&tv);
		h.h1->tp_sec = tv.tv_sec;
		h.h1->tp_usec = tv.tv_usec;
		hdrlen = sizeof(*h.h1);
		break;
	case TPACKET_V2:
		h.h2->tp_len = skb->len;
		h.h2->tp_snaplen = snaplen;
		h.h2->tp_mac = macoff;
		h.h2->tp_net = netoff;
		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
				&& shhwtstamps->syststamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->syststamp);
		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
				&& shhwtstamps->hwtstamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->hwtstamp);
		else if (skb->tstamp.tv64)
			ts = ktime_to_timespec(skb->tstamp);
		else
			getnstimeofday(&ts);
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		if (vlan_tx_tag_present(skb)) {
			h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
			status |= TP_STATUS_VLAN_VALID;
		} else {
			h.h2->tp_vlan_tci = 0;
		}
		hdrlen = sizeof(*h.h2);
		break;
	default:
		BUG();
	}

	sll = h.raw + TPACKET_ALIGN(hdrlen);
	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	__packet_set_status(po, h.raw, status);
	smp_mb();
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	{
		u8 *start, *end;

		end = (u8 *)PAGE_ALIGN((unsigned long)h.raw + macoff + snaplen);
		for (start = h.raw; start < end; start += PAGE_SIZE)
			flush_dcache_page(pgv_to_page(start));
	}
#endif

	sk->sk_data_ready(sk, 0);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;

ring_is_full:
	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

	sk->sk_data_ready(sk, 0);
	kfree_skb(copy_skb);
	goto drop_n_restore;
}
static void tpacket_destruct_skb(struct sk_buff *skb)
{
	struct packet_sock *po = pkt_sk(skb->sk);
	void *ph;

	if (likely(po->tx_ring.pg_vec)) {
		ph = skb_shinfo(skb)->destructor_arg;
		BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
		BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
		atomic_dec(&po->tx_ring.pending);
		__packet_set_status(po, ph, TP_STATUS_AVAILABLE);
	}

	sock_wfree(skb);
}
static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
		void *frame, struct net_device *dev, int size_max,
		__be16 proto, unsigned char *addr)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} ph;
	int to_write, offset, len, tp_len, nr_frags, len_max;
	struct socket *sock = po->sk.sk_socket;
	struct page *page;
	void *data;
	int err;

	ph.raw = frame;

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = po->sk.sk_priority;
	skb->mark = po->sk.sk_mark;
	skb_shinfo(skb)->destructor_arg = ph.raw;

	switch (po->tp_version) {
	case TPACKET_V2:
		tp_len = ph.h2->tp_len;
		break;
	default:
		tp_len = ph.h1->tp_len;
		break;
	}
	if (unlikely(tp_len > size_max)) {
		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
		return -EMSGSIZE;
	}

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);

	data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
	to_write = tp_len;

	if (sock->type == SOCK_DGRAM) {
		err = dev_hard_header(skb, dev, ntohs(proto), addr,
				NULL, tp_len);
		if (unlikely(err < 0))
			return -EINVAL;
	} else if (dev->hard_header_len) {
		/* net device doesn't like empty head */
		if (unlikely(tp_len <= dev->hard_header_len)) {
			pr_err("packet size is too short (%d < %d)\n",
			       tp_len, dev->hard_header_len);
			return -EINVAL;
		}

		skb_push(skb, dev->hard_header_len);
		err = skb_store_bits(skb, 0, data,
				dev->hard_header_len);
		if (unlikely(err))
			return err;

		data += dev->hard_header_len;
		to_write -= dev->hard_header_len;
	}

	err = -EFAULT;
	offset = offset_in_page(data);
	len_max = PAGE_SIZE - offset;
	len = ((to_write > len_max) ? len_max : to_write);

	skb->data_len = to_write;
	skb->len += to_write;
	skb->truesize += to_write;
	atomic_add(to_write, &po->sk.sk_wmem_alloc);

	while (likely(to_write)) {
		nr_frags = skb_shinfo(skb)->nr_frags;

		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
			pr_err("Packet exceed the number of skb frags(%lu)\n",
			       MAX_SKB_FRAGS);
			return -EFAULT;
		}

		page = pgv_to_page(data);
		data += len;
		flush_dcache_page(page);
		get_page(page);
		skb_fill_page_desc(skb, nr_frags, page, offset, len);
		to_write -= len;
		offset = 0;
		len_max = PAGE_SIZE;
		len = ((to_write > len_max) ? len_max : to_write);
	}

	return tp_len;
}
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	int ifindex, err, reserve = 0;
	void *ph;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	int tp_len, size_max;
	unsigned char *addr;
	int len_sum = 0;
	int status = 0;

	mutex_lock(&po->pg_vec_lock);

	err = -EBUSY;
	if (saddr == NULL) {
		ifindex	= po->ifindex;
		proto	= po->num;
		addr	= NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen
					+ offsetof(struct sockaddr_ll,
						sll_addr)))
			goto out;
		ifindex	= saddr->sll_ifindex;
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
	}

	dev = dev_get_by_index(sock_net(&po->sk), ifindex);
	err = -ENXIO;
	if (unlikely(dev == NULL))
		goto out;

	reserve = dev->hard_header_len;

	err = -ENETDOWN;
	if (unlikely(!(dev->flags & IFF_UP)))
		goto out_put;

	size_max = po->tx_ring.frame_size
		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));

	if (size_max > dev->mtu + reserve)
		size_max = dev->mtu + reserve;

	do {
		ph = packet_current_frame(po, &po->tx_ring,
				TP_STATUS_SEND_REQUEST);

		if (unlikely(ph == NULL)) {
			schedule();
			continue;
		}

		status = TP_STATUS_SEND_REQUEST;
		skb = sock_alloc_send_skb(&po->sk,
				LL_ALLOCATED_SPACE(dev)
				+ sizeof(struct sockaddr_ll),
				0, &err);

		if (unlikely(skb == NULL))
			goto out_status;

		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
				addr);

		if (unlikely(tp_len < 0)) {
			if (po->tp_loss) {
				__packet_set_status(po, ph,
						TP_STATUS_AVAILABLE);
				packet_increment_head(&po->tx_ring);
				kfree_skb(skb);
				continue;
			} else {
				status = TP_STATUS_WRONG_FORMAT;
				err = tp_len;
				goto out_status;
			}
		}

		skb->destructor = tpacket_destruct_skb;
		__packet_set_status(po, ph, TP_STATUS_SENDING);
		atomic_inc(&po->tx_ring.pending);

		status = TP_STATUS_SEND_REQUEST;
		err = dev_queue_xmit(skb);
		if (unlikely(err > 0)) {
			err = net_xmit_errno(err);
			if (err && __packet_get_status(po, ph) ==
				   TP_STATUS_AVAILABLE) {
				/* skb was destructed already */
				skb = NULL;
				goto out_status;
			}
			/*
			 * skb was dropped but not destructed yet;
			 * let's treat it like congestion or err < 0
			 */
			err = 0;
		}
		packet_increment_head(&po->tx_ring);
		len_sum += tp_len;
	} while (likely((ph != NULL) ||
			((!(msg->msg_flags & MSG_DONTWAIT)) &&
			 (atomic_read(&po->tx_ring.pending))))
		);

	err = len_sum;
	goto out_put;

out_status:
	__packet_set_status(po, ph, status);
	kfree_skb(skb);
out_put:
	dev_put(dev);
out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
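
/*
 * Illustrative user-space sketch (guarded out of the build): driving the
 * TX ring loop above. The caller writes the frame behind its tpacket_hdr,
 * flips the status to TP_STATUS_SEND_REQUEST, and a send() kicks
 * tpacket_snd(). Assumes a TPACKET_V1 SOCK_RAW ring already configured
 * and mmap()ed; the data offset mirrors tpacket_fill_skb() above.
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <string.h>

static int tx_ring_send(int fd, void *frame_slot,
			const void *data, unsigned int len)
{
	struct tpacket_hdr *hdr = frame_slot;

	if (hdr->tp_status != TP_STATUS_AVAILABLE)
		return -1;	/* kernel still owns this slot */

	/* payload lives at tp_hdrlen - sizeof(struct sockaddr_ll) */
	memcpy((char *)frame_slot + TPACKET_HDRLEN -
	       sizeof(struct sockaddr_ll), data, len);
	hdr->tp_len = len;
	hdr->tp_status = TP_STATUS_SEND_REQUEST;

	/* kick the kernel; MSG_DONTWAIT returns instead of looping */
	return (int)send(fd, NULL, 0, MSG_DONTWAIT);
}
#endif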
static inline struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
					       size_t reserve, size_t len,
					       size_t linear, int noblock,
					       int *err)
{
	struct sk_buff *skb;

	/* Under a page?  Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err);
	if (!skb)
		return NULL;

	skb_reserve(skb, reserve);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}
static int packet_snd(struct socket *sock,
		      struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	unsigned char *addr;
	int ifindex, err, reserve = 0;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int offset = 0;
	int vnet_hdr_len;
	struct packet_sock *po = pkt_sk(sk);
	unsigned short gso_type = 0;

	/*
	 *	Get and verify the address.
	 */
	if (saddr == NULL) {
		ifindex	= po->ifindex;
		proto	= po->num;
		addr	= NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
			goto out;
		ifindex	= saddr->sll_ifindex;
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
	}

	dev = dev_get_by_index(sock_net(sk), ifindex);
	err = -ENXIO;
	if (dev == NULL)
		goto out_unlock;
	if (sock->type == SOCK_RAW)
		reserve = dev->hard_header_len;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	if (po->has_vnet_hdr) {
		vnet_hdr_len = sizeof(vnet_hdr);

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto out_unlock;

		len -= vnet_hdr_len;

		err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
				       vnet_hdr_len);
		if (err < 0)
			goto out_unlock;

		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
		      vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = vnet_hdr.csum_start +
						 vnet_hdr.csum_offset + 2;

		err = -EINVAL;
		if (vnet_hdr.hdr_len > len)
			goto out_unlock;

		if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
			switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
			case VIRTIO_NET_HDR_GSO_TCPV4:
				gso_type = SKB_GSO_TCPV4;
				break;
			case VIRTIO_NET_HDR_GSO_TCPV6:
				gso_type = SKB_GSO_TCPV6;
				break;
			case VIRTIO_NET_HDR_GSO_UDP:
				gso_type = SKB_GSO_UDP;
				break;
			default:
				goto out_unlock;
			}

			if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
				gso_type |= SKB_GSO_TCP_ECN;

			if (vnet_hdr.gso_size == 0)
				goto out_unlock;
		}
	}

	err = -EMSGSIZE;
	if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN))
		goto out_unlock;

	err = -ENOBUFS;
	skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
			       LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
			       msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out_unlock;

	skb_set_network_header(skb, reserve);

	err = -EINVAL;
	if (sock->type == SOCK_DGRAM &&
	    (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
		goto out_free;

	/* Returns -EFAULT on error */
	err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
	if (err)
		goto out_free;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
	if (err < 0)
		goto out_free;

	if (!gso_type && (len > dev->mtu + reserve)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
			err = -EMSGSIZE;
			goto out_free;
		}
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	if (po->has_vnet_hdr) {
		if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
						  vnet_hdr.csum_offset)) {
				err = -EINVAL;
				goto out_free;
			}
		}

		skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;

		len += vnet_hdr_len;
	}

	/*
	 *	Now send it
	 */
	err = dev_queue_xmit(skb);
	if (err > 0 && (err = net_xmit_errno(err)) != 0)
		goto out_unlock;

	dev_put(dev);

	return len;

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev)
		dev_put(dev);
out:
	return err;
}
static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
		struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);

	if (po->tx_ring.pg_vec)
		return tpacket_snd(po, msg);
	else
		return packet_snd(sock, msg, len);
}
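
/*
 * Illustrative user-space sketch (guarded out of the build): when
 * PACKET_VNET_HDR is enabled on a SOCK_RAW socket, packet_snd() above
 * expects each sendmsg() buffer to begin with a struct virtio_net_hdr
 * describing checksum/GSO state, followed by the frame itself.
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/virtio_net.h>
#include <string.h>

static int enable_vnet_hdr(int fd)
{
	int val = 1;

	return setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR,
			  &val, sizeof(val));
}

static ssize_t send_frame_with_vnet_hdr(int fd, const void *frame, size_t len)
{
	struct virtio_net_hdr vh;
	struct iovec iov[2];
	struct msghdr msg;

	memset(&vh, 0, sizeof(vh));	/* no checksum offload, no GSO */
	vh.gso_type = VIRTIO_NET_HDR_GSO_NONE;

	iov[0].iov_base = &vh;
	iov[0].iov_len = sizeof(vh);
	iov[1].iov_base = (void *)frame;
	iov[1].iov_len = len;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = iov;
	msg.msg_iovlen = 2;
	return sendmsg(fd, &msg, 0);
}
#endif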
/*
 *	Close a PACKET socket. This is fairly simple. We immediately go
 *	to 'closed' state and remove our protocol entry in the device list.
 */

static int packet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po;
	struct net *net;
	struct tpacket_req req;

	if (!sk)
		return 0;

	net = sock_net(sk);
	po = pkt_sk(sk);

	spin_lock_bh(&net->packet.sklist_lock);
	sk_del_node_init_rcu(sk);
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	spin_unlock_bh(&net->packet.sklist_lock);

	spin_lock(&po->bind_lock);
	if (po->running) {
		/*
		 * Remove from protocol table
		 */
		po->running = 0;
		po->num = 0;
		__dev_remove_pack(&po->prot_hook);
		__sock_put(sk);
	}
	spin_unlock(&po->bind_lock);

	packet_flush_mclist(sk);

	memset(&req, 0, sizeof(req));

	if (po->rx_ring.pg_vec)
		packet_set_ring(sk, &req, 1, 0);

	if (po->tx_ring.pg_vec)
		packet_set_ring(sk, &req, 1, 1);

	synchronize_net();
	/*
	 *	Now the socket is dead. No more input will appear.
	 */
	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge queues */
	skb_queue_purge(&sk->sk_receive_queue);
	sk_refcnt_debug_release(sk);

	sock_put(sk);
	return 0;
}
/*
 *	Attach a packet hook.
 */

static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
{
	struct packet_sock *po = pkt_sk(sk);
	/*
	 *	Detach an existing hook if present.
	 */

	lock_sock(sk);

	spin_lock(&po->bind_lock);
	if (po->running) {
		__sock_put(sk);
		po->running = 0;
		po->num = 0;
		spin_unlock(&po->bind_lock);
		dev_remove_pack(&po->prot_hook);
		spin_lock(&po->bind_lock);
	}

	po->num = protocol;
	po->prot_hook.type = protocol;
	po->prot_hook.dev = dev;

	po->ifindex = dev ? dev->ifindex : 0;

	if (protocol == 0)
		goto out_unlock;

	if (!dev || (dev->flags & IFF_UP)) {
		dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	} else {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
	}

out_unlock:
	spin_unlock(&po->bind_lock);
	release_sock(sk);
	return 0;
}
/*
 *	Bind a packet socket to a device
 */

static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
			    int addr_len)
{
	struct sock *sk = sock->sk;
	char name[15];
	struct net_device *dev;
	int err = -ENODEV;

	/*
	 *	Check legality
	 */
	if (addr_len != sizeof(struct sockaddr))
		return -EINVAL;
	strlcpy(name, uaddr->sa_data, sizeof(name));

	dev = dev_get_by_name(sock_net(sk), name);
	if (dev) {
		err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
		dev_put(dev);
	}
	return err;
}

static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
	struct sock *sk = sock->sk;
	struct net_device *dev = NULL;
	int err;

	/*
	 *	Check legality
	 */
	if (addr_len < sizeof(struct sockaddr_ll))
		return -EINVAL;
	if (sll->sll_family != AF_PACKET)
		return -EINVAL;

	if (sll->sll_ifindex) {
		err = -ENODEV;
		dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
		if (dev == NULL)
			goto out;
	}
	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
	if (dev)
		dev_put(dev);

out:
	return err;
}
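
/*
 * Illustrative user-space sketch (guarded out of the build): binding a
 * packet socket to one interface by filling sockaddr_ll with the
 * ifindex, which is the layout packet_bind() above expects. Assumes the
 * if_nametoindex() lookup succeeds.
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <string.h>

static int bind_to_iface(int fd, const char *ifname)
{
	struct sockaddr_ll sll;

	memset(&sll, 0, sizeof(sll));
	sll.sll_family = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_ALL);
	sll.sll_ifindex = if_nametoindex(ifname);
	return bind(fd, (struct sockaddr *)&sll, sizeof(sll));
}
#endif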
static struct proto packet_proto = {
	.name	  = "PACKET",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct packet_sock),
};

/*
 *	Create a packet of type SOCK_PACKET.
 */

static int packet_create(struct net *net, struct socket *sock, int protocol,
			 int kern)
{
	struct sock *sk;
	struct packet_sock *po;
	__be16 proto = (__force __be16)protocol; /* weird, but documented */
	int err;

	if (!capable(CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
	    sock->type != SOCK_PACKET)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	err = -ENOBUFS;
	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
	if (sk == NULL)
		goto out;

	sock->ops = &packet_ops;
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;

	sock_init_data(sock, sk);

	po = pkt_sk(sk);
	sk->sk_family = PF_PACKET;
	po->num = proto;

	sk->sk_destruct = packet_sock_destruct;
	sk_refcnt_debug_inc(sk);

	/*
	 *	Attach a protocol block
	 */
	spin_lock_init(&po->bind_lock);
	mutex_init(&po->pg_vec_lock);
	po->prot_hook.func = packet_rcv;

	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;

	po->prot_hook.af_packet_priv = sk;

	if (proto) {
		po->prot_hook.type = proto;
		dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}

	spin_lock_bh(&net->packet.sklist_lock);
	sk_add_node_rcu(sk, &net->packet.sklist);
	sock_prot_inuse_add(net, &packet_proto, 1);
	spin_unlock_bh(&net->packet.sklist_lock);

	return 0;
out:
	return err;
}
static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb, *skb2;
	int copied, err;

	err = -EAGAIN;
	skb = skb_dequeue(&sk->sk_error_queue);
	if (skb == NULL)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free_skb;

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);
	put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
		 sizeof(serr->ee), &serr->ee);

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

	/* Reset and regenerate socket error */
	spin_lock_bh(&sk->sk_error_queue.lock);
	sk->sk_err = 0;
	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
		spin_unlock_bh(&sk->sk_error_queue.lock);
		sk->sk_error_report(sk);
	} else
		spin_unlock_bh(&sk->sk_error_queue.lock);

out_free_skb:
	kfree_skb(skb);
out:
	return err;
}
/*
 *	Pull a packet from our receive queue and hand it to the user.
 *	If necessary we block.
 */

static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	struct sockaddr_ll *sll;
	int vnet_hdr_len = 0;

	err = -EINVAL;
	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
		goto out;

#if 0
	/* What error should we return now? EUNATTACH? */
	if (pkt_sk(sk)->ifindex < 0)
		return -ENODEV;
#endif

	if (flags & MSG_ERRQUEUE) {
		err = packet_recv_error(sk, msg, len);
		goto out;
	}

	/*
	 *	Call the generic datagram receiver. This handles all sorts
	 *	of horrible races and re-entrancy so we can forget about it
	 *	in the protocol layers.
	 *
	 *	Now it will return ENETDOWN if the device has just gone down,
	 *	but then it will block.
	 */
	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);

	/*
	 *	An error occurred so return it. Because skb_recv_datagram()
	 *	handles the blocking we don't need to see or worry about
	 *	blocking retries.
	 */
	if (skb == NULL)
		goto out;

	if (pkt_sk(sk)->has_vnet_hdr) {
		struct virtio_net_hdr vnet_hdr = { 0 };

		err = -EINVAL;
		vnet_hdr_len = sizeof(vnet_hdr);
		if (len < vnet_hdr_len)
			goto out_free;

		len -= vnet_hdr_len;

		if (skb_is_gso(skb)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			/* This is a hint as to how much should be linear. */
			vnet_hdr.hdr_len = skb_headlen(skb);
			vnet_hdr.gso_size = sinfo->gso_size;
			if (sinfo->gso_type & SKB_GSO_TCPV4)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
			else if (sinfo->gso_type & SKB_GSO_TCPV6)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
			else if (sinfo->gso_type & SKB_GSO_UDP)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
			else if (sinfo->gso_type & SKB_GSO_FCOE)
				goto out_free;
			else
				BUG();
			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
				vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		} else
			vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			vnet_hdr.csum_start = skb_checksum_start_offset(skb);
			vnet_hdr.csum_offset = skb->csum_offset;
		} /* else everything is zero */

		err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
				     vnet_hdr_len);
		if (err < 0)
			goto out_free;
	}

	/*
	 *	If the address length field is there to be filled in, we fill
	 *	it in now.
	 */
	sll = &PACKET_SKB_CB(skb)->sa.ll;
	if (sock->type == SOCK_PACKET)
		msg->msg_namelen = sizeof(struct sockaddr_pkt);
	else
		msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);

	/*
	 *	You lose any data beyond the buffer you gave. If it worries a
	 *	user program they can ask the device for its MTU anyway.
	 */
	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free;

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name)
		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
		       msg->msg_namelen);

	if (pkt_sk(sk)->auxdata) {
		struct tpacket_auxdata aux;

		aux.tp_status = TP_STATUS_USER;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
		aux.tp_len = PACKET_SKB_CB(skb)->origlen;
		aux.tp_snaplen = skb->len;
		aux.tp_mac = 0;
		aux.tp_net = skb_network_offset(skb);
		if (vlan_tx_tag_present(skb)) {
			aux.tp_vlan_tci = vlan_tx_tag_get(skb);
			aux.tp_status |= TP_STATUS_VLAN_VALID;
		} else {
			aux.tp_vlan_tci = 0;
		}
		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
	}

	/*
	 *	Free or return the buffer as appropriate. Again this
	 *	hides all the races and re-entrancy issues from us.
	 */
	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;
}
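
/*
 * Illustrative user-space sketch (guarded out of the build): reading the
 * PACKET_AUXDATA control message that packet_recvmsg() above attaches
 * once setsockopt(PACKET_AUXDATA) has been enabled on the socket.
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <string.h>

static void recv_with_auxdata(int fd, void *buf, size_t buflen)
{
	char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
	struct iovec iov = { .iov_base = buf, .iov_len = buflen };
	struct msghdr msg;
	struct cmsghdr *cmsg;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	if (recvmsg(fd, &msg, 0) < 0)
		return;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_PACKET &&
		    cmsg->cmsg_type == PACKET_AUXDATA) {
			struct tpacket_auxdata aux;

			memcpy(&aux, CMSG_DATA(cmsg), sizeof(aux));
			/* aux.tp_len is the original frame length,
			 * aux.tp_snaplen what was actually captured */
		}
	}
}
#endif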
static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
			       int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;

	if (peer)
		return -EOPNOTSUPP;

	uaddr->sa_family = AF_PACKET;
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
	if (dev)
		strncpy(uaddr->sa_data, dev->name, 14);
	else
		memset(uaddr->sa_data, 0, 14);
	rcu_read_unlock();
	*uaddr_len = sizeof(*uaddr);

	return 0;
}

static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
			  int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);

	if (peer)
		return -EOPNOTSUPP;

	sll->sll_family = AF_PACKET;
	sll->sll_ifindex = po->ifindex;
	sll->sll_protocol = po->num;
	sll->sll_pkttype = 0;
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
	if (dev) {
		sll->sll_hatype = dev->type;
		sll->sll_halen = dev->addr_len;
		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
	} else {
		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
		sll->sll_halen = 0;
	}
	rcu_read_unlock();
	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;

	return 0;
}
static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
			 int what)
{
	switch (i->type) {
	case PACKET_MR_MULTICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_mc_add(dev, i->addr);
		else
			return dev_mc_del(dev, i->addr);
		break;
	case PACKET_MR_PROMISC:
		return dev_set_promiscuity(dev, what);
		break;
	case PACKET_MR_ALLMULTI:
		return dev_set_allmulti(dev, what);
		break;
	case PACKET_MR_UNICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_uc_add(dev, i->addr);
		else
			return dev_uc_del(dev, i->addr);
		break;
	default:
		break;
	}
	return 0;
}

static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
{
	for ( ; i; i = i->next) {
		if (i->ifindex == dev->ifindex)
			packet_dev_mc(dev, i, what);
	}
}
static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml, *i;
	struct net_device *dev;
	int err;

	rtnl_lock();

	err = -ENODEV;
	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
	if (!dev)
		goto done;

	err = -EINVAL;
	if (mreq->mr_alen > dev->addr_len)
		goto done;

	err = -ENOBUFS;
	i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i == NULL)
		goto done;

	err = 0;
	for (ml = po->mclist; ml; ml = ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			ml->count++;
			/* Free the new element ... */
			kfree(i);
			goto done;
		}
	}

	i->type = mreq->mr_type;
	i->ifindex = mreq->mr_ifindex;
	i->alen = mreq->mr_alen;
	memcpy(i->addr, mreq->mr_address, i->alen);
	i->count = 1;
	i->next = po->mclist;
	po->mclist = i;
	err = packet_dev_mc(dev, i, 1);
	if (err) {
		po->mclist = i->next;
		kfree(i);
	}

done:
	rtnl_unlock();
	return err;
}

static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_mclist *ml, **mlp;

	rtnl_lock();

	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			if (--ml->count == 0) {
				struct net_device *dev;
				*mlp = ml->next;
				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
				if (dev)
					packet_dev_mc(dev, ml, -1);
				kfree(ml);
			}
			rtnl_unlock();
			return 0;
		}
	}
	rtnl_unlock();
	return -EADDRNOTAVAIL;
}

static void packet_flush_mclist(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml;

	if (!po->mclist)
		return;

	rtnl_lock();
	while ((ml = po->mclist) != NULL) {
		struct net_device *dev;

		po->mclist = ml->next;
		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
		if (dev != NULL)
			packet_dev_mc(dev, ml, -1);
		kfree(ml);
	}
	rtnl_unlock();
}
static int
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	int ret;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	switch (optname) {
	case PACKET_ADD_MEMBERSHIP:
	case PACKET_DROP_MEMBERSHIP:
	{
		struct packet_mreq_max mreq;
		int len = optlen;
		memset(&mreq, 0, sizeof(mreq));
		if (len < sizeof(struct packet_mreq))
			return -EINVAL;
		if (len > sizeof(mreq))
			len = sizeof(mreq);
		if (copy_from_user(&mreq, optval, len))
			return -EFAULT;
		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
			return -EINVAL;
		if (optname == PACKET_ADD_MEMBERSHIP)
			ret = packet_mc_add(sk, &mreq);
		else
			ret = packet_mc_drop(sk, &mreq);
		return ret;
	}

	case PACKET_RX_RING:
	case PACKET_TX_RING:
	{
		struct tpacket_req req;

		if (optlen < sizeof(req))
			return -EINVAL;
		if (pkt_sk(sk)->has_vnet_hdr)
			return -EINVAL;
		if (copy_from_user(&req, optval, sizeof(req)))
			return -EFAULT;
		return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
	}
	case PACKET_COPY_THRESH:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		pkt_sk(sk)->copy_thresh = val;
		return 0;
	}
	case PACKET_VERSION:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
		case TPACKET_V2:
			po->tp_version = val;
			return 0;
		default:
			return -EINVAL;
		}
	}
	case PACKET_RESERVE:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_reserve = val;
		return 0;
	}
	case PACKET_LOSS:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_loss = !!val;
		return 0;
	}
	case PACKET_AUXDATA:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->auxdata = !!val;
		return 0;
	}
	case PACKET_ORIGDEV:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->origdev = !!val;
		return 0;
	}
	case PACKET_VNET_HDR:
	{
		int val;

		if (sock->type != SOCK_RAW)
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->has_vnet_hdr = !!val;
		return 0;
	}
	case PACKET_TIMESTAMP:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->tp_tstamp = val;
		return 0;
	}
	default:
		return -ENOPROTOOPT;
	}
}
static int packet_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
{
	int len;
	int val;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	void *data;
	struct tpacket_stats st;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case PACKET_STATISTICS:
		if (len > sizeof(struct tpacket_stats))
			len = sizeof(struct tpacket_stats);
		spin_lock_bh(&sk->sk_receive_queue.lock);
		st = po->stats;
		memset(&po->stats, 0, sizeof(st));
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		st.tp_packets += st.tp_drops;

		data = &st;
		break;
	case PACKET_AUXDATA:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->auxdata;

		data = &val;
		break;
	case PACKET_ORIGDEV:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->origdev;

		data = &val;
		break;
	case PACKET_VNET_HDR:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->has_vnet_hdr;

		data = &val;
		break;
	case PACKET_VERSION:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->tp_version;
		data = &val;
		break;
	case PACKET_HDRLEN:
		if (len > sizeof(int))
			len = sizeof(int);
		if (copy_from_user(&val, optval, len))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
			val = sizeof(struct tpacket_hdr);
			break;
		case TPACKET_V2:
			val = sizeof(struct tpacket2_hdr);
			break;
		default:
			return -EINVAL;
		}
		data = &val;
		break;
	case PACKET_RESERVE:
		if (len > sizeof(unsigned int))
			len = sizeof(unsigned int);
		val = po->tp_reserve;
		data = &val;
		break;
	case PACKET_LOSS:
		if (len > sizeof(unsigned int))
			len = sizeof(unsigned int);
		val = po->tp_loss;
		data = &val;
		break;
	case PACKET_TIMESTAMP:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->tp_tstamp;
		data = &val;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, data, len))
		return -EFAULT;
	return 0;
}
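
/*
 * Illustrative user-space sketch (guarded out of the build): draining the
 * PACKET_STATISTICS counters handled above. Note the kernel zeroes the
 * counters on each read, so the values are deltas since the last call.
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>

static int read_packet_stats(int fd, struct tpacket_stats *st)
{
	socklen_t len = sizeof(*st);

	return getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, st, &len);
}
#endif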
static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
{
	struct sock *sk;
	struct hlist_node *node;
	struct net_device *dev = data;
	struct net *net = dev_net(dev);

	rcu_read_lock();
	sk_for_each_rcu(sk, node, &net->packet.sklist) {
		struct packet_sock *po = pkt_sk(sk);

		switch (msg) {
		case NETDEV_UNREGISTER:
			if (po->mclist)
				packet_dev_mclist(dev, po->mclist, -1);
			/* fallthrough */

		case NETDEV_DOWN:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->running) {
					__dev_remove_pack(&po->prot_hook);
					__sock_put(sk);
					po->running = 0;
					sk->sk_err = ENETDOWN;
					if (!sock_flag(sk, SOCK_DEAD))
						sk->sk_error_report(sk);
				}
				if (msg == NETDEV_UNREGISTER) {
					po->ifindex = -1;
					po->prot_hook.dev = NULL;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		case NETDEV_UP:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->num && !po->running) {
					dev_add_pack(&po->prot_hook);
					sock_hold(sk);
					po->running = 1;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		}
	}
	rcu_read_unlock();
	return NOTIFY_DONE;
}
static int packet_ioctl(struct socket *sock, unsigned int cmd,
			unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ:
	{
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);
	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *)arg);

#ifdef CONFIG_INET
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
		return inet_dgram_ops.ioctl(sock, cmd, arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}
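
/*
 * Illustrative user-space sketch (guarded out of the build): the queue
 * ioctls handled above. SIOCINQ reports the length of the next queued
 * frame, SIOCOUTQ the bytes not yet sent.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/sockios.h>

static int next_frame_len(int fd)
{
	int n = 0;

	if (ioctl(fd, SIOCINQ, &n) < 0)
		return -1;
	return n;
}
#endif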
static unsigned int packet_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned int mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->rx_ring.pg_vec) {
		if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL))
			mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	spin_lock_bh(&sk->sk_write_queue.lock);
	if (po->tx_ring.pg_vec) {
		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);
	return mask;
}
/* Dirty? Well, I still did not learn a better way to account
 * for user mmaps.
 */

static void packet_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&pkt_sk(sk)->mapped);
}

static void packet_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&pkt_sk(sk)->mapped);
}

static const struct vm_operations_struct packet_mmap_ops = {
	.open	=	packet_mm_open,
	.close	=	packet_mm_close,
};
static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
			unsigned int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i].buffer)) {
			if (is_vmalloc_addr(pg_vec[i].buffer))
				vfree(pg_vec[i].buffer);
			else
				free_pages((unsigned long)pg_vec[i].buffer,
					   order);
			pg_vec[i].buffer = NULL;
		}
	}
	kfree(pg_vec);
}

static inline char *alloc_one_pg_vec_page(unsigned long order)
{
	char *buffer = NULL;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;

	buffer = (char *) __get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* __get_free_pages failed, fall back to vmalloc */
	buffer = vzalloc((1 << order) * PAGE_SIZE);
	if (buffer)
		return buffer;

	/* vmalloc failed, let's dig into swap here */
	gfp_flags &= ~__GFP_NORETRY;
	buffer = (char *)__get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* complete and utter failure */
	return NULL;
}
static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;
	struct pgv *pg_vec;
	int i;

	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
	if (unlikely(!pg_vec))
		goto out;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i].buffer))
			goto out_free_pgvec;
	}

out:
	return pg_vec;

out_free_pgvec:
	free_pg_vec(pg_vec, order, block_nr);
	pg_vec = NULL;
	goto out;
}
static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
		int closing, int tx_ring)
{
	struct pgv *pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	int was_running, order = 0;
	struct packet_ring_buffer *rb;
	struct sk_buff_head *rb_queue;
	__be16 num;
	int err;

	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	err = -EBUSY;
	if (!closing) {
		if (atomic_read(&po->mapped))
			goto out;
		if (atomic_read(&rb->pending))
			goto out;
	}

	if (req->tp_block_nr) {
		/* Sanity tests and some calculations */
		err = -EBUSY;
		if (unlikely(rb->pg_vec))
			goto out;

		switch (po->tp_version) {
		case TPACKET_V1:
			po->tp_hdrlen = TPACKET_HDRLEN;
			break;
		case TPACKET_V2:
			po->tp_hdrlen = TPACKET2_HDRLEN;
			break;
		}

		err = -EINVAL;
		if (unlikely((int)req->tp_block_size <= 0))
			goto out;
		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
			goto out;
		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
					po->tp_reserve))
			goto out;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			goto out;

		rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
		if (unlikely(rb->frames_per_block <= 0))
			goto out;
		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
					req->tp_frame_nr))
			goto out;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;
	}
	/* Done */
	else {
		err = -EINVAL;
		if (unlikely(req->tp_frame_nr))
			goto out;
	}

	lock_sock(sk);

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = po->running;
	num = po->num;
	if (was_running) {
		__dev_remove_pack(&po->prot_hook);
		po->num = 0;
		po->running = 0;
		__sock_put(sk);
	}
	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_read(&po->mapped) == 0) {
		err = 0;
		spin_lock_bh(&rb_queue->lock);
		swap(rb->pg_vec, pg_vec);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->head = 0;
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);

		swap(rb->pg_vec_order, order);
		swap(rb->pg_vec_len, req->tp_block_nr);

		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
		if (atomic_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %d\n",
			       atomic_read(&po->mapped));
	}
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	if (was_running && !po->running) {
		sock_hold(sk);
		po->running = 1;
		po->num = num;
		dev_add_pack(&po->prot_hook);
	}
	spin_unlock(&po->bind_lock);

	release_sock(sk);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
	return err;
}
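
/*
 * Illustrative user-space sketch (guarded out of the build): the
 * PACKET_RX_RING + mmap() sequence that packet_set_ring() above serves.
 * Block and frame sizes are example values; tp_block_size must be a
 * multiple of the page size, and tp_frame_nr must equal
 * frames-per-block times tp_block_nr, as validated above.
 */
#if 0
#include <sys/socket.h>
#include <sys/mman.h>
#include <poll.h>
#include <linux/if_packet.h>
#include <string.h>

static void *setup_rx_ring(int fd, struct tpacket_req *req)
{
	memset(req, 0, sizeof(*req));
	req->tp_block_size = 4096;	/* one page per block (example) */
	req->tp_block_nr = 64;
	req->tp_frame_size = 2048;
	req->tp_frame_nr = (req->tp_block_size / req->tp_frame_size)
				* req->tp_block_nr;

	if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING,
		       req, sizeof(*req)) < 0)
		return MAP_FAILED;

	/* rx (and tx, if configured) rings map as one contiguous area */
	return mmap(NULL, (size_t)req->tp_block_size * req->tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}

static void wait_for_frame(int fd, struct tpacket_hdr *hdr)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	/* packet_poll() reports POLLIN once a frame reaches user space;
	 * hand the slot back by resetting tp_status to TP_STATUS_KERNEL */
	while (!(hdr->tp_status & TP_STATUS_USER))
		poll(&pfd, 1, -1);
}
#endif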
static int packet_mmap(struct file *file, struct socket *sock,
		struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&po->pg_vec_lock);

	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = rb->pg_vec[i].buffer;
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				page = pgv_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner	=	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call =	packet_notifier,
};
#ifdef CONFIG_PROC_FS

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);

	rcu_read_lock();
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
	else {
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   atomic_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(po->num),
			   po->ifindex,
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   sock_i_uid(s),
			   sock_i_ino(s));
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};

static int packet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &packet_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations packet_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= packet_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif
static int __net_init packet_net_init(struct net *net)
{
	spin_lock_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

	if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
		return -ENOMEM;

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	proc_net_remove(net, "packet");
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};


static void __exit packet_exit(void)
{
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
	int rc = proto_register(&packet_proto, 0);

	if (rc != 0)
		goto out;

	sock_register(&packet_family_ops);
	register_pernet_subsys(&packet_net_ops);
	register_netdevice_notifier(&packet_netdev_notifier);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);