 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *		PACKET - implements raw packet sockets.
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
 *	Cyrus Durgin		:	Fixed kerneld for kmod.
 *	Michal Ostrowski	:	Module initialization cleanup.
 *	Ulises Alonso		:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *	Eric Biederman		:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *	Johann Baudy		:	Added TX RING.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/cacheflush.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <net/inet_common.h>
   - if the device has no dev->hard_header routine, it adds and removes the ll
     header inside itself. In this case the ll header is invisible outside of
     the device, but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     does not fit into the reserved space (tunnels); others are silly
   - the packet socket receives packets with the ll header pulled, so
     SOCK_RAW should push it back.

   Incoming, dev->hard_header!=NULL
     mac_header -> ll header

   Outgoing, dev->hard_header!=NULL
     mac_header -> ll header

   Incoming, dev->hard_header==NULL
     mac_header -> UNKNOWN position. It is very likely that it points to the
		   ll header. PPP does this, which is wrong, because it
		   introduces asymmetry between the rx and tx paths.

   Outgoing, dev->hard_header==NULL
     mac_header -> data. ll header is still not built!

   If dev->hard_header==NULL we are unlikely to restore a sensible ll header.

   dev->hard_header != NULL
     mac_header -> ll header

   dev->hard_header == NULL (ll header is added by device, we cannot control it)

   We should set nh.raw on output to the correct position,
   the packet classifier depends on it.
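/*
 * Illustrative userspace sketch (an editorial addition, not part of the
 * original file): per the notes above, with SOCK_RAW the ll header is part
 * of the buffer returned by recvfrom(), while with SOCK_DGRAM it has
 * already been pulled. Assuming an Ethernet device, error handling omitted:
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	unsigned char buf[2048];
 *	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0, NULL, NULL);
 *
 * Here buf[0..13] is the Ethernet header and buf + 14 the network header;
 * a SOCK_DGRAM socket would return data starting at the network header and
 * report the link-layer address via sockaddr_ll instead.
 */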
/* Private packet socket structures. */

struct packet_mclist {
	struct packet_mclist	*next;
	unsigned char		addr[MAX_ADDR_LEN];
/* identical to struct packet_mreq except it has
 * a longer address field.
struct packet_mreq_max {
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
		int closing, int tx_ring);
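/*
 * Illustrative userspace sketch of the ring setup that packet_set_ring()
 * implements (editorial addition, not part of the original file; error
 * handling omitted). The ring is requested with setsockopt(PACKET_RX_RING)
 * and then mapped with a single mmap() covering all blocks:
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,
 *		.tp_frame_nr   = 128,	(block_size / frame_size * block_nr)
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, (size_t)req.tp_block_nr * req.tp_block_size,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */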
struct packet_ring_buffer {
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;
	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
static void packet_flush_mclist(struct sock *sk);
/* struct sock has to be the first member of packet_sock */
	struct tpacket_stats	stats;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running:1,	/* prot_hook is attached */
	int			ifindex;	/* bound device */
	struct packet_mclist	*mclist;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_loss:1;
	unsigned int		tp_tstamp;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
struct packet_skb_cb {
	unsigned int origlen;
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
static inline __pure struct page *pgv_to_page(void *addr)
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
static void __packet_set_status(struct packet_sock *po, void *frame, int status)
	struct tpacket_hdr *h1;
	struct tpacket2_hdr *h2;
	switch (po->tp_version) {
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		pr_err("TPACKET version not supported\n");
static int __packet_get_status(struct packet_sock *po, void *frame)
	struct tpacket_hdr *h1;
	struct tpacket2_hdr *h2;
	switch (po->tp_version) {
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
		pr_err("TPACKET version not supported\n");
static void *packet_lookup_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int position,
	unsigned int pg_vec_pos, frame_offset;
	struct tpacket_hdr *h1;
	struct tpacket2_hdr *h2;
	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;
	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);
	if (status != __packet_get_status(po, h.raw))
static inline void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
	return packet_lookup_frame(po, rb, rb->head, status);
static inline void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
static inline void packet_increment_head(struct packet_ring_buffer *buff)
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
static inline struct packet_sock *pkt_sk(struct sock *sk)
	return (struct packet_sock *)sk;
static void packet_sock_destruct(struct sock *sk)
	skb_queue_purge(&sk->sk_error_queue);
	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
	sk_refcnt_debug_dec(sk);
static const struct proto_ops packet_ops;
static const struct proto_ops packet_ops_spkt;
static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
		struct packet_type *pt, struct net_device *orig_dev)
	struct sockaddr_pkt *spkt;
	 * When we registered the protocol we saved the socket in the data
	 * field for just this event.
	sk = pt->af_packet_priv;
	 * Yank back the headers [hope the device set this
	 * right or kerboom...]
	 * Incoming packets have ll header pulled,
	 * push it back.
	 *
	 * For outgoing ones skb->data == skb_mac_header(skb),
	 * so that this procedure is a noop.
	if (skb->pkt_type == PACKET_LOOPBACK)
	if (!net_eq(dev_net(dev), sock_net(sk)))
	skb = skb_share_check(skb, GFP_ATOMIC);
	/* drop any routing info */
	/* drop conntrack reference */
	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
	skb_push(skb, skb->data - skb_mac_header(skb));
	 * The SOCK_PACKET socket receives _all_ frames.
	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;
	 * Charge the memory to the socket. This is done specifically
	 * to prevent sockets from using up all the memory.
	if (sock_queue_rcv_skb(sk, skb) == 0)
 * Output a raw packet to a device layer. This bypasses all the other
 * protocol layers and you must therefore supply it with a complete frame
static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
		struct msghdr *msg, size_t len)
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	 * Get and verify the address.
	if (msg->msg_namelen < sizeof(struct sockaddr))
	if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
		proto = saddr->spkt_protocol;
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */
	 * Find the device first to size check it
	saddr->spkt_device[13] = 0;
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	if (!(dev->flags & IFF_UP))
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN)
		size_t reserved = LL_RESERVED_SPACE(dev);
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
		skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);
		/* Try to align data part correctly */
			skb_reset_network_header(skb);
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (len > (dev->mtu + dev->hard_header_len)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		skb_reset_mac_header(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
	skb->protocol = proto;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
static inline unsigned int run_filter(const struct sk_buff *skb,
		const struct sock *sk,
	struct sk_filter *filter;
	filter = rcu_dereference(sk->sk_filter);
		res = SK_RUN_FILTER(filter, skb);
 * This function makes lazy skb cloning in the hope that most packets
 * are discarded by BPF.
 *
 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
 * and skb->cb are mangled. It works because (and until) packets
 * falling here are owned by the current CPU. Output packets are cloned
 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 * sequentially, so if we return the skb to its original state on exit,
 * we will not harm anyone.
static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		struct packet_type *pt, struct net_device *orig_dev)
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	if (skb->pkt_type == PACKET_LOOPBACK)
	sk = pt->af_packet_priv;
	if (!net_eq(dev_net(dev), sock_net(sk)))
	if (dev->header_ops) {
		/* The device has an explicit notion of ll header,
		 * exported to higher levels.
		 *
		 * Otherwise, the device hides details of its frame
		 * structure, so that corresponding packet head is
		 * never delivered to user.
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
	res = run_filter(skb, sk, snaplen);
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (skb_head != skb->data) {
			skb->data = skb_head;
	BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
		sll->sll_ifindex = dev->ifindex;
	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
	PACKET_SKB_CB(skb)->origlen = skb->len;
	if (pskb_trim(skb, snaplen))
	skb_set_owner_r(skb, sk);
	/* drop conntrack reference */
	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_packets++;
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk, skb->len);
	po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		struct packet_type *pt, struct net_device *orig_dev)
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	struct tpacket_hdr *h1;
	struct tpacket2_hdr *h2;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
	unsigned short macoff, netoff, hdrlen;
	struct sk_buff *copy_skb = NULL;
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
	if (skb->pkt_type == PACKET_LOOPBACK)
	sk = pt->af_packet_priv;
	if (!net_eq(dev_net(dev), sock_net(sk)))
	if (dev->header_ops) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= TP_STATUS_CSUMNOTREADY;
	res = run_filter(skb, sk, snaplen);
	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
		unsigned maclen = skb_network_offset(skb);
		netoff = TPACKET_ALIGN(po->tp_hdrlen +
				       (maclen < 16 ? 16 : maclen)) +
		macoff = netoff - maclen;
	if (macoff + snaplen > po->rx_ring.frame_size) {
		if (po->copy_thresh &&
		    atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
		    (unsigned)sk->sk_rcvbuf) {
			if (skb_shared(skb)) {
				copy_skb = skb_clone(skb, GFP_ATOMIC);
				copy_skb = skb_get(skb);
				skb_head = skb->data;
				skb_set_owner_r(copy_skb, sk);
		snaplen = po->rx_ring.frame_size - macoff;
		if ((int)snaplen < 0)
	spin_lock(&sk->sk_receive_queue.lock);
	h.raw = packet_current_frame(po, &po->rx_ring, TP_STATUS_KERNEL);
	packet_increment_head(&po->rx_ring);
	po->stats.tp_packets++;
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	if (!po->stats.tp_drops)
		status &= ~TP_STATUS_LOSING;
	spin_unlock(&sk->sk_receive_queue.lock);
	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
	switch (po->tp_version) {
		h.h1->tp_len = skb->len;
		h.h1->tp_snaplen = snaplen;
		h.h1->tp_mac = macoff;
		h.h1->tp_net = netoff;
		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
				&& shhwtstamps->syststamp.tv64)
			tv = ktime_to_timeval(shhwtstamps->syststamp);
		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
				&& shhwtstamps->hwtstamp.tv64)
			tv = ktime_to_timeval(shhwtstamps->hwtstamp);
		else if (skb->tstamp.tv64)
			tv = ktime_to_timeval(skb->tstamp);
			do_gettimeofday(&tv);
		h.h1->tp_sec = tv.tv_sec;
		h.h1->tp_usec = tv.tv_usec;
		hdrlen = sizeof(*h.h1);
		h.h2->tp_len = skb->len;
		h.h2->tp_snaplen = snaplen;
		h.h2->tp_mac = macoff;
		h.h2->tp_net = netoff;
		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
				&& shhwtstamps->syststamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->syststamp);
		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
				&& shhwtstamps->hwtstamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->hwtstamp);
		else if (skb->tstamp.tv64)
			ts = ktime_to_timespec(skb->tstamp);
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		if (vlan_tx_tag_present(skb)) {
			h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
			status |= TP_STATUS_VLAN_VALID;
			h.h2->tp_vlan_tci = 0;
		hdrlen = sizeof(*h.h2);
	sll = h.raw + TPACKET_ALIGN(hdrlen);
	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
		sll->sll_ifindex = dev->ifindex;
	__packet_set_status(po, h.raw, status);
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	end = (u8 *)PAGE_ALIGN((unsigned long)h.raw + macoff + snaplen);
	for (start = h.raw; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));
	sk->sk_data_ready(sk, 0);
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk, 0);
static void tpacket_destruct_skb(struct sk_buff *skb)
	struct packet_sock *po = pkt_sk(skb->sk);
	if (likely(po->tx_ring.pg_vec)) {
		ph = skb_shinfo(skb)->destructor_arg;
		BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
		BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
		atomic_dec(&po->tx_ring.pending);
		__packet_set_status(po, ph, TP_STATUS_AVAILABLE);
static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
		void *frame, struct net_device *dev, int size_max,
		__be16 proto, unsigned char *addr)
	struct tpacket_hdr *h1;
	struct tpacket2_hdr *h2;
	int to_write, offset, len, tp_len, nr_frags, len_max;
	struct socket *sock = po->sk.sk_socket;
	skb->protocol = proto;
	skb->priority = po->sk.sk_priority;
	skb->mark = po->sk.sk_mark;
	skb_shinfo(skb)->destructor_arg = ph.raw;
	switch (po->tp_version) {
		tp_len = ph.h2->tp_len;
		tp_len = ph.h1->tp_len;
	if (unlikely(tp_len > size_max)) {
		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
	if (sock->type == SOCK_DGRAM) {
		err = dev_hard_header(skb, dev, ntohs(proto), addr,
		if (unlikely(err < 0))
	} else if (dev->hard_header_len) {
		/* net device doesn't like empty head */
		if (unlikely(tp_len <= dev->hard_header_len)) {
			pr_err("packet size is too short (%d < %d)\n",
			       tp_len, dev->hard_header_len);
		skb_push(skb, dev->hard_header_len);
		err = skb_store_bits(skb, 0, data,
				     dev->hard_header_len);
		data += dev->hard_header_len;
		to_write -= dev->hard_header_len;
	offset = offset_in_page(data);
	len_max = PAGE_SIZE - offset;
	len = ((to_write > len_max) ? len_max : to_write);
	skb->data_len = to_write;
	skb->len += to_write;
	skb->truesize += to_write;
	atomic_add(to_write, &po->sk.sk_wmem_alloc);
	while (likely(to_write)) {
		nr_frags = skb_shinfo(skb)->nr_frags;
		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
			pr_err("Packet exceeds the number of skb frags (%lu)\n",
		page = pgv_to_page(data);
		flush_dcache_page(page);
		skb_fill_page_desc(skb, nr_frags, page, offset, len);
		len = ((to_write > len_max) ? len_max : to_write);
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
	struct net_device *dev;
	bool need_rls_dev = false;
	int err, reserve = 0;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	int tp_len, size_max;
	mutex_lock(&po->pg_vec_lock);
		dev = po->prot_hook.dev;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
		if (msg->msg_namelen < (saddr->sll_halen
					+ offsetof(struct sockaddr_ll,
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
		need_rls_dev = true;
	if (unlikely(dev == NULL))
	reserve = dev->hard_header_len;
	if (unlikely(!(dev->flags & IFF_UP)))
	size_max = po->tx_ring.frame_size
		   - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
	if (size_max > dev->mtu + reserve)
		size_max = dev->mtu + reserve;
		ph = packet_current_frame(po, &po->tx_ring,
					  TP_STATUS_SEND_REQUEST);
		if (unlikely(ph == NULL)) {
		status = TP_STATUS_SEND_REQUEST;
		skb = sock_alloc_send_skb(&po->sk,
				LL_ALLOCATED_SPACE(dev)
				+ sizeof(struct sockaddr_ll),
		if (unlikely(skb == NULL))
		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
		if (unlikely(tp_len < 0)) {
				__packet_set_status(po, ph,
						TP_STATUS_AVAILABLE);
				packet_increment_head(&po->tx_ring);
				status = TP_STATUS_WRONG_FORMAT;
		skb->destructor = tpacket_destruct_skb;
		__packet_set_status(po, ph, TP_STATUS_SENDING);
		atomic_inc(&po->tx_ring.pending);
		status = TP_STATUS_SEND_REQUEST;
		err = dev_queue_xmit(skb);
		if (unlikely(err > 0)) {
			err = net_xmit_errno(err);
			if (err && __packet_get_status(po, ph) ==
				   TP_STATUS_AVAILABLE) {
				/* skb was destructed already */
			 * skb was dropped but not destructed yet;
			 * let's treat it like congestion or err < 0
		packet_increment_head(&po->tx_ring);
	} while (likely((ph != NULL) ||
			((!(msg->msg_flags & MSG_DONTWAIT)) &&
			 (atomic_read(&po->tx_ring.pending))))
	__packet_set_status(po, ph, status);
	mutex_unlock(&po->pg_vec_lock);
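/*
 * Illustrative userspace counterpart of the TX ring protocol above
 * (editorial addition, not part of the original file; single frame,
 * TPACKET_V1 header, error handling omitted): fill a frame, mark it
 * TP_STATUS_SEND_REQUEST, then kick the kernel with send():
 *
 *	struct tpacket_hdr *hdr = ring;		(first frame of the TX ring)
 *	char *data = (char *)hdr + TPACKET_HDRLEN - sizeof(struct sockaddr_ll);
 *	memcpy(data, frame, frame_len);
 *	hdr->tp_len = frame_len;
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);
 *
 * On completion tpacket_destruct_skb() flips tp_status back to
 * TP_STATUS_AVAILABLE, as seen above.
 */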
static inline struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
		size_t reserve, size_t len,
		size_t linear, int noblock,
	struct sk_buff *skb;
	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
	skb_reserve(skb, reserve);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;
static int packet_snd(struct socket *sock,
		struct msghdr *msg, size_t len)
	struct sock *sk = sock->sk;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	bool need_rls_dev = false;
	unsigned char *addr;
	int err, reserve = 0;
	struct virtio_net_hdr vnet_hdr = { 0 };
	struct packet_sock *po = pkt_sk(sk);
	unsigned short gso_type = 0;
	 * Get and verify the address.
	if (saddr == NULL) {
		dev = po->prot_hook.dev;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
		need_rls_dev = true;
	if (sock->type == SOCK_RAW)
		reserve = dev->hard_header_len;
	if (!(dev->flags & IFF_UP))
	if (po->has_vnet_hdr) {
		vnet_hdr_len = sizeof(vnet_hdr);
		if (len < vnet_hdr_len)
		len -= vnet_hdr_len;
		err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
			vnet_hdr.hdr_len = vnet_hdr.csum_start +
						 vnet_hdr.csum_offset + 2;
		if (vnet_hdr.hdr_len > len)
		if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
			switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
			case VIRTIO_NET_HDR_GSO_TCPV4:
				gso_type = SKB_GSO_TCPV4;
			case VIRTIO_NET_HDR_GSO_TCPV6:
				gso_type = SKB_GSO_TCPV6;
			case VIRTIO_NET_HDR_GSO_UDP:
				gso_type = SKB_GSO_UDP;
			if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
				gso_type |= SKB_GSO_TCP_ECN;
			if (vnet_hdr.gso_size == 0)
	if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN))
	skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
			       LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
			       msg->msg_flags & MSG_DONTWAIT, &err);
	skb_set_network_header(skb, reserve);
	if (sock->type == SOCK_DGRAM &&
	    (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
	/* Returns -EFAULT on error */
	err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
	if (!gso_type && (len > dev->mtu + reserve)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
	skb->protocol = proto;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	if (po->has_vnet_hdr) {
		if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
						  vnet_hdr.csum_offset)) {
		skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
		skb_shinfo(skb)->gso_type = gso_type;
		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
		len += vnet_hdr_len;
	err = dev_queue_xmit(skb);
	if (err > 0 && (err = net_xmit_errno(err)) != 0)
	if (dev && need_rls_dev)
static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
		struct msghdr *msg, size_t len)
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	if (po->tx_ring.pg_vec)
		return tpacket_snd(po, msg);
		return packet_snd(sock, msg, len);
 * Close a PACKET socket. This is fairly simple. We immediately go
 * to 'closed' state and remove our protocol entry in the device list.
static int packet_release(struct socket *sock)
	struct sock *sk = sock->sk;
	struct packet_sock *po;
	struct tpacket_req req;
	spin_lock_bh(&net->packet.sklist_lock);
	sk_del_node_init_rcu(sk);
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	spin_unlock_bh(&net->packet.sklist_lock);
	spin_lock(&po->bind_lock);
	 * Remove from protocol table
	__dev_remove_pack(&po->prot_hook);
	if (po->prot_hook.dev) {
		dev_put(po->prot_hook.dev);
		po->prot_hook.dev = NULL;
	spin_unlock(&po->bind_lock);
	packet_flush_mclist(sk);
	memset(&req, 0, sizeof(req));
	if (po->rx_ring.pg_vec)
		packet_set_ring(sk, &req, 1, 0);
	if (po->tx_ring.pg_vec)
		packet_set_ring(sk, &req, 1, 1);
	 * Now the socket is dead. No more input will appear.
	skb_queue_purge(&sk->sk_receive_queue);
	sk_refcnt_debug_release(sk);
 * Attach a packet hook.
static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
	struct packet_sock *po = pkt_sk(sk);
	 * Detach an existing hook if present.
	spin_lock(&po->bind_lock);
		spin_unlock(&po->bind_lock);
		dev_remove_pack(&po->prot_hook);
		spin_lock(&po->bind_lock);
	po->prot_hook.type = protocol;
	if (po->prot_hook.dev)
		dev_put(po->prot_hook.dev);
	po->prot_hook.dev = dev;
	po->ifindex = dev ? dev->ifindex : 0;
	if (!dev || (dev->flags & IFF_UP)) {
		dev_add_pack(&po->prot_hook);
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
	spin_unlock(&po->bind_lock);
 * Bind a packet socket to a device
static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
	struct sock *sk = sock->sk;
	struct net_device *dev;
	if (addr_len != sizeof(struct sockaddr))
	strlcpy(name, uaddr->sa_data, sizeof(name));
	dev = dev_get_by_name(sock_net(sk), name);
	err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
	struct sock *sk = sock->sk;
	struct net_device *dev = NULL;
	if (addr_len < sizeof(struct sockaddr_ll))
	if (sll->sll_family != AF_PACKET)
	if (sll->sll_ifindex) {
		dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
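/*
 * Illustrative userspace counterpart (editorial addition, not part of the
 * original file): binding a packet socket to one interface by index, which
 * lands in packet_bind() above. "eth0" is a placeholder; error handling
 * omitted:
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */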
static struct proto packet_proto = {
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct packet_sock),
 * Create a packet of type SOCK_PACKET.
static int packet_create(struct net *net, struct socket *sock, int protocol,
	struct packet_sock *po;
	__be16 proto = (__force __be16)protocol; /* weird, but documented */
	if (!capable(CAP_NET_RAW))
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
	    sock->type != SOCK_PACKET)
		return -ESOCKTNOSUPPORT;
	sock->state = SS_UNCONNECTED;
	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
	sock->ops = &packet_ops;
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;
	sock_init_data(sock, sk);
	sk->sk_family = PF_PACKET;
	sk->sk_destruct = packet_sock_destruct;
	sk_refcnt_debug_inc(sk);
	 * Attach a protocol block
	spin_lock_init(&po->bind_lock);
	mutex_init(&po->pg_vec_lock);
	po->prot_hook.func = packet_rcv;
	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;
	po->prot_hook.af_packet_priv = sk;
		po->prot_hook.type = proto;
		dev_add_pack(&po->prot_hook);
	spin_lock_bh(&net->packet.sklist_lock);
	sk_add_node_rcu(sk, &net->packet.sklist);
	sock_prot_inuse_add(net, &packet_proto, 1);
	spin_unlock_bh(&net->packet.sklist_lock);
static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
	struct sock_exterr_skb *serr;
	struct sk_buff *skb, *skb2;
	skb = skb_dequeue(&sk->sk_error_queue);
		msg->msg_flags |= MSG_TRUNC;
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	sock_recv_timestamp(msg, sk, skb);
	serr = SKB_EXT_ERR(skb);
	put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
		 sizeof(serr->ee), &serr->ee);
	msg->msg_flags |= MSG_ERRQUEUE;
	/* Reset and regenerate socket error */
	spin_lock_bh(&sk->sk_error_queue.lock);
	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
		spin_unlock_bh(&sk->sk_error_queue.lock);
		sk->sk_error_report(sk);
		spin_unlock_bh(&sk->sk_error_queue.lock);
 * Pull a packet from our receive queue and hand it to the user.
 * If necessary we block.
static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
		struct msghdr *msg, size_t len, int flags)
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	struct sockaddr_ll *sll;
	int vnet_hdr_len = 0;
	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
	/* What error should we return now? EUNATTACH? */
	if (pkt_sk(sk)->ifindex < 0)
	if (flags & MSG_ERRQUEUE) {
		err = packet_recv_error(sk, msg, len);
	 * Call the generic datagram receiver. This handles all sorts
	 * of horrible races and re-entrancy so we can forget about it
	 * in the protocol layers.
	 * Now it will return ENETDOWN, if the device has just gone down,
	 * but then it will block.
	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
	 * An error occurred so return it. Because skb_recv_datagram()
	 * handles the blocking we don't see and worry about blocking
	 * retries.
	if (pkt_sk(sk)->has_vnet_hdr) {
		struct virtio_net_hdr vnet_hdr = { 0 };
		vnet_hdr_len = sizeof(vnet_hdr);
		if (len < vnet_hdr_len)
		len -= vnet_hdr_len;
		if (skb_is_gso(skb)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);
			/* This is a hint as to how much should be linear. */
			vnet_hdr.hdr_len = skb_headlen(skb);
			vnet_hdr.gso_size = sinfo->gso_size;
			if (sinfo->gso_type & SKB_GSO_TCPV4)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
			else if (sinfo->gso_type & SKB_GSO_TCPV6)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
			else if (sinfo->gso_type & SKB_GSO_UDP)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
			else if (sinfo->gso_type & SKB_GSO_FCOE)
			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
				vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
			vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			vnet_hdr.csum_start = skb_checksum_start_offset(skb);
			vnet_hdr.csum_offset = skb->csum_offset;
		} /* else everything is zero */
		err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
	 * If the address length field is there to be filled in, we fill
	 * it in now.
	sll = &PACKET_SKB_CB(skb)->sa.ll;
	if (sock->type == SOCK_PACKET)
		msg->msg_namelen = sizeof(struct sockaddr_pkt);
		msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
	 * You lose any data beyond the buffer you gave. If it worries a
	 * user program they can ask the device for its MTU anyway.
		msg->msg_flags |= MSG_TRUNC;
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	sock_recv_ts_and_drops(msg, sk, skb);
		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
	if (pkt_sk(sk)->auxdata) {
		struct tpacket_auxdata aux;
		aux.tp_status = TP_STATUS_USER;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
		aux.tp_len = PACKET_SKB_CB(skb)->origlen;
		aux.tp_snaplen = skb->len;
		aux.tp_net = skb_network_offset(skb);
		if (vlan_tx_tag_present(skb)) {
			aux.tp_vlan_tci = vlan_tx_tag_get(skb);
			aux.tp_status |= TP_STATUS_VLAN_VALID;
			aux.tp_vlan_tci = 0;
		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
	 * Free or return the buffer as appropriate. Again this
	 * hides all the races and re-entrancy issues from us.
	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
	skb_free_datagram(sk, skb);
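/*
 * Illustrative userspace sketch (editorial addition, not part of the
 * original file): when PACKET_AUXDATA is enabled, the tpacket_auxdata
 * filled in above arrives as a control message. Error handling omitted:
 *
 *	union {
 *		struct cmsghdr h;
 *		char buf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	} ctrl;
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = &ctrl, .msg_controllen = sizeof(ctrl),
 *	};
 *	recvmsg(fd, &msg, 0);
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *	if (cmsg && cmsg->cmsg_level == SOL_PACKET &&
 *	    cmsg->cmsg_type == PACKET_AUXDATA) {
 *		struct tpacket_auxdata *aux = (void *)CMSG_DATA(cmsg);
 *		(aux->tp_len is the original length, aux->tp_vlan_tci the tag)
 *	}
 */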
static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
		int *uaddr_len, int peer)
	struct net_device *dev;
	struct sock *sk = sock->sk;
	uaddr->sa_family = AF_PACKET;
	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
		strncpy(uaddr->sa_data, dev->name, 14);
		memset(uaddr->sa_data, 0, 14);
	*uaddr_len = sizeof(*uaddr);
static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
		int *uaddr_len, int peer)
	struct net_device *dev;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
	sll->sll_family = AF_PACKET;
	sll->sll_ifindex = po->ifindex;
	sll->sll_protocol = po->num;
	sll->sll_pkttype = 0;
	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
		sll->sll_hatype = dev->type;
		sll->sll_halen = dev->addr_len;
		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
	case PACKET_MR_MULTICAST:
		if (i->alen != dev->addr_len)
			return dev_mc_add(dev, i->addr);
			return dev_mc_del(dev, i->addr);
	case PACKET_MR_PROMISC:
		return dev_set_promiscuity(dev, what);
	case PACKET_MR_ALLMULTI:
		return dev_set_allmulti(dev, what);
	case PACKET_MR_UNICAST:
		if (i->alen != dev->addr_len)
			return dev_uc_add(dev, i->addr);
			return dev_uc_del(dev, i->addr);
static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
	for ( ; i; i = i->next) {
		if (i->ifindex == dev->ifindex)
			packet_dev_mc(dev, i, what);
static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml, *i;
	struct net_device *dev;
	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
	if (mreq->mr_alen > dev->addr_len)
	i = kmalloc(sizeof(*i), GFP_KERNEL);
	for (ml = po->mclist; ml; ml = ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			/* Free the new element ... */
	i->type = mreq->mr_type;
	i->ifindex = mreq->mr_ifindex;
	i->alen = mreq->mr_alen;
	memcpy(i->addr, mreq->mr_address, i->alen);
	i->next = po->mclist;
	err = packet_dev_mc(dev, i, 1);
		po->mclist = i->next;
static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
	struct packet_mclist *ml, **mlp;
	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			if (--ml->count == 0) {
				struct net_device *dev;
				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
					packet_dev_mc(dev, ml, -1);
	return -EADDRNOTAVAIL;
static void packet_flush_mclist(struct sock *sk)
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml;
	while ((ml = po->mclist) != NULL) {
		struct net_device *dev;
		po->mclist = ml->next;
		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
			packet_dev_mc(dev, ml, -1);
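/*
 * Illustrative userspace sketch (editorial addition, not part of the
 * original file): a membership request such as the one below ends up in
 * packet_mc_add() above; "eth0" is a placeholder, error handling omitted:
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = if_nametoindex("eth0"),
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 */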
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	if (level != SOL_PACKET)
		return -ENOPROTOOPT;
	case PACKET_ADD_MEMBERSHIP:
	case PACKET_DROP_MEMBERSHIP:
		struct packet_mreq_max mreq;
		memset(&mreq, 0, sizeof(mreq));
		if (len < sizeof(struct packet_mreq))
		if (len > sizeof(mreq))
		if (copy_from_user(&mreq, optval, len))
		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
		if (optname == PACKET_ADD_MEMBERSHIP)
			ret = packet_mc_add(sk, &mreq);
			ret = packet_mc_drop(sk, &mreq);
	case PACKET_RX_RING:
	case PACKET_TX_RING:
		struct tpacket_req req;
		if (optlen < sizeof(req))
		if (pkt_sk(sk)->has_vnet_hdr)
		if (copy_from_user(&req, optval, sizeof(req)))
		return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
	case PACKET_COPY_THRESH:
		if (optlen != sizeof(val))
		if (copy_from_user(&val, optval, sizeof(val)))
		pkt_sk(sk)->copy_thresh = val;
	case PACKET_VERSION:
		if (optlen != sizeof(val))
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
		if (copy_from_user(&val, optval, sizeof(val)))
			po->tp_version = val;
	case PACKET_RESERVE:
		if (optlen != sizeof(val))
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
		if (copy_from_user(&val, optval, sizeof(val)))
		po->tp_reserve = val;
		if (optlen != sizeof(val))
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
		if (copy_from_user(&val, optval, sizeof(val)))
		po->tp_loss = !!val;
	case PACKET_AUXDATA:
		if (optlen < sizeof(val))
		if (copy_from_user(&val, optval, sizeof(val)))
		po->auxdata = !!val;
	case PACKET_ORIGDEV:
		if (optlen < sizeof(val))
		if (copy_from_user(&val, optval, sizeof(val)))
		po->origdev = !!val;
	case PACKET_VNET_HDR:
		if (sock->type != SOCK_RAW)
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
		if (optlen < sizeof(val))
		if (copy_from_user(&val, optval, sizeof(val)))
		po->has_vnet_hdr = !!val;
	case PACKET_TIMESTAMP:
		if (optlen != sizeof(val))
		if (copy_from_user(&val, optval, sizeof(val)))
		po->tp_tstamp = val;
		return -ENOPROTOOPT;
static int packet_getsockopt(struct socket *sock, int level, int optname,
		char __user *optval, int __user *optlen)
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	struct tpacket_stats st;
	if (level != SOL_PACKET)
		return -ENOPROTOOPT;
	if (get_user(len, optlen))
	case PACKET_STATISTICS:
		if (len > sizeof(struct tpacket_stats))
			len = sizeof(struct tpacket_stats);
		spin_lock_bh(&sk->sk_receive_queue.lock);
		memset(&po->stats, 0, sizeof(st));
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		st.tp_packets += st.tp_drops;
	case PACKET_AUXDATA:
		if (len > sizeof(int))
	case PACKET_ORIGDEV:
		if (len > sizeof(int))
	case PACKET_VNET_HDR:
		if (len > sizeof(int))
		val = po->has_vnet_hdr;
	case PACKET_VERSION:
		if (len > sizeof(int))
		val = po->tp_version;
		if (len > sizeof(int))
		if (copy_from_user(&val, optval, len))
			val = sizeof(struct tpacket_hdr);
			val = sizeof(struct tpacket2_hdr);
	case PACKET_RESERVE:
		if (len > sizeof(unsigned int))
			len = sizeof(unsigned int);
		val = po->tp_reserve;
		if (len > sizeof(unsigned int))
			len = sizeof(unsigned int);
	case PACKET_TIMESTAMP:
		if (len > sizeof(int))
		val = po->tp_tstamp;
		return -ENOPROTOOPT;
	if (put_user(len, optlen))
	if (copy_to_user(optval, data, len))
static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
	struct hlist_node *node;
	struct net_device *dev = data;
	struct net *net = dev_net(dev);
	sk_for_each_rcu(sk, node, &net->packet.sklist) {
		struct packet_sock *po = pkt_sk(sk);
		case NETDEV_UNREGISTER:
				packet_dev_mclist(dev, po->mclist, -1);
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
					__dev_remove_pack(&po->prot_hook);
					sk->sk_err = ENETDOWN;
					if (!sock_flag(sk, SOCK_DEAD))
						sk->sk_error_report(sk);
				if (msg == NETDEV_UNREGISTER) {
					if (po->prot_hook.dev)
						dev_put(po->prot_hook.dev);
					po->prot_hook.dev = NULL;
				spin_unlock(&po->bind_lock);
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->num && !po->running) {
					dev_add_pack(&po->prot_hook);
				spin_unlock(&po->bind_lock);
static int packet_ioctl(struct socket *sock, unsigned int cmd,
	struct sock *sk = sock->sk;
		int amount = sk_wmem_alloc_get(sk);
		return put_user(amount, (int __user *)arg);
		struct sk_buff *skb;
		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
		return sock_get_timestamp(sk, (struct timeval __user *)arg);
		return sock_get_timestampns(sk, (struct timespec __user *)arg);
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
		return inet_dgram_ops.ioctl(sock, cmd, arg);
		return -ENOIOCTLCMD;
static unsigned int packet_poll(struct file *file, struct socket *sock,
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned int mask = datagram_poll(file, sock, wait);
	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->rx_ring.pg_vec) {
		if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL))
			mask |= POLLIN | POLLRDNORM;
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	spin_lock_bh(&sk->sk_write_queue.lock);
	if (po->tx_ring.pg_vec) {
		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
			mask |= POLLOUT | POLLWRNORM;
	spin_unlock_bh(&sk->sk_write_queue.lock);
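/*
 * Illustrative userspace sketch (editorial addition, not part of the
 * original file): the poll() semantics above let an RX ring consumer sleep
 * until the next frame is handed to user space. Minimal loop for one slot,
 * TPACKET_V1 header, error handling omitted:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	struct tpacket_hdr *hdr = frame;	(current ring slot)
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);
 *	(consume hdr->tp_len bytes at (char *)hdr + hdr->tp_mac, then:)
 *	hdr->tp_status = TP_STATUS_KERNEL;	(hand the slot back)
 */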
/* Dirty? Well, I still did not learn a better way to account
 * for user mmaps.
 */
static void packet_mm_open(struct vm_area_struct *vma)
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;
		atomic_inc(&pkt_sk(sk)->mapped);
static void packet_mm_close(struct vm_area_struct *vma)
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;
		atomic_dec(&pkt_sk(sk)->mapped);
static const struct vm_operations_struct packet_mmap_ops = {
	.open	=	packet_mm_open,
	.close	=	packet_mm_close,
static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i].buffer)) {
			if (is_vmalloc_addr(pg_vec[i].buffer))
				vfree(pg_vec[i].buffer);
				free_pages((unsigned long)pg_vec[i].buffer,
			pg_vec[i].buffer = NULL;
static inline char *alloc_one_pg_vec_page(unsigned long order)
	char *buffer = NULL;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
	buffer = (char *) __get_free_pages(gfp_flags, order);
	 * __get_free_pages failed, fall back to vmalloc
	buffer = vzalloc((1 << order) * PAGE_SIZE);
	 * vmalloc failed, let's dig into swap here
	gfp_flags &= ~__GFP_NORETRY;
	buffer = (char *)__get_free_pages(gfp_flags, order);
	 * complete and utter failure
static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
	unsigned int block_nr = req->tp_block_nr;
	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
	if (unlikely(!pg_vec))
	for (i = 0; i < block_nr; i++) {
		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i].buffer))
			goto out_free_pgvec;
	free_pg_vec(pg_vec, order, block_nr);
static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
		int closing, int tx_ring)
	struct pgv *pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	int was_running, order = 0;
	struct packet_ring_buffer *rb;
	struct sk_buff_head *rb_queue;
	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
	if (atomic_read(&po->mapped))
	if (atomic_read(&rb->pending))
	if (req->tp_block_nr) {
		/* Sanity tests and some calculations */
		if (unlikely(rb->pg_vec))
		switch (po->tp_version) {
			po->tp_hdrlen = TPACKET_HDRLEN;
			po->tp_hdrlen = TPACKET2_HDRLEN;
		if (unlikely((int)req->tp_block_size <= 0))
		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
		rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
		if (unlikely(rb->frames_per_block <= 0))
		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
		if (unlikely(req->tp_frame_nr))
	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = po->running;
		__dev_remove_pack(&po->prot_hook);
	spin_unlock(&po->bind_lock);
	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_read(&po->mapped) == 0) {
		spin_lock_bh(&rb_queue->lock);
		swap(rb->pg_vec, pg_vec);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);
		swap(rb->pg_vec_order, order);
		swap(rb->pg_vec_len, req->tp_block_nr);
		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
		if (atomic_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %d\n",
			       atomic_read(&po->mapped));
	mutex_unlock(&po->pg_vec_lock);
	spin_lock(&po->bind_lock);
	if (was_running && !po->running) {
		dev_add_pack(&po->prot_hook);
	spin_unlock(&po->bind_lock);
		free_pg_vec(pg_vec, order, req->tp_block_nr);
static int packet_mmap(struct file *file, struct socket *sock,
		struct vm_area_struct *vma)
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	mutex_lock(&po->pg_vec_lock);
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
			expected_size += rb->pg_vec_len
	if (expected_size == 0)
	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
		for (i = 0; i < rb->pg_vec_len; i++) {
			void *kaddr = rb->pg_vec[i].buffer;
			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				page = pgv_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	mutex_unlock(&po->pg_vec_lock);
static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
static const struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner	=	THIS_MODULE,
static struct notifier_block packet_netdev_notifier = {
	.notifier_call =	packet_notifier,
#ifdef CONFIG_PROC_FS
static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	struct net *net = seq_file_net(seq);
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	struct net *net = seq_file_net(seq);
	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
static void packet_seq_stop(struct seq_file *seq, void *v)
static int packet_seq_show(struct seq_file *seq, void *v)
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);
			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   atomic_read(&s->sk_refcnt),
			   atomic_read(&s->sk_rmem_alloc),
static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
static int packet_seq_open(struct inode *inode, struct file *file)
	return seq_open_net(inode, file, &packet_seq_ops,
			    sizeof(struct seq_net_private));
static const struct file_operations packet_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= packet_seq_open,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
static int __net_init packet_net_init(struct net *net)
	spin_lock_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);
	if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
static void __net_exit packet_net_exit(struct net *net)
	proc_net_remove(net, "packet");
static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
static void __exit packet_exit(void)
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
static int __init packet_init(void)
	int rc = proto_register(&packet_proto, 0);
	sock_register(&packet_family_ops);
	register_pernet_subsys(&packet_net_ops);
	register_netdevice_notifier(&packet_netdev_notifier);
module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);