/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Version:	$Id: af_packet.c,v 1.61 2002/02/08 03:57:19 davem Exp $
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kmod.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>

#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif

#define CONFIG_SOCK_PACKET	1
/*
   Proposed replacement for SIOC{ADD,DEL}MULTI and
   IFF_PROMISC, IFF_ALLMULTI flags.

   It is more expensive, but I believe it is the really correct
   solution: reentrant, safe and fault tolerant.

   IFF_PROMISC/IFF_ALLMULTI/SIOC{ADD/DEL}MULTI are faked by keeping
   a reference count and a global flag, so that the real status is
   (gflag|(count != 0)), and we can keep the obsolete faulty interface
   without harming clever users.
 */
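/*
 * Illustrative sketch (not kernel code): with the per-device reference
 * count kept by this module and the legacy global flag, the effective
 * device status is simply their OR, so legacy flag users and
 * refcounting users can coexist:
 *
 *	int effective_promisc(int gflag, int count)
 *	{
 *		return gflag | (count != 0);
 *	}
 */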
#define CONFIG_PACKET_MULTICAST	1
/*
   Assumptions:
   - if the device has no dev->hard_header routine, it adds and removes
     the ll header inside itself. In this case the ll header is invisible
     outside of the device, but higher levels still should reserve
     dev->hard_header_len.  Some devices are clever enough to reallocate
     the skb when the header will not fit into the reserved space
     (tunnels); others are not (PPP).
   - a packet socket receives packets with the ll header pulled,
     so SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header!=NULL
   mac.raw -> ll header
   data    -> data

Outgoing, dev->hard_header!=NULL
   mac.raw -> ll header
   data    -> ll header

Incoming, dev->hard_header==NULL
   mac.raw -> UNKNOWN position. It is very likely that it points to the
	      ll header.  PPP does this, which is wrong, because it
	      introduces asymmetry between the rx and tx paths.
   data    -> data

Outgoing, dev->hard_header==NULL
   mac.raw -> data. ll header is still not built!
   data    -> data

Resume
  If dev->hard_header==NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac.raw -> ll header
   data    -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac.raw -> data
   data    -> data

   We should set nh.raw on output to the correct position,
   the packet classifier depends on it.
 */
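/*
 * Illustrative user-space sketch of the visible difference (values are
 * examples; not part of this file): the same frame read through
 * SOCK_RAW starts at the link-level header, through SOCK_DGRAM at the
 * network header.
 *
 *	int raw = socket(PF_PACKET, SOCK_RAW,   htons(ETH_P_IP));
 *	int dgm = socket(PF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
 *	char buf[2048];
 *	recv(raw, buf, sizeof(buf), 0);	// buf[0] is the Ethernet header
 *	recv(dgm, buf, sizeof(buf), 0);	// buf[0] is the IP header
 */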
/* List of all packet sockets. */
static HLIST_HEAD(packet_sklist);
static DEFINE_RWLOCK(packet_sklist_lock);

static atomic_t packet_socks_nr;
/* Private packet socket structures. */

#ifdef CONFIG_PACKET_MULTICAST
struct packet_mclist
{
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max
{
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};
#endif
#ifdef CONFIG_PACKET_MMAP
static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing);
#endif

static void packet_flush_mclist(struct sock *sk);
struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct tpacket_stats	stats;
#ifdef CONFIG_PACKET_MMAP
	char *			*pg_vec;
	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;
	int			copy_thresh;
#endif
	struct packet_type	prot_hook;
	spinlock_t		bind_lock;
	char			running;	/* prot_hook is attached */
	int			ifindex;	/* bound device */
	__be16			num;
#ifdef CONFIG_PACKET_MULTICAST
	struct packet_mclist	*mclist;
#endif
#ifdef CONFIG_PACKET_MMAP
	atomic_t		mapped;
	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;
#endif
};
#ifdef CONFIG_PACKET_MMAP

static inline char *packet_lookup_frame(struct packet_sock *po, unsigned int position)
{
	unsigned int pg_vec_pos, frame_offset;
	char *frame;

	pg_vec_pos = position / po->frames_per_block;
	frame_offset = position % po->frames_per_block;

	frame = po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size);

	return frame;
}
#endif
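/*
 * Worked example with a hypothetical ring geometry: tp_block_size 4096
 * and tp_frame_size 2048 give frames_per_block = 2, so frame 5 lives in
 * block 5/2 = 2, at byte offset (5 % 2) * 2048 = 2048 inside pg_vec[2].
 */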
static inline struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}
static void packet_sock_destruct(struct sock *sk)
{
	BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	atomic_dec(&packet_socks_nr);
#ifdef PACKET_REFCNT_DEBUG
	printk(KERN_DEBUG "PACKET socket %p is free, %d are alive\n", sk, atomic_read(&packet_socks_nr));
#endif
}
static const struct proto_ops packet_ops;

#ifdef CONFIG_SOCK_PACKET
static const struct proto_ops packet_ops_spkt;
static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 * When we registered the protocol we saved the socket in the data
	 * field for just this event.
	 */
	sk = pt->af_packet_priv;

	/*
	 * Yank back the headers [hope the device set this right or
	 * kerboom...]  Incoming packets have the ll header pulled, so
	 * push it back.  For outgoing ones skb->data == skb->mac.raw,
	 * so this procedure is a noop.
	 */
	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto oom;

	/* drop any routing info */
	dst_release(skb->dst);
	skb->dst = NULL;

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = (struct sockaddr_pkt *)skb->cb;

	skb_push(skb, skb->data - skb->mac.raw);

	/* The SOCK_PACKET socket receives _all_ frames. */
	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 * Charge the memory to the socket. This is done specifically
	 * to prevent sockets using all the memory up.
	 */
	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}
/*
 * Output a raw packet to a device layer. This bypasses all the other
 * protocol layers and you must therefore supply it with a complete frame.
 */
static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto = 0;
	int err;

	/* Get and verify the address. */
	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/* Find the device first to size check it */
	saddr->spkt_device[13] = 0;
	dev = dev_get_by_name(saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */
	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len)
		goto out_unlock;

	err = -ENOBUFS;
	skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL);

	/*
	 * If the write buffer is full, then tough. At this level the user gets to
	 * deal with the problem - do your own algorithmic backoffs. That's far
	 * more flexible.
	 */
	if (skb == NULL)
		goto out_unlock;

	/* FIXME: Save some space for broken drivers that write a
	 * hard header at transmission time by themselves. PPP is the
	 * notable one here. This should really be fixed at the driver level.
	 */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb->nh.raw = skb->data;

	/* Try to align the data part correctly */
	if (dev->hard_header) {
		skb->data -= dev->hard_header_len;
		skb->tail -= dev->hard_header_len;
		if (len < dev->hard_header_len)
			skb->nh.raw = skb->data;
	}

	/* Returns -EFAULT on error */
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	if (err)
		goto out_free;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_free;

	/* Now send it */
	dev_queue_xmit(skb);
	dev_put(dev);
	return len;

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev)
		dev_put(dev);
	return err;
}
#endif
static inline int run_filter(struct sk_buff *skb, struct sock *sk,
			     unsigned *snaplen)
{
	struct sk_filter *filter;
	int err = 0;

	rcu_read_lock_bh();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL) {
		err = sk_run_filter(skb, filter->insns, filter->len);
		if (!err)
			err = -EPERM;
		else if (*snaplen > err)
			*snaplen = err;
	}
	rcu_read_unlock_bh();

	return err;
}
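/*
 * Illustrative user-space sketch (assumed values, not part of this
 * file): attaching a classic BPF program gives run_filter() something
 * to execute.  This one accepts every packet but caps the snapshot at
 * 96 bytes; returning 0 would have dropped the packet instead.
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 96 },	// accept, snaplen = 96
 *	};
 *	struct sock_fprog prog = { 1, code };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */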
/*
   This function does lazy skb cloning in the hope that most packets
   are discarded by BPF.

   Note the tricky part: we DO mangle a shared skb! skb->data, skb->len
   and skb->cb are mangled. It works because (and until) packets
   falling here are owned by the current CPU. Output packets are cloned
   by dev_queue_xmit_nit(), input packets are processed by net_bh
   sequentially, so that if we return the skb to its original state on
   exit, we will not harm anyone.
 */
static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 * skb_head = skb->data;
	int skb_len = skb->len;
	unsigned snaplen;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	skb->dev = dev;

	if (dev->hard_header) {
		/* The device has an explicit notion of ll header,
		   exported to higher levels.

		   Otherwise, the device hides details of its frame
		   structure, so that the corresponding packet head is
		   never delivered to the user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb->mac.raw);
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb->nh.raw - skb->data);
		}
	}

	snaplen = skb->len;

	if (run_filter(skb, sk, &snaplen) < 0)
		goto drop_n_restore;

	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		kfree_skb(skb);
		skb = nskb;
	}

	sll = (struct sockaddr_ll*)skb->cb;
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	sll->sll_ifindex = dev->ifindex;
	sll->sll_halen = 0;

	if (dev->hard_header_parse)
		sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	dst_release(skb->dst);
	skb->dst = NULL;

	/* drop conntrack reference */
	nf_reset(skb);

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_packets++;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk, skb->len);
	return 0;

drop_n_acct:
	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;
}
#ifdef CONFIG_PACKET_MMAP
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	struct tpacket_hdr *h;
	u8 * skb_head = skb->data;
	int skb_len = skb->len;
	unsigned snaplen;
	unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
	unsigned short macoff, netoff;
	struct sk_buff *copy_skb = NULL;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (dev->hard_header) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb->mac.raw);
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb->nh.raw - skb->data);
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				status |= TP_STATUS_CSUMNOTREADY;
		}
	}

	snaplen = skb->len;

	if (run_filter(skb, sk, &snaplen) < 0)
		goto drop_n_restore;

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(TPACKET_HDRLEN) + 16;
	} else {
		unsigned maclen = skb->nh.raw - skb->data;
		netoff = TPACKET_ALIGN(TPACKET_HDRLEN + (maclen < 16 ? 16 : maclen));
		macoff = netoff - maclen;
	}

	if (macoff + snaplen > po->frame_size) {
		if (po->copy_thresh &&
		    atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
		    (unsigned)sk->sk_rcvbuf) {
			if (skb_shared(skb)) {
				copy_skb = skb_clone(skb, GFP_ATOMIC);
			} else {
				copy_skb = skb_get(skb);
				skb_head = skb->data;
			}
			if (copy_skb)
				skb_set_owner_r(copy_skb, sk);
		}
		snaplen = po->frame_size - macoff;
		if ((int)snaplen < 0)
			snaplen = 0;
	}

	spin_lock(&sk->sk_receive_queue.lock);
	h = (struct tpacket_hdr *)packet_lookup_frame(po, po->head);
	if (h->tp_status)
		goto ring_is_full;
	po->head = po->head != po->frame_max ? po->head+1 : 0;
	po->stats.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	}
	if (!po->stats.tp_drops)
		status &= ~TP_STATUS_LOSING;
	spin_unlock(&sk->sk_receive_queue.lock);

	skb_copy_bits(skb, 0, (u8*)h + macoff, snaplen);

	h->tp_len = skb->len;
	h->tp_snaplen = snaplen;
	h->tp_mac = macoff;
	h->tp_net = netoff;
	if (skb->tstamp.off_sec == 0) {
		__net_timestamp(skb);
		sock_enable_timestamp(sk);
	}
	h->tp_sec = skb->tstamp.off_sec;
	h->tp_usec = skb->tstamp.off_usec;

	sll = (struct sockaddr_ll*)((u8*)h + TPACKET_ALIGN(sizeof(*h)));
	sll->sll_halen = 0;
	if (dev->hard_header_parse)
		sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	sll->sll_ifindex = dev->ifindex;

	h->tp_status = status;
	smp_mb();

	{
		struct page *p_start, *p_end;
		u8 *h_end = (u8 *)h + macoff + snaplen - 1;

		p_start = virt_to_page(h);
		p_end = virt_to_page(h_end);
		while (p_start <= p_end) {
			flush_dcache_page(p_start);
			p_start++;
		}
	}

	sk->sk_data_ready(sk, 0);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;

ring_is_full:
	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

	sk->sk_data_ready(sk, 0);
	if (copy_skb)
		kfree_skb(copy_skb);
	goto drop_n_restore;
}
#endif
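/*
 * Illustrative user-space sketch of the other side of this handshake
 * (ring, frame_nr and frame_size mirror a hypothetical PACKET_RX_RING
 * setup; not part of this file): the reader consumes frames marked
 * TP_STATUS_USER and hands them back as TP_STATUS_KERNEL.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	for (unsigned i = 0; ; i = (i + 1) % frame_nr) {
 *		struct tpacket_hdr *h = (void *)(ring + i * frame_size);
 *		while (!(h->tp_status & TP_STATUS_USER))
 *			poll(&pfd, 1, -1);
 *		// frame data: (char *)h + h->tp_mac, h->tp_snaplen bytes
 *		h->tp_status = TP_STATUS_KERNEL;
 *	}
 */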
static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	unsigned char *addr;
	int ifindex, err, reserve = 0;

	/* Get and verify the address. */
	if (saddr == NULL) {
		struct packet_sock *po = pkt_sk(sk);

		ifindex	= po->ifindex;
		proto	= po->num;
		addr	= NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
			goto out;
		ifindex	= saddr->sll_ifindex;
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
	}

	dev = dev_get_by_index(ifindex);
	err = -ENXIO;
	if (dev == NULL)
		goto out_unlock;
	if (sock->type == SOCK_RAW)
		reserve = dev->hard_header_len;

	err = -EMSGSIZE;
	if (len > dev->mtu + reserve)
		goto out_unlock;

	skb = sock_alloc_send_skb(sk, len + LL_RESERVED_SPACE(dev),
				  msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out_unlock;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb->nh.raw = skb->data;

	if (dev->hard_header) {
		int res;
		err = -EINVAL;
		res = dev->hard_header(skb, dev, ntohs(proto), addr, NULL, len);
		if (sock->type != SOCK_DGRAM) {
			skb->tail = skb->data;
			skb->len = 0;
		} else if (res < 0)
			goto out_free;
	}

	/* Returns -EFAULT on error */
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err)
		goto out_free;

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_free;

	/* Now send it */
	err = dev_queue_xmit(skb);
	if (err > 0 && (err = net_xmit_errno(err)) != 0)
		goto out_unlock;

	dev_put(dev);
	return len;

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev)
		dev_put(dev);
out:
	return err;
}
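/*
 * Illustrative user-space sketch (ifindex, dest_mac and the frame are
 * hypothetical): a sendto() on a packet socket ends up in the sendmsg
 * path above.
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_IP),
 *		.sll_ifindex  = ifindex,
 *		.sll_halen    = ETH_ALEN,
 *	};
 *	memcpy(sll.sll_addr, dest_mac, ETH_ALEN);
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&sll, sizeof(sll));
 */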
/*
 * Close a PACKET socket. This is fairly simple. We immediately go
 * to 'closed' state and remove our protocol entry in the device list.
 */
static int packet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po;

	if (!sk)
		return 0;

	po = pkt_sk(sk);

	write_lock_bh(&packet_sklist_lock);
	sk_del_node_init(sk);
	write_unlock_bh(&packet_sklist_lock);

	/*
	 * Unhook the packet receive handler.
	 */
	if (po->running) {
		/* Remove the protocol hook */
		dev_remove_pack(&po->prot_hook);
		po->running = 0;
		po->num = 0;
		__sock_put(sk);
	}

#ifdef CONFIG_PACKET_MULTICAST
	packet_flush_mclist(sk);
#endif

#ifdef CONFIG_PACKET_MMAP
	if (po->pg_vec) {
		struct tpacket_req req;
		memset(&req, 0, sizeof(req));
		packet_set_ring(sk, &req, 1);
	}
#endif

	/*
	 * Now the socket is dead. No more input will appear.
	 */
	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge queues */
	skb_queue_purge(&sk->sk_receive_queue);

	sock_put(sk);
	return 0;
}
/*
 * Attach a packet hook.
 */
static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
{
	struct packet_sock *po = pkt_sk(sk);

	/*
	 * Detach an existing hook if present.
	 */
	lock_sock(sk);

	spin_lock(&po->bind_lock);
	if (po->running) {
		__sock_put(sk);
		po->running = 0;
		po->num = 0;
		spin_unlock(&po->bind_lock);
		dev_remove_pack(&po->prot_hook);
		spin_lock(&po->bind_lock);
	}

	po->num = protocol;
	po->prot_hook.type = protocol;
	po->prot_hook.dev = dev;

	po->ifindex = dev ? dev->ifindex : 0;

	if (protocol == 0)
		goto out_unlock;

	if (dev) {
		if (dev->flags & IFF_UP) {
			dev_add_pack(&po->prot_hook);
			sock_hold(sk);
			po->running = 1;
		} else {
			sk->sk_err = ENETDOWN;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_error_report(sk);
		}
	} else {
		dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}

out_unlock:
	spin_unlock(&po->bind_lock);
	release_sock(sk);
	return 0;
}
/*
 * Bind a packet socket to a device
 */
#ifdef CONFIG_SOCK_PACKET

static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	char name[15];
	struct net_device *dev;
	int err = -ENODEV;

	/* Check legality */
	if (addr_len != sizeof(struct sockaddr))
		return -EINVAL;
	strlcpy(name, uaddr->sa_data, sizeof(name));

	dev = dev_get_by_name(name);
	if (dev) {
		err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
		dev_put(dev);
	}
	return err;
}
#endif
static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
	struct sock *sk = sock->sk;
	struct net_device *dev = NULL;
	int err;

	/* Check legality */
	if (addr_len < sizeof(struct sockaddr_ll))
		return -EINVAL;
	if (sll->sll_family != AF_PACKET)
		return -EINVAL;

	if (sll->sll_ifindex) {
		err = -ENODEV;
		dev = dev_get_by_index(sll->sll_ifindex);
		if (dev == NULL)
			goto out;
	}
	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
	if (dev)
		dev_put(dev);

out:
	return err;
}
static struct proto packet_proto = {
	.name	  = "PACKET",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct packet_sock),
};
/*
 * Create a packet of type SOCK_PACKET.
 */
static int packet_create(struct socket *sock, int protocol)
{
	struct sock *sk;
	struct packet_sock *po;
	__be16 proto = (__force __be16)protocol; /* weird, but documented */
	int err;

	if (!capable(CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW
#ifdef CONFIG_SOCK_PACKET
	    && sock->type != SOCK_PACKET
#endif
	    )
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	err = -ENOBUFS;
	sk = sk_alloc(PF_PACKET, GFP_KERNEL, &packet_proto, 1);
	if (sk == NULL)
		goto out;

	sock->ops = &packet_ops;
#ifdef CONFIG_SOCK_PACKET
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;
#endif
	sock_init_data(sock, sk);

	po = pkt_sk(sk);
	sk->sk_family = PF_PACKET;
	po->num = proto;

	sk->sk_destruct = packet_sock_destruct;
	atomic_inc(&packet_socks_nr);

	/*
	 * Attach a protocol block
	 */
	spin_lock_init(&po->bind_lock);
	po->prot_hook.func = packet_rcv;
#ifdef CONFIG_SOCK_PACKET
	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;
#endif
	po->prot_hook.af_packet_priv = sk;

	if (proto) {
		po->prot_hook.type = proto;
		dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}

	write_lock_bh(&packet_sklist_lock);
	sk_add_node(sk, &packet_sklist);
	write_unlock_bh(&packet_sklist_lock);
	return 0;
out:
	return err;
}
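/*
 * Illustrative user-space sketch: each of these calls lands in
 * packet_create() above, and all of them require CAP_NET_RAW.
 *
 *	socket(PF_PACKET, SOCK_RAW,    htons(ETH_P_ALL));  // ll header visible
 *	socket(PF_PACKET, SOCK_DGRAM,  htons(ETH_P_ALL));  // ll header hidden
 *	socket(PF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));  // obsolete interface
 */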
/*
 * Pull a packet from our receive queue and hand it to the user.
 * If necessary we block.
 */
static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	struct sockaddr_ll *sll;

	err = -EINVAL;
	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
		goto out;

#if 0
	/* What error should we return now? EUNATTACH? */
	if (pkt_sk(sk)->ifindex < 0)
		return -ENODEV;
#endif

	/*
	 * Call the generic datagram receiver. This handles all sorts
	 * of horrible races and re-entrancy so we can forget about it
	 * in the protocol layers.
	 *
	 * Now it will return ENETDOWN if the device has just gone down,
	 * but then it will block.
	 */
	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);

	/*
	 * An error occurred so return it. Because skb_recv_datagram()
	 * handles the blocking for us, we don't need to see or worry
	 * about blocking retries.
	 */
	if (skb == NULL)
		goto out;

	/*
	 * If the address length field is there to be filled in, we fill
	 * it in now.
	 */
	sll = (struct sockaddr_ll *)skb->cb;
	if (sock->type == SOCK_PACKET)
		msg->msg_namelen = sizeof(struct sockaddr_pkt);
	else
		msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);

	/*
	 * You lose any data beyond the buffer you gave. If it worries a
	 * user program they can ask the device for its MTU anyway.
	 */
	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free;

	sock_recv_timestamp(msg, sk, skb);

	if (msg->msg_name)
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);

	/*
	 * Free or return the buffer as appropriate. Again this
	 * hides all the races and re-entrancy issues from us.
	 */
	err = (flags & MSG_TRUNC) ? skb->len : copied;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;
}
#ifdef CONFIG_SOCK_PACKET
static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
			       int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;

	if (peer)
		return -EOPNOTSUPP;

	uaddr->sa_family = AF_PACKET;
	dev = dev_get_by_index(pkt_sk(sk)->ifindex);
	if (dev) {
		strlcpy(uaddr->sa_data, dev->name, 15);
		dev_put(dev);
	} else
		memset(uaddr->sa_data, 0, 14);
	*uaddr_len = sizeof(*uaddr);

	return 0;
}
#endif
static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
			  int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;

	if (peer)
		return -EOPNOTSUPP;

	sll->sll_family = AF_PACKET;
	sll->sll_ifindex = po->ifindex;
	sll->sll_protocol = po->num;
	dev = dev_get_by_index(po->ifindex);
	if (dev) {
		sll->sll_hatype = dev->type;
		sll->sll_halen = dev->addr_len;
		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
		dev_put(dev);
	} else {
		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
		sll->sll_halen = 0;
	}
	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;

	return 0;
}
#ifdef CONFIG_PACKET_MULTICAST
static void packet_dev_mc(struct net_device *dev, struct packet_mclist *i, int what)
{
	switch (i->type) {
	case PACKET_MR_MULTICAST:
		if (what > 0)
			dev_mc_add(dev, i->addr, i->alen, 0);
		else
			dev_mc_delete(dev, i->addr, i->alen, 0);
		break;
	case PACKET_MR_PROMISC:
		dev_set_promiscuity(dev, what);
		break;
	case PACKET_MR_ALLMULTI:
		dev_set_allmulti(dev, what);
		break;
	default:;
	}
}

static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
{
	for ( ; i; i = i->next) {
		if (i->ifindex == dev->ifindex)
			packet_dev_mc(dev, i, what);
	}
}
static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml, *i;
	struct net_device *dev;
	int err;

	rtnl_lock();

	err = -ENODEV;
	dev = __dev_get_by_index(mreq->mr_ifindex);
	if (!dev)
		goto done;

	err = -EINVAL;
	if (mreq->mr_alen > dev->addr_len)
		goto done;

	err = -ENOBUFS;
	i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i == NULL)
		goto done;

	err = 0;
	for (ml = po->mclist; ml; ml = ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			ml->count++;
			/* Free the new element ... */
			kfree(i);
			goto done;
		}
	}

	i->type = mreq->mr_type;
	i->ifindex = mreq->mr_ifindex;
	i->alen = mreq->mr_alen;
	memcpy(i->addr, mreq->mr_address, i->alen);
	i->count = 1;
	i->next = po->mclist;
	po->mclist = i;
	packet_dev_mc(dev, i, +1);

done:
	rtnl_unlock();
	return err;
}
static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_mclist *ml, **mlp;

	rtnl_lock();

	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			if (--ml->count == 0) {
				struct net_device *dev;
				*mlp = ml->next;
				dev = dev_get_by_index(ml->ifindex);
				if (dev) {
					packet_dev_mc(dev, ml, -1);
					dev_put(dev);
				}
				kfree(ml);
			}
			rtnl_unlock();
			return 0;
		}
	}
	rtnl_unlock();
	return -EADDRNOTAVAIL;
}
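/*
 * Illustrative user-space sketch (ifindex and mc_mac are hypothetical):
 * joining a hardware multicast group through packet_mc_add() above;
 * PACKET_DROP_MEMBERSHIP with the same argument undoes it.
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = ifindex,
 *		.mr_type    = PACKET_MR_MULTICAST,
 *		.mr_alen    = ETH_ALEN,
 *	};
 *	memcpy(mreq.mr_address, mc_mac, ETH_ALEN);
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */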
static void packet_flush_mclist(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml;

	if (!po->mclist)
		return;

	rtnl_lock();
	while ((ml = po->mclist) != NULL) {
		struct net_device *dev;

		po->mclist = ml->next;
		if ((dev = dev_get_by_index(ml->ifindex)) != NULL) {
			packet_dev_mc(dev, ml, -1);
			dev_put(dev);
		}
		kfree(ml);
	}
	rtnl_unlock();
}
#endif
static int
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	int ret;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	switch (optname) {
#ifdef CONFIG_PACKET_MULTICAST
	case PACKET_ADD_MEMBERSHIP:
	case PACKET_DROP_MEMBERSHIP:
	{
		struct packet_mreq_max mreq;
		int len = optlen;

		memset(&mreq, 0, sizeof(mreq));
		if (len < sizeof(struct packet_mreq))
			return -EINVAL;
		if (len > sizeof(mreq))
			len = sizeof(mreq);
		if (copy_from_user(&mreq, optval, len))
			return -EFAULT;
		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
			return -EINVAL;
		if (optname == PACKET_ADD_MEMBERSHIP)
			ret = packet_mc_add(sk, &mreq);
		else
			ret = packet_mc_drop(sk, &mreq);
		return ret;
	}
#endif
#ifdef CONFIG_PACKET_MMAP
	case PACKET_RX_RING:
	{
		struct tpacket_req req;

		if (optlen < sizeof(req))
			return -EINVAL;
		if (copy_from_user(&req, optval, sizeof(req)))
			return -EFAULT;
		return packet_set_ring(sk, &req, 0);
	}
	case PACKET_COPY_THRESH:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		pkt_sk(sk)->copy_thresh = val;
		return 0;
	}
#endif
	default:
		return -ENOPROTOOPT;
	}
}
static int packet_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
{
	int len;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case PACKET_STATISTICS:
	{
		struct tpacket_stats st;

		if (len > sizeof(struct tpacket_stats))
			len = sizeof(struct tpacket_stats);
		spin_lock_bh(&sk->sk_receive_queue.lock);
		st = po->stats;
		memset(&po->stats, 0, sizeof(st));
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		st.tp_packets += st.tp_drops;

		if (copy_to_user(optval, &st, len))
			return -EFAULT;
		break;
	}
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
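/*
 * Illustrative user-space sketch: reading the counters maintained by
 * packet_rcv()/tpacket_rcv().  Note that the counters are zeroed on
 * read and that tp_packets includes tp_drops.
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len);
 */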
static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
{
	struct sock *sk;
	struct hlist_node *node;
	struct net_device *dev = (struct net_device *)data;

	read_lock(&packet_sklist_lock);
	sk_for_each(sk, node, &packet_sklist) {
		struct packet_sock *po = pkt_sk(sk);

		switch (msg) {
		case NETDEV_UNREGISTER:
#ifdef CONFIG_PACKET_MULTICAST
			if (po->mclist)
				packet_dev_mclist(dev, po->mclist, -1);
			/* fallthrough */
#endif
		case NETDEV_DOWN:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->running) {
					__dev_remove_pack(&po->prot_hook);
					__sock_put(sk);
					po->running = 0;
					sk->sk_err = ENETDOWN;
					if (!sock_flag(sk, SOCK_DEAD))
						sk->sk_error_report(sk);
				}
				if (msg == NETDEV_UNREGISTER) {
					po->ifindex = -1;
					po->prot_hook.dev = NULL;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		case NETDEV_UP:
			spin_lock(&po->bind_lock);
			if (dev->ifindex == po->ifindex && po->num &&
			    !po->running) {
				dev_add_pack(&po->prot_hook);
				sock_hold(sk);
				po->running = 1;
			}
			spin_unlock(&po->bind_lock);
			break;
		}
	}
	read_unlock(&packet_sklist_lock);
	return NOTIFY_DONE;
}
static int packet_ioctl(struct socket *sock, unsigned int cmd,
			unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = atomic_read(&sk->sk_wmem_alloc);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ:
	{
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);

#ifdef CONFIG_INET
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCDARP:
	case SIOCGARP:
	case SIOCSARP:
	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCSIFFLAGS:
		return inet_dgram_ops.ioctl(sock, cmd, arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}
#ifndef CONFIG_PACKET_MMAP
#define packet_mmap sock_no_mmap
#define packet_poll datagram_poll
#else

static unsigned int packet_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned int mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->pg_vec) {
		unsigned last = po->head ? po->head - 1 : po->frame_max;
		struct tpacket_hdr *h;

		h = (struct tpacket_hdr *)packet_lookup_frame(po, last);

		if (h->tp_status)
			mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	return mask;
}
/* Dirty? Well, I still have not learned a better way to account
 * for user mmaps.
 */

static void packet_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&pkt_sk(sk)->mapped);
}

static void packet_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&pkt_sk(sk)->mapped);
}
static struct vm_operations_struct packet_mmap_ops = {
	.open	= packet_mm_open,
	.close	= packet_mm_close,
};

static inline struct page *pg_vec_endpage(char *one_pg_vec, unsigned int order)
{
	return virt_to_page(one_pg_vec + (PAGE_SIZE << order) - 1);
}
static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i]))
			free_pages((unsigned long) pg_vec[i], order);
	}
	kfree(pg_vec);
}

static inline char *alloc_one_pg_vec_page(unsigned long order)
{
	return (char *) __get_free_pages(GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
					 order);
}

static char **alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;
	char **pg_vec;
	int i;

	pg_vec = kzalloc(block_nr * sizeof(char *), GFP_KERNEL);
	if (unlikely(!pg_vec))
		goto out;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i]))
			goto out_free_pgvec;
	}

out:
	return pg_vec;

out_free_pgvec:
	free_pg_vec(pg_vec, order, block_nr);
	pg_vec = NULL;
	goto out;
}
static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing)
{
	char **pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	int was_running, order = 0;
	__be16 num;
	int err = 0;

	if (req->tp_block_nr) {
		int i;

		/* Sanity tests and some calculations */
		if (unlikely(po->pg_vec))
			return -EBUSY;

		if (unlikely((int)req->tp_block_size <= 0))
			return -EINVAL;
		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
			return -EINVAL;
		if (unlikely(req->tp_frame_size < TPACKET_HDRLEN))
			return -EINVAL;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			return -EINVAL;

		po->frames_per_block = req->tp_block_size / req->tp_frame_size;
		if (unlikely(po->frames_per_block <= 0))
			return -EINVAL;
		if (unlikely((po->frames_per_block * req->tp_block_nr) !=
			     req->tp_frame_nr))
			return -EINVAL;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;

		/* Hand every frame to the kernel side of the handshake */
		for (i = 0; i < req->tp_block_nr; i++) {
			char *ptr = pg_vec[i];
			struct tpacket_hdr *header;
			int k;

			for (k = 0; k < po->frames_per_block; k++) {
				header = (struct tpacket_hdr *) ptr;
				header->tp_status = TP_STATUS_KERNEL;
				ptr += req->tp_frame_size;
			}
		}
		/* Done */
	} else {
		if (unlikely(req->tp_frame_nr))
			return -EINVAL;
	}

	lock_sock(sk);

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = po->running;
	num = po->num;
	if (was_running) {
		__dev_remove_pack(&po->prot_hook);
		po->num = 0;
		po->running = 0;
		__sock_put(sk);
	}
	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
	if (closing || atomic_read(&po->mapped) == 0) {
		err = 0;
/* XC(a, b) swaps a and b, evaluating to the old value of a */
#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })

		spin_lock_bh(&sk->sk_receive_queue.lock);
		pg_vec = XC(po->pg_vec, pg_vec);
		po->frame_max = (req->tp_frame_nr - 1);
		po->head = 0;
		po->frame_size = req->tp_frame_size;
		spin_unlock_bh(&sk->sk_receive_queue.lock);

		order = XC(po->pg_vec_order, order);
		req->tp_block_nr = XC(po->pg_vec_len, req->tp_block_nr);

		po->pg_vec_pages = req->tp_block_size / PAGE_SIZE;
		po->prot_hook.func = po->pg_vec ? tpacket_rcv : packet_rcv;
		skb_queue_purge(&sk->sk_receive_queue);
#undef XC
		if (atomic_read(&po->mapped))
			printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped));
	}

	spin_lock(&po->bind_lock);
	if (was_running && !po->running) {
		sock_hold(sk);
		po->running = 1;
		po->num = num;
		dev_add_pack(&po->prot_hook);
	}
	spin_unlock(&po->bind_lock);

	release_sock(sk);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
	return err;
}
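/*
 * Illustrative user-space sketch: a tpacket_req that passes the sanity
 * checks above, assuming PAGE_SIZE is 4096.  The numbers are examples:
 * each 4096-byte block holds two 2048-byte frames, so tp_frame_nr must
 * equal frames_per_block * tp_block_nr.
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,	// multiple of PAGE_SIZE
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,	// aligned, >= TPACKET_HDRLEN
 *		.tp_frame_nr   = 128,	// 2 frames/block * 64 blocks
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */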
static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	size = vma->vm_end - vma->vm_start;

	lock_sock(sk);
	if (po->pg_vec == NULL)
		goto out;
	if (size != po->pg_vec_len * po->pg_vec_pages * PAGE_SIZE)
		goto out;

	start = vma->vm_start;
	for (i = 0; i < po->pg_vec_len; i++) {
		struct page *page = virt_to_page(po->pg_vec[i]);
		int pg_num;

		for (pg_num = 0; pg_num < po->pg_vec_pages; pg_num++, page++) {
			err = vm_insert_page(vma, start, page);
			if (unlikely(err))
				goto out;
			start += PAGE_SIZE;
		}
	}
	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	release_sock(sk);
	return err;
}
#endif
1780 static const struct proto_ops packet_ops_spkt = {
1781 .family = PF_PACKET,
1782 .owner = THIS_MODULE,
1783 .release = packet_release,
1784 .bind = packet_bind_spkt,
1785 .connect = sock_no_connect,
1786 .socketpair = sock_no_socketpair,
1787 .accept = sock_no_accept,
1788 .getname = packet_getname_spkt,
1789 .poll = datagram_poll,
1790 .ioctl = packet_ioctl,
1791 .listen = sock_no_listen,
1792 .shutdown = sock_no_shutdown,
1793 .setsockopt = sock_no_setsockopt,
1794 .getsockopt = sock_no_getsockopt,
1795 .sendmsg = packet_sendmsg_spkt,
1796 .recvmsg = packet_recvmsg,
1797 .mmap = sock_no_mmap,
1798 .sendpage = sock_no_sendpage,
static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};
static struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner =	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call = packet_notifier,
};
#ifdef CONFIG_PROC_FS
static inline struct sock *packet_seq_idx(loff_t off)
{
	struct sock *s;
	struct hlist_node *node;

	sk_for_each(s, node, &packet_sklist) {
		if (!off--)
			return s;
	}
	return NULL;
}
static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&packet_sklist_lock);
	return *pos ? packet_seq_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return (v == SEQ_START_TOKEN)
		? sk_head(&packet_sklist)
		: sk_next((struct sock *)v);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&packet_sklist_lock);
}
static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
	else {
		struct sock *s = v;
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%p %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   atomic_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(po->num),
			   po->ifindex,
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   sock_i_uid(s),
			   sock_i_ino(s));
	}

	return 0;
}
static struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};

static int packet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &packet_seq_ops);
}

static struct file_operations packet_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= packet_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

#endif
static void __exit packet_exit(void)
{
	proc_net_remove("packet");
	unregister_netdevice_notifier(&packet_netdev_notifier);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
	int rc = proto_register(&packet_proto, 0);

	if (rc != 0)
		goto out;

	sock_register(&packet_family_ops);
	register_netdevice_notifier(&packet_netdev_notifier);
	proc_net_fops_create("packet", 0, &packet_seq_fops);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);