#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>

#include <net/ipv6.h>	/* for ipv6_proxy_select_ident() */
#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>

/*
 * A macvtap queue is the central object of this driver, it connects
 * an open character device to a macvlan interface. There can be
 * multiple queues on one interface, which map back to queues
 * implemented in hardware on the underlying device.
 *
 * macvtap_proto is used to allocate queues through the sock allocation
 * mechanism.
 *
 * TODO: multiqueue support is currently not implemented, even though
 * macvtap is basically prepared for that. We will need to add this
 * here as well as in virtio-net and qemu to get line rate on 10gbit
 * adapters from a guest.
 */
struct macvtap_queue {
	struct sock sk;
	struct socket sock;
	struct socket_wq wq;
	int vnet_hdr_sz;
	struct macvlan_dev __rcu *vlan;
	struct file *file;
	unsigned int flags;
};

static struct proto macvtap_proto = {
	.name = "macvtap",
	.owner = THIS_MODULE,
	.obj_size = sizeof (struct macvtap_queue),
};

/*
 * Variables for dealing with macvtap device numbers.
 */
static dev_t macvtap_major;
#define MACVTAP_NUM_DEVS (1U << MINORBITS)
static DEFINE_MUTEX(minor_lock);
static DEFINE_IDR(minor_idr);

#define GOODCOPY_LEN 128
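
/*
 * GOODCOPY_LEN is how much of a packet gets copied into the linear skb
 * head on the zerocopy transmit path when the vnet header carries no
 * hdr_len hint; the rest of the payload is pinned directly from user
 * pages (see macvtap_get_user()).
 */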
static struct class *macvtap_class;
static struct cdev macvtap_cdev;

static const struct proto_ops macvtap_socket_ops;

/*
 * RCU usage:
 * The macvtap_queue and the macvlan_dev are loosely coupled, the
 * pointers from one to the other can only be read while rcu_read_lock
 * or macvtap_lock is held.
 *
 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * macvtap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */
static DEFINE_SPINLOCK(macvtap_lock);
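
/*
 * Sketch of the locking rule above (illustrative only, not called
 * anywhere): readers on the packet path dereference the cross
 * pointers strictly under rcu_read_lock(),
 *
 *	rcu_read_lock();
 *	vlan = rcu_dereference(q->vlan);
 *	if (vlan)
 *		... use vlan->dev ...
 *	rcu_read_unlock();
 *
 * while all writers take macvtap_lock and publish with
 * rcu_assign_pointer(), as macvtap_set_queue() and
 * macvtap_put_queue() do below.
 */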

/*
 * get_slot: return a [unused/occupied] slot in vlan->taps[]:
 *	- if 'q' is NULL, return the first empty slot;
 *	- otherwise, return the slot this pointer occupies.
 */
static int get_slot(struct macvlan_dev *vlan, struct macvtap_queue *q)
{
	int i;

	for (i = 0; i < MAX_MACVTAP_QUEUES; i++) {
		if (rcu_dereference(vlan->taps[i]) == q)
			return i;
	}

	/* Should never happen */
	BUG_ON(1);
}

static int macvtap_set_queue(struct net_device *dev, struct file *file,
				struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int index;
	int err = -EBUSY;

	spin_lock(&macvtap_lock);
	if (vlan->numvtaps == MAX_MACVTAP_QUEUES)
		goto out;

	err = 0;
	index = get_slot(vlan, NULL);
	rcu_assign_pointer(q->vlan, vlan);
	rcu_assign_pointer(vlan->taps[index], q);
	sock_hold(&q->sk);

	q->file = file;
	file->private_data = q;

	vlan->numvtaps++;

out:
	spin_unlock(&macvtap_lock);
	return err;
}

/*
 * The file owning the queue got closed, give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Using the spinlock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void macvtap_put_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	spin_lock(&macvtap_lock);
	vlan = rcu_dereference_protected(q->vlan,
					 lockdep_is_held(&macvtap_lock));
	if (vlan) {
		int index = get_slot(vlan, q);

		rcu_assign_pointer(vlan->taps[index], NULL);
		rcu_assign_pointer(q->vlan, NULL);
		sock_put(&q->sk);
		--vlan->numvtaps;
	}

	spin_unlock(&macvtap_lock);

	synchronize_rcu();
	sock_put(&q->sk);
}

/*
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not mq, calculate a flow hash
 * to select a queue. If all fails, find the first available queue.
 * Cache vlan->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *tap = NULL;
	int numvtaps = vlan->numvtaps;
	__u32 rxq;

	if (!numvtaps)
		goto out;

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		tap = rcu_dereference(vlan->taps[rxq]);
		if (tap)
			goto out;
	}

	/* Check if we can use flow to select a queue */
	rxq = skb_get_rxhash(skb);
	if (rxq) {
		tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
		if (tap)
			goto out;
	}

	/* Everything failed - find first available queue */
	for (rxq = 0; rxq < MAX_MACVTAP_QUEUES; rxq++) {
		tap = rcu_dereference(vlan->taps[rxq]);
		if (tap)
			break;
	}

out:
	return tap;
}
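
/*
 * Example of the wrap-around above: with numvtaps = 3 and a recorded
 * rx queue of 7, the while loop reduces rxq 7 -> 4 -> 1, so traffic
 * from hardware queue 7 is steered to tap slot 1.
 */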

/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
static void macvtap_del_queues(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *q, *qlist[MAX_MACVTAP_QUEUES];
	int i, j = 0;

	/* macvtap_put_queue can free some slots, so go through all slots */
	spin_lock(&macvtap_lock);
	for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) {
		q = rcu_dereference_protected(vlan->taps[i],
					      lockdep_is_held(&macvtap_lock));
		if (q) {
			qlist[j++] = q;
			rcu_assign_pointer(vlan->taps[i], NULL);
			rcu_assign_pointer(q->vlan, NULL);
			vlan->numvtaps--;
		}
	}
	BUG_ON(vlan->numvtaps != 0);
	/* guarantee that any future macvtap_set_queue will fail */
	vlan->numvtaps = MAX_MACVTAP_QUEUES;
	spin_unlock(&macvtap_lock);

	synchronize_rcu();

	for (--j; j >= 0; j--)
		sock_put(&qlist[j]->sk);
}

/*
 * Forward happens for data that gets sent from one macvlan
 * endpoint to another one in bridge mode. We just take
 * the skb and put it into the receive queue.
 */
static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
{
	struct macvtap_queue *q = macvtap_get_queue(dev, skb);
	if (!q)
		goto drop;

	if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
		goto drop;

	skb_queue_tail(&q->sk.sk_receive_queue, skb);
	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

/*
 * Receive is for data from the external interface (lowerdev);
 * in the case of macvtap we can treat it the same way as
 * forward, which macvlan cannot.
 */
static int macvtap_receive(struct sk_buff *skb)
{
	skb_push(skb, ETH_HLEN);
	return macvtap_forward(skb->dev, skb);
}

static int macvtap_get_minor(struct macvlan_dev *vlan)
{
	int retval = -ENOMEM;
	int id;

	mutex_lock(&minor_lock);
	if (idr_pre_get(&minor_idr, GFP_KERNEL) == 0)
		goto exit;

	retval = idr_get_new_above(&minor_idr, vlan, 1, &id);
	if (retval < 0) {
		if (retval == -EAGAIN)
			retval = -ENOMEM;
		goto exit;
	}
	if (id < MACVTAP_NUM_DEVS) {
		vlan->minor = id;
	} else {
		printk(KERN_ERR "too many macvtap devices\n");
		retval = -EINVAL;
		idr_remove(&minor_idr, id);
	}
exit:
	mutex_unlock(&minor_lock);
	return retval;
}

static void macvtap_free_minor(struct macvlan_dev *vlan)
{
	mutex_lock(&minor_lock);
	if (vlan->minor) {
		idr_remove(&minor_idr, vlan->minor);
		vlan->minor = 0;
	}
	mutex_unlock(&minor_lock);
}

static struct net_device *dev_get_by_macvtap_minor(int minor)
{
	struct net_device *dev = NULL;
	struct macvlan_dev *vlan;

	mutex_lock(&minor_lock);
	vlan = idr_find(&minor_idr, minor);
	if (vlan) {
		dev = vlan->dev;
		dev_hold(dev);
	}
	mutex_unlock(&minor_lock);
	return dev;
}
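
/*
 * The minor obtained in macvtap_get_minor() is what ties a character
 * device node to its interface: NETDEV_REGISTER below creates a
 * "tap%d" device named after dev->ifindex, which udev typically
 * exposes as /dev/tapN, and opening that node resolves back to the
 * net_device through this lookup.
 */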

static int macvtap_newlink(struct net *src_net,
			   struct net_device *dev,
			   struct nlattr *tb[],
			   struct nlattr *data[])
{
	/* Don't put anything that may fail after macvlan_common_newlink
	 * because we can't undo what it does.
	 */
	return macvlan_common_newlink(src_net, dev, tb, data,
				      macvtap_receive, macvtap_forward);
}

static void macvtap_dellink(struct net_device *dev,
			    struct list_head *head)
{
	macvtap_del_queues(dev);
	macvlan_dellink(dev, head);
}

static void macvtap_setup(struct net_device *dev)
{
	macvlan_common_setup(dev);
	dev->tx_queue_len = TUN_READQ_SIZE;
}
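
/*
 * TUN_READQ_SIZE (500 in <linux/if_tun.h>) doubles as the receive
 * backlog bound here: macvtap_forward() drops packets once
 * sk_receive_queue reaches dev->tx_queue_len.
 */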

static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
	.kind		= "macvtap",
	.setup		= macvtap_setup,
	.newlink	= macvtap_newlink,
	.dellink	= macvtap_dellink,
};

static void macvtap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}

static void macvtap_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}

static int macvtap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = dev_get_by_macvtap_minor(iminor(inode));
	struct macvtap_queue *q;
	int err;

	err = -ENODEV;
	if (!dev)
		goto out;

	err = -ENOMEM;
	q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					     &macvtap_proto);
	if (!q)
		goto out;

	q->sock.wq = &q->wq;
	init_waitqueue_head(&q->wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &macvtap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = macvtap_sock_write_space;
	q->sk.sk_destruct = macvtap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * So far only KVM virtio_net uses macvtap; enable zero copy between
	 * guest kernel and host kernel when the lower device supports
	 * zerocopy.
	 *
	 * macvlan supports zerocopy iff the lower device supports zero
	 * copy, so we don't have to look at the lower device directly.
	 */
	if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = macvtap_set_queue(dev, file, q);
	if (err)
		sock_put(&q->sk);

out:
	if (dev)
		dev_put(dev);

	return err;
}

static int macvtap_release(struct inode *inode, struct file *file)
{
	struct macvtap_queue *q = file->private_data;
	macvtap_put_queue(q);
	return 0;
}

static unsigned int macvtap_poll(struct file *file, poll_table *wait)
{
	struct macvtap_queue *q = file->private_data;
	unsigned int mask = POLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->wq.wait, wait);

	if (!skb_queue_empty(&q->sk.sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;

out:
	return mask;
}

static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
						size_t len, size_t linear,
						int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}
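
/*
 * Example: with prepad = NET_IP_ALIGN, len = 16384 and linear = 128,
 * the skb gets a 128-byte linear head and 16256 bytes of paged data;
 * a request with prepad + len < PAGE_SIZE is made entirely linear
 * instead.
 */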

/* set skb frags from iovec, this can move to core network code for reuse */
static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
				  int offset, size_t count)
{
	int len = iov_length(from, count) - offset;
	int copy = skb_headlen(skb);
	int size, offset1 = 0;
	int i = 0;

	/* Skip over from offset */
	while (count && (offset >= from->iov_len)) {
		offset -= from->iov_len;
		++from;
		--count;
	}

	/* copy up to skb headlen */
	while (count && (copy > 0)) {
		size = min_t(unsigned int, copy, from->iov_len - offset);
		if (copy_from_user(skb->data + offset1, from->iov_base + offset,
				   size))
			return -EFAULT;
		if (copy > size) {
			++from;
			--count;
			offset = 0;
		} else
			offset += size;
		copy -= size;
		offset1 += size;
	}

	if (len == offset1)
		return 0;

	while (count--) {
		struct page *page[MAX_SKB_FRAGS];
		int num_pages;
		unsigned long base;
		unsigned long truesize;

		len = from->iov_len - offset;
		if (!len) {
			offset = 0;
			++from;
			continue;
		}
		base = (unsigned long)from->iov_base + offset;
		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
		if (i + size > MAX_SKB_FRAGS)
			return -EMSGSIZE;
		num_pages = get_user_pages_fast(base, size, 0, &page[i]);
		if (num_pages != size) {
			int j;

			for (j = 0; j < num_pages; j++)
				put_page(page[i + j]);
			return -EFAULT;
		}
		/* increase sk_wmem_alloc by the full pinned size */
		truesize = size * PAGE_SIZE;
		skb->data_len += len;
		skb->len += len;
		skb->truesize += truesize;
		atomic_add(truesize, &skb->sk->sk_wmem_alloc);
		while (len) {
			int off = base & ~PAGE_MASK;
			int size = min_t(int, len, PAGE_SIZE - off);
			__skb_fill_page_desc(skb, i, page[i], off, size);
			skb_shinfo(skb)->nr_frags++;
			base += size;
			len -= size;
			i++;
		}
		offset = 0;
	}
	return 0;
}
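
/*
 * Lifetime note (an assumption about the surrounding zerocopy design,
 * not spelled out in this file): pages pinned here are released when
 * the skb frags are dropped, and the sender is notified through the
 * ubuf_info callback that macvtap_get_user() attaches via
 * skb_shinfo(skb)->destructor_arg when SKBTX_DEV_ZEROCOPY is set.
 */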

/*
 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
 * be shared with the tun/tap driver.
 */
static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
				     struct virtio_net_hdr *vnet_hdr)
{
	unsigned short gso_type = 0;
	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			gso_type = SKB_GSO_TCPV6;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			gso_type = SKB_GSO_UDP;
			if (skb->protocol == htons(ETH_P_IPV6))
				ipv6_proxy_select_ident(skb);
			break;
		default:
			return -EINVAL;
		}

		if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
			gso_type |= SKB_GSO_TCP_ECN;

		if (vnet_hdr->gso_size == 0)
			return -EINVAL;
	}

	if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (!skb_partial_csum_set(skb, vnet_hdr->csum_start,
					  vnet_hdr->csum_offset))
			return -EINVAL;
	}

	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		skb_shinfo(skb)->gso_size = vnet_hdr->gso_size;
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}
	return 0;
}
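
/*
 * Example (typical values, for illustration): a guest sending a TSO
 * IPv4/TCP frame hands in roughly
 *
 *	vnet_hdr.flags       = VIRTIO_NET_HDR_F_NEEDS_CSUM;
 *	vnet_hdr.gso_type    = VIRTIO_NET_HDR_GSO_TCPV4;
 *	vnet_hdr.gso_size    = 1448;          (the MSS)
 *	vnet_hdr.csum_start  = ETH_HLEN + 20; (start of the TCP header)
 *	vnet_hdr.csum_offset = 16;            (checksum field in TCP)
 *
 * which macvtap_skb_from_vnet_hdr() turns into SKB_GSO_TCPV4 with
 * CHECKSUM_PARTIAL offload state.
 */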

static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
				   struct virtio_net_hdr *vnet_hdr)
{
	memset(vnet_hdr, 0, sizeof(*vnet_hdr));

	if (skb_is_gso(skb)) {
		struct skb_shared_info *sinfo = skb_shinfo(skb);

		/* This is a hint as to how much should be linear. */
		vnet_hdr->hdr_len = skb_headlen(skb);
		vnet_hdr->gso_size = sinfo->gso_size;
		if (sinfo->gso_type & SKB_GSO_TCPV4)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (sinfo->gso_type & SKB_GSO_TCPV6)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (sinfo->gso_type & SKB_GSO_UDP)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (sinfo->gso_type & SKB_GSO_TCP_ECN)
			vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else
		vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		vnet_hdr->csum_start = skb_checksum_start_offset(skb);
		vnet_hdr->csum_offset = skb->csum_offset;
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
	} /* else everything is zero */

	return 0;
}

static unsigned long iov_pages(const struct iovec *iv, int offset,
			       unsigned long nr_segs)
{
	unsigned long seg, base;
	int pages = 0, len, size;

	while (nr_segs && (offset >= iv->iov_len)) {
		offset -= iv->iov_len;
		++iv;
		--nr_segs;
	}

	for (seg = 0; seg < nr_segs; seg++) {
		base = (unsigned long)iv[seg].iov_base + offset;
		len = iv[seg].iov_len - offset;
		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
		pages += size;
		offset = 0;
	}

	return pages;
}
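
/*
 * Worked example: a 5000-byte segment starting 100 bytes into a page
 * gives size = ((100 + 5000 + PAGE_SIZE - 1) >> PAGE_SHIFT) = 2 with
 * 4k pages, i.e. the segment touches two pages even though it is only
 * slightly longer than one.
 */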

/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
				const struct iovec *iv, unsigned long total_len,
				size_t count, int noblock)
{
	struct sk_buff *skb;
	struct macvlan_dev *vlan;
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	bool zerocopy = false;
	size_t linear;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = q->vnet_hdr_sz;

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
					  sizeof(vnet_hdr));
		if (err < 0)
			goto err;
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
							vnet_hdr.hdr_len)
			vnet_hdr.hdr_len = vnet_hdr.csum_start +
						vnet_hdr.csum_offset + 2;
		err = -EINVAL;
		if (vnet_hdr.hdr_len > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	err = -EMSGSIZE;
	if (unlikely(count > UIO_MAXIOV))
		goto err;

	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
		linear = copylen;
		if (iov_pages(iv, vnet_hdr_len + copylen, count)
		    <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		linear = vnet_hdr.hdr_len;
	}

	skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
				linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
	else {
		err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
						   len);
		if (!err && m && m->msg_control) {
			struct ubuf_info *uarg = m->msg_control;
			uarg->callback(uarg);
		}
	}

	if (err)
		goto err_kfree;

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = macvtap_skb_from_vnet_hdr(skb, &vnet_hdr);
		if (err)
			goto err_kfree;
	}

	rcu_read_lock_bh();
	vlan = rcu_dereference_bh(q->vlan);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = m->msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
	}
	if (vlan)
		macvlan_start_xmit(skb, vlan->dev);
	else
		kfree_skb(skb);
	rcu_read_unlock_bh();

	return total_len;

err_kfree:
	kfree_skb(skb);

err:
	rcu_read_lock_bh();
	vlan = rcu_dereference_bh(q->vlan);
	if (vlan)
		vlan->dev->stats.tx_dropped++;
	rcu_read_unlock_bh();

	return err;
}

static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
				 unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	ssize_t result = -ENOLINK;
	struct macvtap_queue *q = file->private_data;

	result = macvtap_get_user(q, NULL, iv, iov_length(iv, count), count,
				  file->f_flags & O_NONBLOCK);
	return result;
}

/* Put packet to the user space buffer */
static ssize_t macvtap_put_user(struct macvtap_queue *q,
				const struct sk_buff *skb,
				const struct iovec *iv, int len)
{
	struct macvlan_dev *vlan;
	int ret;
	int vnet_hdr_len = 0;

	if (q->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr vnet_hdr;
		vnet_hdr_len = q->vnet_hdr_sz;
		if ((len -= vnet_hdr_len) < 0)
			return -EINVAL;

		ret = macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);
		if (ret)
			return ret;

		if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
			return -EFAULT;
	}

	len = min_t(int, skb->len, len);

	ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);

	rcu_read_lock_bh();
	vlan = rcu_dereference_bh(q->vlan);
	if (vlan)
		macvlan_count_rx(vlan, len, ret == 0, 0);
	rcu_read_unlock_bh();

	return ret ? ret : (len + vnet_hdr_len);
}
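
/*
 * Example of the return value: with the default 10-byte vnet header
 * and a 1500-byte frame read into a large enough buffer, the call
 * returns 1510; an error from the copy routines is returned as-is.
 */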

static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
			       const struct iovec *iv, unsigned long len,
			       int noblock)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sk_buff *skb;
	ssize_t ret = 0;

	add_wait_queue(sk_sleep(&q->sk), &wait);
	while (len) {
		current->state = TASK_INTERRUPTIBLE;

		/* Read frames from the queue */
		skb = skb_dequeue(&q->sk.sk_receive_queue);
		if (!skb) {
			if (noblock) {
				ret = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			/* Nothing to read, let's sleep */
			schedule();
			continue;
		}
		ret = macvtap_put_user(q, skb, iv, len);
		kfree_skb(skb);
		break;
	}

	current->state = TASK_RUNNING;
	remove_wait_queue(sk_sleep(&q->sk), &wait);
	return ret;
}

static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
				unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;
	ssize_t len, ret = 0;

	len = iov_length(iv, count);
	if (len < 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK);
	ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? */
out:
	return ret;
}

/*
 * provide compatibility with generic tun/tap interface
 */
static long macvtap_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned int u;
	int __user *sp = argp;
	int s;
	int ret;

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		if ((u & ~IFF_VNET_HDR) != (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = u;

		return ret;

	case TUNGETIFF:
		rcu_read_lock_bh();
		vlan = rcu_dereference_bh(q->vlan);
		if (vlan)
			dev_hold(vlan->dev);
		rcu_read_unlock_bh();

		if (!vlan)
			return -ENOLINK;

		ret = 0;
		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
		    put_user(q->flags, &ifr->ifr_flags))
			ret = -EFAULT;
		dev_put(vlan->dev);
		return ret;

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		if (get_user(u, up))
			return -EFAULT;

		q->sk.sk_sndbuf = u;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		q->vnet_hdr_sz = s;
		return 0;

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN | TUN_F_UFO))
			return -EINVAL;

		/* TODO: only accept frames with the features that
		 * got enabled for forwarded frames */
		if (!(q->flags & IFF_VNET_HDR))
			return -EINVAL;
		return 0;

	default:
		return -EINVAL;
	}
}
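
/*
 * Typical userspace setup (illustrative; assumes a macvtap interface
 * whose ifindex is N, so the node is /dev/tapN):
 *
 *	int fd = open("/dev/tapN", O_RDWR);
 *	struct ifreq ifr = { .ifr_flags = IFF_TAP | IFF_NO_PI
 *					  | IFF_VNET_HDR };
 *	ioctl(fd, TUNSETIFF, &ifr);
 *
 * after which read()/write() on fd carry frames with a prepended
 * struct virtio_net_hdr.
 */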

#ifdef CONFIG_COMPAT
static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations macvtap_fops = {
	.owner		= THIS_MODULE,
	.open		= macvtap_open,
	.release	= macvtap_release,
	.aio_read	= macvtap_aio_read,
	.aio_write	= macvtap_aio_write,
	.poll		= macvtap_poll,
	.llseek		= no_llseek,
	.unlocked_ioctl	= macvtap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= macvtap_compat_ioctl,
#endif
};

static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *m, size_t total_len)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
	return macvtap_get_user(q, m, m->msg_iov, total_len, m->msg_iovlen,
				m->msg_flags & MSG_DONTWAIT);
}

static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *m, size_t total_len,
			   int flags)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
	int ret;
	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
		return -EINVAL;
	ret = macvtap_do_read(q, iocb, m->msg_iov, total_len,
			      flags & MSG_DONTWAIT);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops macvtap_socket_ops = {
	.sendmsg	= macvtap_sendmsg,
	.recvmsg	= macvtap_recvmsg,
};

/* Get an underlying socket object from tun file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use.
 */
struct socket *macvtap_get_socket(struct file *file)
{
	struct macvtap_queue *q;
	if (file->f_op != &macvtap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->sock;
}
EXPORT_SYMBOL_GPL(macvtap_get_socket);
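
/*
 * The expected consumer of this export is vhost-net, which looks up
 * the queue's socket from the tap file descriptor and then drives it
 * with sock_sendmsg()/sock_recvmsg() directly from the kernel,
 * bypassing the file read/write path.
 */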

static int macvtap_device_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct macvlan_dev *vlan;
	struct device *classdev;
	dev_t devt;
	int err;

	if (dev->rtnl_link_ops != &macvtap_link_ops)
		return NOTIFY_DONE;

	vlan = netdev_priv(dev);

	switch (event) {
	case NETDEV_REGISTER:
		/* Create the device node here after the network device has
		 * been registered but before register_netdevice has
		 * finished running.
		 */
		err = macvtap_get_minor(vlan);
		if (err)
			return notifier_from_errno(err);

		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		classdev = device_create(macvtap_class, &dev->dev, devt,
					 dev, "tap%d", dev->ifindex);
		if (IS_ERR(classdev)) {
			macvtap_free_minor(vlan);
			return notifier_from_errno(PTR_ERR(classdev));
		}
		break;
	case NETDEV_UNREGISTER:
		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		device_destroy(macvtap_class, devt);
		macvtap_free_minor(vlan);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block macvtap_notifier_block __read_mostly = {
	.notifier_call	= macvtap_device_event,
};

static int macvtap_init(void)
{
	int err;

	err = alloc_chrdev_region(&macvtap_major, 0,
				  MACVTAP_NUM_DEVS, "macvtap");
	if (err)
		goto out1;

	cdev_init(&macvtap_cdev, &macvtap_fops);
	err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);
	if (err)
		goto out2;

	macvtap_class = class_create(THIS_MODULE, "macvtap");
	if (IS_ERR(macvtap_class)) {
		err = PTR_ERR(macvtap_class);
		goto out3;
	}

	err = register_netdevice_notifier(&macvtap_notifier_block);
	if (err)
		goto out4;

	err = macvlan_link_register(&macvtap_link_ops);
	if (err)
		goto out5;

	return 0;

out5:
	unregister_netdevice_notifier(&macvtap_notifier_block);
out4:
	class_unregister(macvtap_class);
out3:
	cdev_del(&macvtap_cdev);
out2:
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
out1:
	return err;
}
module_init(macvtap_init);

static void macvtap_exit(void)
{
	rtnl_link_unregister(&macvtap_link_ops);
	unregister_netdevice_notifier(&macvtap_notifier_block);
	class_unregister(macvtap_class);
	cdev_del(&macvtap_cdev);
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
}
module_exit(macvtap_exit);

MODULE_ALIAS_RTNL_LINK("macvtap");
MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_LICENSE("GPL");