/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *		Linus Torvalds	 :	Assorted bug cures.
 *		Niibe Yutaka	 :	async I/O support.
 *		Carsten Paeth	 :	PF_UNIX check, address fixes.
 *		Alan Cox	 :	Limit size of allocated blocks.
 *		Alan Cox	 :	Fixed the stupid socketpair bug.
 *		Alan Cox	 :	BSD compatibility fine tuning.
 *		Alan Cox	 :	Fixed a bug in connect when interrupted.
 *		Alan Cox	 :	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					BSD.
 *		Marty Leisner	 :	Fixes to fd passing.
 *		Nick Nevin	 :	recvmsg bugfix.
 *		Alan Cox	 :	Started a proper garbage collector.
 *		Heiko Eißfeldt	 :	Missing verify_area check.
 *		Alan Cox	 :	Started POSIXisms.
 *		Andreas Schwab	 :	Replace inode by dentry for proper
 *					reference counting.
 *		Kirk Petersen	 :	Made this a module.
 *		Christoph Rohland :	Elegant non-blocking accept/connect
 *					algorithm.
 *		Alexey Kuznetsov :	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *		Andrea Arcangeli :	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it avoids a huge number
 *					of hashed socks (for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *		Artur Skawina	 :	Hash function optimizations.
 *		Alexey Kuznetsov :	Full scale SMP. Lots of bugs are introduced 8)
 *		Malcolm Beattie	 :	Set peercred for socketpair.
 *		Michal Ostrowski :	Module initialization cleanup.
 *		Arnaldo C. Melo	 :	Remove MOD_{INC,DEC}_USE_COUNT;
 *					the core infrastructure does that
 *					for all net proto families now (2.5.69+).
 *
 * Known differences from the reference BSD that was tested:
 *
 *	ECONNREFUSED is not returned from one end of a connected socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and gives the blksize as the high
 *		water mark and a fake inode identifier (nor the BSD
 *		first-socket-fstat-twice bug).
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns a 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for
 *		getsockname/getpeername - BSD bug??).
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this against the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed the server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS-based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero-terminated)
 *		  starting with 0, so that this name space does not intersect
 *		  with BSD names.
 */
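/*
 * Illustrative userspace sketch (not part of this file): binding in the
 * abstract namespace described above. The name begins with a NUL byte
 * (sun_path[0] == 0) and is not zero-terminated, so the address length
 * must count the leading NUL plus the name bytes rather than rely on
 * strlen(). "demo" is an arbitrary example name.
 *
 *	struct sockaddr_un sun;
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	memset(&sun, 0, sizeof(sun));
 *	sun.sun_family = AF_UNIX;
 *	memcpy(sun.sun_path + 1, "demo", 4);
 *	bind(fd, (struct sockaddr *)&sun,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 4);
 */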
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
static DEFINE_SPINLOCK(unix_table_lock);
static atomic_t unix_nr_socks = ATOMIC_INIT(0);

#define unix_sockets_unbound	(&unix_socket_table[UNIX_HASH_SIZE])

#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = *UNIXSID(skb);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
#endif /* CONFIG_SECURITY_NETWORK */
/*
 * SMP locking strategy:
 *    the hash table is protected with the spinlock unix_table_lock,
 *    each socket state is protected by a separate spin lock.
 */
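/*
 * Fold a 32-bit checksum down to a hash table index: mix the upper bytes
 * into the low bits, then mask to UNIX_HASH_SIZE buckets.
 */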
static inline unsigned unix_hash_fold(__wsum n)
{
	unsigned hash = (__force unsigned)n;

	hash ^= hash>>16;
	hash ^= hash>>8;
	return hash&(UNIX_HASH_SIZE-1);
}
#define unix_peer(sk) (unix_sk(sk)->peer)

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full(struct sock const *sk)
{
	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}
static struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}

static inline void unix_release_addr(struct unix_address *addr)
{
	if (atomic_dec_and_test(&addr->refcnt))
		kfree(addr);
}
/*
 *	Check a unix socket name:
 *		- it should not be zero length.
 *		- if it doesn't start with a zero byte, it should be NUL
 *		  terminated (FS object).
 *		- if it starts with a zero byte, it is an abstract name.
 */

static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
{
	if (len <= sizeof(short) || len > sizeof(*sunaddr))
		return -EINVAL;
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
	if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off by one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesn't as such exist. However in kernel space
		 * we are guaranteed that it is a valid memory location in our
		 * kernel address buffer.
		 */
		((char *)sunaddr)[len] = 0;
		len = strlen(sunaddr->sun_path)+1+sizeof(short);
		return len;
	}

	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
	return len;
}
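/*
 * Worked example for the function above (illustrative): for a filesystem
 * binding to "/tmp/x", whatever length userspace passed in, the copy is
 * NUL-terminated and len is recomputed as strlen("/tmp/x") + 1 +
 * sizeof(short), i.e. the family field plus the path including its
 * terminating NUL. Abstract names keep the caller's length and are hashed
 * over their raw bytes instead.
 */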
static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	WARN_ON(!sk_unhashed(sk));
	sk_add_node(sk, list);
}

static inline void unix_remove_socket(struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_remove_socket(sk);
	spin_unlock(&unix_table_lock);
}

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_insert_socket(list, sk);
	spin_unlock(&unix_table_lock);
}
static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, int type, unsigned hash)
{
	struct sock *s;
	struct hlist_node *node;

	sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
		struct unix_sock *u = unix_sk(s);

		if (!net_eq(sock_net(s), net))
			continue;

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			goto found;
	}
	s = NULL;
found:
	return s;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, int type,
						   unsigned hash)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	s = __unix_find_socket_byname(net, sunname, len, type, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&unix_table_lock);
	return s;
}
static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock(&unix_table_lock);
	sk_for_each(s, node,
		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
		struct dentry *dentry = unix_sk(s)->dentry;

		if (!net_eq(sock_net(s), net))
			continue;

		if (dentry && dentry->d_inode == i) {
			sock_hold(s);
			goto found;
		}
	}
	s = NULL;
found:
	spin_unlock(&unix_table_lock);
	return s;
}
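/*
 * A socket counts as writable while queued skbs consume no more than a
 * quarter of the send buffer, i.e. wmem_alloc * 4 <= sndbuf.
 */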
static inline int unix_writable(struct sock *sk)
{
	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}

static void unix_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	if (unix_writable(sk)) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync(&wq->wait);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}
/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets that arrived from the previous peer. First, this allows
 * flow control based only on wmem_alloc; second, an sk connected to a peer
 * may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of a bidirectional dgram pipe is
		 * disconnected, we signal an error. Messages are lost.
		 * Do not do this when the peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			other->sk_err = ECONNRESET;
			other->sk_error_report(other);
		}
	}
}
static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_dec(&unix_nr_socks);
	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
	printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk,
		atomic_read(&unix_nr_socks));
#endif
}
static int unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct dentry *dentry;
	struct vfsmount *mnt;
	struct sock *skpair;
	struct sk_buff *skb;
	int state;

	unix_remove_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	dentry	     = u->dentry;
	u->dentry    = NULL;
	mnt	     = u->mnt;
	u->mnt	     = NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;
	unix_state_unlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	skpair = unix_peer(sk);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			/* No more writes */
			skpair->sk_shutdown = SHUTDOWN_MASK;
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		}
		sock_put(skpair); /* It may now die */
		unix_peer(sk) = NULL;
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook */
		kfree_skb(skb);
	}

	if (dentry) {
		dput(dentry);
		mntput(mnt);
	}

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 *	  What does the above comment talk about? --ANK(980817)
	 */

	if (unix_tot_inflight)
		unix_gc();		/* Garbage collect fds */

	return 0;
}
static void init_peercred(struct sock *sk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(task_tgid(current));
	sk->sk_peer_cred = get_current_cred();
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
}
static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct pid *old_pid = NULL;
	const struct cred *old_cred = NULL;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!u->addr)
		goto out;	/* No listens on an unbound socket */
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog	= backlog;
	sk->sk_state		= TCP_LISTEN;
	/* set credentials so connect can copy them */
	init_peercred(sk);
	err = 0;

out_unlock:
	unix_state_unlock(sk);
	put_pid(old_pid);
	if (old_cred)
		put_cred(old_cred);
out:
	return err;
}
static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
				    poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t);
static int unix_stream_recvmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t, int);
static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
				  struct msghdr *, size_t);
static const struct proto_ops unix_stream_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_dgram_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
static struct proto unix_proto = {
	.name =		"UNIX",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct unix_sock),
};
/*
 * AF_UNIX sockets do not interact with hardware, hence they
 * don't trigger interrupts - so it's safe for them to have
 * bh-unsafe locking for their sk_receive_queue.lock. Split off
 * this special lock-class by reinitializing the spinlock key:
 */
static struct lock_class_key af_unix_sk_receive_queue_lock_key;
static struct sock *unix_create1(struct net *net, struct socket *sock)
{
	struct sock *sk = NULL;
	struct unix_sock *u;

	atomic_inc(&unix_nr_socks);
	if (atomic_read(&unix_nr_socks) > 2 * get_max_files())
		goto out;

	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);
	lockdep_set_class(&sk->sk_receive_queue.lock,
				&af_unix_sk_receive_queue_lock_key);

	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u	  = unix_sk(sk);
	u->dentry = NULL;
	u->mnt	  = NULL;
	spin_lock_init(&u->lock);
	atomic_long_set(&u->inflight, 0);
	INIT_LIST_HEAD(&u->link);
	mutex_init(&u->readlock); /* single task reading lock */
	init_waitqueue_head(&u->peer_wait);
	unix_insert_socket(unix_sockets_unbound, sk);
out:
	if (sk == NULL)
		atomic_dec(&unix_nr_socks);
	else {
		local_bh_disable();
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
		local_bh_enable();
	}
	return sk;
}
static int unix_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
		/*
		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
		 *	nothing uses it.
		 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	return unix_create1(net, sock) ? 0 : -ENOMEM;
}
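/*
 * Illustrative userspace sketch of the type dispatch above, including the
 * SOCK_RAW quirk (silently downgraded to SOCK_DGRAM):
 *
 *	int s1 = socket(AF_UNIX, SOCK_STREAM, 0);
 *	int s2 = socket(AF_UNIX, SOCK_DGRAM, 0);
 *	int s3 = socket(AF_UNIX, SOCK_SEQPACKET, 0);
 *	int s4 = socket(AF_UNIX, SOCK_RAW, 0);	(succeeds, behaves as SOCK_DGRAM)
 */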
static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	sock->sk = NULL;

	return unix_release_sock(sk, 0);
}
static int unix_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	static u32 ordernum = 1;
	struct unix_address *addr;
	int err;

	mutex_lock(&u->readlock);

	err = 0;
	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->name->sun_family = AF_UNIX;
	atomic_set(&addr->refcnt, 1);

retry:
	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

	spin_lock(&unix_table_lock);
	ordernum = (ordernum+1)&0xFFFFF;

	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
				      addr->hash)) {
		spin_unlock(&unix_table_lock);
		/* Sanity yield. It is an unusual case, but yet... */
		if (!(ordernum&0xFF))
			yield();
		goto retry;
	}
	addr->hash ^= sk->sk_type;

	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
	spin_unlock(&unix_table_lock);
	err = 0;

out:	mutex_unlock(&u->readlock);
	return err;
}
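/*
 * Example: an autobound socket ends up with an abstract name consisting of
 * a NUL byte followed by five hex digits, e.g. "\0" + "003a1" (value
 * illustrative), exactly the namespace used by explicit abstract binds.
 */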
static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunname, int len,
				    int type, unsigned hash, int *error)
{
	struct sock *u;
	struct path path;
	int err = 0;

	if (sunname->sun_path[0]) {
		struct inode *inode;
		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
		if (err)
			goto fail;
		inode = path.dentry->d_inode;
		err = inode_permission(inode, MAY_WRITE);
		if (err)
			goto put_fail;

		err = -ECONNREFUSED;
		if (!S_ISSOCK(inode->i_mode))
			goto put_fail;
		u = unix_find_socket_byinode(net, inode);
		if (!u)
			goto put_fail;

		if (u->sk_type == type)
			touch_atime(path.mnt, path.dentry);

		path_put(&path);

		err = -EPROTOTYPE;
		if (u->sk_type != type) {
			sock_put(u);
			goto fail;
		}
	} else {
		err = -ECONNREFUSED;
		u = unix_find_socket_byname(net, sunname, len, type, hash);
		if (u) {
			struct dentry *dentry;
			dentry = unix_sk(u)->dentry;
			if (dentry)
				touch_atime(unix_sk(u)->mnt, dentry);
		} else
			goto fail;
	}
	return u;

put_fail:
	path_put(&path);
fail:
	*error = err;
	return NULL;
}
static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct dentry *dentry = NULL;
	struct nameidata nd;
	int err;
	unsigned hash;
	struct unix_address *addr;
	struct hlist_head *list;

	err = -EINVAL;
	if (sunaddr->sun_family != AF_UNIX)
		goto out;

	if (addr_len == sizeof(short)) {
		err = unix_autobind(sock);
		goto out;
	}

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	mutex_lock(&u->readlock);

	err = -EINVAL;
	if (u->addr)
		goto out_up;

	err = -ENOMEM;
	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
	if (!addr)
		goto out_up;

	memcpy(addr->name, sunaddr, addr_len);
	addr->len = addr_len;
	addr->hash = hash ^ sk->sk_type;
	atomic_set(&addr->refcnt, 1);

	if (sunaddr->sun_path[0]) {
		unsigned int mode;
		/*
		 * Get the parent directory, calculate the hash for the last
		 * component.
		 */
		err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
		if (err)
			goto out_mknod_parent;

		dentry = lookup_create(&nd, 0);
		err = PTR_ERR(dentry);
		if (IS_ERR(dentry))
			goto out_mknod_unlock;

		/*
		 * All right, let's create it.
		 */
		mode = S_IFSOCK |
		       (SOCK_INODE(sock)->i_mode & ~current_umask());
		err = mnt_want_write(nd.path.mnt);
		if (err)
			goto out_mknod_dput;
		err = security_path_mknod(&nd.path, dentry, mode, 0);
		if (err)
			goto out_mknod_drop_write;
		err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
out_mknod_drop_write:
		mnt_drop_write(nd.path.mnt);
		if (err)
			goto out_mknod_dput;
		mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
		dput(nd.path.dentry);
		nd.path.dentry = dentry;

		addr->hash = UNIX_HASH_SIZE;
	}

	spin_lock(&unix_table_lock);

	if (!sunaddr->sun_path[0]) {
		err = -EADDRINUSE;
		if (__unix_find_socket_byname(net, sunaddr, addr_len,
					      sk->sk_type, hash)) {
			unix_release_addr(addr);
			goto out_unlock;
		}

		list = &unix_socket_table[addr->hash];
	} else {
		list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
		u->dentry = nd.path.dentry;
		u->mnt	  = nd.path.mnt;
	}

	err = 0;
	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(list, sk);

out_unlock:
	spin_unlock(&unix_table_lock);
out_up:
	mutex_unlock(&u->readlock);
out:
	return err;

out_mknod_dput:
	dput(dentry);
out_mknod_unlock:
	mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
	path_put(&nd.path);
out_mknod_parent:
	if (err == -EEXIST)
		err = -EADDRINUSE;
	unix_release_addr(addr);
	goto out;
}
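/*
 * Lock a pair of peer sockets in a stable order (by address) so that two
 * tasks connecting a pair of dgram sockets in opposite directions cannot
 * deadlock on each other's state locks.
 */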
static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}
	if (sk1 < sk2) {
		unix_state_lock(sk1);
		unix_state_lock_nested(sk2);
	} else {
		unix_state_lock(sk2);
		unix_state_lock_nested(sk1);
	}
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
		return;
	}
	unix_state_unlock(sk1);
	unix_state_unlock(sk2);
}
static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
			      int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
	struct sock *other;
	unsigned hash;
	int err;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_mkname(sunaddr, alen, &hash);
		if (err < 0)
			goto out;
		alen = err;

		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
			goto out;

restart:
		other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
		if (!other)
			goto out;

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);
			sock_put(other);
			goto restart;
		}

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

	} else {
		/*
		 *	1003.1g breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_double_lock(sk, other);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);

		if (other != old_peer)
			unix_dgram_disconnected(sk, old_peer);
		sock_put(old_peer);
	} else {
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);
	}
	return 0;

out_unlock:
	unix_state_double_unlock(sk, other);
	sock_put(other);
out:
	return err;
}
static long unix_wait_for_peer(struct sock *other, long timeo)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}
static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct sock *newsk = NULL;
	struct sock *other = NULL;
	struct sk_buff *skb = NULL;
	unsigned hash;
	int st;
	int err;
	long timeo;

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
	    (err = unix_autobind(sock)) != 0)
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all allocate resources.
	   If we allocate after the state is locked,
	   we will have to recheck everything again in any case.
	 */

	err = -ENOMEM;

	/* create new sock for complete connection */
	newsk = unix_create1(sock_net(sk), NULL);
	if (newsk == NULL)
		goto out;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/* Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
	if (!other)
		goto out;

	/* Latch state of peer */
	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (unix_recvq_full(other)) {
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sock_put(other);
		goto restart;
	}

	/* Latch our state.

	   This is a tricky place. We need to grab our state lock and cannot
	   drop the lock on the peer. It is dangerous because a deadlock is
	   possible. Connect-to-self and simultaneous connect attempts are
	   eliminated by checking the socket state: other is TCP_LISTEN, and
	   if sk were TCP_LISTEN we would have checked it before attempting
	   to grab the lock.

	   Well, and we have to recheck the state after the socket is locked.
	 */
	st = sk->sk_state;

	switch (st) {
	case TCP_CLOSE:
		/* This is ok... continue with connect */
		break;
	case TCP_ESTABLISHED:
		/* Socket is already connected */
		err = -EISCONN;
		goto out_unlock;
	default:
		err = -EINVAL;
		goto out_unlock;
	}

	unix_state_lock_nested(sk);

	if (sk->sk_state != st) {
		unix_state_unlock(sk);
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = security_unix_stream_connect(sock, other->sk_socket, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open! Quickly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	init_peercred(newsk);
	newu = unix_sk(newsk);
	newsk->sk_wq		= &newu->peer_wq;
	otheru = unix_sk(other);

	/* copy address information from listening to new sock */
	if (otheru->addr) {
		atomic_inc(&otheru->addr->refcnt);
		newu->addr = otheru->addr;
	}
	if (otheru->dentry) {
		newu->dentry	= dget(otheru->dentry);
		newu->mnt	= mntget(otheru->mnt);
	}

	/* Set credentials */
	copy_peercred(sk, other);

	sock->state	= SS_CONNECTED;
	sk->sk_state	= TCP_ESTABLISHED;
	sock_hold(newsk);

	smp_mb__after_atomic_inc();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk)	= newsk;

	unix_state_unlock(sk);

	/* take ten and send info to listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_unlock(other);
	other->sk_data_ready(other, 0);
	sock_put(other);
	return 0;

out_unlock:
	if (other)
		unix_state_unlock(other);

out:
	kfree_skb(skb);
	if (newsk)
		unix_release_sock(newsk, 0);
	if (other)
		sock_put(other);
	return err;
}
static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
	struct sock *ska = socka->sk, *skb = sockb->sk;

	/* Join our sockets back to back */
	sock_hold(ska);
	sock_hold(skb);
	unix_peer(ska) = skb;
	unix_peer(skb) = ska;
	init_peercred(ska);
	init_peercred(skb);

	if (ska->sk_type != SOCK_DGRAM) {
		ska->sk_state = TCP_ESTABLISHED;
		skb->sk_state = TCP_ESTABLISHED;
		socka->state  = SS_CONNECTED;
		sockb->state  = SS_CONNECTED;
	}
	return 0;
}
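/*
 * Illustrative userspace sketch of the call served above (error handling
 * omitted):
 *
 *	int sv[2];
 *
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *
 * sv[0] and sv[1] are then connected back to back, and SO_PEERCRED on
 * either end reports the creating process (set by init_peercred above).
 */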
static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *sk = sock->sk;
	struct sock *tsk;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;

	err = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out;

	/* If socket state is TCP_LISTEN it cannot change (for now...),
	 * so that no locks are necessary.
	 */

	skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
	if (!skb) {
		/* This means receive shutdown. */
		if (err == 0)
			err = -EINVAL;
		goto out;
	}

	tsk = skb->sk;
	skb_free_datagram(sk, skb);
	wake_up_interruptible(&unix_sk(sk)->peer_wait);

	/* attach accepted sock to socket */
	unix_state_lock(tsk);
	newsock->state = SS_CONNECTED;
	sock_graft(tsk, newsock);
	unix_state_unlock(tsk);
	return 0;

out:
	return err;
}
static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct unix_sock *u;
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
	int err = 0;

	if (peer) {
		sk = unix_peer_get(sk);

		err = -ENOTCONN;
		if (!sk)
			goto out;
		err = 0;
	} else {
		sock_hold(sk);
	}

	u = unix_sk(sk);
	unix_state_lock(sk);
	if (!u->addr) {
		sunaddr->sun_family = AF_UNIX;
		sunaddr->sun_path[0] = 0;
		*uaddr_len = sizeof(short);
	} else {
		struct unix_address *addr = u->addr;

		*uaddr_len = addr->len;
		memcpy(sunaddr, addr->name, *uaddr_len);
	}
	unix_state_unlock(sk);
	sock_put(sk);
out:
	return err;
}
static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	scm->fp = UNIXCB(skb).fp;
	skb->destructor = sock_wfree;
	UNIXCB(skb).fp = NULL;

	for (i = scm->fp->count-1; i >= 0; i--)
		unix_notinflight(scm->fp->fp[i]);
}

static void unix_destruct_fds(struct sk_buff *skb)
{
	struct scm_cookie scm;
	memset(&scm, 0, sizeof(scm));
	unix_detach_fds(&scm, skb);

	/* Alas, it calls VFS */
	/* So fscking what? fput() had been SMP-safe since the last Summer */
	scm_destroy(&scm);
	sock_wfree(skb);
}

static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	/*
	 * Need to duplicate file references for the sake of garbage
	 * collection. Otherwise a socket in the fps might become a
	 * candidate for GC while the skb is not yet queued.
	 */
	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
	if (!UNIXCB(skb).fp)
		return -ENOMEM;

	for (i = scm->fp->count-1; i >= 0; i--)
		unix_inflight(scm->fp->fp[i]);
	skb->destructor = unix_destruct_fds;
	return 0;
}
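/*
 * Illustrative userspace sketch of the fd passing handled above: sending
 * one descriptor with SCM_RIGHTS. sock_fd and fd_to_pass are hypothetical
 * names; error handling is omitted.
 *
 *	char buf[CMSG_SPACE(sizeof(int))];
 *	struct iovec iov = { .iov_base = "x", .iov_len = 1 };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = buf,
 *			      .msg_controllen = sizeof(buf) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_SOCKET;
 *	cmsg->cmsg_type	 = SCM_RIGHTS;
 *	cmsg->cmsg_len	 = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
 *	sendmsg(sock_fd, &msg, 0);
 */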
/*
 *	Send AF_UNIX data.
 */

static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
			      struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	struct sock *other = NULL;
	int namelen = 0; /* fake GCC */
	int err;
	unsigned hash;
	struct sk_buff *skb;
	long timeo;
	struct scm_cookie tmp_scm;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out;

	if (msg->msg_namelen) {
		err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
		if (err < 0)
			goto out;
		namelen = err;
	} else {
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer_get(sk);
		if (!other)
			goto out;
	}

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
	    && (err = unix_autobind(sock)) != 0)
		goto out;

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;

	skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out;

	memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
	if (siocb->scm->fp) {
		err = unix_attach_fds(siocb->scm, skb);
		if (err)
			goto out_free;
	}
	unix_get_secdata(siocb->scm, skb);

	skb_reset_transport_header(skb);
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err)
		goto out_free;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

restart:
	if (!other) {
		err = -ECONNRESET;
		if (sunaddr == NULL)
			goto out_free;

		other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
					hash, &err);
		if (other == NULL)
			goto out_free;
	}

	unix_state_lock(other);
	err = -EPERM;
	if (!unix_may_send(sk, other))
		goto out_unlock;

	if (sock_flag(other, SOCK_DEAD)) {
		/*
		 *	Check with 1003.1g - what should a
		 *	datagram error do?
		 */
		unix_state_unlock(other);
		sock_put(other);

		err = 0;
		unix_state_lock(sk);
		if (unix_peer(sk) == other) {
			unix_peer(sk) = NULL;
			unix_state_unlock(sk);

			unix_dgram_disconnected(sk, other);
			sock_put(other);
			err = -ECONNREFUSED;
		} else {
			unix_state_unlock(sk);
		}

		other = NULL;
		if (err)
			goto out_free;
		goto restart;
	}

	err = -EPIPE;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (sk->sk_type != SOCK_SEQPACKET) {
		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;
	}

	if (unix_peer(other) != sk && unix_recvq_full(other)) {
		if (!timeo) {
			err = -EAGAIN;
			goto out_unlock;
		}

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out_free;

		goto restart;
	}

	skb_queue_tail(&other->sk_receive_queue, skb);
	unix_state_unlock(other);
	other->sk_data_ready(other, len);
	sock_put(other);
	scm_destroy(siocb->scm);
	return len;

out_unlock:
	unix_state_unlock(other);
out_free:
	kfree_skb(skb);
out:
	if (other)
		sock_put(other);
	scm_destroy(siocb->scm);
	return err;
}
static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct sock *other = NULL;
	struct sockaddr_un *sunaddr = msg->msg_name;
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	struct scm_cookie tmp_scm;
	bool fds_sent = false;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out_err;

	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out_err;
	} else {
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer(sk);
		if (!other)
			goto out_err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto pipe_err;

	while (sent < len) {
		/*
		 *	Optimisation for the fact that under 0.01% of X
		 *	messages typically need breaking up.
		 */

		size = len-sent;

		/* Keep two messages in the pipe so it schedules better */
		if (size > ((sk->sk_sndbuf >> 1) - 64))
			size = (sk->sk_sndbuf >> 1) - 64;

		if (size > SKB_MAX_ALLOC)
			size = SKB_MAX_ALLOC;

		/*
		 *	Grab a buffer
		 */

		skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
					  &err);

		if (skb == NULL)
			goto out_err;

		/*
		 *	If you pass two values to the sock_alloc_send_skb
		 *	it tries to grab the large buffer with GFP_NOFS
		 *	(which can fail easily), and if it fails grab the
		 *	fallback size buffer which is under a page and will
		 *	succeed. [Alan]
		 */
		size = min_t(int, size, skb_tailroom(skb));

		memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
		/* Only send the fds in the first buffer */
		if (siocb->scm->fp && !fds_sent) {
			err = unix_attach_fds(siocb->scm, skb);
			if (err) {
				kfree_skb(skb);
				goto out_err;
			}
			fds_sent = true;
		}

		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}

		unix_state_lock(other);

		if (sock_flag(other, SOCK_DEAD) ||
		    (other->sk_shutdown & RCV_SHUTDOWN))
			goto pipe_err_free;

		skb_queue_tail(&other->sk_receive_queue, skb);
		unix_state_unlock(other);
		other->sk_data_ready(other, size);
		sent += size;
	}

	scm_destroy(siocb->scm);
	siocb->scm = NULL;

	return sent;

pipe_err_free:
	unix_state_unlock(other);
	kfree_skb(skb);
pipe_err:
	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	scm_destroy(siocb->scm);
	siocb->scm = NULL;
	return sent ? : err;
}
static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
				  struct msghdr *msg, size_t len)
{
	int err;
	struct sock *sk = sock->sk;

	err = sock_error(sk);
	if (err)
		return err;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	if (msg->msg_namelen)
		msg->msg_namelen = 0;

	return unix_dgram_sendmsg(kiocb, sock, msg, len);
}

static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	msg->msg_namelen = 0;
	if (u->addr) {
		msg->msg_namelen = u->addr->len;
		memcpy(msg->msg_name, u->addr->name, u->addr->len);
	}
}
static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
			      struct msghdr *msg, size_t size,
			      int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	msg->msg_namelen = 0;

	mutex_lock(&u->readlock);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		unix_state_lock(sk);
		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
		    (sk->sk_shutdown & RCV_SHUTDOWN))
			err = 0;
		unix_state_unlock(sk);
		goto out_unlock;
	}

	wake_up_interruptible_sync(&u->peer_wait);

	if (msg->msg_name)
		unix_copy_addr(msg, skb->sk);

	if (size > skb->len)
		size = skb->len;
	else if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
	if (err)
		goto out_free;

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}
	siocb->scm->creds = *UNIXCREDS(skb);
	unix_set_secdata(siocb->scm, skb);

	if (!(flags & MSG_PEEK)) {
		if (UNIXCB(skb).fp)
			unix_detach_fds(siocb->scm, skb);
	} else {
		/* It is questionable: on PEEK we could:
		   - not return fds - good, but too simple 8)
		   - return fds, and not return them on read (old strategy,
		     apparently wrong)
		   - clone fds (I chose it for now, it is the most universal
		     solution)

		   POSIX 1003.1g does not actually define this clearly
		   at all. POSIX 1003.1g doesn't define a lot of things
		   clearly however!

		*/
		if (UNIXCB(skb).fp)
			siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
	}
	err = size;

	scm_recv(sock, msg, siocb->scm, flags);

out_free:
	skb_free_datagram(sk, skb);
out_unlock:
	mutex_unlock(&u->readlock);
out:
	return err;
}
/*
 *	Sleep until data has arrived. But check for races..
 */

static long unix_stream_data_wait(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	unix_state_lock(sk);

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue) ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		unix_state_unlock(sk);
		timeo = schedule_timeout(timeo);
		unix_state_lock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk_sleep(sk), &wait);
	unix_state_unlock(sk);
	return timeo;
}
static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size,
			       int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	int copied = 0;
	int check_creds = 0;
	int target;
	int err = 0;
	long timeo;

	err = -EINVAL;
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);

	msg->msg_namelen = 0;

	/* Lock the socket to prevent queue disordering
	 * while we sleep copying to the iovec.
	 */

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}

	mutex_lock(&u->readlock);

	do {
		int chunk;
		struct sk_buff *skb;

		unix_state_lock(sk);
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb == NULL) {
			if (copied >= target)
				goto unlock;

			/*
			 *	POSIX 1003.1g mandates this order.
			 */

			err = sock_error(sk);
			if (err)
				goto unlock;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			unix_state_unlock(sk);
			err = -EAGAIN;
			if (!timeo)
				break;
			mutex_unlock(&u->readlock);

			timeo = unix_stream_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			mutex_lock(&u->readlock);
			continue;
 unlock:
			unix_state_unlock(sk);
			break;
		}
		unix_state_unlock(sk);

		if (check_creds) {
			/* Never glue messages from different writers */
			if (memcmp(UNIXCREDS(skb), &siocb->scm->creds,
				   sizeof(siocb->scm->creds)) != 0) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
		} else {
			/* Copy credentials */
			siocb->scm->creds = *UNIXCREDS(skb);
			check_creds = 1;
		}

		/* Copy address just once */
		if (sunaddr) {
			unix_copy_addr(msg, skb->sk);
			sunaddr = NULL;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			skb_pull(skb, chunk);

			if (UNIXCB(skb).fp)
				unix_detach_fds(siocb->scm, skb);

			/* put the skb back if we didn't use it up.. */
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}

			kfree_skb(skb);

			if (siocb->scm->fp)
				break;
		} else {
			/* It is questionable, see the note in
			 * unix_dgram_recvmsg.
			 */
			if (UNIXCB(skb).fp)
				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);

			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

	mutex_unlock(&u->readlock);
	scm_recv(sock, msg, siocb->scm, flags);
out:
	return copied ? : err;
}
static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

	mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);

	if (mode) {
		unix_state_lock(sk);
		sk->sk_shutdown |= mode;
		other = unix_peer(sk);
		if (other)
			sock_hold(other);
		unix_state_unlock(sk);
		sk->sk_state_change(sk);

		if (other &&
			(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {

			int peer_mode = 0;

			if (mode&RCV_SHUTDOWN)
				peer_mode |= SEND_SHUTDOWN;
			if (mode&SEND_SHUTDOWN)
				peer_mode |= RCV_SHUTDOWN;
			unix_state_lock(other);
			other->sk_shutdown |= peer_mode;
			unix_state_unlock(other);
			other->sk_state_change(other);
			if (peer_mode == SHUTDOWN_MASK)
				sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
			else if (peer_mode & RCV_SHUTDOWN)
				sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
		}
		if (other)
			sock_put(other);
	}
	return 0;
}
static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	long amount = 0;
	int err;

	switch (cmd) {
	case SIOCOUTQ:
		amount = sk_wmem_alloc_get(sk);
		err = put_user(amount, (int __user *)arg);
		break;
	case SIOCINQ:
		{
			struct sk_buff *skb;

			if (sk->sk_state == TCP_LISTEN) {
				err = -EINVAL;
				break;
			}

			spin_lock(&sk->sk_receive_queue.lock);
			if (sk->sk_type == SOCK_STREAM ||
			    sk->sk_type == SOCK_SEQPACKET) {
				skb_queue_walk(&sk->sk_receive_queue, skb)
					amount += skb->len;
			} else {
				skb = skb_peek(&sk->sk_receive_queue);
				if (skb)
					amount = skb->len;
			}
			spin_unlock(&sk->sk_receive_queue.lock);
			err = put_user(amount, (int __user *)arg);
			break;
		}

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;

	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}
static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
				    poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int mask, writable;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	writable = unix_writable(sk);
	if (writable) {
		other = unix_peer_get(sk);
		if (other) {
			if (unix_peer(other) != sk) {
				sock_poll_wait(file, &unix_sk(other)->peer_wait,
					  wait);
				if (unix_recvq_full(other))
					writable = 0;
			}

			sock_put(other);
		}
	}

	if (writable)
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
#ifdef CONFIG_PROC_FS
static struct sock *first_unix_socket(int *i)
{
	for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}

static struct sock *next_unix_socket(int *i, struct sock *s)
{
	struct sock *next = sk_next(s);
	/* More in this chain? */
	if (next)
		return next;
	/* Look for next non-empty chain. */
	for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}
struct unix_iter_state {
	struct seq_net_private p;
	int i;
};

static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
{
	struct unix_iter_state *iter = seq->private;
	loff_t off = 0;
	struct sock *s;

	for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
		if (sock_net(s) != seq_file_net(seq))
			continue;
		if (off == pos)
			return s;
		++off;
	}
	return NULL;
}
static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(unix_table_lock)
{
	spin_lock(&unix_table_lock);
	return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct unix_iter_state *iter = seq->private;
	struct sock *sk = v;
	++*pos;

	if (v == SEQ_START_TOKEN)
		sk = first_unix_socket(&iter->i);
	else
		sk = next_unix_socket(&iter->i, sk);
	while (sk && (sock_net(sk) != seq_file_net(seq)))
		sk = next_unix_socket(&iter->i, sk);
	return sk;
}

static void unix_seq_stop(struct seq_file *seq, void *v)
	__releases(unix_table_lock)
{
	spin_unlock(&unix_table_lock);
}
static int unix_seq_show(struct seq_file *seq, void *v)
{

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);
		unix_state_lock(s);

		seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
			s,
			atomic_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {
			int i, len;
			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len - sizeof(short);
			if (!UNIX_ABSTRACT(s))
				len--;
			else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i]);
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}
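/*
 * For reference, a line produced by the format above might look like
 * (all values illustrative, not captured output):
 *
 *	ffff88003f7c2c00: 00000002 00000000 00010000 0001 01 12345 @demo
 *
 * i.e. address, refcount, protocol, flags, type, state, inode and the
 * bound name, with '@' standing in for the leading NUL of abstract names.
 */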
static const struct seq_operations unix_seq_ops = {
	.start  = unix_seq_start,
	.next   = unix_seq_next,
	.stop   = unix_seq_stop,
	.show   = unix_seq_show,
};

static int unix_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &unix_seq_ops,
			    sizeof(struct unix_iter_state));
}

static const struct file_operations unix_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= unix_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif
static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};
static int __net_init unix_net_init(struct net *net)
{
	int error = -ENOMEM;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
		unix_sysctl_unregister(net);
		goto out;
	}
#endif
	error = 0;
out:
	return error;
}

static void __net_exit unix_net_exit(struct net *net)
{
	unix_sysctl_unregister(net);
	proc_net_remove(net, "unix");
}

static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};
static int __init af_unix_init(void)
{
	int rc = -1;
	struct sk_buff *dummy_skb;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));

	rc = proto_register(&unix_proto, 1);
	if (rc != 0) {
		printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
		       __func__);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
out:
	return rc;
}
static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_proto);
	unregister_pernet_subsys(&unix_net_ops);
}
/* Earlier than device_initcall() so that other drivers invoking
   request_module() don't end up in a loop when modprobe tries
   to use a UNIX socket. But later than subsys_initcall() because
   we depend on stuff initialised there. */
fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);