BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
/* Bluetooth L2CAP core and sockets. */
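/* Illustrative userspace view of the socket interface implemented below.
 * This is only a sketch (the PSM and address are made up, error handling
 * is omitted) and is not part of this file:
 *
 *	#include <sys/socket.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/l2cap.h>
 *
 *	int fd = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *	struct sockaddr_l2 addr = { 0 };
 *
 *	addr.l2_family = AF_BLUETOOTH;
 *	addr.l2_psm = htobs(0x1001);
 *	str2ba("00:11:22:33:44:55", &addr.l2_bdaddr);
 *
 *	connect(fd, (struct sockaddr *) &addr, sizeof(addr));
 *	write(fd, "hello", 5);
 */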
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/device.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#ifndef CONFIG_BT_L2CAP_DEBUG
static struct proto_ops l2cap_sock_ops;
static struct bt_sock_list l2cap_sk_list = {
.lock = RW_LOCK_UNLOCKED
static int l2cap_conn_del(struct hci_conn *conn, int err);
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent);
static void l2cap_chan_del(struct sock *sk, int err);
static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
u8 code, u8 ident, u16 dlen, void *data);
/* ---- L2CAP timers ---- */
static void l2cap_sock_timeout(unsigned long arg)
struct sock *sk = (struct sock *) arg;
BT_DBG("sock %p state %d", sk, sk->sk_state);
__l2cap_sock_close(sk, ETIMEDOUT);
static void l2cap_sock_set_timer(struct sock *sk, long timeout)
BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
static void l2cap_sock_clear_timer(struct sock *sk)
BT_DBG("sock %p state %d", sk, sk->sk_state);
sk_stop_timer(sk, &sk->sk_timer);
static void l2cap_sock_init_timer(struct sock *sk)
init_timer(&sk->sk_timer);
sk->sk_timer.function = l2cap_sock_timeout;
sk->sk_timer.data = (unsigned long)sk;
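/* The connect and disconnect timers below are armed with sk->sk_sndtimeo
 * (see l2cap_do_connect() and __l2cap_sock_close()), which defaults to
 * L2CAP_CONN_TIMEOUT but follows the generic SO_SNDTIMEO socket option.
 * Illustrative userspace sketch for shortening that timeout on an L2CAP
 * socket fd:
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));
 */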
/* ---- L2CAP connections ---- */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
struct l2cap_conn *conn;
if ((conn = hcon->l2cap_data))
if (!(conn = kmalloc(sizeof(struct l2cap_conn), GFP_ATOMIC)))
memset(conn, 0, sizeof(struct l2cap_conn));
hcon->l2cap_data = conn;
conn->mtu = hcon->hdev->acl_mtu;
conn->src = &hcon->hdev->bdaddr;
conn->dst = &hcon->dst;
spin_lock_init(&conn->lock);
rwlock_init(&conn->chan_list.lock);
BT_DBG("hcon %p conn %p", hcon, conn);
static int l2cap_conn_del(struct hci_conn *hcon, int err)
struct l2cap_conn *conn;
if (!(conn = hcon->l2cap_data))
BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
kfree_skb(conn->rx_skb);
while ((sk = conn->chan_list.head)) {
l2cap_chan_del(sk, err);
hcon->l2cap_data = NULL;
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
struct l2cap_chan_list *l = &conn->chan_list;
write_lock(&l->lock);
__l2cap_chan_add(conn, sk, parent);
write_unlock(&l->lock);
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
/* Get next available identifier.
 * 1 - 128 are used by the kernel.
 * 129 - 199 are reserved.
 * 200 - 254 are used by utilities like l2ping, etc.
 */
spin_lock(&conn->lock);
if (++conn->tx_ident > 128)
spin_unlock(&conn->lock);
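/* Identifiers above 199 are meant for userspace tools that talk to the
 * signalling channel through a raw socket. As an illustrative sketch
 * (assuming the userspace <bluetooth/l2cap.h> definitions and a raw
 * L2CAP socket already connected to the remote device), an l2ping-style
 * echo request with a 4-byte payload could be built like this:
 *
 *	unsigned char buf[L2CAP_CMD_HDR_SIZE + 4] = { 0 };
 *	l2cap_cmd_hdr *cmd = (l2cap_cmd_hdr *) buf;
 *
 *	cmd->code = L2CAP_ECHO_REQ;
 *	cmd->ident = 200;
 *	cmd->len = htobs(4);
 *	send(raw_fd, buf, sizeof(buf), 0);
 */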
static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
BT_DBG("code 0x%2.2x", code);
return hci_send_acl(conn->hcon, skb, 0);
/* ---- Socket interface ---- */
static struct sock *__l2cap_get_sock_by_addr(u16 psm, bdaddr_t *src)
struct hlist_node *node;
sk_for_each(sk, node, &l2cap_sk_list.head)
if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
/* Find socket with psm and source bdaddr.
* Returns closest match.
static struct sock *__l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
struct sock *sk = NULL, *sk1 = NULL;
struct hlist_node *node;
sk_for_each(sk, node, &l2cap_sk_list.head) {
if (state && sk->sk_state != state)
if (l2cap_pi(sk)->psm == psm) {
if (!bacmp(&bt_sk(sk)->src, src))
if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
return node ? sk : sk1;
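/* Effect of the "closest match" rule above, as an illustrative userspace
 * sketch (adapter address and PSM are hypothetical): a listener bound to
 * a specific local adapter is preferred over one bound to BDADDR_ANY for
 * connections arriving on that adapter.
 *
 *	addr.l2_psm = htobs(0x1001);
 *	str2ba("00:AA:BB:CC:DD:EE", &addr.l2_bdaddr);
 *	bind(fd_hci0, (struct sockaddr *) &addr, sizeof(addr));
 *
 *	bacpy(&addr.l2_bdaddr, BDADDR_ANY);
 *	bind(fd_any, (struct sockaddr *) &addr, sizeof(addr));
 *
 * An incoming connection on the first adapter is dispatched to fd_hci0;
 * connections on any other adapter fall back to fd_any.
 */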
/* Find socket with given address (psm, src).
* Returns locked socket */
static inline struct sock *l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
read_lock(&l2cap_sk_list.lock);
s = __l2cap_get_sock_by_psm(state, psm, src);
if (s) bh_lock_sock(s);
read_unlock(&l2cap_sk_list.lock);
static void l2cap_sock_destruct(struct sock *sk)
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
static void l2cap_sock_cleanup_listen(struct sock *parent)
BT_DBG("parent %p", parent);
/* Close not yet accepted channels */
while ((sk = bt_accept_dequeue(parent, NULL)))
l2cap_sock_close(sk);
parent->sk_state = BT_CLOSED;
sock_set_flag(parent, SOCK_ZAPPED);
/* Kill socket (only if zapped and orphan)
* Must be called on unlocked socket.
static void l2cap_sock_kill(struct sock *sk)
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
BT_DBG("sk %p state %d", sk, sk->sk_state);
/* Kill poor orphan */
bt_sock_unlink(&l2cap_sk_list, sk);
sock_set_flag(sk, SOCK_DEAD);
static void __l2cap_sock_close(struct sock *sk, int reason)
BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
switch (sk->sk_state) {
l2cap_sock_cleanup_listen(sk);
if (sk->sk_type == SOCK_SEQPACKET) {
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct l2cap_disconn_req req;
sk->sk_state = BT_DISCONN;
l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
l2cap_send_cmd(conn, l2cap_get_ident(conn),
L2CAP_DISCONN_REQ, sizeof(req), &req);
l2cap_chan_del(sk, reason);
l2cap_chan_del(sk, reason);
sock_set_flag(sk, SOCK_ZAPPED);
/* Must be called on unlocked socket. */
static void l2cap_sock_close(struct sock *sk)
l2cap_sock_clear_timer(sk);
__l2cap_sock_close(sk, ECONNRESET);
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
struct l2cap_pinfo *pi = l2cap_pi(sk);
sk->sk_type = parent->sk_type;
pi->imtu = l2cap_pi(parent)->imtu;
pi->omtu = l2cap_pi(parent)->omtu;
pi->link_mode = l2cap_pi(parent)->link_mode;
pi->imtu = L2CAP_DEFAULT_MTU;
/* Default config options */
pi->conf_mtu = L2CAP_DEFAULT_MTU;
pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
static struct proto l2cap_proto = {
.owner = THIS_MODULE,
.obj_size = sizeof(struct l2cap_pinfo)
static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, gfp_t prio)
sk = sk_alloc(PF_BLUETOOTH, prio, &l2cap_proto, 1);
sock_init_data(sock, sk);
INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
sk->sk_destruct = l2cap_sock_destruct;
sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
l2cap_sock_init_timer(sk);
bt_sock_link(&l2cap_sk_list, sk);
static int l2cap_sock_create(struct socket *sock, int protocol)
BT_DBG("sock %p", sock);
sock->state = SS_UNCONNECTED;
if (sock->type != SOCK_SEQPACKET &&
sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
sock->ops = &l2cap_sock_ops;
sk = l2cap_sock_alloc(sock, protocol, GFP_KERNEL);
l2cap_sock_init(sk, NULL);
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
struct sock *sk = sock->sk;
BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
if (!addr || addr->sa_family != AF_BLUETOOTH)
if (sk->sk_state != BT_OPEN) {
write_lock_bh(&l2cap_sk_list.lock);
if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
/* Save source address */
bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
l2cap_pi(sk)->psm = la->l2_psm;
l2cap_pi(sk)->sport = la->l2_psm;
sk->sk_state = BT_BOUND;
write_unlock_bh(&l2cap_sk_list.lock);
static int l2cap_do_connect(struct sock *sk)
bdaddr_t *src = &bt_sk(sk)->src;
bdaddr_t *dst = &bt_sk(sk)->dst;
struct l2cap_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
if (!(hdev = hci_get_route(dst, src)))
return -EHOSTUNREACH;
hci_dev_lock_bh(hdev);
hcon = hci_connect(hdev, ACL_LINK, dst);
conn = l2cap_conn_add(hcon, 0);
/* Update source addr of the socket */
bacpy(src, conn->src);
l2cap_chan_add(conn, sk, NULL);
sk->sk_state = BT_CONNECT;
l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
if (hcon->state == BT_CONNECTED) {
if (sk->sk_type == SOCK_SEQPACKET) {
struct l2cap_conn_req req;
l2cap_pi(sk)->ident = l2cap_get_ident(conn);
req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
req.psm = l2cap_pi(sk)->psm;
l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
L2CAP_CONN_REQ, sizeof(req), &req);
l2cap_sock_clear_timer(sk);
sk->sk_state = BT_CONNECTED;
hci_dev_unlock_bh(hdev);
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
struct sock *sk = sock->sk;
if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
switch (sk->sk_state) {
/* Already connecting */
/* Already connected */
/* Set destination address and psm */
bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
l2cap_pi(sk)->psm = la->l2_psm;
if ((err = l2cap_do_connect(sk)))
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
static int l2cap_sock_listen(struct socket *sock, int backlog)
struct sock *sk = sock->sk;
BT_DBG("sk %p backlog %d", sk, backlog);
if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
if (!l2cap_pi(sk)->psm) {
bdaddr_t *src = &bt_sk(sk)->src;
write_lock_bh(&l2cap_sk_list.lock);
for (psm = 0x1001; psm < 0x1100; psm += 2)
if (!__l2cap_get_sock_by_addr(psm, src)) {
l2cap_pi(sk)->psm = htobs(psm);
l2cap_pi(sk)->sport = htobs(psm);
write_unlock_bh(&l2cap_sk_list.lock);
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
sk->sk_state = BT_LISTEN;
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *nsk;
if (sk->sk_state != BT_LISTEN) {
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk->sk_sleep, &wait);
while (!(nsk = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
timeo = schedule_timeout(timeo);
if (sk->sk_state != BT_LISTEN) {
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
set_current_state(TASK_RUNNING);
remove_wait_queue(sk->sk_sleep, &wait);
newsock->state = SS_CONNECTED;
BT_DBG("new socket %p", nsk);
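/* Illustrative userspace counterpart of the listen/accept path above
 * (sketch only; the PSM is hypothetical and error handling is omitted):
 *
 *	int srv = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *	struct sockaddr_l2 addr = { 0 };
 *
 *	addr.l2_family = AF_BLUETOOTH;
 *	addr.l2_psm = htobs(0x1001);
 *	bacpy(&addr.l2_bdaddr, BDADDR_ANY);
 *
 *	bind(srv, (struct sockaddr *) &addr, sizeof(addr));
 *	listen(srv, 5);
 *	int cli = accept(srv, NULL, NULL);
 */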
static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
addr->sa_family = AF_BLUETOOTH;
*len = sizeof(struct sockaddr_l2);
bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
la->l2_psm = l2cap_pi(sk)->psm;
static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sk_buff *skb, **frag;
int err, hlen, count, sent = 0;
struct l2cap_hdr *lh;
BT_DBG("sk %p len %d", sk, len);
/* First fragment (with L2CAP header) */
if (sk->sk_type == SOCK_DGRAM)
hlen = L2CAP_HDR_SIZE + 2;
hlen = L2CAP_HDR_SIZE;
count = min_t(unsigned int, (conn->mtu - hlen), len);
skb = bt_skb_send_alloc(sk, hlen + count,
msg->msg_flags & MSG_DONTWAIT, &err);
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = __cpu_to_le16(l2cap_pi(sk)->dcid);
lh->len = __cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
if (sk->sk_type == SOCK_DGRAM)
put_unaligned(l2cap_pi(sk)->psm, (u16 *) skb_put(skb, 2));
if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
count = min_t(unsigned int, conn->mtu, len);
*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
frag = &(*frag)->next;
if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
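/* Resulting wire format, as a worked example (CID and PSM made up):
 * sending the 5-byte payload "hello" on a connection-oriented channel
 * with dcid 0x0040 produces
 *
 *	05 00 40 00 68 65 6c 6c 6f
 *
 * (len = 5, cid = 0x0040, payload), while the same payload on a
 * SOCK_DGRAM socket to PSM 0x1001 goes out on the connectionless
 * CID 0x0002 with the PSM prepended and counted in the length:
 *
 *	07 00 02 00 01 10 68 65 6c 6c 6f
 */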
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
return sock_error(sk);
if (msg->msg_flags & MSG_OOB)
/* Check outgoing MTU */
if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
if (sk->sk_state == BT_CONNECTED)
err = l2cap_do_send(sk, msg, len);
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
struct sock *sk = sock->sk;
struct l2cap_options opts;
len = min_t(unsigned int, sizeof(opts), optlen);
if (copy_from_user((char *) &opts, optval, len)) {
l2cap_pi(sk)->imtu = opts.imtu;
l2cap_pi(sk)->omtu = opts.omtu;
if (get_user(opt, (u32 __user *) optval)) {
l2cap_pi(sk)->link_mode = opt;
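/* Userspace view of these options, as an illustrative sketch (values are
 * made up; L2CAP_OPTIONS must be set before connect() to influence the
 * configuration exchange):
 *
 *	struct l2cap_options opts;
 *	socklen_t optlen = sizeof(opts);
 *	uint32_t lm = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
 *
 *	getsockopt(fd, SOL_L2CAP, L2CAP_OPTIONS, &opts, &optlen);
 *	opts.imtu = 1024;
 *	setsockopt(fd, SOL_L2CAP, L2CAP_OPTIONS, &opts, sizeof(opts));
 *	setsockopt(fd, SOL_L2CAP, L2CAP_LM, &lm, sizeof(lm));
 */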
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
struct sock *sk = sock->sk;
struct l2cap_options opts;
struct l2cap_conninfo cinfo;
if (get_user(len, optlen))
opts.imtu = l2cap_pi(sk)->imtu;
opts.omtu = l2cap_pi(sk)->omtu;
opts.flush_to = l2cap_pi(sk)->flush_to;
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *) &opts, len))
if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
if (sk->sk_state != BT_CONNECTED) {
cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
static int l2cap_sock_shutdown(struct socket *sock, int how)
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk->sk_shutdown) {
sk->sk_shutdown = SHUTDOWN_MASK;
l2cap_sock_clear_timer(sk);
__l2cap_sock_close(sk, 0);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
static int l2cap_sock_release(struct socket *sock)
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
err = l2cap_sock_shutdown(sock, 2);
/* ---- L2CAP channels ---- */
static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
for (s = l->head; s; s = l2cap_pi(s)->next_c) {
if (l2cap_pi(s)->dcid == cid)
static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
for (s = l->head; s; s = l2cap_pi(s)->next_c) {
if (l2cap_pi(s)->scid == cid)
/* Find channel with given SCID.
* Returns locked socket */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
s = __l2cap_get_chan_by_scid(l, cid);
if (s) bh_lock_sock(s);
read_unlock(&l->lock);
static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
for (s = l->head; s; s = l2cap_pi(s)->next_c) {
if (l2cap_pi(s)->ident == ident)
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
s = __l2cap_get_chan_by_ident(l, ident);
if (s) bh_lock_sock(s);
read_unlock(&l->lock);
static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
for (; cid < 0xffff; cid++) {
if (!__l2cap_get_chan_by_scid(l, cid))
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
l2cap_pi(l->head)->prev_c = sk;
l2cap_pi(sk)->next_c = l->head;
l2cap_pi(sk)->prev_c = NULL;
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
write_lock(&l->lock);
l2cap_pi(next)->prev_c = prev;
l2cap_pi(prev)->next_c = next;
write_unlock(&l->lock);
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
struct l2cap_chan_list *l = &conn->chan_list;
BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
l2cap_pi(sk)->conn = conn;
if (sk->sk_type == SOCK_SEQPACKET) {
/* Alloc CID for connection-oriented socket */
l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
} else if (sk->sk_type == SOCK_DGRAM) {
/* Connectionless socket */
l2cap_pi(sk)->scid = 0x0002;
l2cap_pi(sk)->dcid = 0x0002;
l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
/* Raw socket can send/recv signalling messages only */
l2cap_pi(sk)->scid = 0x0001;
l2cap_pi(sk)->dcid = 0x0001;
l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
__l2cap_chan_link(l, sk);
bt_accept_enqueue(parent, sk);
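/* Summary of the CID assignment above: 0x0001 is the signalling channel
 * (raw sockets), 0x0002 the connectionless channel (SOCK_DGRAM), and
 * connection-oriented channels get a dynamically allocated CID from
 * l2cap_alloc_cid() (0x0040 and up per the L2CAP specification).
 * Illustrative userspace use of the connectionless channel (PSM made up,
 * no configuration round trip is performed for SOCK_DGRAM):
 *
 *	int fd = socket(PF_BLUETOOTH, SOCK_DGRAM, BTPROTO_L2CAP);
 *
 *	addr.l2_psm = htobs(0x1001);
 *	connect(fd, (struct sockaddr *) &addr, sizeof(addr));
 *	send(fd, "hi", 2, 0);
 */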
* Must be called on the locked socket. */
static void l2cap_chan_del(struct sock *sk, int err)
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sock *parent = bt_sk(sk)->parent;
l2cap_sock_clear_timer(sk);
BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
/* Unlink from channel list */
l2cap_chan_unlink(&conn->chan_list, sk);
l2cap_pi(sk)->conn = NULL;
hci_conn_put(conn->hcon);
sk->sk_state = BT_CLOSED;
sock_set_flag(sk, SOCK_ZAPPED);
bt_accept_unlink(sk);
parent->sk_data_ready(parent, 0);
sk->sk_state_change(sk);
static void l2cap_conn_ready(struct l2cap_conn *conn)
struct l2cap_chan_list *l = &conn->chan_list;
BT_DBG("conn %p", conn);
read_lock(&l->lock);
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
if (sk->sk_type != SOCK_SEQPACKET) {
l2cap_sock_clear_timer(sk);
sk->sk_state = BT_CONNECTED;
sk->sk_state_change(sk);
} else if (sk->sk_state == BT_CONNECT) {
struct l2cap_conn_req req;
l2cap_pi(sk)->ident = l2cap_get_ident(conn);
req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
req.psm = l2cap_pi(sk)->psm;
l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_REQ, sizeof(req), &req);
read_unlock(&l->lock);
/* Notify sockets that we can no longer guarantee reliability */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
struct l2cap_chan_list *l = &conn->chan_list;
BT_DBG("conn %p", conn);
read_lock(&l->lock);
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
read_unlock(&l->lock);
static void l2cap_chan_ready(struct sock *sk)
struct sock *parent = bt_sk(sk)->parent;
BT_DBG("sk %p, parent %p", sk, parent);
l2cap_pi(sk)->conf_state = 0;
l2cap_sock_clear_timer(sk);
/* Outgoing channel.
* Wake up socket sleeping on connect.
sk->sk_state = BT_CONNECTED;
sk->sk_state_change(sk);
/* Incoming channel.
* Wake up socket sleeping on accept.
parent->sk_data_ready(parent, 0);
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
struct l2cap_chan_list *l = &conn->chan_list;
struct sk_buff *nskb;
BT_DBG("conn %p", conn);
read_lock(&l->lock);
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
if (sk->sk_type != SOCK_RAW)
/* Don't send frame to the socket it came from */
if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
if (sock_queue_rcv_skb(sk, nskb))
read_unlock(&l->lock);
/* ---- L2CAP signalling commands ---- */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
u8 code, u8 ident, u16 dlen, void *data)
struct sk_buff *skb, **frag;
struct l2cap_cmd_hdr *cmd;
struct l2cap_hdr *lh;
BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
count = min_t(unsigned int, conn->mtu, len);
skb = bt_skb_alloc(count, GFP_ATOMIC);
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->len = __cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
lh->cid = __cpu_to_le16(0x0001);
cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
cmd->len = __cpu_to_le16(dlen);
count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
memcpy(skb_put(skb, count), data, count);
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
count = min_t(unsigned int, conn->mtu, len);
*frag = bt_skb_alloc(count, GFP_ATOMIC);
memcpy(skb_put(*frag, count), data, count);
frag = &(*frag)->next;
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
struct l2cap_conf_opt *opt = *ptr;
len = L2CAP_CONF_OPT_SIZE + opt->len;
*val = *((u8 *) opt->val);
*val = __le16_to_cpu(*((u16 *)opt->val));
*val = __le32_to_cpu(*((u32 *)opt->val));
*val = (unsigned long) opt->val;
BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
static inline void l2cap_parse_conf_req(struct sock *sk, void *data, int len)
int type, hint, olen;
BT_DBG("sk %p len %d", sk, len);
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&ptr, &type, &olen, &val);
case L2CAP_CONF_MTU:
l2cap_pi(sk)->conf_mtu = val;
case L2CAP_CONF_FLUSH_TO:
l2cap_pi(sk)->flush_to = val;
case L2CAP_CONF_QOS:
/* FIXME: Reject unknown option */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
struct l2cap_conf_opt *opt = *ptr;
BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
*((u8 *) opt->val) = val;
*((u16 *) opt->val) = __cpu_to_le16(val);
*((u32 *) opt->val) = __cpu_to_le32(val);
memcpy(opt->val, (void *) val, len);
*ptr += L2CAP_CONF_OPT_SIZE + len;
static int l2cap_build_conf_req(struct sock *sk, void *data)
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_req *req = data;
void *ptr = req->data;
BT_DBG("sk %p", sk);
if (pi->imtu != L2CAP_DEFAULT_MTU)
l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
/* FIXME: Need actual value of the flush timeout */
//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
// l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
req->dcid = __cpu_to_le16(pi->dcid);
req->flags = __cpu_to_le16(0);
static inline int l2cap_conf_output(struct sock *sk, void **ptr)
struct l2cap_pinfo *pi = l2cap_pi(sk);
/* Configure output options and let the other side know
* which ones we don't like. */
if (pi->conf_mtu < pi->omtu) {
l2cap_add_conf_opt(ptr, L2CAP_CONF_MTU, 2, pi->omtu);
result = L2CAP_CONF_UNACCEPT;
pi->omtu = pi->conf_mtu;
BT_DBG("sk %p result %d", sk, result);
static int l2cap_build_conf_rsp(struct sock *sk, void *data, int *result)
struct l2cap_conf_rsp *rsp = data;
void *ptr = rsp->data;
BT_DBG("sk %p complete %d", sk, result ? 1 : 0);
*result = l2cap_conf_output(sk, &ptr);
rsp->scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
rsp->result = __cpu_to_le16(result ? *result : 0);
rsp->flags = __cpu_to_le16(flags);
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_chan_list *list = &conn->chan_list;
struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
struct l2cap_conn_rsp rsp;
struct sock *sk, *parent;
int result = 0, status = 0;
u16 dcid = 0, scid = __le16_to_cpu(req->scid);
BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
/* Check if we have socket listening on psm */
parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
result = L2CAP_CR_BAD_PSM;
result = L2CAP_CR_NO_MEM;
/* Check for backlog size */
if (sk_acceptq_is_full(parent)) {
BT_DBG("backlog full %d", parent->sk_ack_backlog);
sk = l2cap_sock_alloc(NULL, BTPROTO_L2CAP, GFP_ATOMIC);
write_lock(&list->lock);
/* Check if we already have channel with that dcid */
if (__l2cap_get_chan_by_dcid(list, scid)) {
write_unlock(&list->lock);
sock_set_flag(sk, SOCK_ZAPPED);
l2cap_sock_kill(sk);
hci_conn_hold(conn->hcon);
l2cap_sock_init(sk, parent);
bacpy(&bt_sk(sk)->src, conn->src);
bacpy(&bt_sk(sk)->dst, conn->dst);
l2cap_pi(sk)->psm = psm;
l2cap_pi(sk)->dcid = scid;
__l2cap_chan_add(conn, sk, parent);
dcid = l2cap_pi(sk)->scid;
l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Service level security */
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHEN_PEND;
sk->sk_state = BT_CONNECT2;
l2cap_pi(sk)->ident = cmd->ident;
if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
(l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
if (!hci_conn_encrypt(conn->hcon))
} else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
if (!hci_conn_auth(conn->hcon))
sk->sk_state = BT_CONFIG;
result = status = 0;
write_unlock(&list->lock);
bh_unlock_sock(parent);
rsp.scid = __cpu_to_le16(scid);
rsp.dcid = __cpu_to_le16(dcid);
rsp.result = __cpu_to_le16(result);
rsp.status = __cpu_to_le16(status);
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
u16 scid, dcid, result, status;
scid = __le16_to_cpu(rsp->scid);
dcid = __le16_to_cpu(rsp->dcid);
result = __le16_to_cpu(rsp->result);
status = __le16_to_cpu(rsp->status);
BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
case L2CAP_CR_SUCCESS:
sk->sk_state = BT_CONFIG;
l2cap_pi(sk)->ident = 0;
l2cap_pi(sk)->dcid = dcid;
l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
l2cap_build_conf_req(sk, req), req);
l2cap_chan_del(sk, ECONNREFUSED);
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
dcid = __le16_to_cpu(req->dcid);
flags = __le16_to_cpu(req->flags);
BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
l2cap_parse_conf_req(sk, req->data, cmd->len - sizeof(*req));
if (flags & 0x0001) {
/* Incomplete config. Send empty response. */
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
l2cap_build_conf_rsp(sk, rsp, NULL), rsp);
/* Complete config. */
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
l2cap_build_conf_rsp(sk, rsp, &result), rsp);
/* Output config done */
l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
sk->sk_state = BT_CONNECTED;
l2cap_chan_ready(sk);
} else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
l2cap_build_conf_req(sk, req), req);
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
u16 scid, flags, result;
scid = __le16_to_cpu(rsp->scid);
flags = __le16_to_cpu(rsp->flags);
result = __le16_to_cpu(rsp->result);
BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
case L2CAP_CONF_SUCCESS:
case L2CAP_CONF_UNACCEPT:
if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
/* It does not make sense to adjust any of the L2CAP
 * parameters currently defined in the spec. We simply
 * resend the config request we sent earlier. It is
 * stupid, but it helps qualification testing, which
 * expects at least some response from us. */
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
l2cap_build_conf_req(sk, req), req);
sk->sk_state = BT_DISCONN;
sk->sk_err = ECONNRESET;
l2cap_sock_set_timer(sk, HZ * 5);
struct l2cap_disconn_req req;
req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
l2cap_send_cmd(conn, l2cap_get_ident(conn),
L2CAP_DISCONN_REQ, sizeof(req), &req);
/* Input config done */
l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
sk->sk_state = BT_CONNECTED;
l2cap_chan_ready(sk);
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
struct l2cap_disconn_rsp rsp;
scid = __le16_to_cpu(req->scid);
dcid = __le16_to_cpu(req->dcid);
BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
sk->sk_shutdown = SHUTDOWN_MASK;
l2cap_chan_del(sk, ECONNRESET);
l2cap_sock_kill(sk);
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
scid = __le16_to_cpu(rsp->scid);
dcid = __le16_to_cpu(rsp->dcid);
BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
l2cap_chan_del(sk, 0);
l2cap_sock_kill(sk);
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_info_req *req = (struct l2cap_info_req *) data;
struct l2cap_info_rsp rsp;
type = __le16_to_cpu(req->type);
BT_DBG("type 0x%4.4x", type);
rsp.type = __cpu_to_le16(type);
rsp.result = __cpu_to_le16(L2CAP_IR_NOTSUPP);
l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp);
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
type = __le16_to_cpu(rsp->type);
result = __le16_to_cpu(rsp->result);
BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
u8 *data = skb->data;
struct l2cap_cmd_hdr cmd;
l2cap_raw_recv(conn, skb);
while (len >= L2CAP_CMD_HDR_SIZE) {
memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
data += L2CAP_CMD_HDR_SIZE;
len -= L2CAP_CMD_HDR_SIZE;
cmd.len = __le16_to_cpu(cmd.len);
BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd.len, cmd.ident);
if (cmd.len > len || !cmd.ident) {
BT_DBG("corrupted command");
case L2CAP_COMMAND_REJ:
/* FIXME: We should process this */
case L2CAP_CONN_REQ:
err = l2cap_connect_req(conn, &cmd, data);
case L2CAP_CONN_RSP:
err = l2cap_connect_rsp(conn, &cmd, data);
case L2CAP_CONF_REQ:
err = l2cap_config_req(conn, &cmd, data);
case L2CAP_CONF_RSP:
err = l2cap_config_rsp(conn, &cmd, data);
case L2CAP_DISCONN_REQ:
err = l2cap_disconnect_req(conn, &cmd, data);
case L2CAP_DISCONN_RSP:
err = l2cap_disconnect_rsp(conn, &cmd, data);
case L2CAP_ECHO_REQ:
l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd.len, data);
case L2CAP_ECHO_RSP:
case L2CAP_INFO_REQ:
err = l2cap_information_req(conn, &cmd, data);
case L2CAP_INFO_RSP:
err = l2cap_information_rsp(conn, &cmd, data);
BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
struct l2cap_cmd_rej rej;
BT_DBG("error %d", err);
/* FIXME: Map err to a valid reason */
rej.reason = __cpu_to_le16(0);
l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
BT_DBG("unknown cid 0x%4.4x", cid);
BT_DBG("sk %p, len %d", sk, skb->len);
if (sk->sk_state != BT_CONNECTED)
if (l2cap_pi(sk)->imtu < skb->len)
/* If the socket receive buffer overflows we drop data here,
 * which is *bad* because L2CAP is supposed to be reliable.
 * But we don't have any other choice: L2CAP doesn't
 * provide a flow control mechanism. */
if (!sock_queue_rcv_skb(sk, skb))
if (sk) bh_unlock_sock(sk);
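/* The drop above can be made less likely from userspace by enlarging the
 * receive buffer before heavy traffic is expected (illustrative sketch,
 * the value is arbitrary):
 *
 *	int rcvbuf = 64 * 1024;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, sizeof(rcvbuf));
 */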
static inline int l2cap_conless_channel(struct l2cap_conn *conn, u16 psm, struct sk_buff *skb)
sk = l2cap_get_sock_by_psm(0, psm, conn->src);
BT_DBG("sk %p, len %d", sk, skb->len);
if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
if (l2cap_pi(sk)->imtu < skb->len)
if (!sock_queue_rcv_skb(sk, skb))
if (sk) bh_unlock_sock(sk);
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
struct l2cap_hdr *lh = (void *) skb->data;
skb_pull(skb, L2CAP_HDR_SIZE);
cid = __le16_to_cpu(lh->cid);
len = __le16_to_cpu(lh->len);
BT_DBG("len %d, cid 0x%4.4x", len, cid);
l2cap_sig_channel(conn, skb);
psm = get_unaligned((u16 *) skb->data);
l2cap_conless_channel(conn, psm, skb);
l2cap_data_channel(conn, cid, skb);
/* ---- L2CAP interface with lower layer (HCI) ---- */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
int exact = 0, lm1 = 0, lm2 = 0;
register struct sock *sk;
struct hlist_node *node;
if (type != ACL_LINK)
BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
/* Find listening sockets and check their link_mode */
read_lock(&l2cap_sk_list.lock);
sk_for_each(sk, node, &l2cap_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
read_unlock(&l2cap_sk_list.lock);
return exact ? lm1 : lm2;
static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
if (hcon->type != ACL_LINK)
struct l2cap_conn *conn;
conn = l2cap_conn_add(hcon, status);
l2cap_conn_ready(conn);
l2cap_conn_del(hcon, bt_err(status));
static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
BT_DBG("hcon %p reason %d", hcon, reason);
if (hcon->type != ACL_LINK)
l2cap_conn_del(hcon, bt_err(reason));
static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
struct l2cap_chan_list *l;
struct l2cap_conn *conn;
struct l2cap_conn_rsp rsp;
if (!(conn = hcon->l2cap_data))
l = &conn->chan_list;
BT_DBG("conn %p", conn);
read_lock(&l->lock);
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
if (sk->sk_state != BT_CONNECT2 ||
(l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
(l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
sk->sk_state = BT_CONFIG;
sk->sk_state = BT_DISCONN;
l2cap_sock_set_timer(sk, HZ/10);
result = L2CAP_CR_SEC_BLOCK;
rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
rsp.result = __cpu_to_le16(result);
rsp.status = __cpu_to_le16(0);
l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
L2CAP_CONN_RSP, sizeof(rsp), &rsp);
read_unlock(&l->lock);
static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
struct l2cap_chan_list *l;
struct l2cap_conn *conn;
struct l2cap_conn_rsp rsp;
if (!(conn = hcon->l2cap_data))
l = &conn->chan_list;
BT_DBG("conn %p", conn);
read_lock(&l->lock);
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
if (sk->sk_state != BT_CONNECT2) {
sk->sk_state = BT_CONFIG;
sk->sk_state = BT_DISCONN;
l2cap_sock_set_timer(sk, HZ/10);
result = L2CAP_CR_SEC_BLOCK;
rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
rsp.result = __cpu_to_le16(result);
rsp.status = __cpu_to_le16(0);
l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
L2CAP_CONN_RSP, sizeof(rsp), &rsp);
if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
hci_conn_change_link_key(hcon);
read_unlock(&l->lock);
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
struct l2cap_conn *conn = hcon->l2cap_data;
if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
if (flags & ACL_START) {
struct l2cap_hdr *hdr;
BT_ERR("Unexpected start frame (len %d)", skb->len);
kfree_skb(conn->rx_skb);
conn->rx_skb = NULL;
l2cap_conn_unreliable(conn, ECOMM);
BT_ERR("Frame is too short (len %d)", skb->len);
l2cap_conn_unreliable(conn, ECOMM);
hdr = (struct l2cap_hdr *) skb->data;
len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
if (len == skb->len) {
/* Complete frame received */
l2cap_recv_frame(conn, skb);
BT_DBG("Start: total len %d, frag len %d", len, skb->len);
if (skb->len > len) {
BT_ERR("Frame is too long (len %d, expected len %d)",
l2cap_conn_unreliable(conn, ECOMM);
/* Allocate skb for the complete frame (with header) */
if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
conn->rx_len = len - skb->len;
BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
if (!conn->rx_len) {
BT_ERR("Unexpected continuation frame (len %d)", skb->len);
l2cap_conn_unreliable(conn, ECOMM);
if (skb->len > conn->rx_len) {
BT_ERR("Fragment is too long (len %d, expected %d)",
skb->len, conn->rx_len);
kfree_skb(conn->rx_skb);
conn->rx_skb = NULL;
l2cap_conn_unreliable(conn, ECOMM);
memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
conn->rx_len -= skb->len;
if (!conn->rx_len) {
/* Complete frame received */
l2cap_recv_frame(conn, conn->rx_skb);
conn->rx_skb = NULL;
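/* Worked example of the reassembly above (numbers are made up): with an
 * ACL MTU of 192 bytes, a 300-byte L2CAP frame (296-byte payload plus the
 * 4-byte header) arrives as an ACL_START fragment of 192 bytes followed
 * by one continuation fragment of 108 bytes. The start fragment carries
 * the l2cap_hdr, so rx_len is set to 300 - 192 = 108 and reaches zero
 * when the continuation fragment has been copied in, at which point the
 * complete frame is handed to l2cap_recv_frame(). */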
static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
struct hlist_node *node;
read_lock_bh(&l2cap_sk_list.lock);
sk_for_each(sk, node, &l2cap_sk_list.head) {
struct l2cap_pinfo *pi = l2cap_pi(sk);
str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
sk->sk_state, pi->psm, pi->scid, pi->dcid, pi->imtu,
pi->omtu, pi->link_mode);
read_unlock_bh(&l2cap_sk_list.lock);
static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
static struct proto_ops l2cap_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.release = l2cap_sock_release,
.bind = l2cap_sock_bind,
.connect = l2cap_sock_connect,
.listen = l2cap_sock_listen,
.accept = l2cap_sock_accept,
.getname = l2cap_sock_getname,
.sendmsg = l2cap_sock_sendmsg,
.recvmsg = bt_sock_recvmsg,
.poll = bt_sock_poll,
.mmap = sock_no_mmap,
.socketpair = sock_no_socketpair,
.ioctl = sock_no_ioctl,
.shutdown = l2cap_sock_shutdown,
.setsockopt = l2cap_sock_setsockopt,
.getsockopt = l2cap_sock_getsockopt
static struct net_proto_family l2cap_sock_family_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.create = l2cap_sock_create,
static struct hci_proto l2cap_hci_proto = {
.id = HCI_PROTO_L2CAP,
.connect_ind = l2cap_connect_ind,
.connect_cfm = l2cap_connect_cfm,
.disconn_ind = l2cap_disconn_ind,
.auth_cfm = l2cap_auth_cfm,
.encrypt_cfm = l2cap_encrypt_cfm,
.recv_acldata = l2cap_recv_acldata
static int __init l2cap_init(void)
err = proto_register(&l2cap_proto, 0);
err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
BT_ERR("L2CAP socket registration failed");
err = hci_register_proto(&l2cap_hci_proto);
BT_ERR("L2CAP protocol registration failed");
bt_sock_unregister(BTPROTO_L2CAP);
class_create_file(&bt_class, &class_attr_l2cap);
BT_INFO("L2CAP ver %s", VERSION);
BT_INFO("L2CAP socket layer initialized");
proto_unregister(&l2cap_proto);
static void __exit l2cap_exit(void)
class_remove_file(&bt_class, &class_attr_l2cap);
if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
BT_ERR("L2CAP socket unregistration failed");
if (hci_unregister_proto(&l2cap_hci_proto) < 0)
BT_ERR("L2CAP protocol unregistration failed");
proto_unregister(&l2cap_proto);
void l2cap_load(void)
/* Dummy function to trigger automatic L2CAP module loading by
 * other modules that use L2CAP sockets but don't use any other
 * symbols from it. */
EXPORT_SYMBOL(l2cap_load);
module_init(l2cap_init);
module_exit(l2cap_exit);
MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");