2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #define VERSION "2.11"
/* Local L2CAP feature mask; 0 = no extended features. Presumably reported
 * in L2CAP information responses — not used in this excerpt, verify. */
55 static u32 l2cap_feat_mask = 0x0000;
57 static const struct proto_ops l2cap_sock_ops;
/* Global list of all L2CAP sockets, protected by its own rwlock. */
59 static struct bt_sock_list l2cap_sk_list = {
60 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations: socket close/kill paths and the signalling
 * command builder are referenced before they are defined. */
63 static void __l2cap_sock_close(struct sock *sk, int reason);
64 static void l2cap_sock_close(struct sock *sk);
65 static void l2cap_sock_kill(struct sock *sk);
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
70 /* ---- L2CAP timers ---- */
/* sk_timer callback: close the socket when its timer expires.
 * NOTE(review): gaps in the embedded numbering show elided lines here
 * (locals, bh_lock/unlock, the default timeout reason, final put). */
71 static void l2cap_sock_timeout(unsigned long arg)
73 struct sock *sk = (struct sock *) arg;
76 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* A connect attempt with any security requirement that times out is
 * reported to the caller as a refusal rather than a plain timeout. */
80 if (sk->sk_state == BT_CONNECT &&
81 (l2cap_pi(sk)->link_mode & (L2CAP_LM_AUTH |
82 L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)))
83 reason = ECONNREFUSED;
87 __l2cap_sock_close(sk, reason);
/* (Re)arm the socket timer to fire @timeout jiffies from now. */
95 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
97 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
98 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timer, if armed. */
101 static void l2cap_sock_clear_timer(struct sock *sk)
103 BT_DBG("sock %p state %d", sk, sk->sk_state);
104 sk_stop_timer(sk, &sk->sk_timer);
107 /* ---- L2CAP channels ---- */
/* Linear scan of the connection's channel list for a destination CID
 * match. Caller holds the list lock (presumably — lock use is elided
 * from this listing along with the return statements). */
108 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
111 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
112 if (l2cap_pi(s)->dcid == cid)
/* As __l2cap_get_chan_by_dcid, but matches on the source CID.
 * NOTE(review): return/brace lines elided from this listing. */
118 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
121 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
122 if (l2cap_pi(s)->scid == cid)
128 /* Find channel with given SCID.
129 * Returns locked socket */
130 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
134 s = __l2cap_get_chan_by_scid(l, cid);
/* Lock the socket while still under the list read lock so it cannot be
 * unlinked between lookup and lock (read_lock line elided above). */
135 if (s) bh_lock_sock(s);
136 read_unlock(&l->lock);
/* Find the channel whose pending signalling request used @ident.
 * Caller holds the list lock; return lines elided from this listing. */
140 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
143 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
144 if (l2cap_pi(s)->ident == ident)
/* Locked-socket wrapper around __l2cap_get_chan_by_ident; mirrors
 * l2cap_get_chan_by_scid (read_lock line elided above). */
150 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
154 s = __l2cap_get_chan_by_ident(l, ident);
155 if (s) bh_lock_sock(s);
156 read_unlock(&l->lock);
/* Allocate the lowest source CID not already used on this connection.
 * NOTE(review): cid's starting value (the first dynamic CID) and the
 * return statements are elided from this listing. */
160 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
164 for (; cid < 0xffff; cid++) {
165 if(!__l2cap_get_chan_by_scid(l, cid))
/* Push @sk onto the head of the connection's doubly linked channel
 * list. Caller holds the list write lock (lock lines elided here). */
172 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
177 l2cap_pi(l->head)->prev_c = sk;
179 l2cap_pi(sk)->next_c = l->head;
180 l2cap_pi(sk)->prev_c = NULL;
/* Remove @sk from the channel list under the write lock, stitching its
 * neighbours together (the head-update branch is elided here). */
184 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
186 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
188 write_lock_bh(&l->lock);
193 l2cap_pi(next)->prev_c = prev;
195 l2cap_pi(prev)->next_c = next;
196 write_unlock_bh(&l->lock);
/* Attach @sk to @conn's channel list and assign its CIDs by socket
 * type. Caller holds the list write lock (see l2cap_chan_add). */
201 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
203 struct l2cap_chan_list *l = &conn->chan_list;
205 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
207 l2cap_pi(sk)->conn = conn;
209 if (sk->sk_type == SOCK_SEQPACKET) {
210 /* Alloc CID for connection-oriented socket */
211 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
212 } else if (sk->sk_type == SOCK_DGRAM) {
213 /* Connectionless socket */
214 l2cap_pi(sk)->scid = 0x0002;
215 l2cap_pi(sk)->dcid = 0x0002;
216 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
/* NOTE(review): the else branch header for this raw-socket case is
 * elided from the listing. */
218 /* Raw socket can send/recv signalling messages only */
219 l2cap_pi(sk)->scid = 0x0001;
220 l2cap_pi(sk)->dcid = 0x0001;
221 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
224 __l2cap_chan_link(l, sk);
/* For incoming channels, queue the child on the listening parent
 * (the "if (parent)" guard is elided from this listing). */
227 bt_accept_enqueue(parent, sk);
231 * Must be called on the locked socket. */
/* Detach the channel from its connection, mark the socket closed and
 * wake whoever is waiting on it. Elided lines include the NULL-conn
 * guard, err propagation and the parent/no-parent branch structure. */
232 static void l2cap_chan_del(struct sock *sk, int err)
234 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
235 struct sock *parent = bt_sk(sk)->parent;
237 l2cap_sock_clear_timer(sk);
239 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
242 /* Unlink from channel list */
243 l2cap_chan_unlink(&conn->chan_list, sk);
244 l2cap_pi(sk)->conn = NULL;
/* Drop the ACL reference taken when the channel was added. */
245 hci_conn_put(conn->hcon);
248 sk->sk_state = BT_CLOSED;
249 sock_set_flag(sk, SOCK_ZAPPED);
/* Pending-accept child: unlink from the parent and notify it. */
255 bt_accept_unlink(sk);
256 parent->sk_data_ready(parent, 0);
258 sk->sk_state_change(sk);
261 /* Service level security */
/* Kick off the security procedure the socket's link_mode requires;
 * presumably returns non-zero once the link already satisfies it
 * (the plain "return 1" tail is elided) — verify against callers. */
262 static inline int l2cap_check_link_mode(struct sock *sk)
264 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* Encryption (which implies authentication) takes precedence. */
266 if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
267 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE))
268 return hci_conn_encrypt(conn->hcon);
270 if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH)
271 return hci_conn_auth(conn->hcon);
/* Hand out the next signalling identifier for this connection.
 * NOTE(review): the wrap-around assignment after the 128 check and the
 * return are elided from this listing. */
276 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
280 /* Get next available identificator.
281 * 1 - 128 are used by kernel.
282 * 129 - 199 are reserved.
283 * 200 - 254 are used by utilities like l2ping, etc.
286 spin_lock_bh(&conn->lock);
288 if (++conn->tx_ident > 128)
293 spin_unlock_bh(&conn->lock);
/* Build and transmit one signalling command on the connection's ACL
 * link. NOTE(review): the NULL-skb error return is elided here. */
298 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
300 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
302 BT_DBG("code 0x%2.2x", code);
307 return hci_send_acl(conn->hcon, skb, 0);
/* Start channel establishment: once the peer's feature mask is known
 * (or being requested) either send a Connect Request or first issue an
 * Information Request. Elided lines include the done-state check and
 * the else that separates the two paths. */
310 static void l2cap_do_start(struct sock *sk)
312 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
314 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Security already satisfied: go straight to Connect Request. */
315 if (l2cap_check_link_mode(sk)) {
316 struct l2cap_conn_req req;
317 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
318 req.psm = l2cap_pi(sk)->psm;
320 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
322 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
323 L2CAP_CONN_REQ, sizeof(req), &req);
/* Feature mask not yet requested: ask for it and arm the info timer
 * so a silent peer does not stall connection setup forever. */
326 struct l2cap_info_req req;
327 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
329 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
330 conn->info_ident = l2cap_get_ident(conn);
332 mod_timer(&conn->info_timer, jiffies +
333 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
335 l2cap_send_cmd(conn, conn->info_ident,
336 L2CAP_INFO_REQ, sizeof(req), &req);
340 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and advance its state machine:
 * send Connect Requests for BT_CONNECT channels and Connect Responses
 * for BT_CONNECT2 channels. Elided lines include the read_lock,
 * per-socket bh_lock/unlock and several else/continue arms. */
341 static void l2cap_conn_start(struct l2cap_conn *conn)
343 struct l2cap_chan_list *l = &conn->chan_list;
346 BT_DBG("conn %p", conn);
350 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented channels take part in signalling here. */
353 if (sk->sk_type != SOCK_SEQPACKET) {
358 if (sk->sk_state == BT_CONNECT) {
359 if (l2cap_check_link_mode(sk)) {
360 struct l2cap_conn_req req;
361 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
362 req.psm = l2cap_pi(sk)->psm;
364 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
366 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
367 L2CAP_CONN_REQ, sizeof(req), &req);
369 } else if (sk->sk_state == BT_CONNECT2) {
370 struct l2cap_conn_rsp rsp;
/* Response carries the CIDs swapped relative to our local view. */
371 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
372 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
374 if (l2cap_check_link_mode(sk)) {
375 sk->sk_state = BT_CONFIG;
376 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
377 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security procedure still running: answer "pending". */
379 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
380 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
383 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
384 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
390 read_unlock(&l->lock);
/* ACL link came up: mark non-seqpacket channels connected immediately
 * and start establishment for channels waiting in BT_CONNECT. Elided
 * lines include the read_lock, bh_lock/unlock and the l2cap_do_start
 * call on the BT_CONNECT branch. */
393 static void l2cap_conn_ready(struct l2cap_conn *conn)
395 struct l2cap_chan_list *l = &conn->chan_list;
398 BT_DBG("conn %p", conn);
402 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Raw/dgram channels need no L2CAP signalling — ready right away. */
405 if (sk->sk_type != SOCK_SEQPACKET) {
406 l2cap_sock_clear_timer(sk);
407 sk->sk_state = BT_CONNECTED;
408 sk->sk_state_change(sk);
409 } else if (sk->sk_state == BT_CONNECT)
415 read_unlock(&l->lock);
418 /* Notify sockets that we cannot guaranty reliability anymore */
/* Flag @err on every channel that asked for L2CAP_LM_RELIABLE; the
 * sk_err assignment itself is elided from this listing. */
419 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
421 struct l2cap_chan_list *l = &conn->chan_list;
424 BT_DBG("conn %p", conn);
428 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
429 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
433 read_unlock(&l->lock);
/* Information Request timed out: give up on the feature mask and let
 * pending channels proceed without it. */
436 static void l2cap_info_timeout(unsigned long arg)
438 struct l2cap_conn *conn = (void *) arg;
440 conn->info_ident = 0;
442 l2cap_conn_start(conn);
/* Get-or-create the L2CAP state hanging off an HCI connection.
 * GFP_ATOMIC because this runs from the HCI event path. Elided lines
 * include the existing-conn early return, the NULL check after
 * kzalloc, and the final return. @status appears unused in the
 * visible portion — verify against the full source. */
445 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
447 struct l2cap_conn *conn = hcon->l2cap_data;
452 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
456 hcon->l2cap_data = conn;
459 BT_DBG("hcon %p conn %p", hcon, conn);
461 conn->mtu = hcon->hdev->acl_mtu;
462 conn->src = &hcon->hdev->bdaddr;
463 conn->dst = &hcon->dst;
467 setup_timer(&conn->info_timer, l2cap_info_timeout,
468 (unsigned long) conn);
470 spin_lock_init(&conn->lock);
471 rwlock_init(&conn->chan_list.lock);
/* Tear down the L2CAP state of a dying HCI connection: kill every
 * channel, stop the info timer and free the conn. Elided lines include
 * the NULL-conn guard, per-socket lock/kill calls in the loop, and the
 * final kfree(conn). */
476 static void l2cap_conn_del(struct hci_conn *hcon, int err)
478 struct l2cap_conn *conn = hcon->l2cap_data;
484 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled frame. */
487 kfree_skb(conn->rx_skb);
490 while ((sk = conn->chan_list.head)) {
492 l2cap_chan_del(sk, err);
/* Only armed if an Information Request was ever sent. */
497 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
498 del_timer_sync(&conn->info_timer);
500 hcon->l2cap_data = NULL;
/* Locked wrapper: add a channel to the connection under the channel
 * list write lock. */
504 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
506 struct l2cap_chan_list *l = &conn->chan_list;
507 write_lock_bh(&l->lock);
508 __l2cap_chan_add(conn, sk, parent);
509 write_unlock_bh(&l->lock);
512 /* ---- Socket interface ---- */
/* Exact-match lookup of a bound socket by (source PSM, source bdaddr).
 * Caller holds l2cap_sk_list.lock; the goto/return tail is elided. */
513 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
516 struct hlist_node *node;
517 sk_for_each(sk, node, &l2cap_sk_list.head)
518 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
525 /* Find socket with psm and source bdaddr.
526 * Returns closest match.
/* Prefer a socket bound to the exact source address; fall back to one
 * bound to BDADDR_ANY (tracked in sk1). The "break" on exact match and
 * the continue statements are elided from this listing. */
528 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
530 struct sock *sk = NULL, *sk1 = NULL;
531 struct hlist_node *node;
533 sk_for_each(sk, node, &l2cap_sk_list.head) {
/* Skip sockets not in the requested state (state == 0 matches all). */
534 if (state && sk->sk_state != state)
537 if (l2cap_pi(sk)->psm == psm) {
539 if (!bacmp(&bt_sk(sk)->src, src))
543 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node non-NULL means the loop broke early on an exact match. */
547 return node ? sk : sk1;
550 /* Find socket with given address (psm, src).
551 * Returns locked socket */
552 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
555 read_lock(&l2cap_sk_list.lock);
556 s = __l2cap_get_sock_by_psm(state, psm, src);
/* Lock before dropping the list lock so the socket cannot vanish. */
557 if (s) bh_lock_sock(s);
558 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: release any queued skbs when the socket is
 * finally freed. */
562 static void l2cap_sock_destruct(struct sock *sk)
566 skb_queue_purge(&sk->sk_receive_queue);
567 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent itself closed and zapped. */
570 static void l2cap_sock_cleanup_listen(struct sock *parent)
574 BT_DBG("parent %p", parent);
576 /* Close not yet accepted channels */
577 while ((sk = bt_accept_dequeue(parent, NULL)))
578 l2cap_sock_close(sk);
580 parent->sk_state = BT_CLOSED;
581 sock_set_flag(parent, SOCK_ZAPPED);
584 /* Kill socket (only if zapped and orphan)
585 * Must be called on unlocked socket.
587 static void l2cap_sock_kill(struct sock *sk)
/* Refuse to kill a socket that is still live or still attached to a
 * struct socket; the early return here is elided from the listing,
 * as is the final sock_put. */
589 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
592 BT_DBG("sk %p state %d", sk, sk->sk_state);
594 /* Kill poor orphan */
595 bt_sock_unlink(&l2cap_sk_list, sk);
596 sock_set_flag(sk, SOCK_DEAD);
/* State-machine close. Must be called on a locked socket. The switch's
 * case labels (BT_LISTEN, the connected/config states, etc.) and the
 * default branch are elided from this listing. */
600 static void __l2cap_sock_close(struct sock *sk, int reason)
602 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
604 switch (sk->sk_state) {
606 l2cap_sock_cleanup_listen(sk);
/* Connected seqpacket channel: do an orderly L2CAP disconnect and
 * wait (via the socket timer) for the peer's response. */
612 if (sk->sk_type == SOCK_SEQPACKET) {
613 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
614 struct l2cap_disconn_req req;
616 sk->sk_state = BT_DISCONN;
617 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
619 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
620 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
621 l2cap_send_cmd(conn, l2cap_get_ident(conn),
622 L2CAP_DISCONN_REQ, sizeof(req), &req);
/* Non-seqpacket channels are torn down immediately. */
624 l2cap_chan_del(sk, reason);
629 l2cap_chan_del(sk, reason);
633 sock_set_flag(sk, SOCK_ZAPPED);
638 /* Must be called on unlocked socket. */
/* Lock, close with ECONNRESET, unlock, then kill if orphaned — the
 * lock/unlock and l2cap_sock_kill lines are elided from this listing. */
639 static void l2cap_sock_close(struct sock *sk)
641 l2cap_sock_clear_timer(sk);
643 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize a new L2CAP socket, inheriting MTUs and link mode from
 * @parent when accepting; otherwise use defaults. The "if (parent)"
 * guard and its else are elided from this listing. */
648 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
650 struct l2cap_pinfo *pi = l2cap_pi(sk);
655 sk->sk_type = parent->sk_type;
656 pi->imtu = l2cap_pi(parent)->imtu;
657 pi->omtu = l2cap_pi(parent)->omtu;
658 pi->link_mode = l2cap_pi(parent)->link_mode;
660 pi->imtu = L2CAP_DEFAULT_MTU;
665 /* Default config options */
667 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Protocol descriptor registered with the core socket layer; obj_size
 * makes sk_alloc reserve room for the l2cap_pinfo tail. */
670 static struct proto l2cap_proto = {
672 .owner = THIS_MODULE,
673 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialize an L2CAP sock, link it into the
 * global socket list and arm its timer callback. The NULL check after
 * sk_alloc and the final return are elided from this listing. */
676 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
680 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
684 sock_init_data(sock, sk);
685 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
687 sk->sk_destruct = l2cap_sock_destruct;
/* Default connect timeout also reused as disconnect timeout. */
688 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
690 sock_reset_flag(sk, SOCK_ZAPPED);
692 sk->sk_protocol = proto;
693 sk->sk_state = BT_OPEN;
695 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
697 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate the socket type, require CAP_NET_RAW for
 * raw sockets, then allocate and init. The -EPERM and -ENOMEM returns
 * and the final "return 0" are elided from this listing. */
701 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
705 BT_DBG("sock %p", sock);
707 sock->state = SS_UNCONNECTED;
709 if (sock->type != SOCK_SEQPACKET &&
710 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
711 return -ESOCKTNOSUPPORT;
/* Raw sockets can inject signalling — privileged only. */
713 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
716 sock->ops = &l2cap_sock_ops;
718 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
722 l2cap_sock_init(sk, NULL);
/* bind(2): validate the address, reject privileged PSMs (< 0x1001) for
 * unprivileged callers, refuse duplicate (psm, bdaddr) bindings, then
 * record the source address. Elided lines include lock_sock/release,
 * the -EINVAL/-EACCES/-EADDRINUSE error assignments and the return. */
726 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
728 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
729 struct sock *sk = sock->sk;
732 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
734 if (!addr || addr->sa_family != AF_BLUETOOTH)
739 if (sk->sk_state != BT_OPEN) {
/* PSMs below 0x1001 are reserved; binding them needs privilege. */
744 if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
745 !capable(CAP_NET_BIND_SERVICE)) {
750 write_lock_bh(&l2cap_sk_list.lock);
752 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
755 /* Save source address */
756 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
757 l2cap_pi(sk)->psm = la->l2_psm;
758 l2cap_pi(sk)->sport = la->l2_psm;
759 sk->sk_state = BT_BOUND;
762 write_unlock_bh(&l2cap_sk_list.lock);
/* Establish the ACL link and attach this channel to it. Chooses the
 * HCI auth type from link_mode (MITM variants when auth/encrypt/secure
 * requested; "no bonding" for SDP, PSM 0x0001). Elided lines include
 * the else for the unsecured auth types, error paths after
 * hci_connect/l2cap_conn_add, the BT_CONNECTED l2cap_do_start call and
 * the hci_dev_put/return tail. */
769 static int l2cap_do_connect(struct sock *sk)
771 bdaddr_t *src = &bt_sk(sk)->src;
772 bdaddr_t *dst = &bt_sk(sk)->dst;
773 struct l2cap_conn *conn;
774 struct hci_conn *hcon;
775 struct hci_dev *hdev;
779 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
781 if (!(hdev = hci_get_route(dst, src)))
782 return -EHOSTUNREACH;
784 hci_dev_lock_bh(hdev);
788 if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH ||
789 l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT ||
790 l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) {
791 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001))
792 auth_type = HCI_AT_NO_BONDING_MITM;
794 auth_type = HCI_AT_GENERAL_BONDING_MITM;
796 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001))
797 auth_type = HCI_AT_NO_BONDING;
799 auth_type = HCI_AT_GENERAL_BONDING;
802 hcon = hci_connect(hdev, ACL_LINK, dst, auth_type);
806 conn = l2cap_conn_add(hcon, 0);
814 /* Update source addr of the socket */
815 bacpy(src, conn->src);
817 l2cap_chan_add(conn, sk, NULL);
819 sk->sk_state = BT_CONNECT;
820 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Link may already be up (shared ACL): skip straight ahead. */
822 if (hcon->state == BT_CONNECTED) {
823 if (sk->sk_type != SOCK_SEQPACKET) {
824 l2cap_sock_clear_timer(sk);
825 sk->sk_state = BT_CONNECTED;
831 hci_dev_unlock_bh(hdev);
/* connect(2): validate, record the destination, kick off the connect
 * and (unless non-blocking) wait for BT_CONNECTED. Elided lines
 * include lock_sock/release, the full state switch (BT_CONNECT /
 * BT_CONNECTED / default cases) and error-path gotos. */
836 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
838 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
839 struct sock *sk = sock->sk;
846 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
/* Connection-oriented sockets must name a PSM to connect to. */
851 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
856 switch(sk->sk_state) {
860 /* Already connecting */
864 /* Already connected */
877 /* Set destination address and psm */
878 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
879 l2cap_pi(sk)->psm = la->l2_psm;
881 if ((err = l2cap_do_connect(sk)))
885 err = bt_sock_wait_state(sk, BT_CONNECTED,
886 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): auto-assign a free odd PSM in 0x1001..0x10ff when the
 * socket was bound without one, then switch to BT_LISTEN. Elided lines
 * include lock_sock/release, the -EBADFD/-EADDRINUSE error handling
 * inside the PSM scan, and the return. */
892 static int l2cap_sock_listen(struct socket *sock, int backlog)
894 struct sock *sk = sock->sk;
897 BT_DBG("sk %p backlog %d", sk, backlog);
/* Only bound, connection-oriented sockets may listen. */
901 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
906 if (!l2cap_pi(sk)->psm) {
907 bdaddr_t *src = &bt_sk(sk)->src;
912 write_lock_bh(&l2cap_sk_list.lock);
/* Valid PSMs are odd, hence the += 2 stride. */
914 for (psm = 0x1001; psm < 0x1100; psm += 2)
915 if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
916 l2cap_pi(sk)->psm = htobs(psm);
917 l2cap_pi(sk)->sport = htobs(psm);
922 write_unlock_bh(&l2cap_sk_list.lock);
928 sk->sk_max_ack_backlog = backlog;
929 sk->sk_ack_backlog = 0;
930 sk->sk_state = BT_LISTEN;
/* accept(2): classic wake-one wait loop on the accept queue. Elided
 * lines include the timeout/EAGAIN check, release_sock before
 * schedule_timeout, the error gotos and the final release/return. */
937 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
939 DECLARE_WAITQUEUE(wait, current);
940 struct sock *sk = sock->sk, *nsk;
/* Nested lock class: child sockets are locked under the parent. */
944 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
946 if (sk->sk_state != BT_LISTEN) {
951 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
953 BT_DBG("sk %p timeo %ld", sk, timeo);
955 /* Wait for an incoming connection. (wake-one). */
956 add_wait_queue_exclusive(sk->sk_sleep, &wait);
957 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
958 set_current_state(TASK_INTERRUPTIBLE);
965 timeo = schedule_timeout(timeo);
966 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
/* Socket may have stopped listening while we slept. */
968 if (sk->sk_state != BT_LISTEN) {
973 if (signal_pending(current)) {
974 err = sock_intr_errno(timeo);
978 set_current_state(TASK_RUNNING);
979 remove_wait_queue(sk->sk_sleep, &wait);
984 newsock->state = SS_CONNECTED;
986 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: report dst when @peer, else src. The
 * "if (peer)"/else lines and the return are elided from this listing. */
993 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
995 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
996 struct sock *sk = sock->sk;
998 BT_DBG("sock %p, sk %p", sock, sk);
1000 addr->sa_family = AF_BLUETOOTH;
1001 *len = sizeof(struct sockaddr_l2);
1004 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1006 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1008 la->l2_psm = l2cap_pi(sk)->psm;
/* Build an outgoing L2CAP frame, fragmenting to the ACL MTU via the
 * skb frag_list, and hand it to HCI. Returns bytes sent or -errno.
 * Elided lines include the skb NULL checks, the fail/free error path,
 * the sent/len bookkeeping in the fragment loop and the return. */
1012 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
1014 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1015 struct sk_buff *skb, **frag;
1016 int err, hlen, count, sent=0;
1017 struct l2cap_hdr *lh;
1019 BT_DBG("sk %p len %d", sk, len);
1021 /* First fragment (with L2CAP header) */
/* Connectionless frames carry a 2-byte PSM after the basic header. */
1022 if (sk->sk_type == SOCK_DGRAM)
1023 hlen = L2CAP_HDR_SIZE + 2;
1025 hlen = L2CAP_HDR_SIZE;
1027 count = min_t(unsigned int, (conn->mtu - hlen), len);
1029 skb = bt_skb_send_alloc(sk, hlen + count,
1030 msg->msg_flags & MSG_DONTWAIT, &err);
1034 /* Create L2CAP header */
1035 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1036 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1037 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1039 if (sk->sk_type == SOCK_DGRAM)
1040 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
1042 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1050 /* Continuation fragments (no L2CAP header) */
1051 frag = &skb_shinfo(skb)->frag_list;
1053 count = min_t(unsigned int, conn->mtu, len);
1055 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1059 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
1067 frag = &(*frag)->next;
1070 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
/* sendmsg(2): reject OOB and over-MTU payloads, then send if the
 * socket is connected. Elided lines include lock/release, the
 * -EOPNOTSUPP/-EINVAL error assignments, the -ENOTCONN else and the
 * return. */
1080 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1082 struct sock *sk = sock->sk;
1085 BT_DBG("sock %p, sk %p", sock, sk);
1087 err = sock_error(sk);
1091 if (msg->msg_flags & MSG_OOB)
1094 /* Check outgoing MTU */
/* Raw sockets bypass the MTU check (signalling-sized frames). */
1095 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1100 if (sk->sk_state == BT_CONNECTED)
1101 err = l2cap_do_send(sk, msg, len);
/* Legacy SOL_L2CAP setsockopt. Handles L2CAP_OPTIONS (MTUs) and
 * L2CAP_LM (link mode). The switch/case labels, -EFAULT assignments,
 * default -ENOPROTOOPT case and release/return tail are elided. */
1109 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
1111 struct sock *sk = sock->sk;
1112 struct l2cap_options opts;
1116 BT_DBG("sk %p", sk);
/* Pre-fill with current values so a short user buffer only
 * overrides the leading fields. */
1122 opts.imtu = l2cap_pi(sk)->imtu;
1123 opts.omtu = l2cap_pi(sk)->omtu;
1124 opts.flush_to = l2cap_pi(sk)->flush_to;
1125 opts.mode = L2CAP_MODE_BASIC;
1127 len = min_t(unsigned int, sizeof(opts), optlen);
1128 if (copy_from_user((char *) &opts, optval, len)) {
1133 l2cap_pi(sk)->imtu = opts.imtu;
1134 l2cap_pi(sk)->omtu = opts.omtu;
1138 if (get_user(opt, (u32 __user *) optval)) {
1143 l2cap_pi(sk)->link_mode = opt;
/* setsockopt dispatcher: SOL_L2CAP goes to the legacy handler; the
 * SOL_BLUETOOTH handling that follows is elided from this listing. */
1155 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1157 struct sock *sk = sock->sk;
1160 BT_DBG("sk %p", sk);
1162 if (level == SOL_L2CAP)
1163 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
/* Legacy SOL_L2CAP getsockopt: L2CAP_OPTIONS, L2CAP_LM and
 * L2CAP_CONNINFO. Case labels, -EFAULT/-ENOTCONN error assignments,
 * the default case and the release/return tail are elided. */
1177 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1179 struct sock *sk = sock->sk;
1180 struct l2cap_options opts;
1181 struct l2cap_conninfo cinfo;
1184 BT_DBG("sk %p", sk);
1186 if (get_user(len, optlen))
1193 opts.imtu = l2cap_pi(sk)->imtu;
1194 opts.omtu = l2cap_pi(sk)->omtu;
1195 opts.flush_to = l2cap_pi(sk)->flush_to;
/* Only basic mode exists in this version of the stack. */
1196 opts.mode = L2CAP_MODE_BASIC;
1198 len = min_t(unsigned int, len, sizeof(opts));
1199 if (copy_to_user(optval, (char *) &opts, len))
1205 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
1209 case L2CAP_CONNINFO:
1210 if (sk->sk_state != BT_CONNECTED) {
1215 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1216 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1218 len = min_t(unsigned int, len, sizeof(cinfo));
1219 if (copy_to_user(optval, (char *) &cinfo, len))
/* getsockopt dispatcher: SOL_L2CAP goes to the legacy handler; the
 * SOL_BLUETOOTH handling after the get_user is elided. */
1233 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1235 struct sock *sk = sock->sk;
1238 BT_DBG("sk %p", sk);
1240 if (level == SOL_L2CAP)
1241 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1243 if (get_user(len, optlen))
/* shutdown(2): close the channel once and optionally linger until
 * BT_CLOSED. Elided lines include lock/release, the NULL-sk guard,
 * the lingertime argument to bt_sock_wait_state and the return.
 * Note @how is ignored: shutdown is always full-duplex here. */
1258 static int l2cap_sock_shutdown(struct socket *sock, int how)
1260 struct sock *sk = sock->sk;
1263 BT_DBG("sock %p, sk %p", sock, sk);
1269 if (!sk->sk_shutdown) {
1270 sk->sk_shutdown = SHUTDOWN_MASK;
1271 l2cap_sock_clear_timer(sk);
1272 __l2cap_sock_close(sk, 0);
1274 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1275 err = bt_sock_wait_state(sk, BT_CLOSED,
/* close(2): full shutdown, then orphan and kill the socket. The
 * NULL-sk guard, sock_orphan call and return are elided here. */
1282 static int l2cap_sock_release(struct socket *sock)
1284 struct sock *sk = sock->sk;
1287 BT_DBG("sock %p, sk %p", sock, sk);
1292 err = l2cap_sock_shutdown(sock, 2);
1295 l2cap_sock_kill(sk);
/* Configuration finished: mark the channel connected and wake the
 * waiter (connector or accepting parent). The if/else separating the
 * outgoing and incoming cases is elided from this listing. */
1299 static void l2cap_chan_ready(struct sock *sk)
1301 struct sock *parent = bt_sk(sk)->parent;
1303 BT_DBG("sk %p, parent %p", sk, parent);
1305 l2cap_pi(sk)->conf_state = 0;
1306 l2cap_sock_clear_timer(sk);
1309 /* Outgoing channel.
1310 * Wake up socket sleeping on connect.
1312 sk->sk_state = BT_CONNECTED;
1313 sk->sk_state_change(sk);
1315 /* Incoming channel.
1316 * Wake up socket sleeping on accept.
1318 parent->sk_data_ready(parent, 0);
/* L2CAP_LM_SECURE requires a fresh link key per connection. */
1321 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) {
1322 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1323 hci_conn_change_link_key(conn->hcon);
1327 /* Copy frame to all raw sockets on that connection */
/* Clone the skb to each raw socket's receive queue; the originating-
 * socket comparison and the kfree on queue failure are elided. */
1328 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1330 struct l2cap_chan_list *l = &conn->chan_list;
1331 struct sk_buff *nskb;
1334 BT_DBG("conn %p", conn);
1336 read_lock(&l->lock);
1337 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1338 if (sk->sk_type != SOCK_RAW)
1341 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: this path runs in RX/BH context. */
1345 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1348 if (sock_queue_rcv_skb(sk, nskb))
1351 read_unlock(&l->lock);
1354 /* ---- L2CAP signalling commands ---- */
/* Allocate an skb chain for one signalling command on CID 0x0001:
 * L2CAP header + command header + @dlen bytes of payload, fragmented
 * to the ACL MTU via frag_list. Elided lines include the NULL checks,
 * the fail/kfree_skb error path, the cmd->code/ident assignments, the
 * data/len advance in the loop and the final return skb. */
1355 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1356 u8 code, u8 ident, u16 dlen, void *data)
1358 struct sk_buff *skb, **frag;
1359 struct l2cap_cmd_hdr *cmd;
1360 struct l2cap_hdr *lh;
1363 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1365 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1366 count = min_t(unsigned int, conn->mtu, len);
1368 skb = bt_skb_alloc(count, GFP_ATOMIC);
1372 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1373 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling channel CID. */
1374 lh->cid = cpu_to_le16(0x0001);
1376 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1379 cmd->len = cpu_to_le16(dlen);
1382 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1383 memcpy(skb_put(skb, count), data, count);
1389 /* Continuation fragments (no L2CAP header) */
1390 frag = &skb_shinfo(skb)->frag_list;
1392 count = min_t(unsigned int, conn->mtu, len);
1394 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1398 memcpy(skb_put(*frag, count), data, count);
1403 frag = &(*frag)->next;
/* Decode one TLV configuration option at *ptr, advancing it, and
 * return the consumed length. The switch on opt->len (cases 1/2/4 and
 * the variable-length default) and the *ptr advance/return are elided
 * from this listing. */
1413 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1415 struct l2cap_conf_opt *opt = *ptr;
1418 len = L2CAP_CONF_OPT_SIZE + opt->len;
1426 *val = *((u8 *) opt->val);
1430 *val = __le16_to_cpu(*((__le16 *) opt->val));
1434 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Larger options are returned by pointer rather than by value. */
1438 *val = (unsigned long) opt->val;
1442 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one TLV configuration option at *ptr and advance it. The
 * opt->type/opt->len assignments and the switch's case labels are
 * elided from this listing. */
1446 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1448 struct l2cap_conf_opt *opt = *ptr;
1450 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1457 *((u8 *) opt->val) = val;
1461 *((__le16 *) opt->val) = cpu_to_le16(val);
1465 *((__le32 *) opt->val) = cpu_to_le32(val);
/* Variable-length options: @val is a pointer to the raw bytes. */
1469 memcpy(opt->val, (void *) val, len);
1473 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build our Configure Request into @data: only a non-default MTU is
 * offered. The final "return ptr - data" length computation is elided
 * from this listing. */
1476 static int l2cap_build_conf_req(struct sock *sk, void *data)
1478 struct l2cap_pinfo *pi = l2cap_pi(sk);
1479 struct l2cap_conf_req *req = data;
1480 void *ptr = req->data;
1482 BT_DBG("sk %p", sk);
1484 if (pi->imtu != L2CAP_DEFAULT_MTU)
1485 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1487 /* FIXME: Need actual value of the flush timeout */
1488 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1489 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1491 req->dcid = cpu_to_le16(pi->dcid);
1492 req->flags = cpu_to_le16(0)
/* Parse the peer's buffered Configure Request (pi->conf_req) and build
 * our response into @data. Unknown non-hint options yield
 * L2CAP_CONF_UNKNOWN; non-basic RFC modes are rejected with
 * L2CAP_CONF_UNACCEPT. Elided lines include the hint-bit extraction,
 * the mtu/flush_to assignments inside their cases, the MTU validation
 * branch, the done label and the final length return. */
1497 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1499 struct l2cap_pinfo *pi = l2cap_pi(sk);
1500 struct l2cap_conf_rsp *rsp = data;
1501 void *ptr = rsp->data;
1502 void *req = pi->conf_req;
1503 int len = pi->conf_len;
1504 int type, hint, olen;
1506 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1507 u16 mtu = L2CAP_DEFAULT_MTU;
1508 u16 result = L2CAP_CONF_SUCCESS;
1510 BT_DBG("sk %p", sk);
1512 while (len >= L2CAP_CONF_OPT_SIZE) {
1513 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1519 case L2CAP_CONF_MTU:
1523 case L2CAP_CONF_FLUSH_TO:
1527 case L2CAP_CONF_QOS:
1530 case L2CAP_CONF_RFC:
1531 if (olen == sizeof(rfc))
1532 memcpy(&rfc, (void *) val, olen);
/* Unknown, non-hint option: echo its type back in the reject list. */
1539 result = L2CAP_CONF_UNKNOWN;
1540 *((u8 *) ptr++) = type;
1545 if (result == L2CAP_CONF_SUCCESS) {
1546 /* Configure output options and let the other side know
1547 * which ones we don't like. */
1549 if (rfc.mode == L2CAP_MODE_BASIC) {
1551 result = L2CAP_CONF_UNACCEPT;
1554 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1557 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
/* Peer asked for a non-basic mode: refuse and counter-propose basic. */
1559 result = L2CAP_CONF_UNACCEPT;
1561 memset(&rfc, 0, sizeof(rfc));
1562 rfc.mode = L2CAP_MODE_BASIC;
1564 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1565 sizeof(rfc), (unsigned long) &rfc);
1569 rsp->scid = cpu_to_le16(pi->dcid);
1570 rsp->result = cpu_to_le16(result);
1571 rsp->flags = cpu_to_le16(0x0000);
/* Fill a bare Configure Response header (no options) with the given
 * result/flags. The length return is elided from this listing. */
1576 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1578 struct l2cap_conf_rsp *rsp = data;
1579 void *ptr = rsp->data;
1581 BT_DBG("sk %p", sk);
1583 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1584 rsp->result = cpu_to_le16(result);
1585 rsp->flags = cpu_to_le16(flags);
/* Handle a Command Reject: if it answers our outstanding Information
 * Request, abandon the feature-mask query and proceed. The early
 * return for non-zero reasons and the final return are elided. */
1590 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1592 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* Only "command not understood" (0x0000) is acted upon here. */
1594 if (rej->reason != 0x0000)
1597 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1598 cmd->ident == conn->info_ident) {
1599 conn->info_ident = 0;
1600 del_timer(&conn->info_timer);
1601 l2cap_conn_start(conn);
/* Handle an incoming Connect Request: find the listener, enforce
 * security, create and link a child socket, then send a Connect
 * Response (and an Information Request if the feature mask is still
 * unknown). Elided lines include several goto labels ("response",
 * "sendresp"), the parent lock_sock_nested, the ENOMEM path after
 * l2cap_sock_alloc, and the final return. */
1607 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1609 struct l2cap_chan_list *list = &conn->chan_list;
1610 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1611 struct l2cap_conn_rsp rsp;
1612 struct sock *sk, *parent;
1613 int result, status = L2CAP_CS_NO_INFO;
1615 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1616 __le16 psm = req->psm;
1618 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1620 /* Check if we have socket listening on psm */
1621 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1623 result = L2CAP_CR_BAD_PSM;
1627 /* Check if the ACL is secure enough (if not SDP) */
1628 if (psm != cpu_to_le16(0x0001) &&
1629 !hci_conn_check_link_mode(conn->hcon)) {
1630 result = L2CAP_CR_SEC_BLOCK;
/* Default failure code for the allocation paths below. */
1634 result = L2CAP_CR_NO_MEM;
1636 /* Check for backlog size */
1637 if (sk_acceptq_is_full(parent)) {
1638 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1642 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1646 write_lock_bh(&list->lock);
1648 /* Check if we already have channel with that dcid */
1649 if (__l2cap_get_chan_by_dcid(list, scid)) {
1650 write_unlock_bh(&list->lock);
1651 sock_set_flag(sk, SOCK_ZAPPED);
1652 l2cap_sock_kill(sk);
/* Keep the ACL alive for the lifetime of this channel. */
1656 hci_conn_hold(conn->hcon);
1658 l2cap_sock_init(sk, parent);
1659 bacpy(&bt_sk(sk)->src, conn->src);
1660 bacpy(&bt_sk(sk)->dst, conn->dst);
1661 l2cap_pi(sk)->psm = psm;
1662 l2cap_pi(sk)->dcid = scid;
1664 __l2cap_chan_add(conn, sk, parent);
1665 dcid = l2cap_pi(sk)->scid;
1667 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1669 l2cap_pi(sk)->ident = cmd->ident;
/* Feature mask known: answer now; otherwise defer with "pending". */
1671 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1672 if (l2cap_check_link_mode(sk)) {
1673 sk->sk_state = BT_CONFIG;
1674 result = L2CAP_CR_SUCCESS;
1675 status = L2CAP_CS_NO_INFO;
1677 sk->sk_state = BT_CONNECT2;
1678 result = L2CAP_CR_PEND;
1679 status = L2CAP_CS_AUTHEN_PEND;
1682 sk->sk_state = BT_CONNECT2;
1683 result = L2CAP_CR_PEND;
1684 status = L2CAP_CS_NO_INFO;
1687 write_unlock_bh(&list->lock);
1690 bh_unlock_sock(parent);
1693 rsp.scid = cpu_to_le16(scid);
1694 rsp.dcid = cpu_to_le16(dcid);
1695 rsp.result = cpu_to_le16(result);
1696 rsp.status = cpu_to_le16(status);
1697 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1699 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
1700 struct l2cap_info_req info;
1701 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1703 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1704 conn->info_ident = l2cap_get_ident(conn);
1706 mod_timer(&conn->info_timer, jiffies +
1707 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
1709 l2cap_send_cmd(conn, conn->info_ident,
1710 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle an incoming L2CAP Connect Response.
 * The channel is looked up by the remote-assigned source CID; if that
 * fails, fall back to the signalling command ident (used while the
 * response is still pending and no dcid has been learned).
 * NOTE(review): several lines (switch header, pending case, unlock/return
 * paths) are elided in this view — confirm against the full file. */
1716 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1718 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1719 u16 scid, dcid, result, status;
	/* All fields arrive little-endian on the wire. */
1723 scid = __le16_to_cpu(rsp->scid);
1724 dcid = __le16_to_cpu(rsp->dcid);
1725 result = __le16_to_cpu(rsp->result);
1726 status = __le16_to_cpu(rsp->status);
1728 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
	/* Primary lookup by scid; secondary lookup by the ident we used
	 * when sending the Connect Request. */
1731 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1734 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1739 case L2CAP_CR_SUCCESS:
	/* Connection accepted: remember the peer's CID and immediately
	 * start the configuration phase. */
1740 sk->sk_state = BT_CONFIG;
1741 l2cap_pi(sk)->ident = 0;
1742 l2cap_pi(sk)->dcid = dcid;
1743 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1745 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1746 l2cap_build_conf_req(sk, req), req);
	/* Any non-success, non-pending result tears the channel down. */
1753 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle an incoming L2CAP Configure Request.
 * Configuration options may be fragmented across several requests
 * (continuation flag, bit 0 of 'flags'); fragments are accumulated in
 * the per-channel conf_req buffer until the final one arrives, then
 * parsed and answered in a single Configure Response.
 * NOTE(review): unlock/return lines are elided in this view. */
1761 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
1763 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1769 dcid = __le16_to_cpu(req->dcid);
1770 flags = __le16_to_cpu(req->flags);
1772 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1774 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
	/* Channel already being torn down: ignore further configuration. */
1777 if (sk->sk_state == BT_DISCONN)
1780 /* Reject if config buffer is too small. */
1781 len = cmd_len - sizeof(*req);
1782 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
1783 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1784 l2cap_build_conf_rsp(sk, rsp,
1785 L2CAP_CONF_REJECT, flags), rsp);
	/* Append this fragment to the accumulated option buffer. */
1790 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
1791 l2cap_pi(sk)->conf_len += len;
1793 if (flags & 0x0001) {
1794 /* Incomplete config. Send empty response. */
1795 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1796 l2cap_build_conf_rsp(sk, rsp,
1797 L2CAP_CONF_SUCCESS, 0x0001), rsp);
1801 /* Complete config. */
1802 len = l2cap_parse_conf_req(sk, rsp);
1806 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
1808 /* Reset config buffer. */
1809 l2cap_pi(sk)->conf_len = 0;
	/* Wait until our outgoing config (OUTPUT) is settled before
	 * possibly declaring the channel connected. */
1811 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
	/* Both directions configured: the channel is fully open. */
1814 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1815 sk->sk_state = BT_CONNECTED;
1816 l2cap_chan_ready(sk);
	/* We have not sent our own Configure Request yet — do it now. */
1820 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1822 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1823 l2cap_build_conf_req(sk, buf), buf);
/* Handle an incoming L2CAP Configure Response.
 * On success the inbound configuration direction is marked done; on
 * L2CAP_CONF_UNACCEPT the same request is retried a bounded number of
 * times, after which the channel is disconnected.
 * NOTE(review): switch header and some unlock/return lines are elided
 * in this view. */
1831 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1833 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1834 u16 scid, flags, result;
1837 scid = __le16_to_cpu(rsp->scid);
1838 flags = __le16_to_cpu(rsp->flags);
1839 result = __le16_to_cpu(rsp->result);
1841 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1843 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1847 case L2CAP_CONF_SUCCESS:
1850 case L2CAP_CONF_UNACCEPT:
	/* Bounded retry: give up after L2CAP_CONF_MAX_RETRIES attempts. */
1851 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1853 /* It does not make sense to adjust L2CAP parameters
1854 * that are currently defined in the spec. We simply
1855 * resend config request that we sent earlier. It is
1856 * stupid, but it helps qualification testing which
1857 * expects at least some response from us. */
1858 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1859 l2cap_build_conf_req(sk, req), req);
	/* Retries exhausted (or other failure): tear the channel down and
	 * send a Disconnect Request, with a 5 s timer as a safety net. */
1864 sk->sk_state = BT_DISCONN;
1865 sk->sk_err = ECONNRESET;
1866 l2cap_sock_set_timer(sk, HZ * 5);
1868 struct l2cap_disconn_req req;
1869 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
1870 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
1871 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1872 L2CAP_DISCONN_REQ, sizeof(req), &req);
	/* Success path: peer accepted our config — inbound direction done. */
1880 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1882 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1883 sk->sk_state = BT_CONNECTED;
1884 l2cap_chan_ready(sk);
/* Handle an incoming L2CAP Disconnect Request: acknowledge with a
 * Disconnect Response (CIDs echoed from our local view of the channel),
 * shut the socket down and remove the channel.
 * NOTE(review): lock/unlock and return lines are elided in this view. */
1892 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1894 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1895 struct l2cap_disconn_rsp rsp;
1899 scid = __le16_to_cpu(req->scid);
1900 dcid = __le16_to_cpu(req->dcid);
1902 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
	/* The peer's dcid is our scid — look the channel up by it. */
1904 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
	/* Response CIDs are swapped relative to the request: dcid from the
	 * peer's perspective is our scid, and vice versa. */
1907 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1908 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1909 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1911 sk->sk_shutdown = SHUTDOWN_MASK;
1913 l2cap_chan_del(sk, ECONNRESET);
	/* Kill the socket if it is already zapped/orphaned. */
1916 l2cap_sock_kill(sk);
/* Handle an incoming L2CAP Disconnect Response: the peer has confirmed
 * our earlier Disconnect Request, so remove the channel (no error). */
1920 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1922 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1926 scid = __le16_to_cpu(rsp->scid);
1927 dcid = __le16_to_cpu(rsp->dcid);
1929 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1931 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
	/* Clean removal: err == 0 because this is an orderly shutdown. */
1934 l2cap_chan_del(sk, 0);
1937 l2cap_sock_kill(sk);
/* Handle an incoming L2CAP Information Request.
 * Only the feature-mask query is supported: it is answered with our
 * (currently empty) l2cap_feat_mask; any other type gets NOTSUPP. */
1941 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1943 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1946 type = __le16_to_cpu(req->type);
1948 BT_DBG("type 0x%4.4x", type);
1950 if (type == L2CAP_IT_FEAT_MASK) {
1952 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
1953 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1954 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
	/* rsp->data may be unaligned inside buf, hence put_unaligned. */
1955 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
1956 l2cap_send_cmd(conn, cmd->ident,
1957 L2CAP_INFO_RSP, sizeof(buf), buf);
	/* Unknown info type: echo it back with a not-supported result. */
1959 struct l2cap_info_rsp rsp;
1960 rsp.type = cpu_to_le16(type);
1961 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
1962 l2cap_send_cmd(conn, cmd->ident,
1963 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming L2CAP Information Response.
 * Cancels the info timer, records the peer's feature mask when present,
 * and lets any connections waiting on that information proceed. */
1969 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1971 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1974 type = __le16_to_cpu(rsp->type);
1975 result = __le16_to_cpu(rsp->result);
1977 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
	/* The outstanding request is now answered: clear ident and stop
	 * the timeout that would otherwise fire. */
1979 conn->info_ident = 0;
1981 del_timer(&conn->info_timer);
1983 if (type == L2CAP_IT_FEAT_MASK)
1984 conn->feat_mask = get_unaligned_le32(rsp->data);
	/* Kick off any channels that were waiting for the feature mask. */
1986 l2cap_conn_start(conn);
/* Demultiplex the L2CAP signalling channel (CID 0x0001).
 * A single skb may contain several concatenated signalling commands;
 * each is copied out header-first and dispatched by opcode. Commands
 * that fail are answered with a Command Reject.
 * NOTE(review): loop-advance and error-path lines are elided here. */
1991 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1993 u8 *data = skb->data;
1995 struct l2cap_cmd_hdr cmd;
	/* Give raw (SOCK_RAW) listeners a copy of the signalling traffic. */
1998 l2cap_raw_recv(conn, skb);
2000 while (len >= L2CAP_CMD_HDR_SIZE) {
2002 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2003 data += L2CAP_CMD_HDR_SIZE;
2004 len -= L2CAP_CMD_HDR_SIZE;
2006 cmd_len = le16_to_cpu(cmd.len);
2008 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
	/* Guard against a payload length exceeding what remains in the
	 * skb and against the reserved ident value 0. */
2010 if (cmd_len > len || !cmd.ident) {
2011 BT_DBG("corrupted command");
2016 case L2CAP_COMMAND_REJ:
2017 l2cap_command_rej(conn, &cmd, data);
2020 case L2CAP_CONN_REQ:
2021 err = l2cap_connect_req(conn, &cmd, data);
2024 case L2CAP_CONN_RSP:
2025 err = l2cap_connect_rsp(conn, &cmd, data);
2028 case L2CAP_CONF_REQ:
2029 err = l2cap_config_req(conn, &cmd, cmd_len, data);
2032 case L2CAP_CONF_RSP:
2033 err = l2cap_config_rsp(conn, &cmd, data);
2036 case L2CAP_DISCONN_REQ:
2037 err = l2cap_disconnect_req(conn, &cmd, data);
2040 case L2CAP_DISCONN_RSP:
2041 err = l2cap_disconnect_rsp(conn, &cmd, data);
2044 case L2CAP_ECHO_REQ:
	/* Echo is answered inline by reflecting the payload back. */
2045 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
2048 case L2CAP_ECHO_RSP:
2051 case L2CAP_INFO_REQ:
2052 err = l2cap_information_req(conn, &cmd, data);
2055 case L2CAP_INFO_RSP:
2056 err = l2cap_information_rsp(conn, &cmd, data);
2060 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
	/* Any handler error results in a Command Reject to the peer. */
2066 struct l2cap_cmd_rej rej;
2067 BT_DBG("error %d", err);
2069 /* FIXME: Map err to a valid reason */
2070 rej.reason = cpu_to_le16(0);
2071 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Deliver a data frame to the connection-oriented channel identified
 * by 'cid'. Frames for unknown CIDs, channels not in BT_CONNECTED, or
 * exceeding the channel's incoming MTU are dropped.
 * NOTE(review): drop/unlock/return lines are elided in this view. */
2081 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
2085 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
2087 BT_DBG("unknown cid 0x%4.4x", cid);
2091 BT_DBG("sk %p, len %d", sk, skb->len);
2093 if (sk->sk_state != BT_CONNECTED)
	/* Enforce the negotiated incoming MTU. */
2096 if (l2cap_pi(sk)->imtu < skb->len)
2099 /* If socket recv buffers overflows we drop data here
2100 * which is *bad* because L2CAP has to be reliable.
2101 * But we don't have any other choice. L2CAP doesn't
2102 * provide flow control mechanism. */
2104 if (!sock_queue_rcv_skb(sk, skb))
/* Deliver a connectionless (CID 0x0002) frame to a socket bound to the
 * given PSM on this adapter. The frame is dropped if no matching socket
 * exists, the socket state is wrong, or the payload exceeds its MTU. */
2117 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
	/* State 0 matches any socket state in the PSM lookup. */
2121 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
2125 BT_DBG("sk %p, len %d", sk, skb->len);
2127 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
2130 if (l2cap_pi(sk)->imtu < skb->len)
2133 if (!sock_queue_rcv_skb(sk, skb))
2140 if (sk) bh_unlock_sock(sk);
/* Route one complete, reassembled L2CAP frame by destination CID:
 * 0x0001 -> signalling channel, 0x0002 -> connectionless (the first two
 * bytes of the payload carry the PSM), anything else -> a
 * connection-oriented data channel. */
2144 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2146 struct l2cap_hdr *lh = (void *) skb->data;
	/* Strip the basic L2CAP header; cid/len were read from it. */
2150 skb_pull(skb, L2CAP_HDR_SIZE);
2151 cid = __le16_to_cpu(lh->cid);
2152 len = __le16_to_cpu(lh->len);
2154 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2158 l2cap_sig_channel(conn, skb);
	/* PSM is at the start of the connectionless payload and may be
	 * unaligned within the skb. */
2162 psm = get_unaligned((__le16 *) skb->data);
2164 l2cap_conless_channel(conn, psm, skb);
2168 l2cap_data_channel(conn, cid, skb);
2173 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an ACL connection request arrived for this adapter.
 * Scan listening L2CAP sockets and combine their link-mode requirements
 * into an accept mask; a socket bound to this exact adapter address
 * (lm1/exact) takes precedence over wildcard BDADDR_ANY binds (lm2). */
2175 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2177 int exact = 0, lm1 = 0, lm2 = 0;
2178 register struct sock *sk;
2179 struct hlist_node *node;
	/* L2CAP runs only over ACL links. */
2181 if (type != ACL_LINK)
2184 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2186 /* Find listening sockets and check their link_mode */
2187 read_lock(&l2cap_sk_list.lock);
2188 sk_for_each(sk, node, &l2cap_sk_list.head) {
2189 if (sk->sk_state != BT_LISTEN)
2192 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2193 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2195 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
2196 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2198 read_unlock(&l2cap_sk_list.lock);
2200 return exact ? lm1 : lm2;
/* HCI callback: ACL connection attempt completed.
 * On success attach/create the L2CAP connection object and mark it
 * ready; on failure tear down any L2CAP state for this ACL link. */
2203 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2205 struct l2cap_conn *conn;
2207 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2209 if (hcon->type != ACL_LINK)
2213 conn = l2cap_conn_add(hcon, status);
2215 l2cap_conn_ready(conn);
	/* Failure path: propagate the HCI status as an errno. */
2217 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: the ACL link dropped — destroy the L2CAP connection,
 * converting the HCI reason code to an errno for the channels. */
2222 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
2224 BT_DBG("hcon %p reason %d", hcon, reason);
2226 if (hcon->type != ACL_LINK)
2229 l2cap_conn_del(hcon, bt_err(reason));
/* HCI callback: authentication on the ACL link completed.
 * Walk every channel on the connection and resume whatever handshake
 * was waiting on security: outgoing channels (BT_CONNECT) now send
 * their Connect Request; incoming channels (BT_CONNECT2) answer the
 * peer's Connect Request with success or a security block.
 * NOTE(review): several guard lines (status checks, lock_sock calls,
 * continue statements) are elided in this view — the exact conditions
 * under which a channel is skipped must be checked in the full file. */
2234 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
2236 struct l2cap_chan_list *l;
2237 struct l2cap_conn *conn = hcon->l2cap_data;
2243 l = &conn->chan_list;
2245 BT_DBG("conn %p", conn);
2247 read_lock(&l->lock);
2249 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2250 struct l2cap_pinfo *pi = l2cap_pi(sk);
	/* Channels that require encryption cannot proceed on an
	 * authenticated-but-unencrypted link. */
2254 if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
2255 !(hcon->link_mode & HCI_LM_ENCRYPT) &&
2261 if (sk->sk_state == BT_CONNECT) {
	/* Outgoing connect was deferred for auth: send it now. */
2263 struct l2cap_conn_req req;
2264 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2265 req.psm = l2cap_pi(sk)->psm;
2267 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2269 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2270 L2CAP_CONN_REQ, sizeof(req), &req);
2272 l2cap_sock_clear_timer(sk);
2273 l2cap_sock_set_timer(sk, HZ / 10);
2275 } else if (sk->sk_state == BT_CONNECT2) {
	/* Incoming connect was pending on auth: answer it. */
2276 struct l2cap_conn_rsp rsp;
2280 sk->sk_state = BT_CONFIG;
2281 result = L2CAP_CR_SUCCESS;
	/* Auth failed: refuse with a security block and schedule
	 * a quick teardown. */
2283 sk->sk_state = BT_DISCONN;
2284 l2cap_sock_set_timer(sk, HZ / 10);
2285 result = L2CAP_CR_SEC_BLOCK;
2288 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2289 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2290 rsp.result = cpu_to_le16(result);
2291 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2292 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2293 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2299 read_unlock(&l->lock);
/* HCI callback: encryption state on the ACL link changed.
 * Mirrors l2cap_auth_cfm: channels requiring encryption are closed if
 * encryption was switched off; otherwise deferred outgoing/incoming
 * connection handshakes are resumed.
 * NOTE(review): guard lines (status checks, lock_sock, continue) are
 * elided in this view — confirm the skip conditions in the full file. */
2304 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2306 struct l2cap_chan_list *l;
2307 struct l2cap_conn *conn = hcon->l2cap_data;
2313 l = &conn->chan_list;
2315 BT_DBG("conn %p", conn);
2317 read_lock(&l->lock);
2319 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2320 struct l2cap_pinfo *pi = l2cap_pi(sk);
	/* Encryption dropped on an established channel that requires
	 * it: force the channel closed. */
2324 if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
2325 (sk->sk_state == BT_CONNECTED ||
2326 sk->sk_state == BT_CONFIG) &&
2327 !status && encrypt == 0x00) {
2328 __l2cap_sock_close(sk, ECONNREFUSED);
2333 if (sk->sk_state == BT_CONNECT) {
	/* Outgoing connect was deferred for encryption: send it now. */
2335 struct l2cap_conn_req req;
2336 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2337 req.psm = l2cap_pi(sk)->psm;
2339 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2341 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2342 L2CAP_CONN_REQ, sizeof(req), &req);
2344 l2cap_sock_clear_timer(sk);
2345 l2cap_sock_set_timer(sk, HZ / 10);
2347 } else if (sk->sk_state == BT_CONNECT2) {
	/* Incoming connect was pending on encryption: answer it. */
2348 struct l2cap_conn_rsp rsp;
2352 sk->sk_state = BT_CONFIG;
2353 result = L2CAP_CR_SUCCESS;
	/* Encryption setup failed: refuse with a security block. */
2355 sk->sk_state = BT_DISCONN;
2356 l2cap_sock_set_timer(sk, HZ / 10);
2357 result = L2CAP_CR_SEC_BLOCK;
2360 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2361 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2362 rsp.result = cpu_to_le16(result);
2363 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2364 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2365 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2371 read_unlock(&l->lock);
/* HCI callback: ACL data received from the controller.
 * Reassembles L2CAP frames that were fragmented across ACL packets:
 * an ACL_START fragment carries the L2CAP header and establishes the
 * expected total length; continuation fragments are appended to
 * conn->rx_skb until rx_len reaches zero, then the complete frame is
 * handed to l2cap_recv_frame(). Any inconsistency marks the connection
 * unreliable (ECOMM) and drops the partial frame.
 * NOTE(review): several guard conditions and drop/return lines are
 * elided in this view. */
2376 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2378 struct l2cap_conn *conn = hcon->l2cap_data;
2380 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2383 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2385 if (flags & ACL_START) {
2386 struct l2cap_hdr *hdr;
	/* A start fragment while reassembly is in progress means the
	 * previous frame was never completed: discard it. */
2390 BT_ERR("Unexpected start frame (len %d)", skb->len);
2391 kfree_skb(conn->rx_skb);
2392 conn->rx_skb = NULL;
2394 l2cap_conn_unreliable(conn, ECOMM);
	/* Start fragment must at least contain the L2CAP header. */
2398 BT_ERR("Frame is too short (len %d)", skb->len);
2399 l2cap_conn_unreliable(conn, ECOMM);
2403 hdr = (struct l2cap_hdr *) skb->data;
2404 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2406 if (len == skb->len) {
2407 /* Complete frame received */
2408 l2cap_recv_frame(conn, skb);
2412 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2414 if (skb->len > len) {
2415 BT_ERR("Frame is too long (len %d, expected len %d)",
2417 l2cap_conn_unreliable(conn, ECOMM);
2421 /* Allocate skb for the complete frame (with header) */
2422 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2425 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
	/* Remaining bytes still expected from continuation fragments. */
2427 conn->rx_len = len - skb->len;
2429 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
	/* Continuation fragment with no reassembly in progress. */
2431 if (!conn->rx_len) {
2432 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2433 l2cap_conn_unreliable(conn, ECOMM);
2437 if (skb->len > conn->rx_len) {
2438 BT_ERR("Fragment is too long (len %d, expected %d)",
2439 skb->len, conn->rx_len);
2440 kfree_skb(conn->rx_skb);
2441 conn->rx_skb = NULL;
2443 l2cap_conn_unreliable(conn, ECOMM);
2447 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2449 conn->rx_len -= skb->len;
2451 if (!conn->rx_len) {
2452 /* Complete frame received */
2453 l2cap_recv_frame(conn, conn->rx_skb);
2454 conn->rx_skb = NULL;
/* sysfs 'l2cap' class attribute: dump one line per L2CAP socket with
 * addresses, state, PSM, CIDs, MTUs and link mode.
 * NOTE(review): 'str' initialization and the return line are elided in
 * this view; the unbounded sprintf into a page-sized buffer is a known
 * limitation of this interface style. */
2463 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2466 struct hlist_node *node;
2469 read_lock_bh(&l2cap_sk_list.lock);
2471 sk_for_each(sk, node, &l2cap_sk_list.head) {
2472 struct l2cap_pinfo *pi = l2cap_pi(sk);
2474 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2475 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2476 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2477 pi->imtu, pi->omtu, pi->link_mode);
2480 read_unlock_bh(&l2cap_sk_list.lock);
/* Read-only class attribute: /sys/class/bluetooth/l2cap. */
2485 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/* Socket operations for L2CAP sockets. L2CAP-specific behavior is
 * implemented locally; generic Bluetooth socket behavior (recvmsg,
 * poll, ioctl) is delegated to the bt_sock_* helpers, and unsupported
 * operations use the sock_no_* stubs. */
2487 static const struct proto_ops l2cap_sock_ops = {
2488 .family = PF_BLUETOOTH,
2489 .owner = THIS_MODULE,
2490 .release = l2cap_sock_release,
2491 .bind = l2cap_sock_bind,
2492 .connect = l2cap_sock_connect,
2493 .listen = l2cap_sock_listen,
2494 .accept = l2cap_sock_accept,
2495 .getname = l2cap_sock_getname,
2496 .sendmsg = l2cap_sock_sendmsg,
2497 .recvmsg = bt_sock_recvmsg,
2498 .poll = bt_sock_poll,
2499 .ioctl = bt_sock_ioctl,
2500 .mmap = sock_no_mmap,
2501 .socketpair = sock_no_socketpair,
2502 .shutdown = l2cap_sock_shutdown,
2503 .setsockopt = l2cap_sock_setsockopt,
2504 .getsockopt = l2cap_sock_getsockopt
/* Registration record for socket(PF_BLUETOOTH, ..., BTPROTO_L2CAP). */
2507 static struct net_proto_family l2cap_sock_family_ops = {
2508 .family = PF_BLUETOOTH,
2509 .owner = THIS_MODULE,
2510 .create = l2cap_sock_create,
/* Hooks registered with the HCI core: connection, disconnection,
 * security and ACL-data events are routed to the handlers above. */
2513 static struct hci_proto l2cap_hci_proto = {
2515 .id = HCI_PROTO_L2CAP,
2516 .connect_ind = l2cap_connect_ind,
2517 .connect_cfm = l2cap_connect_cfm,
2518 .disconn_ind = l2cap_disconn_ind,
2519 .auth_cfm = l2cap_auth_cfm,
2520 .encrypt_cfm = l2cap_encrypt_cfm,
2521 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, the Bluetooth socket family and the
 * HCI protocol handler, unwinding in reverse order on failure. The
 * sysfs file is best-effort only (failure is logged, not fatal).
 * NOTE(review): some error-branch and goto lines are elided in this
 * view; the cleanup labels are in the full file. */
2524 static int __init l2cap_init(void)
2528 err = proto_register(&l2cap_proto, 0);
2532 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2534 BT_ERR("L2CAP socket registration failed");
2538 err = hci_register_proto(&l2cap_hci_proto);
2540 BT_ERR("L2CAP protocol registration failed");
	/* Undo the socket-family registration before bailing out. */
2541 bt_sock_unregister(BTPROTO_L2CAP);
2545 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2546 BT_ERR("Failed to create L2CAP info file");
2548 BT_INFO("L2CAP ver %s", VERSION);
2549 BT_INFO("L2CAP socket layer initialized");
	/* Shared error path: roll back the proto registration. */
2554 proto_unregister(&l2cap_proto);
/* Module exit: tear down everything l2cap_init() registered, in
 * reverse order of registration. */
2558 static void __exit l2cap_exit(void)
2560 class_remove_file(bt_class, &class_attr_l2cap);
2562 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2563 BT_ERR("L2CAP socket unregistration failed");
2565 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2566 BT_ERR("L2CAP protocol unregistration failed");
2568 proto_unregister(&l2cap_proto);
/* Intentionally empty — exists only so dependent modules can reference
 * an exported symbol and pull this module in via the module loader. */
2571 void l2cap_load(void)
2573 /* Dummy function to trigger automatic L2CAP module loading by
2574 * other modules that use L2CAP sockets but don't use any other
2575 * symbols from it. */
2578 EXPORT_SYMBOL(l2cap_load);
/* Module entry/exit hooks and metadata. */
2580 module_init(l2cap_init);
2581 module_exit(l2cap_exit);
2583 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
2584 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2585 MODULE_VERSION(VERSION);
2586 MODULE_LICENSE("GPL");
/* Enables autoload by Bluetooth protocol number (0 = BTPROTO_L2CAP). */
2587 MODULE_ALIAS("bt-proto-0");