2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
46 #include <asm/system.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
/* Module version string reported via MODULE_VERSION/proc. */
53 #define VERSION "2.14"
/* Enhanced Retransmission Mode is disabled by default. */
55 static int enable_ertm = 0;
/* Feature mask and fixed-channel bitmap advertised in information responses. */
57 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
58 static u8 l2cap_fixed_chan[8] = { 0x02, };
60 static const struct proto_ops l2cap_sock_ops;
/* Global list of all L2CAP sockets, protected by its own rwlock. */
62 static struct bt_sock_list l2cap_sk_list = {
63 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for routines referenced before their definitions. */
66 static void __l2cap_sock_close(struct sock *sk, int reason);
67 static void l2cap_sock_close(struct sock *sk);
68 static void l2cap_sock_kill(struct sock *sk);
70 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
71 u8 code, u8 ident, u16 dlen, void *data);
73 /* ---- L2CAP timers ---- */
/* sk_timer callback: choose an errno from the socket state and close the
 * channel.  NOTE(review): locking and final cleanup lines are elided in
 * this snapshot. */
74 static void l2cap_sock_timeout(unsigned long arg)
76 struct sock *sk = (struct sock *) arg;
79 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* A timeout while connected/configuring, or while connecting at any
 * security level above SDP, is reported as "connection refused". */
83 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
84 reason = ECONNREFUSED;
85 else if (sk->sk_state == BT_CONNECT &&
86 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
87 reason = ECONNREFUSED;
91 __l2cap_sock_close(sk, reason);
/* (Re)arm sk_timer to fire 'timeout' jiffies from now. */
99 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
101 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
102 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending sk_timer. */
105 static void l2cap_sock_clear_timer(struct sock *sk)
107 BT_DBG("sock %p state %d", sk, sk->sk_state);
108 sk_stop_timer(sk, &sk->sk_timer);
111 /* ---- L2CAP channels ---- */
/* Linear scan of a connection's channel list by destination CID.
 * Callers are expected to hold the list lock (double-underscore variant). */
112 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
115 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
116 if (l2cap_pi(s)->dcid == cid)
/* Same scan, keyed by source CID. */
122 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
125 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
126 if (l2cap_pi(s)->scid == cid)
132 /* Find channel with given SCID.
133 * Returns locked socket */
134 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
138 s = __l2cap_get_chan_by_scid(l, cid);
141 read_unlock(&l->lock);
/* Look up a channel by the ident of an outstanding signalling command. */
145 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
148 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
149 if (l2cap_pi(s)->ident == ident)
/* Read-locked wrapper around __l2cap_get_chan_by_ident(). */
155 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
159 s = __l2cap_get_chan_by_ident(l, ident);
162 read_unlock(&l->lock);
/* Allocate the first unused source CID in the dynamic range. */
166 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
168 u16 cid = L2CAP_CID_DYN_START;
170 for (; cid < L2CAP_CID_DYN_END; cid++) {
171 if (!__l2cap_get_chan_by_scid(l, cid))
/* Insert 'sk' at the head of the doubly linked channel list.
 * Caller must hold the list write lock. */
178 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
183 l2cap_pi(l->head)->prev_c = sk;
185 l2cap_pi(sk)->next_c = l->head;
186 l2cap_pi(sk)->prev_c = NULL;
/* Unlink 'sk' from the channel list, taking the write lock itself. */
190 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
192 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
194 write_lock_bh(&l->lock);
199 l2cap_pi(next)->prev_c = prev;
201 l2cap_pi(prev)->next_c = next;
202 write_unlock_bh(&l->lock);
/* Attach 'sk' to connection 'conn': assign CIDs according to the socket
 * type, link it into the channel list, and (when 'parent' is given)
 * queue it on the listening parent's accept queue. */
207 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
209 struct l2cap_chan_list *l = &conn->chan_list;
211 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
212 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: default disconnect reason (remote user terminated connection). */
214 conn->disc_reason = 0x13;
216 l2cap_pi(sk)->conn = conn;
218 if (sk->sk_type == SOCK_SEQPACKET) {
219 /* Alloc CID for connection-oriented socket */
220 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
221 } else if (sk->sk_type == SOCK_DGRAM) {
222 /* Connectionless socket */
223 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
224 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
225 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
227 /* Raw socket can send/recv signalling messages only */
228 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
229 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
230 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
233 __l2cap_chan_link(l, sk);
236 bt_accept_enqueue(parent, sk);
240 * Must be called on the locked socket. */
/* Detach the channel from its connection (dropping the hci_conn ref),
 * mark the socket closed/zapped, and wake up waiters; an unaccepted
 * child is also unlinked from its listening parent. */
241 static void l2cap_chan_del(struct sock *sk, int err)
243 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
244 struct sock *parent = bt_sk(sk)->parent;
246 l2cap_sock_clear_timer(sk);
248 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
251 /* Unlink from channel list */
252 l2cap_chan_unlink(&conn->chan_list, sk);
253 l2cap_pi(sk)->conn = NULL;
254 hci_conn_put(conn->hcon);
257 sk->sk_state = BT_CLOSED;
258 sock_set_flag(sk, SOCK_ZAPPED);
264 bt_accept_unlink(sk);
265 parent->sk_data_ready(parent, 0);
267 sk->sk_state_change(sk);
270 /* Service level security */
/* Map the channel's PSM and requested security level to an HCI
 * authentication type, then ask the HCI layer to enforce it.
 * PSM 0x0001 (SDP) never requires bonding. */
271 static inline int l2cap_check_security(struct sock *sk)
273 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
276 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
277 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
278 auth_type = HCI_AT_NO_BONDING_MITM;
280 auth_type = HCI_AT_NO_BONDING;
/* SDP traffic is downgraded from LOW to the dedicated SDP level. */
282 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
283 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
285 switch (l2cap_pi(sk)->sec_level) {
286 case BT_SECURITY_HIGH:
287 auth_type = HCI_AT_GENERAL_BONDING_MITM;
289 case BT_SECURITY_MEDIUM:
290 auth_type = HCI_AT_GENERAL_BONDING;
293 auth_type = HCI_AT_NO_BONDING;
298 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling command identifier for this connection,
 * wrapping within the kernel-reserved 1..128 range. */
302 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
306 /* Get next available identificator.
307 * 1 - 128 are used by kernel.
308 * 129 - 199 are reserved.
309 * 200 - 254 are used by utilities like l2ping, etc.
312 spin_lock_bh(&conn->lock);
314 if (++conn->tx_ident > 128)
319 spin_unlock_bh(&conn->lock);
/* Build a signalling command skb and push it out over the ACL link. */
324 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
326 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
328 BT_DBG("code 0x%2.2x", code);
333 return hci_send_acl(conn->hcon, skb, 0);
/* Kick off channel establishment: if the peer's feature mask is already
 * known (or being fetched), send a Connection Request once security is
 * satisfied; otherwise send an Information Request first and start the
 * info timer. */
336 static void l2cap_do_start(struct sock *sk)
338 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
340 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature-mask exchange still in flight: wait for its completion. */
341 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
344 if (l2cap_check_security(sk)) {
345 struct l2cap_conn_req req;
346 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
347 req.psm = l2cap_pi(sk)->psm;
349 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
351 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
352 L2CAP_CONN_REQ, sizeof(req), &req);
355 struct l2cap_info_req req;
356 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
358 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
359 conn->info_ident = l2cap_get_ident(conn);
/* Bound the wait for the peer's information response. */
361 mod_timer(&conn->info_timer, jiffies +
362 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
364 l2cap_send_cmd(conn, conn->info_ident,
365 L2CAP_INFO_REQ, sizeof(req), &req);
369 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and advance its signalling:
 * BT_CONNECT channels send a Connection Request once security passes;
 * BT_CONNECT2 channels answer the pending Connection Request with
 * success, authorization-pending (deferred setup) or
 * authentication-pending, as appropriate. */
370 static void l2cap_conn_start(struct l2cap_conn *conn)
372 struct l2cap_chan_list *l = &conn->chan_list;
375 BT_DBG("conn %p", conn);
379 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented sockets take part in connect signalling. */
382 if (sk->sk_type != SOCK_SEQPACKET) {
387 if (sk->sk_state == BT_CONNECT) {
388 if (l2cap_check_security(sk)) {
389 struct l2cap_conn_req req;
390 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
391 req.psm = l2cap_pi(sk)->psm;
393 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
395 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
396 L2CAP_CONN_REQ, sizeof(req), &req);
398 } else if (sk->sk_state == BT_CONNECT2) {
399 struct l2cap_conn_rsp rsp;
400 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
401 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
403 if (l2cap_check_security(sk)) {
404 if (bt_sk(sk)->defer_setup) {
/* Deferred setup: report PEND and wake the listener so
 * user space can decide via accept()/recvmsg(). */
405 struct sock *parent = bt_sk(sk)->parent;
406 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
407 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
408 parent->sk_data_ready(parent, 0);
411 sk->sk_state = BT_CONFIG;
412 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
413 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
416 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
417 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
420 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
421 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
427 read_unlock(&l->lock);
/* The ACL link came up: non-SEQPACKET channels become connected at once;
 * connecting SEQPACKET channels proceed with L2CAP signalling. */
430 static void l2cap_conn_ready(struct l2cap_conn *conn)
432 struct l2cap_chan_list *l = &conn->chan_list;
435 BT_DBG("conn %p", conn);
439 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
442 if (sk->sk_type != SOCK_SEQPACKET) {
443 l2cap_sock_clear_timer(sk);
444 sk->sk_state = BT_CONNECTED;
445 sk->sk_state_change(sk);
446 } else if (sk->sk_state == BT_CONNECT)
452 read_unlock(&l->lock);
455 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate 'err' to every channel that asked for reliable delivery. */
456 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
458 struct l2cap_chan_list *l = &conn->chan_list;
461 BT_DBG("conn %p", conn);
465 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
466 if (l2cap_pi(sk)->force_reliable)
470 read_unlock(&l->lock);
/* Information-request timer expired: give up on the feature-mask
 * exchange and let pending channels proceed without it. */
473 static void l2cap_info_timeout(unsigned long arg)
475 struct l2cap_conn *conn = (void *) arg;
477 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
478 conn->info_ident = 0;
480 l2cap_conn_start(conn);
/* Create (or return the existing) L2CAP connection object for an ACL
 * link, initialising its locks, info timer and addresses from the HCI
 * connection. */
483 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
485 struct l2cap_conn *conn = hcon->l2cap_data;
490 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
494 hcon->l2cap_data = conn;
497 BT_DBG("hcon %p conn %p", hcon, conn);
/* Outgoing L2CAP PDUs are fragmented to the controller's ACL MTU. */
499 conn->mtu = hcon->hdev->acl_mtu;
500 conn->src = &hcon->hdev->bdaddr;
501 conn->dst = &hcon->dst;
505 setup_timer(&conn->info_timer, l2cap_info_timeout,
506 (unsigned long) conn);
508 spin_lock_init(&conn->lock);
509 rwlock_init(&conn->chan_list.lock);
/* 0x13: default disconnect reason (remote user terminated connection). */
511 conn->disc_reason = 0x13;
/* Tear down an L2CAP connection: drop any partial reassembly skb, close
 * every remaining channel with 'err', stop the info timer if it could
 * still be pending, and detach from the HCI connection. */
516 static void l2cap_conn_del(struct hci_conn *hcon, int err)
518 struct l2cap_conn *conn = hcon->l2cap_data;
524 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
526 kfree_skb(conn->rx_skb);
529 while ((sk = conn->chan_list.head)) {
531 l2cap_chan_del(sk, err);
536 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
537 del_timer_sync(&conn->info_timer);
539 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(). */
543 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
545 struct l2cap_chan_list *l = &conn->chan_list;
546 write_lock_bh(&l->lock);
547 __l2cap_chan_add(conn, sk, parent);
548 write_unlock_bh(&l->lock);
551 /* ---- Socket interface ---- */
/* Exact-match lookup of a bound socket by (source PSM, source bdaddr).
 * Caller must hold l2cap_sk_list.lock. */
552 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
555 struct hlist_node *node;
556 sk_for_each(sk, node, &l2cap_sk_list.head)
557 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
564 /* Find socket with psm and source bdaddr.
565 * Returns closest match.
/* Prefers an exact source-address match; falls back to a socket bound
 * to BDADDR_ANY (kept in sk1). */
567 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
569 struct sock *sk = NULL, *sk1 = NULL;
570 struct hlist_node *node;
572 sk_for_each(sk, node, &l2cap_sk_list.head) {
573 if (state && sk->sk_state != state)
576 if (l2cap_pi(sk)->psm == psm) {
578 if (!bacmp(&bt_sk(sk)->src, src))
582 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* 'node' non-NULL means the loop broke early on an exact match. */
586 return node ? sk : sk1;
589 /* Find socket with given address (psm, src).
590 * Returns locked socket */
591 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
594 read_lock(&l2cap_sk_list.lock);
595 s = __l2cap_get_sock_by_psm(state, psm, src);
598 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: release any queued skbs. */
602 static void l2cap_sock_destruct(struct sock *sk)
606 skb_queue_purge(&sk->sk_receive_queue);
607 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent itself closed and zapped. */
610 static void l2cap_sock_cleanup_listen(struct sock *parent)
614 BT_DBG("parent %p", parent);
616 /* Close not yet accepted channels */
617 while ((sk = bt_accept_dequeue(parent, NULL)))
618 l2cap_sock_close(sk);
620 parent->sk_state = BT_CLOSED;
621 sock_set_flag(parent, SOCK_ZAPPED);
624 /* Kill socket (only if zapped and orphan)
625 * Must be called on unlocked socket.
/* Final destruction: only zapped, socket-less (orphaned) sockets are
 * unlinked from the global list and marked dead. */
627 static void l2cap_sock_kill(struct sock *sk)
629 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
632 BT_DBG("sk %p state %d", sk, sk->sk_state);
634 /* Kill poor orphan */
635 bt_sock_unlink(&l2cap_sk_list, sk);
636 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close: listeners reap their children; connected
 * SEQPACKET channels send a Disconnection Request and wait in
 * BT_DISCONN; a pending incoming request (BT_CONNECT2) is refused with
 * an appropriate result before the channel is deleted.
 * Caller must hold the socket lock. */
640 static void __l2cap_sock_close(struct sock *sk, int reason)
642 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
644 switch (sk->sk_state) {
646 l2cap_sock_cleanup_listen(sk);
651 if (sk->sk_type == SOCK_SEQPACKET) {
652 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
653 struct l2cap_disconn_req req;
/* Bound the wait for the peer's Disconnection Response. */
655 sk->sk_state = BT_DISCONN;
656 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
658 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
659 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
660 l2cap_send_cmd(conn, l2cap_get_ident(conn),
661 L2CAP_DISCONN_REQ, sizeof(req), &req);
663 l2cap_chan_del(sk, reason);
667 if (sk->sk_type == SOCK_SEQPACKET) {
668 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
669 struct l2cap_conn_rsp rsp;
/* Refuse the pending request: security block when the application
 * deferred setup, otherwise "PSM not supported". */
672 if (bt_sk(sk)->defer_setup)
673 result = L2CAP_CR_SEC_BLOCK;
675 result = L2CAP_CR_BAD_PSM;
677 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
678 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
679 rsp.result = cpu_to_le16(result);
680 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
681 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
682 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
684 l2cap_chan_del(sk, reason);
689 l2cap_chan_del(sk, reason);
693 sock_set_flag(sk, SOCK_ZAPPED);
698 /* Must be called on unlocked socket. */
/* Locked wrapper: stop the timer and close with ECONNRESET. */
699 static void l2cap_sock_close(struct sock *sk)
701 l2cap_sock_clear_timer(sk);
703 __l2cap_sock_close(sk, ECONNRESET);
/* Initialise per-socket L2CAP state: children inherit their listening
 * parent's settings, fresh sockets get protocol defaults. */
708 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
710 struct l2cap_pinfo *pi = l2cap_pi(sk);
715 sk->sk_type = parent->sk_type;
716 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
718 pi->imtu = l2cap_pi(parent)->imtu;
719 pi->omtu = l2cap_pi(parent)->omtu;
720 pi->mode = l2cap_pi(parent)->mode;
721 pi->fcs = l2cap_pi(parent)->fcs;
722 pi->sec_level = l2cap_pi(parent)->sec_level;
723 pi->role_switch = l2cap_pi(parent)->role_switch;
724 pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* No parent: protocol defaults (basic mode, CRC16 FCS, low security). */
726 pi->imtu = L2CAP_DEFAULT_MTU;
728 pi->mode = L2CAP_MODE_BASIC;
729 pi->fcs = L2CAP_FCS_CRC16;
730 pi->sec_level = BT_SECURITY_LOW;
732 pi->force_reliable = 0;
735 /* Default config options */
737 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Protocol descriptor registered with the socket layer; obj_size makes
 * sk_alloc() reserve room for l2cap_pinfo. */
740 static struct proto l2cap_proto = {
742 .owner = THIS_MODULE,
743 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialise an L2CAP struct sock: destructor,
 * connect timeout, state, timer, and linkage into the global list. */
746 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
750 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
754 sock_init_data(sock, sk);
755 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
757 sk->sk_destruct = l2cap_sock_destruct;
758 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
760 sock_reset_flag(sk, SOCK_ZAPPED);
762 sk->sk_protocol = proto;
763 sk->sk_state = BT_OPEN;
765 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
767 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate the socket type (raw requires
 * CAP_NET_RAW), then allocate and initialise the sock. */
771 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
775 BT_DBG("sock %p", sock);
777 sock->state = SS_UNCONNECTED;
779 if (sock->type != SOCK_SEQPACKET &&
780 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
781 return -ESOCKTNOSUPPORT;
783 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
786 sock->ops = &l2cap_sock_ops;
788 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
792 l2cap_sock_init(sk, NULL);
/* bind(2): copy in the (possibly short) sockaddr_l2, enforce the
 * privileged-PSM rule, reject duplicate (psm, bdaddr) bindings, and
 * record the source address/PSM on the socket. */
796 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
798 struct sock *sk = sock->sk;
799 struct sockaddr_l2 la;
804 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Tolerate short sockaddrs from user space: zero-fill then copy. */
807 memset(&la, 0, sizeof(la));
808 len = min_t(unsigned int, sizeof(la), alen);
809 memcpy(&la, addr, len);
816 if (sk->sk_state != BT_OPEN) {
/* PSMs below 0x1001 are reserved; binding them needs
 * CAP_NET_BIND_SERVICE. */
821 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
822 !capable(CAP_NET_BIND_SERVICE)) {
827 write_lock_bh(&l2cap_sk_list.lock);
829 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
832 /* Save source address */
833 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
834 l2cap_pi(sk)->psm = la.l2_psm;
835 l2cap_pi(sk)->sport = la.l2_psm;
836 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) default to SDP security level. */
838 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
839 __le16_to_cpu(la.l2_psm) == 0x0003)
840 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
843 write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve a route to the destination, derive the HCI authentication
 * type from socket type/PSM/security level, create (or join) the ACL
 * link, attach the channel and start the connect timer.  If the link is
 * already up the channel proceeds immediately. */
850 static int l2cap_do_connect(struct sock *sk)
852 bdaddr_t *src = &bt_sk(sk)->src;
853 bdaddr_t *dst = &bt_sk(sk)->dst;
854 struct l2cap_conn *conn;
855 struct hci_conn *hcon;
856 struct hci_dev *hdev;
860 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
863 hdev = hci_get_route(dst, src);
865 return -EHOSTUNREACH;
867 hci_dev_lock_bh(hdev);
/* Raw sockets are used for dedicated bonding (pairing). */
871 if (sk->sk_type == SOCK_RAW) {
872 switch (l2cap_pi(sk)->sec_level) {
873 case BT_SECURITY_HIGH:
874 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
876 case BT_SECURITY_MEDIUM:
877 auth_type = HCI_AT_DEDICATED_BONDING;
880 auth_type = HCI_AT_NO_BONDING;
/* SDP (PSM 0x0001) never bonds; see l2cap_check_security(). */
883 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
884 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
885 auth_type = HCI_AT_NO_BONDING_MITM;
887 auth_type = HCI_AT_NO_BONDING;
889 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
890 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
892 switch (l2cap_pi(sk)->sec_level) {
893 case BT_SECURITY_HIGH:
894 auth_type = HCI_AT_GENERAL_BONDING_MITM;
896 case BT_SECURITY_MEDIUM:
897 auth_type = HCI_AT_GENERAL_BONDING;
900 auth_type = HCI_AT_NO_BONDING;
905 hcon = hci_connect(hdev, ACL_LINK, dst,
906 l2cap_pi(sk)->sec_level, auth_type);
910 conn = l2cap_conn_add(hcon, 0);
918 /* Update source addr of the socket */
919 bacpy(src, conn->src);
921 l2cap_chan_add(conn, sk, NULL);
923 sk->sk_state = BT_CONNECT;
924 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* ACL link already established: skip straight ahead. */
926 if (hcon->state == BT_CONNECTED) {
927 if (sk->sk_type != SOCK_SEQPACKET) {
928 l2cap_sock_clear_timer(sk);
929 sk->sk_state = BT_CONNECTED;
935 hci_dev_unlock_bh(hdev);
/* connect(2): validate the address and channel mode, record the
 * destination, start the connection and (unless non-blocking) wait for
 * BT_CONNECTED. */
940 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
942 struct sock *sk = sock->sk;
943 struct sockaddr_l2 la;
948 if (!addr || addr->sa_family != AF_BLUETOOTH)
951 memset(&la, 0, sizeof(la));
952 len = min_t(unsigned int, sizeof(la), alen);
953 memcpy(&la, addr, len);
/* Connection-oriented sockets must name a PSM. */
960 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
965 switch (l2cap_pi(sk)->mode) {
966 case L2CAP_MODE_BASIC:
968 case L2CAP_MODE_ERTM:
977 switch (sk->sk_state) {
981 /* Already connecting */
985 /* Already connected */
998 /* Set destination address and psm */
999 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1000 l2cap_pi(sk)->psm = la.l2_psm;
1002 err = l2cap_do_connect(sk);
1007 err = bt_sock_wait_state(sk, BT_CONNECTED,
1008 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET sockets in a supported mode may
 * listen; a socket bound without a PSM gets an odd PSM auto-assigned
 * from the dynamic range. */
1014 static int l2cap_sock_listen(struct socket *sock, int backlog)
1016 struct sock *sk = sock->sk;
1019 BT_DBG("sk %p backlog %d", sk, backlog);
1023 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1028 switch (l2cap_pi(sk)->mode) {
1029 case L2CAP_MODE_BASIC:
1031 case L2CAP_MODE_ERTM:
1040 if (!l2cap_pi(sk)->psm) {
1041 bdaddr_t *src = &bt_sk(sk)->src;
1046 write_lock_bh(&l2cap_sk_list.lock);
/* Valid dynamic PSMs are odd (step 2 from 0x1001). */
1048 for (psm = 0x1001; psm < 0x1100; psm += 2)
1049 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1050 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1051 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1056 write_unlock_bh(&l2cap_sk_list.lock);
1062 sk->sk_max_ack_backlog = backlog;
1063 sk->sk_ack_backlog = 0;
1064 sk->sk_state = BT_LISTEN;
/* accept(2): sleep (wake-one, interruptible) until a child appears on
 * the accept queue, the timeout expires, a signal arrives, or the
 * socket stops listening. */
1071 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1073 DECLARE_WAITQUEUE(wait, current);
1074 struct sock *sk = sock->sk, *nsk;
1078 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1080 if (sk->sk_state != BT_LISTEN) {
1085 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1087 BT_DBG("sk %p timeo %ld", sk, timeo);
1089 /* Wait for an incoming connection. (wake-one). */
1090 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1091 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1092 set_current_state(TASK_INTERRUPTIBLE);
/* Socket lock is released around schedule_timeout()
 * (release elided in this snapshot) and re-taken below. */
1099 timeo = schedule_timeout(timeo);
1100 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1102 if (sk->sk_state != BT_LISTEN) {
1107 if (signal_pending(current)) {
1108 err = sock_intr_errno(timeo);
1112 set_current_state(TASK_RUNNING);
1113 remove_wait_queue(sk->sk_sleep, &wait);
1118 newsock->state = SS_CONNECTED;
1120 BT_DBG("new socket %p", nsk);
/* getsockname(2)/getpeername(2): fill a sockaddr_l2 with either the
 * peer's (psm, bdaddr, dcid) or the local (sport, bdaddr, scid). */
1127 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1129 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1130 struct sock *sk = sock->sk;
1132 BT_DBG("sock %p, sk %p", sock, sk);
1134 addr->sa_family = AF_BLUETOOTH;
1135 *len = sizeof(struct sockaddr_l2);
/* peer branch */
1138 la->l2_psm = l2cap_pi(sk)->psm;
1139 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1140 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
/* local branch */
1142 la->l2_psm = l2cap_pi(sk)->sport;
1143 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1144 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* Build an outgoing L2CAP PDU from the user iovec — first fragment with
 * the L2CAP header (plus the 2-byte PSM for connectionless sockets),
 * continuation fragments chained on frag_list, each bounded by the ACL
 * MTU — and hand it to the HCI layer. */
1150 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
1152 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1153 struct sk_buff *skb, **frag;
1154 int err, hlen, count, sent = 0;
1155 struct l2cap_hdr *lh;
1157 BT_DBG("sk %p len %d", sk, len);
1159 /* First fragment (with L2CAP header) */
1160 if (sk->sk_type == SOCK_DGRAM)
1161 hlen = L2CAP_HDR_SIZE + 2;
1163 hlen = L2CAP_HDR_SIZE;
1165 count = min_t(unsigned int, (conn->mtu - hlen), len);
1167 skb = bt_skb_send_alloc(sk, hlen + count,
1168 msg->msg_flags & MSG_DONTWAIT, &err);
1172 /* Create L2CAP header */
1173 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1174 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1175 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Connectionless PDUs carry the PSM right after the header. */
1177 if (sk->sk_type == SOCK_DGRAM)
1178 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
1180 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1188 /* Continuation fragments (no L2CAP header) */
1189 frag = &skb_shinfo(skb)->frag_list;
1191 count = min_t(unsigned int, conn->mtu, len);
1193 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1197 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
1205 frag = &(*frag)->next;
1207 err = hci_send_acl(conn->hcon, skb, 0);
/* sendmsg(2): reject OOB data and over-MTU payloads (raw sockets are
 * exempt from the MTU check), then transmit if connected. */
1218 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1220 struct sock *sk = sock->sk;
1223 BT_DBG("sock %p, sk %p", sock, sk);
1225 err = sock_error(sk);
1229 if (msg->msg_flags & MSG_OOB)
1232 /* Check outgoing MTU */
1233 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1238 if (sk->sk_state == BT_CONNECTED)
1239 err = l2cap_do_send(sk, msg, len);
/* recvmsg(2): for a deferred-setup channel still in BT_CONNECT2, the
 * first read acts as user-space approval — send the success Connection
 * Response and move to BT_CONFIG; then defer to the common Bluetooth
 * receive path. */
1247 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1249 struct sock *sk = sock->sk;
1253 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1254 struct l2cap_conn_rsp rsp;
1256 sk->sk_state = BT_CONFIG;
1258 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1259 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1260 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1261 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1262 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1263 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1271 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP options: L2CAP_OPTIONS (mtu/mode etc.) and the link
 * mode bitmask, which is translated into sec_level/role_switch/
 * force_reliable. */
1274 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
1276 struct sock *sk = sock->sk;
1277 struct l2cap_options opts;
1281 BT_DBG("sk %p", sk);
/* Seed with current values so a short copy keeps the rest unchanged. */
1287 opts.imtu = l2cap_pi(sk)->imtu;
1288 opts.omtu = l2cap_pi(sk)->omtu;
1289 opts.flush_to = l2cap_pi(sk)->flush_to;
1290 opts.mode = l2cap_pi(sk)->mode;
1292 len = min_t(unsigned int, sizeof(opts), optlen);
1293 if (copy_from_user((char *) &opts, optval, len)) {
1298 l2cap_pi(sk)->imtu = opts.imtu;
1299 l2cap_pi(sk)->omtu = opts.omtu;
1300 l2cap_pi(sk)->mode = opts.mode;
1304 if (get_user(opt, (u32 __user *) optval)) {
/* Map legacy L2CAP_LM_* bits onto the security-level model; the
 * highest requested level wins. */
1309 if (opt & L2CAP_LM_AUTH)
1310 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1311 if (opt & L2CAP_LM_ENCRYPT)
1312 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1313 if (opt & L2CAP_LM_SECURE)
1314 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1316 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1317 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* setsockopt(2): SOL_L2CAP is routed to the legacy handler; SOL_BLUETOOTH
 * supports BT_SECURITY (SEQPACKET/RAW only) and BT_DEFER_SETUP (bound or
 * listening sockets only). */
1329 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1331 struct sock *sk = sock->sk;
1332 struct bt_security sec;
1336 BT_DBG("sk %p", sk);
1338 if (level == SOL_L2CAP)
1339 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1341 if (level != SOL_BLUETOOTH)
1342 return -ENOPROTOOPT;
1348 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1353 sec.level = BT_SECURITY_LOW;
1355 len = min_t(unsigned int, sizeof(sec), optlen);
1356 if (copy_from_user((char *) &sec, optval, len)) {
1361 if (sec.level < BT_SECURITY_LOW ||
1362 sec.level > BT_SECURITY_HIGH) {
1367 l2cap_pi(sk)->sec_level = sec.level;
1370 case BT_DEFER_SETUP:
1371 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1376 if (get_user(opt, (u32 __user *) optval)) {
1381 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getters: L2CAP_OPTIONS, the link-mode bitmask
 * reconstructed from sec_level/role_switch/force_reliable, and
 * L2CAP_CONNINFO (handle + device class, connected sockets only). */
1393 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1395 struct sock *sk = sock->sk;
1396 struct l2cap_options opts;
1397 struct l2cap_conninfo cinfo;
1401 BT_DBG("sk %p", sk);
1403 if (get_user(len, optlen))
1410 opts.imtu = l2cap_pi(sk)->imtu;
1411 opts.omtu = l2cap_pi(sk)->omtu;
1412 opts.flush_to = l2cap_pi(sk)->flush_to;
1413 opts.mode = l2cap_pi(sk)->mode;
1415 len = min_t(unsigned int, len, sizeof(opts));
1416 if (copy_to_user(optval, (char *) &opts, len))
/* Translate sec_level back into the legacy L2CAP_LM_* bits. */
1422 switch (l2cap_pi(sk)->sec_level) {
1423 case BT_SECURITY_LOW:
1424 opt = L2CAP_LM_AUTH;
1426 case BT_SECURITY_MEDIUM:
1427 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1429 case BT_SECURITY_HIGH:
1430 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1438 if (l2cap_pi(sk)->role_switch)
1439 opt |= L2CAP_LM_MASTER;
1441 if (l2cap_pi(sk)->force_reliable)
1442 opt |= L2CAP_LM_RELIABLE;
1444 if (put_user(opt, (u32 __user *) optval))
1448 case L2CAP_CONNINFO:
/* Connection info exists only when connected, or connect-pending
 * with deferred setup. */
1449 if (sk->sk_state != BT_CONNECTED &&
1450 !(sk->sk_state == BT_CONNECT2 &&
1451 bt_sk(sk)->defer_setup)) {
1456 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1457 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1459 len = min_t(unsigned int, len, sizeof(cinfo));
1460 if (copy_to_user(optval, (char *) &cinfo, len))
/* getsockopt(2): SOL_L2CAP goes to the legacy handler; SOL_BLUETOOTH
 * reports BT_SECURITY and BT_DEFER_SETUP, mirroring the setter's
 * socket-type and state restrictions. */
1474 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1476 struct sock *sk = sock->sk;
1477 struct bt_security sec;
1480 BT_DBG("sk %p", sk);
1482 if (level == SOL_L2CAP)
1483 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1485 if (level != SOL_BLUETOOTH)
1486 return -ENOPROTOOPT;
1488 if (get_user(len, optlen))
1495 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1500 sec.level = l2cap_pi(sk)->sec_level;
1502 len = min_t(unsigned int, len, sizeof(sec));
1503 if (copy_to_user(optval, (char *) &sec, len))
1508 case BT_DEFER_SETUP:
1509 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1514 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown(2): close once (sk_shutdown makes this idempotent) and, when
 * SO_LINGER is set, wait for the socket to reach BT_CLOSED. */
1528 static int l2cap_sock_shutdown(struct socket *sock, int how)
1530 struct sock *sk = sock->sk;
1533 BT_DBG("sock %p, sk %p", sock, sk);
1539 if (!sk->sk_shutdown) {
1540 sk->sk_shutdown = SHUTDOWN_MASK;
1541 l2cap_sock_clear_timer(sk);
1542 __l2cap_sock_close(sk, 0);
1544 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1545 err = bt_sock_wait_state(sk, BT_CLOSED,
/* close(2) backend: full shutdown, then destroy the orphaned socket. */
1552 static int l2cap_sock_release(struct socket *sock)
1554 struct sock *sk = sock->sk;
1557 BT_DBG("sock %p, sk %p", sock, sk);
1562 err = l2cap_sock_shutdown(sock, 2);
1565 l2cap_sock_kill(sk);
/* Configuration completed: clear config state and wake whichever side is
 * waiting — connect() for outgoing channels, accept() (via the parent)
 * for incoming ones. */
1569 static void l2cap_chan_ready(struct sock *sk)
1571 struct sock *parent = bt_sk(sk)->parent;
1573 BT_DBG("sk %p, parent %p", sk, parent);
1575 l2cap_pi(sk)->conf_state = 0;
1576 l2cap_sock_clear_timer(sk);
1579 /* Outgoing channel.
1580 * Wake up socket sleeping on connect.
1582 sk->sk_state = BT_CONNECTED;
1583 sk->sk_state_change(sk);
1585 /* Incoming channel.
1586 * Wake up socket sleeping on accept.
1588 parent->sk_data_ready(parent, 0);
1592 /* Copy frame to all raw sockets on that connection */
/* Clone the skb for every SOCK_RAW channel and queue the clone on its
 * receive queue (skipping the originating socket). */
1593 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1595 struct l2cap_chan_list *l = &conn->chan_list;
1596 struct sk_buff *nskb;
1599 BT_DBG("conn %p", conn);
1601 read_lock(&l->lock);
1602 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1603 if (sk->sk_type != SOCK_RAW)
1606 /* Don't send frame to the socket it came from */
1609 nskb = skb_clone(skb, GFP_ATOMIC);
/* Receive-buffer overflow: drop the clone for this socket. */
1613 if (sock_queue_rcv_skb(sk, nskb))
1616 read_unlock(&l->lock);
1619 /* ---- L2CAP signalling commands ---- */
/* Assemble a signalling PDU on the signalling CID: L2CAP header +
 * command header + payload, fragmented to the ACL MTU with the tail in
 * frag_list continuation skbs. */
1620 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1621 u8 code, u8 ident, u16 dlen, void *data)
1623 struct sk_buff *skb, **frag;
1624 struct l2cap_cmd_hdr *cmd;
1625 struct l2cap_hdr *lh;
1628 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1629 conn, code, ident, dlen);
1631 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1632 count = min_t(unsigned int, conn->mtu, len);
1634 skb = bt_skb_alloc(count, GFP_ATOMIC);
1638 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1639 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1640 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1642 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1645 cmd->len = cpu_to_le16(dlen);
/* Payload portion that fits in the first fragment. */
1648 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1649 memcpy(skb_put(skb, count), data, count);
1655 /* Continuation fragments (no L2CAP header) */
1656 frag = &skb_shinfo(skb)->frag_list;
1658 count = min_t(unsigned int, conn->mtu, len);
1660 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1664 memcpy(skb_put(*frag, count), data, count);
1669 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: return its total size and
 * deliver the value by width (1/2/4 bytes, or a pointer for anything
 * larger). */
1679 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1681 struct l2cap_conf_opt *opt = *ptr;
1684 len = L2CAP_CONF_OPT_SIZE + opt->len;
1692 *val = *((u8 *) opt->val);
1696 *val = __le16_to_cpu(*((__le16 *) opt->val));
1700 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Larger options are returned by reference, not by value. */
1704 *val = (unsigned long) opt->val;
1708 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Encode one configuration option at *ptr and advance the cursor. */
1712 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1714 struct l2cap_conf_opt *opt = *ptr;
1716 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1723 *((u8 *) opt->val) = val;
1727 *((__le16 *) opt->val) = cpu_to_le16(val);
1731 *((__le32 *) opt->val) = cpu_to_le32(val);
1735 memcpy(opt->val, (void *) val, len);
1739 *ptr += L2CAP_CONF_OPT_SIZE + len;
1742 static int l2cap_build_conf_req(struct sock *sk, void *data)
1744 struct l2cap_pinfo *pi = l2cap_pi(sk);
1745 struct l2cap_conf_req *req = data;
1746 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1747 void *ptr = req->data;
1749 BT_DBG("sk %p", sk);
1752 case L2CAP_MODE_BASIC:
1753 if (pi->imtu != L2CAP_DEFAULT_MTU)
1754 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1757 case L2CAP_MODE_ERTM:
1758 rfc.mode = L2CAP_MODE_ERTM;
1759 rfc.txwin_size = L2CAP_DEFAULT_RX_WINDOW;
1760 rfc.max_transmit = L2CAP_DEFAULT_MAX_RECEIVE;
1761 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
1762 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
1763 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_RX_APDU);
1765 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1766 sizeof(rfc), (unsigned long) &rfc);
1770 /* FIXME: Need actual value of the flush timeout */
1771 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1772 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1774 req->dcid = cpu_to_le16(pi->dcid);
1775 req->flags = cpu_to_le16(0);
1780 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1782 struct l2cap_pinfo *pi = l2cap_pi(sk);
1783 struct l2cap_conf_rsp *rsp = data;
1784 void *ptr = rsp->data;
1785 void *req = pi->conf_req;
1786 int len = pi->conf_len;
1787 int type, hint, olen;
1789 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1790 u16 mtu = L2CAP_DEFAULT_MTU;
1791 u16 result = L2CAP_CONF_SUCCESS;
1793 BT_DBG("sk %p", sk);
1795 while (len >= L2CAP_CONF_OPT_SIZE) {
1796 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1798 hint = type & L2CAP_CONF_HINT;
1799 type &= L2CAP_CONF_MASK;
1802 case L2CAP_CONF_MTU:
1806 case L2CAP_CONF_FLUSH_TO:
1810 case L2CAP_CONF_QOS:
1813 case L2CAP_CONF_RFC:
1814 if (olen == sizeof(rfc))
1815 memcpy(&rfc, (void *) val, olen);
1822 result = L2CAP_CONF_UNKNOWN;
1823 *((u8 *) ptr++) = type;
1828 if (result == L2CAP_CONF_SUCCESS) {
1829 /* Configure output options and let the other side know
1830 * which ones we don't like. */
1832 if (rfc.mode == L2CAP_MODE_BASIC) {
1834 result = L2CAP_CONF_UNACCEPT;
1837 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1840 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1842 result = L2CAP_CONF_UNACCEPT;
1844 memset(&rfc, 0, sizeof(rfc));
1845 rfc.mode = L2CAP_MODE_BASIC;
1847 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1848 sizeof(rfc), (unsigned long) &rfc);
1852 rsp->scid = cpu_to_le16(pi->dcid);
1853 rsp->result = cpu_to_le16(result);
1854 rsp->flags = cpu_to_le16(0x0000);
1859 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1861 struct l2cap_conf_rsp *rsp = data;
1862 void *ptr = rsp->data;
1864 BT_DBG("sk %p", sk);
1866 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1867 rsp->result = cpu_to_le16(result);
1868 rsp->flags = cpu_to_le16(flags);
1873 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1875 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1877 if (rej->reason != 0x0000)
1880 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1881 cmd->ident == conn->info_ident) {
1882 del_timer(&conn->info_timer);
1884 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1885 conn->info_ident = 0;
1887 l2cap_conn_start(conn);
1893 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1895 struct l2cap_chan_list *list = &conn->chan_list;
1896 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1897 struct l2cap_conn_rsp rsp;
1898 struct sock *sk, *parent;
1899 int result, status = L2CAP_CS_NO_INFO;
1901 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1902 __le16 psm = req->psm;
1904 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1906 /* Check if we have socket listening on psm */
1907 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1909 result = L2CAP_CR_BAD_PSM;
1913 /* Check if the ACL is secure enough (if not SDP) */
1914 if (psm != cpu_to_le16(0x0001) &&
1915 !hci_conn_check_link_mode(conn->hcon)) {
1916 conn->disc_reason = 0x05;
1917 result = L2CAP_CR_SEC_BLOCK;
1921 result = L2CAP_CR_NO_MEM;
1923 /* Check for backlog size */
1924 if (sk_acceptq_is_full(parent)) {
1925 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1929 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1933 write_lock_bh(&list->lock);
1935 /* Check if we already have channel with that dcid */
1936 if (__l2cap_get_chan_by_dcid(list, scid)) {
1937 write_unlock_bh(&list->lock);
1938 sock_set_flag(sk, SOCK_ZAPPED);
1939 l2cap_sock_kill(sk);
1943 hci_conn_hold(conn->hcon);
1945 l2cap_sock_init(sk, parent);
1946 bacpy(&bt_sk(sk)->src, conn->src);
1947 bacpy(&bt_sk(sk)->dst, conn->dst);
1948 l2cap_pi(sk)->psm = psm;
1949 l2cap_pi(sk)->dcid = scid;
1951 __l2cap_chan_add(conn, sk, parent);
1952 dcid = l2cap_pi(sk)->scid;
1954 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1956 l2cap_pi(sk)->ident = cmd->ident;
1958 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
1959 if (l2cap_check_security(sk)) {
1960 if (bt_sk(sk)->defer_setup) {
1961 sk->sk_state = BT_CONNECT2;
1962 result = L2CAP_CR_PEND;
1963 status = L2CAP_CS_AUTHOR_PEND;
1964 parent->sk_data_ready(parent, 0);
1966 sk->sk_state = BT_CONFIG;
1967 result = L2CAP_CR_SUCCESS;
1968 status = L2CAP_CS_NO_INFO;
1971 sk->sk_state = BT_CONNECT2;
1972 result = L2CAP_CR_PEND;
1973 status = L2CAP_CS_AUTHEN_PEND;
1976 sk->sk_state = BT_CONNECT2;
1977 result = L2CAP_CR_PEND;
1978 status = L2CAP_CS_NO_INFO;
1981 write_unlock_bh(&list->lock);
1984 bh_unlock_sock(parent);
1987 rsp.scid = cpu_to_le16(scid);
1988 rsp.dcid = cpu_to_le16(dcid);
1989 rsp.result = cpu_to_le16(result);
1990 rsp.status = cpu_to_le16(status);
1991 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1993 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
1994 struct l2cap_info_req info;
1995 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1997 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1998 conn->info_ident = l2cap_get_ident(conn);
2000 mod_timer(&conn->info_timer, jiffies +
2001 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2003 l2cap_send_cmd(conn, conn->info_ident,
2004 L2CAP_INFO_REQ, sizeof(info), &info);
2010 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2012 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2013 u16 scid, dcid, result, status;
2017 scid = __le16_to_cpu(rsp->scid);
2018 dcid = __le16_to_cpu(rsp->dcid);
2019 result = __le16_to_cpu(rsp->result);
2020 status = __le16_to_cpu(rsp->status);
2022 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2025 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2029 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2035 case L2CAP_CR_SUCCESS:
2036 sk->sk_state = BT_CONFIG;
2037 l2cap_pi(sk)->ident = 0;
2038 l2cap_pi(sk)->dcid = dcid;
2039 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2041 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2043 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2044 l2cap_build_conf_req(sk, req), req);
2048 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2052 l2cap_chan_del(sk, ECONNREFUSED);
2060 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2062 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2068 dcid = __le16_to_cpu(req->dcid);
2069 flags = __le16_to_cpu(req->flags);
2071 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2073 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2077 if (sk->sk_state == BT_DISCONN)
2080 /* Reject if config buffer is too small. */
2081 len = cmd_len - sizeof(*req);
2082 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2083 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2084 l2cap_build_conf_rsp(sk, rsp,
2085 L2CAP_CONF_REJECT, flags), rsp);
2090 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2091 l2cap_pi(sk)->conf_len += len;
2093 if (flags & 0x0001) {
2094 /* Incomplete config. Send empty response. */
2095 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2096 l2cap_build_conf_rsp(sk, rsp,
2097 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2101 /* Complete config. */
2102 len = l2cap_parse_conf_req(sk, rsp);
2106 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2108 /* Reset config buffer. */
2109 l2cap_pi(sk)->conf_len = 0;
2111 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2114 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2115 sk->sk_state = BT_CONNECTED;
2116 l2cap_chan_ready(sk);
2120 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2122 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2123 l2cap_build_conf_req(sk, buf), buf);
2131 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2133 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2134 u16 scid, flags, result;
2137 scid = __le16_to_cpu(rsp->scid);
2138 flags = __le16_to_cpu(rsp->flags);
2139 result = __le16_to_cpu(rsp->result);
2141 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2142 scid, flags, result);
2144 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2149 case L2CAP_CONF_SUCCESS:
2152 case L2CAP_CONF_UNACCEPT:
2153 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
2155 /* It does not make sense to adjust L2CAP parameters
2156 * that are currently defined in the spec. We simply
2157 * resend config request that we sent earlier. It is
2158 * stupid, but it helps qualification testing which
2159 * expects at least some response from us. */
2160 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2161 l2cap_build_conf_req(sk, req), req);
2166 sk->sk_state = BT_DISCONN;
2167 sk->sk_err = ECONNRESET;
2168 l2cap_sock_set_timer(sk, HZ * 5);
2170 struct l2cap_disconn_req req;
2171 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
2172 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2173 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2174 L2CAP_DISCONN_REQ, sizeof(req), &req);
2182 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2184 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2185 sk->sk_state = BT_CONNECTED;
2186 l2cap_chan_ready(sk);
2194 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2196 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2197 struct l2cap_disconn_rsp rsp;
2201 scid = __le16_to_cpu(req->scid);
2202 dcid = __le16_to_cpu(req->dcid);
2204 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2206 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2210 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2211 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2212 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2214 sk->sk_shutdown = SHUTDOWN_MASK;
2216 l2cap_chan_del(sk, ECONNRESET);
2219 l2cap_sock_kill(sk);
2223 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2225 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2229 scid = __le16_to_cpu(rsp->scid);
2230 dcid = __le16_to_cpu(rsp->dcid);
2232 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2234 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2238 l2cap_chan_del(sk, 0);
2241 l2cap_sock_kill(sk);
2245 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2247 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2250 type = __le16_to_cpu(req->type);
2252 BT_DBG("type 0x%4.4x", type);
2254 if (type == L2CAP_IT_FEAT_MASK) {
2256 u32 feat_mask = l2cap_feat_mask;
2257 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2258 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2259 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2261 feat_mask |= L2CAP_FEAT_ERTM;
2262 put_unaligned(cpu_to_le32(feat_mask), (__le32 *) rsp->data);
2263 l2cap_send_cmd(conn, cmd->ident,
2264 L2CAP_INFO_RSP, sizeof(buf), buf);
2265 } else if (type == L2CAP_IT_FIXED_CHAN) {
2267 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2268 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2269 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2270 memcpy(buf + 4, l2cap_fixed_chan, 8);
2271 l2cap_send_cmd(conn, cmd->ident,
2272 L2CAP_INFO_RSP, sizeof(buf), buf);
2274 struct l2cap_info_rsp rsp;
2275 rsp.type = cpu_to_le16(type);
2276 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2277 l2cap_send_cmd(conn, cmd->ident,
2278 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2284 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2286 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2289 type = __le16_to_cpu(rsp->type);
2290 result = __le16_to_cpu(rsp->result);
2292 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2294 del_timer(&conn->info_timer);
2296 if (type == L2CAP_IT_FEAT_MASK) {
2297 conn->feat_mask = get_unaligned_le32(rsp->data);
2299 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2300 struct l2cap_info_req req;
2301 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2303 conn->info_ident = l2cap_get_ident(conn);
2305 l2cap_send_cmd(conn, conn->info_ident,
2306 L2CAP_INFO_REQ, sizeof(req), &req);
2308 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2309 conn->info_ident = 0;
2311 l2cap_conn_start(conn);
2313 } else if (type == L2CAP_IT_FIXED_CHAN) {
2314 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2315 conn->info_ident = 0;
2317 l2cap_conn_start(conn);
2323 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
2325 u8 *data = skb->data;
2327 struct l2cap_cmd_hdr cmd;
2330 l2cap_raw_recv(conn, skb);
2332 while (len >= L2CAP_CMD_HDR_SIZE) {
2334 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2335 data += L2CAP_CMD_HDR_SIZE;
2336 len -= L2CAP_CMD_HDR_SIZE;
2338 cmd_len = le16_to_cpu(cmd.len);
2340 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
2342 if (cmd_len > len || !cmd.ident) {
2343 BT_DBG("corrupted command");
2348 case L2CAP_COMMAND_REJ:
2349 l2cap_command_rej(conn, &cmd, data);
2352 case L2CAP_CONN_REQ:
2353 err = l2cap_connect_req(conn, &cmd, data);
2356 case L2CAP_CONN_RSP:
2357 err = l2cap_connect_rsp(conn, &cmd, data);
2360 case L2CAP_CONF_REQ:
2361 err = l2cap_config_req(conn, &cmd, cmd_len, data);
2364 case L2CAP_CONF_RSP:
2365 err = l2cap_config_rsp(conn, &cmd, data);
2368 case L2CAP_DISCONN_REQ:
2369 err = l2cap_disconnect_req(conn, &cmd, data);
2372 case L2CAP_DISCONN_RSP:
2373 err = l2cap_disconnect_rsp(conn, &cmd, data);
2376 case L2CAP_ECHO_REQ:
2377 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
2380 case L2CAP_ECHO_RSP:
2383 case L2CAP_INFO_REQ:
2384 err = l2cap_information_req(conn, &cmd, data);
2387 case L2CAP_INFO_RSP:
2388 err = l2cap_information_rsp(conn, &cmd, data);
2392 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
2398 struct l2cap_cmd_rej rej;
2399 BT_DBG("error %d", err);
2401 /* FIXME: Map err to a valid reason */
2402 rej.reason = cpu_to_le16(0);
2403 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
2413 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
2417 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
2419 BT_DBG("unknown cid 0x%4.4x", cid);
2423 BT_DBG("sk %p, len %d", sk, skb->len);
2425 if (sk->sk_state != BT_CONNECTED)
2428 if (l2cap_pi(sk)->imtu < skb->len)
2431 /* If socket recv buffers overflows we drop data here
2432 * which is *bad* because L2CAP has to be reliable.
2433 * But we don't have any other choice. L2CAP doesn't
2434 * provide flow control mechanism. */
2436 if (!sock_queue_rcv_skb(sk, skb))
2449 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
2453 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
2457 BT_DBG("sk %p, len %d", sk, skb->len);
2459 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
2462 if (l2cap_pi(sk)->imtu < skb->len)
2465 if (!sock_queue_rcv_skb(sk, skb))
2477 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2479 struct l2cap_hdr *lh = (void *) skb->data;
2483 skb_pull(skb, L2CAP_HDR_SIZE);
2484 cid = __le16_to_cpu(lh->cid);
2485 len = __le16_to_cpu(lh->len);
2487 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2490 case L2CAP_CID_SIGNALING:
2491 l2cap_sig_channel(conn, skb);
2494 case L2CAP_CID_CONN_LESS:
2495 psm = get_unaligned((__le16 *) skb->data);
2497 l2cap_conless_channel(conn, psm, skb);
2501 l2cap_data_channel(conn, cid, skb);
2506 /* ---- L2CAP interface with lower layer (HCI) ---- */
2508 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2510 int exact = 0, lm1 = 0, lm2 = 0;
2511 register struct sock *sk;
2512 struct hlist_node *node;
2514 if (type != ACL_LINK)
2517 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2519 /* Find listening sockets and check their link_mode */
2520 read_lock(&l2cap_sk_list.lock);
2521 sk_for_each(sk, node, &l2cap_sk_list.head) {
2522 if (sk->sk_state != BT_LISTEN)
2525 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2526 lm1 |= HCI_LM_ACCEPT;
2527 if (l2cap_pi(sk)->role_switch)
2528 lm1 |= HCI_LM_MASTER;
2530 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
2531 lm2 |= HCI_LM_ACCEPT;
2532 if (l2cap_pi(sk)->role_switch)
2533 lm2 |= HCI_LM_MASTER;
2536 read_unlock(&l2cap_sk_list.lock);
2538 return exact ? lm1 : lm2;
2541 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2543 struct l2cap_conn *conn;
2545 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2547 if (hcon->type != ACL_LINK)
2551 conn = l2cap_conn_add(hcon, status);
2553 l2cap_conn_ready(conn);
2555 l2cap_conn_del(hcon, bt_err(status));
2560 static int l2cap_disconn_ind(struct hci_conn *hcon)
2562 struct l2cap_conn *conn = hcon->l2cap_data;
2564 BT_DBG("hcon %p", hcon);
2566 if (hcon->type != ACL_LINK || !conn)
2569 return conn->disc_reason;
2572 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
2574 BT_DBG("hcon %p reason %d", hcon, reason);
2576 if (hcon->type != ACL_LINK)
2579 l2cap_conn_del(hcon, bt_err(reason));
2584 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
2586 if (sk->sk_type != SOCK_SEQPACKET)
2589 if (encrypt == 0x00) {
2590 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
2591 l2cap_sock_clear_timer(sk);
2592 l2cap_sock_set_timer(sk, HZ * 5);
2593 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
2594 __l2cap_sock_close(sk, ECONNREFUSED);
2596 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
2597 l2cap_sock_clear_timer(sk);
2601 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2603 struct l2cap_chan_list *l;
2604 struct l2cap_conn *conn = hcon->l2cap_data;
2610 l = &conn->chan_list;
2612 BT_DBG("conn %p", conn);
2614 read_lock(&l->lock);
2616 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2619 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
2624 if (!status && (sk->sk_state == BT_CONNECTED ||
2625 sk->sk_state == BT_CONFIG)) {
2626 l2cap_check_encryption(sk, encrypt);
2631 if (sk->sk_state == BT_CONNECT) {
2633 struct l2cap_conn_req req;
2634 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2635 req.psm = l2cap_pi(sk)->psm;
2637 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2639 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2640 L2CAP_CONN_REQ, sizeof(req), &req);
2642 l2cap_sock_clear_timer(sk);
2643 l2cap_sock_set_timer(sk, HZ / 10);
2645 } else if (sk->sk_state == BT_CONNECT2) {
2646 struct l2cap_conn_rsp rsp;
2650 sk->sk_state = BT_CONFIG;
2651 result = L2CAP_CR_SUCCESS;
2653 sk->sk_state = BT_DISCONN;
2654 l2cap_sock_set_timer(sk, HZ / 10);
2655 result = L2CAP_CR_SEC_BLOCK;
2658 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2659 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2660 rsp.result = cpu_to_le16(result);
2661 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2662 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2663 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2669 read_unlock(&l->lock);
2674 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2676 struct l2cap_conn *conn = hcon->l2cap_data;
2678 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2681 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2683 if (flags & ACL_START) {
2684 struct l2cap_hdr *hdr;
2688 BT_ERR("Unexpected start frame (len %d)", skb->len);
2689 kfree_skb(conn->rx_skb);
2690 conn->rx_skb = NULL;
2692 l2cap_conn_unreliable(conn, ECOMM);
2696 BT_ERR("Frame is too short (len %d)", skb->len);
2697 l2cap_conn_unreliable(conn, ECOMM);
2701 hdr = (struct l2cap_hdr *) skb->data;
2702 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2704 if (len == skb->len) {
2705 /* Complete frame received */
2706 l2cap_recv_frame(conn, skb);
2710 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2712 if (skb->len > len) {
2713 BT_ERR("Frame is too long (len %d, expected len %d)",
2715 l2cap_conn_unreliable(conn, ECOMM);
2719 /* Allocate skb for the complete frame (with header) */
2720 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
2724 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2726 conn->rx_len = len - skb->len;
2728 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2730 if (!conn->rx_len) {
2731 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2732 l2cap_conn_unreliable(conn, ECOMM);
2736 if (skb->len > conn->rx_len) {
2737 BT_ERR("Fragment is too long (len %d, expected %d)",
2738 skb->len, conn->rx_len);
2739 kfree_skb(conn->rx_skb);
2740 conn->rx_skb = NULL;
2742 l2cap_conn_unreliable(conn, ECOMM);
2746 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2748 conn->rx_len -= skb->len;
2750 if (!conn->rx_len) {
2751 /* Complete frame received */
2752 l2cap_recv_frame(conn, conn->rx_skb);
2753 conn->rx_skb = NULL;
2762 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2765 struct hlist_node *node;
2768 read_lock_bh(&l2cap_sk_list.lock);
2770 sk_for_each(sk, node, &l2cap_sk_list.head) {
2771 struct l2cap_pinfo *pi = l2cap_pi(sk);
2773 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
2774 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2775 sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
2776 pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
2779 read_unlock_bh(&l2cap_sk_list.lock);
2784 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
2786 static const struct proto_ops l2cap_sock_ops = {
2787 .family = PF_BLUETOOTH,
2788 .owner = THIS_MODULE,
2789 .release = l2cap_sock_release,
2790 .bind = l2cap_sock_bind,
2791 .connect = l2cap_sock_connect,
2792 .listen = l2cap_sock_listen,
2793 .accept = l2cap_sock_accept,
2794 .getname = l2cap_sock_getname,
2795 .sendmsg = l2cap_sock_sendmsg,
2796 .recvmsg = l2cap_sock_recvmsg,
2797 .poll = bt_sock_poll,
2798 .ioctl = bt_sock_ioctl,
2799 .mmap = sock_no_mmap,
2800 .socketpair = sock_no_socketpair,
2801 .shutdown = l2cap_sock_shutdown,
2802 .setsockopt = l2cap_sock_setsockopt,
2803 .getsockopt = l2cap_sock_getsockopt
2806 static struct net_proto_family l2cap_sock_family_ops = {
2807 .family = PF_BLUETOOTH,
2808 .owner = THIS_MODULE,
2809 .create = l2cap_sock_create,
2812 static struct hci_proto l2cap_hci_proto = {
2814 .id = HCI_PROTO_L2CAP,
2815 .connect_ind = l2cap_connect_ind,
2816 .connect_cfm = l2cap_connect_cfm,
2817 .disconn_ind = l2cap_disconn_ind,
2818 .disconn_cfm = l2cap_disconn_cfm,
2819 .security_cfm = l2cap_security_cfm,
2820 .recv_acldata = l2cap_recv_acldata
2823 static int __init l2cap_init(void)
2827 err = proto_register(&l2cap_proto, 0);
2831 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2833 BT_ERR("L2CAP socket registration failed");
2837 err = hci_register_proto(&l2cap_hci_proto);
2839 BT_ERR("L2CAP protocol registration failed");
2840 bt_sock_unregister(BTPROTO_L2CAP);
2844 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2845 BT_ERR("Failed to create L2CAP info file");
2847 BT_INFO("L2CAP ver %s", VERSION);
2848 BT_INFO("L2CAP socket layer initialized");
2853 proto_unregister(&l2cap_proto);
2857 static void __exit l2cap_exit(void)
2859 class_remove_file(bt_class, &class_attr_l2cap);
2861 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2862 BT_ERR("L2CAP socket unregistration failed");
2864 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2865 BT_ERR("L2CAP protocol unregistration failed");
2867 proto_unregister(&l2cap_proto);
2870 void l2cap_load(void)
2872 /* Dummy function to trigger automatic L2CAP module loading by
2873 * other modules that use L2CAP sockets but don't use any other
2874 * symbols from it. */
2877 EXPORT_SYMBOL(l2cap_load);
2879 module_init(l2cap_init);
2880 module_exit(l2cap_exit);
2882 module_param(enable_ertm, bool, 0644);
2883 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
2885 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
2886 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2887 MODULE_VERSION(VERSION);
2888 MODULE_LICENSE("GPL");
2889 MODULE_ALIAS("bt-proto-0");