2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
/* Module-wide state and forward declarations.
 * NOTE(review): this is a fragmented listing — lines are missing between
 * the numbered entries (e.g. the tail of the l2cap_send_cmd prototype). */
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
60 static u8 l2cap_fixed_chan[8] = { 0x02, };
/* Workqueue used by the deferred "busy" handler below. */
63 static struct workqueue_struct *_busy_wq;
/* Global list of every L2CAP channel, guarded by chan_list_lock. */
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
68 static void l2cap_busy_work(struct work_struct *work);
70 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
71 u8 code, u8 ident, u16 dlen, void *data);
72 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
74 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
75 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
76 struct l2cap_chan *chan, int err);
78 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
80 /* ---- L2CAP channels ---- */
/* Take a reference on a channel (atomic refcount increment).
 * Fragment: function braces are missing from this listing. */
82 static inline void chan_hold(struct l2cap_chan *c)
84 atomic_inc(&c->refcnt);
/* Drop a channel reference; the line freeing the channel when the count
 * hits zero is missing from this listing. */
87 static inline void chan_put(struct l2cap_chan *c)
89 if (atomic_dec_and_test(&c->refcnt))
/* Look up a channel on this connection by destination CID.
 * Caller must hold conn->chan_lock. Loop body/return lines are missing. */
93 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
97 list_for_each_entry(c, &conn->chan_l, list) {
/* Look up a channel on this connection by source CID (lock held by caller).
 * The scid comparison and return lines are missing from this listing. */
105 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
107 struct l2cap_chan *c;
109 list_for_each_entry(c, &conn->chan_l, list) {
116 /* Find channel with given SCID.
117 * Returns locked socket */
/* Locking wrapper around __l2cap_get_chan_by_scid(); takes conn->chan_lock
 * for reading. Lines between lookup and unlock (presumably the socket
 * locking implied by the comment above) are missing — verify upstream. */
118 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
120 struct l2cap_chan *c;
122 read_lock(&conn->chan_lock);
123 c = __l2cap_get_chan_by_scid(conn, cid);
126 read_unlock(&conn->chan_lock);
/* Find the channel whose pending signalling request used this ident.
 * Caller holds conn->chan_lock; the return lines are missing here. */
130 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
132 struct l2cap_chan *c;
134 list_for_each_entry(c, &conn->chan_l, list) {
135 if (c->ident == ident)
/* Locking wrapper around __l2cap_get_chan_by_ident() (read-locks chan_lock). */
141 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
143 struct l2cap_chan *c;
145 read_lock(&conn->chan_lock);
146 c = __l2cap_get_chan_by_ident(conn, ident);
149 read_unlock(&conn->chan_lock);
/* Find a channel in the global list bound to this PSM/source address pair.
 * Caller must hold chan_list_lock; return lines missing from the listing. */
153 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
155 struct l2cap_chan *c;
157 list_for_each_entry(c, &chan_list, global_l) {
158 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a channel to a PSM. If @psm is nonzero it must be free for @src
 * (duplicate handling lines are missing); otherwise scan odd dynamic PSMs
 * 0x1001..0x10ff for a free one. Serialized by chan_list_lock. */
167 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
171 write_lock_bh(&chan_list_lock);
173 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
186 for (p = 0x1001; p < 0x1100; p += 2)
187 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
188 chan->psm = cpu_to_le16(p);
189 chan->sport = cpu_to_le16(p);
196 write_unlock_bh(&chan_list_lock);
/* Assign a fixed SCID to the channel under chan_list_lock.
 * The assignment line itself is missing from this listing. */
200 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
202 write_lock_bh(&chan_list_lock);
206 write_unlock_bh(&chan_list_lock);
/* Allocate the first unused dynamic CID on this connection by linear scan
 * of [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). Return lines missing. */
211 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
213 u16 cid = L2CAP_CID_DYN_START;
215 for (; cid < L2CAP_CID_DYN_END; cid++) {
216 if (!__l2cap_get_chan_by_scid(conn, cid))
/* (Re)arm a channel timer. mod_timer() returns 0 when the timer was not
 * already pending; what happens in that branch is missing from the listing
 * (upstream takes a channel reference — verify). */
223 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
/* NOTE(review): BT_DBG prints chan->sk under the label "chan %p". */
225 BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
227 if (!mod_timer(timer, jiffies + timeout))
/* Cancel a pending channel timer; the body of the if (upstream drops a
 * channel reference) is missing from this listing. */
231 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
233 BT_DBG("chan %p state %d", chan, chan->state);
235 if (timer_pending(timer) && del_timer(timer))
/* Move the channel to @state via the ops->state_change callback.
 * The line updating chan->state itself is missing from this listing. */
239 static void l2cap_state_change(struct l2cap_chan *chan, int state)
242 chan->ops->state_change(chan->data, state);
/* Channel timer callback (softirq context). If the socket is owned by
 * user space, retry in HZ/5; otherwise pick a close reason from the
 * channel state and tear the channel down. Lock/unlock and the default
 * reason (presumably ETIMEDOUT) are missing from this listing. */
245 static void l2cap_chan_timeout(unsigned long arg)
247 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
248 struct sock *sk = chan->sk;
251 BT_DBG("chan %p state %d", chan, chan->state);
255 if (sock_owned_by_user(sk)) {
256 /* sk is owned by user. Try again later */
257 __set_chan_timer(chan, HZ / 5);
263 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
264 reason = ECONNREFUSED;
265 else if (chan->state == BT_CONNECT &&
266 chan->sec_level != BT_SECURITY_SDP)
267 reason = ECONNREFUSED;
271 l2cap_chan_close(chan, reason);
275 chan->ops->close(chan->data);
/* Allocate and initialise a channel bound to @sk: link it into the global
 * chan_list, arm the per-channel timer, start in BT_OPEN with refcount 1.
 * The allocation-failure check and chan->sk assignment are missing here. */
279 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
281 struct l2cap_chan *chan;
283 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
289 write_lock_bh(&chan_list_lock);
290 list_add(&chan->global_l, &chan_list);
291 write_unlock_bh(&chan_list_lock);
293 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
295 chan->state = BT_OPEN;
297 atomic_set(&chan->refcnt, 1);
/* Unlink the channel from the global list; the final reference drop is
 * missing from this listing. */
302 void l2cap_chan_destroy(struct l2cap_chan *chan)
304 write_lock_bh(&chan_list_lock);
305 list_del(&chan->global_l);
306 write_unlock_bh(&chan_list_lock);
/* Attach a channel to a connection (caller holds conn->chan_lock for
 * writing): pick SCID/DCID/MTU by channel type and link into chan_l.
 * 0x13 is the HCI disconnect reason recorded as the default. */
311 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
313 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
314 chan->psm, chan->dcid);
316 conn->disc_reason = 0x13;
320 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
321 if (conn->hcon->type == LE_LINK) {
/* LE data goes over the fixed LE data channel with the LE default MTU. */
323 chan->omtu = L2CAP_LE_DEFAULT_MTU;
324 chan->scid = L2CAP_CID_LE_DATA;
325 chan->dcid = L2CAP_CID_LE_DATA;
327 /* Alloc CID for connection-oriented socket */
328 chan->scid = l2cap_alloc_cid(conn);
329 chan->omtu = L2CAP_DEFAULT_MTU;
331 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
332 /* Connectionless socket */
333 chan->scid = L2CAP_CID_CONN_LESS;
334 chan->dcid = L2CAP_CID_CONN_LESS;
335 chan->omtu = L2CAP_DEFAULT_MTU;
337 /* Raw socket can send/recv signalling messages only */
338 chan->scid = L2CAP_CID_SIGNALING;
339 chan->dcid = L2CAP_CID_SIGNALING;
340 chan->omtu = L2CAP_DEFAULT_MTU;
345 list_add(&chan->list, &conn->chan_l);
349 * Must be called on the locked socket. */
/* Detach the channel from its connection and mark the socket closed:
 * unlink from conn->chan_l, drop the hci_conn reference, wake the parent
 * for accepted children, then purge ERTM queues and timers unless both
 * config directions completed. Several lines (null checks, srej list
 * kfree) are missing from this listing. */
350 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
352 struct sock *sk = chan->sk;
353 struct l2cap_conn *conn = chan->conn;
354 struct sock *parent = bt_sk(sk)->parent;
356 __clear_chan_timer(chan);
358 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
361 /* Delete from channel list */
362 write_lock_bh(&conn->chan_lock);
363 list_del(&chan->list);
364 write_unlock_bh(&conn->chan_lock);
368 hci_conn_put(conn->hcon);
371 l2cap_state_change(chan, BT_CLOSED);
372 sock_set_flag(sk, SOCK_ZAPPED);
/* Child of a listening socket: unlink from the accept queue and notify
 * the parent; otherwise just signal the state change on sk. */
378 bt_accept_unlink(sk);
379 parent->sk_data_ready(parent, 0);
381 sk->sk_state_change(sk);
383 if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE &&
384 chan->conf_state & L2CAP_CONF_INPUT_DONE))
387 skb_queue_purge(&chan->tx_q);
389 if (chan->mode == L2CAP_MODE_ERTM) {
390 struct srej_list *l, *tmp;
392 __clear_retrans_timer(chan);
393 __clear_monitor_timer(chan);
394 __clear_ack_timer(chan);
396 skb_queue_purge(&chan->srej_q);
397 skb_queue_purge(&chan->busy_q);
399 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every not-yet-accepted child channel of a listening socket. */
406 static void l2cap_chan_cleanup_listen(struct sock *parent)
410 BT_DBG("parent %p", parent);
412 /* Close not yet accepted channels */
413 while ((sk = bt_accept_dequeue(parent, NULL))) {
414 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
415 __clear_chan_timer(chan);
417 l2cap_chan_close(chan, ECONNRESET);
419 chan->ops->close(chan->data);
/* Shut a channel down according to its current state. Switch-case labels
 * (BT_LISTEN, BT_CONNECTED/BT_CONFIG, BT_CONNECT2, etc.) and breaks are
 * missing from this listing; branches are inferred from the bodies. */
423 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
425 struct l2cap_conn *conn = chan->conn;
426 struct sock *sk = chan->sk;
428 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
430 switch (chan->state) {
/* Listening socket: close pending children, then zap. */
432 l2cap_chan_cleanup_listen(sk);
434 l2cap_state_change(chan, BT_CLOSED);
435 sock_set_flag(sk, SOCK_ZAPPED);
/* Established ACL channel: send a Disconnect Request and wait (re-armed
 * timer), otherwise delete immediately. */
440 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
441 conn->hcon->type == ACL_LINK) {
442 __clear_chan_timer(chan);
443 __set_chan_timer(chan, sk->sk_sndtimeo);
444 l2cap_send_disconn_req(conn, chan, reason);
446 l2cap_chan_del(chan, reason);
/* Incoming connection still pending (BT_CONNECT2): answer the Connect
 * Request with a reject/security-block result before deleting. */
450 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
451 conn->hcon->type == ACL_LINK) {
452 struct l2cap_conn_rsp rsp;
455 if (bt_sk(sk)->defer_setup)
456 result = L2CAP_CR_SEC_BLOCK;
458 result = L2CAP_CR_BAD_PSM;
459 l2cap_state_change(chan, BT_DISCONN);
461 rsp.scid = cpu_to_le16(chan->dcid);
462 rsp.dcid = cpu_to_le16(chan->scid);
463 rsp.result = cpu_to_le16(result);
464 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
465 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
469 l2cap_chan_del(chan, reason);
474 l2cap_chan_del(chan, reason);
/* Default: just zap the socket. */
478 sock_set_flag(sk, SOCK_ZAPPED);
/* Map channel type / security level to an HCI authentication requirement:
 * raw channels use dedicated bonding, PSM 0x0001 (SDP) never bonds, and
 * everything else uses general bonding. Default/break lines missing. */
483 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
485 if (chan->chan_type == L2CAP_CHAN_RAW) {
486 switch (chan->sec_level) {
487 case BT_SECURITY_HIGH:
488 return HCI_AT_DEDICATED_BONDING_MITM;
489 case BT_SECURITY_MEDIUM:
490 return HCI_AT_DEDICATED_BONDING;
492 return HCI_AT_NO_BONDING;
494 } else if (chan->psm == cpu_to_le16(0x0001)) {
/* SDP: quietly downgrade LOW to the dedicated SDP security level. */
495 if (chan->sec_level == BT_SECURITY_LOW)
496 chan->sec_level = BT_SECURITY_SDP;
498 if (chan->sec_level == BT_SECURITY_HIGH)
499 return HCI_AT_NO_BONDING_MITM;
501 return HCI_AT_NO_BONDING;
503 switch (chan->sec_level) {
504 case BT_SECURITY_HIGH:
505 return HCI_AT_GENERAL_BONDING_MITM;
506 case BT_SECURITY_MEDIUM:
507 return HCI_AT_GENERAL_BONDING;
509 return HCI_AT_NO_BONDING;
514 /* Service level security */
/* Ask the HCI layer whether the ACL link satisfies this channel's
 * security level / auth type. Nonzero means security is satisfied. */
515 static inline int l2cap_check_security(struct l2cap_chan *chan)
517 struct l2cap_conn *conn = chan->conn;
520 auth_type = l2cap_get_auth_type(chan);
522 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling identifier for this connection, wrapping
 * within the kernel's 1-128 range; the wrap assignment and return are
 * missing from this listing. */
525 static u8 l2cap_get_ident(struct l2cap_conn *conn)
529 /* Get next available identificator.
530 * 1 - 128 are used by kernel.
531 * 129 - 199 are reserved.
532 * 200 - 254 are used by utilities like l2ping, etc.
535 spin_lock_bh(&conn->lock);
537 if (++conn->tx_ident > 128)
542 spin_unlock_bh(&conn->lock);
/* Build a signalling command skb and push it over the ACL link, using
 * non-flushable packets when the controller supports them. The skb NULL
 * check and the flags fallback (ACL_START) are missing from this listing. */
547 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
549 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
552 BT_DBG("code 0x%2.2x", code);
557 if (lmp_no_flush_capable(conn->hcon->hdev))
558 flags = ACL_START_NO_FLUSH;
/* Signalling always forces the link active. */
562 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
564 hci_send_acl(conn->hcon, skb, flags);
/* Build and transmit an ERTM S-frame carrying @control. Adds 2 bytes for
 * the control field (hlen) and, with CRC16 FCS, appends a checksum over
 * header+control. F/P bits are folded in from pending conn_state flags.
 * The hlen += 2 for FCS and skb NULL check are missing from this listing. */
567 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
570 struct l2cap_hdr *lh;
571 struct l2cap_conn *conn = chan->conn;
572 int count, hlen = L2CAP_HDR_SIZE + 2;
575 if (chan->state != BT_CONNECTED)
578 if (chan->fcs == L2CAP_FCS_CRC16)
581 BT_DBG("chan %p, control 0x%2.2x", chan, control);
583 count = min_t(unsigned int, conn->mtu, hlen);
584 control |= L2CAP_CTRL_FRAME_TYPE;
/* Consume a pending Final bit, if any. */
586 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
587 control |= L2CAP_CTRL_FINAL;
588 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Consume a pending Poll bit, if any. */
591 if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
592 control |= L2CAP_CTRL_POLL;
593 chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
596 skb = bt_skb_alloc(count, GFP_ATOMIC);
600 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
601 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
602 lh->cid = cpu_to_le16(chan->dcid);
603 put_unaligned_le16(control, skb_put(skb, 2));
605 if (chan->fcs == L2CAP_FCS_CRC16) {
/* FCS covers everything except the trailing FCS field itself. */
606 u16 fcs = crc16(0, (u8 *)lh, count - 2);
607 put_unaligned_le16(fcs, skb_put(skb, 2));
610 if (lmp_no_flush_capable(conn->hcon->hdev))
611 flags = ACL_START_NO_FLUSH;
615 bt_cb(skb)->force_active = chan->force_active;
617 hci_send_acl(chan->conn->hcon, skb, flags);
/* Send RNR while locally busy (recording that RNR was sent), otherwise RR;
 * either way the current buffer_seq is stamped into ReqSeq. */
620 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
622 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
623 control |= L2CAP_SUPER_RCV_NOT_READY;
624 chan->conn_state |= L2CAP_CONN_RNR_SENT;
626 control |= L2CAP_SUPER_RCV_READY;
628 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
630 l2cap_send_sframe(chan, control);
/* True when no Connect Request is outstanding for this channel. */
633 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
635 return !(chan->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment: if the feature-mask exchange already
 * happened, send a Connect Request (security permitting); otherwise send
 * an Information Request first and arm the info timer. req.psm assignment
 * and some closing lines are missing from this listing. */
638 static void l2cap_do_start(struct l2cap_chan *chan)
640 struct l2cap_conn *conn = chan->conn;
642 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
643 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
646 if (l2cap_check_security(chan) &&
647 __l2cap_no_conn_pending(chan)) {
648 struct l2cap_conn_req req;
649 req.scid = cpu_to_le16(chan->scid);
652 chan->ident = l2cap_get_ident(conn);
653 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
655 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
659 struct l2cap_info_req req;
660 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
662 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
663 conn->info_ident = l2cap_get_ident(conn);
665 mod_timer(&conn->info_timer, jiffies +
666 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
668 l2cap_send_cmd(conn, conn->info_ident,
669 L2CAP_INFO_REQ, sizeof(req), &req);
/* Nonzero when @mode is supported both locally and by the remote
 * @feat_mask. The condition guarding the ERTM/streaming bits being added
 * to local_feat_mask (likely a module parameter) is missing here. */
673 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
675 u32 local_feat_mask = l2cap_feat_mask;
677 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
680 case L2CAP_MODE_ERTM:
681 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
682 case L2CAP_MODE_STREAMING:
683 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP Disconnect Request for @chan, first stopping all ERTM
 * timers, and move to BT_DISCONN. Error propagation to the socket
 * (sk->sk_err = err) is missing from this listing. */
689 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
692 struct l2cap_disconn_req req;
699 if (chan->mode == L2CAP_MODE_ERTM) {
700 __clear_retrans_timer(chan);
701 __clear_monitor_timer(chan);
702 __clear_ack_timer(chan);
705 req.dcid = cpu_to_le16(chan->dcid);
706 req.scid = cpu_to_le16(chan->scid);
707 l2cap_send_cmd(conn, l2cap_get_ident(conn),
708 L2CAP_DISCONN_REQ, sizeof(req), &req);
710 l2cap_state_change(chan, BT_DISCONN);
714 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and advance its state machine:
 * BT_CONNECT channels get a Connect Request (or are closed if their mode
 * is unsupported and STATE2_DEVICE is set); BT_CONNECT2 channels get a
 * Connect Response and, on success, a first Configure Request. Socket
 * lock/unlock lines and several continues are missing from this listing.
 * NOTE(review): the lock is taken with read_lock() but temporarily
 * released/retaken with the _bh variants around l2cap_chan_close() —
 * verify against upstream. */
715 static void l2cap_conn_start(struct l2cap_conn *conn)
717 struct l2cap_chan *chan, *tmp;
719 BT_DBG("conn %p", conn);
721 read_lock(&conn->chan_lock);
723 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
724 struct sock *sk = chan->sk;
728 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
733 if (chan->state == BT_CONNECT) {
734 struct l2cap_conn_req req;
736 if (!l2cap_check_security(chan) ||
737 !__l2cap_no_conn_pending(chan)) {
742 if (!l2cap_mode_supported(chan->mode,
744 && chan->conf_state &
745 L2CAP_CONF_STATE2_DEVICE) {
746 /* l2cap_chan_close() calls list_del(chan)
747 * so release the lock */
748 read_unlock_bh(&conn->chan_lock);
749 l2cap_chan_close(chan, ECONNRESET);
750 read_lock_bh(&conn->chan_lock);
755 req.scid = cpu_to_le16(chan->scid);
758 chan->ident = l2cap_get_ident(conn);
759 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
761 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
764 } else if (chan->state == BT_CONNECT2) {
765 struct l2cap_conn_rsp rsp;
767 rsp.scid = cpu_to_le16(chan->dcid);
768 rsp.dcid = cpu_to_le16(chan->scid);
770 if (l2cap_check_security(chan)) {
771 if (bt_sk(sk)->defer_setup) {
/* Deferred setup: report pending/authorization-pending and
 * wake the listening parent to decide. */
772 struct sock *parent = bt_sk(sk)->parent;
773 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
774 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
775 parent->sk_data_ready(parent, 0);
778 l2cap_state_change(chan, BT_CONFIG);
779 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
780 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
783 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
784 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
787 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
790 if (chan->conf_state & L2CAP_CONF_REQ_SENT ||
791 rsp.result != L2CAP_CR_SUCCESS) {
796 chan->conf_state |= L2CAP_CONF_REQ_SENT;
797 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
798 l2cap_build_conf_req(chan, buf), buf);
799 chan->num_conf_req++;
805 read_unlock(&conn->chan_lock);
808 /* Find socket with cid and source bdaddr.
809 * Returns closest match, locked.
/* Search the global channel list for (state, scid, src). An exact src
 * match returns immediately; a BDADDR_ANY binding is remembered in c1 as
 * the closest match. The final return of c1 is missing from this listing. */
811 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
813 struct l2cap_chan *c, *c1 = NULL;
815 read_lock(&chan_list_lock);
817 list_for_each_entry(c, &chan_list, global_l) {
818 struct sock *sk = c->sk;
820 if (state && c->state != state)
823 if (c->scid == cid) {
825 if (!bacmp(&bt_sk(sk)->src, src)) {
826 read_unlock(&chan_list_lock);
831 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
836 read_unlock(&chan_list_lock);
/* Accept an incoming LE connection: find a listener on the LE data CID,
 * spawn a child channel via ops->new_connection, copy addresses, enqueue
 * it on the parent's accept queue and mark it connected. NULL checks for
 * pchan/chan and the sk = chan->sk line are missing from this listing. */
841 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
843 struct sock *parent, *sk;
844 struct l2cap_chan *chan, *pchan;
848 /* Check if we have socket listening on cid */
849 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
856 bh_lock_sock(parent);
858 /* Check for backlog size */
859 if (sk_acceptq_is_full(parent)) {
860 BT_DBG("backlog full %d", parent->sk_ack_backlog);
864 chan = pchan->ops->new_connection(pchan->data);
870 write_lock_bh(&conn->chan_lock);
872 hci_conn_hold(conn->hcon);
874 bacpy(&bt_sk(sk)->src, conn->src);
875 bacpy(&bt_sk(sk)->dst, conn->dst);
877 bt_accept_enqueue(parent, sk);
879 __l2cap_chan_add(conn, chan);
881 __set_chan_timer(chan, sk->sk_sndtimeo);
883 l2cap_state_change(chan, BT_CONNECTED);
884 parent->sk_data_ready(parent, 0);
886 write_unlock_bh(&conn->chan_lock);
889 bh_unlock_sock(parent);
/* HCI link is up: for LE, handle inbound listeners, then mark every LE or
 * non-connection-oriented channel connected and start BT_CONNECT ones.
 * Per-socket bh_lock/bh_unlock lines are missing from this listing. */
892 static void l2cap_conn_ready(struct l2cap_conn *conn)
894 struct l2cap_chan *chan;
896 BT_DBG("conn %p", conn);
898 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
899 l2cap_le_conn_ready(conn);
901 read_lock(&conn->chan_lock);
903 list_for_each_entry(chan, &conn->chan_l, list) {
904 struct sock *sk = chan->sk;
908 if (conn->hcon->type == LE_LINK) {
909 __clear_chan_timer(chan);
910 l2cap_state_change(chan, BT_CONNECTED);
911 sk->sk_state_change(sk);
914 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
915 __clear_chan_timer(chan);
916 l2cap_state_change(chan, BT_CONNECTED);
917 sk->sk_state_change(sk);
918 } else if (chan->state == BT_CONNECT)
919 l2cap_do_start(chan);
924 read_unlock(&conn->chan_lock);
927 /* Notify sockets that we cannot guaranty reliability anymore */
/* Push @err to every force_reliable channel's socket; the lines actually
 * setting sk->sk_err and waking the socket are missing from this listing. */
928 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
930 struct l2cap_chan *chan;
932 BT_DBG("conn %p", conn);
934 read_lock(&conn->chan_lock);
936 list_for_each_entry(chan, &conn->chan_l, list) {
937 struct sock *sk = chan->sk;
939 if (chan->force_reliable)
943 read_unlock(&conn->chan_lock);
/* Information Request timed out: mark the feature exchange done anyway
 * and let pending channels proceed. */
946 static void l2cap_info_timeout(unsigned long arg)
948 struct l2cap_conn *conn = (void *) arg;
950 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
951 conn->info_ident = 0;
953 l2cap_conn_start(conn);
/* Get-or-create the l2cap_conn attached to an hci_conn: allocate, pick
 * the MTU (LE vs ACL), record endpoints, init locks/lists and (for
 * non-LE links) the info timer. The early-return when conn already
 * exists, NULL check, and final return are missing from this listing. */
956 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
958 struct l2cap_conn *conn = hcon->l2cap_data;
963 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
967 hcon->l2cap_data = conn;
970 BT_DBG("hcon %p conn %p", hcon, conn);
972 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
973 conn->mtu = hcon->hdev->le_mtu;
975 conn->mtu = hcon->hdev->acl_mtu;
977 conn->src = &hcon->hdev->bdaddr;
978 conn->dst = &hcon->dst;
982 spin_lock_init(&conn->lock);
983 rwlock_init(&conn->chan_lock);
985 INIT_LIST_HEAD(&conn->chan_l);
987 if (hcon->type != LE_LINK)
988 setup_timer(&conn->info_timer, l2cap_info_timeout,
989 (unsigned long) conn);
/* 0x13: default HCI disconnect reason until a channel overrides it. */
991 conn->disc_reason = 0x13;
/* Tear down the l2cap_conn: free any partial rx skb, close every channel,
 * stop the info timer and free the connection. Per-channel socket
 * locking and the final kfree(conn) are missing from this listing. */
996 static void l2cap_conn_del(struct hci_conn *hcon, int err)
998 struct l2cap_conn *conn = hcon->l2cap_data;
999 struct l2cap_chan *chan, *l;
1005 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1007 kfree_skb(conn->rx_skb);
1010 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1013 l2cap_chan_del(chan, err);
1015 chan->ops->close(chan->data);
1018 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1019 del_timer_sync(&conn->info_timer);
1021 hcon->l2cap_data = NULL;
/* Locking wrapper: attach @chan to @conn under the chan_lock write lock. */
1025 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1027 write_lock_bh(&conn->chan_lock);
1028 __l2cap_chan_add(conn, chan);
1029 write_unlock_bh(&conn->chan_lock);
1032 /* ---- Socket interface ---- */
1034 /* Find socket with psm and source bdaddr.
1035 * Returns closest match.
/* Same closest-match scheme as l2cap_global_chan_by_scid() but keyed on
 * PSM; the BDADDR_ANY fallback is kept in c1 and its return is missing
 * from this listing. */
1037 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1039 struct l2cap_chan *c, *c1 = NULL;
1041 read_lock(&chan_list_lock);
1043 list_for_each_entry(c, &chan_list, global_l) {
1044 struct sock *sk = c->sk;
1046 if (state && c->state != state)
1049 if (c->psm == psm) {
1051 if (!bacmp(&bt_sk(sk)->src, src)) {
1052 read_unlock(&chan_list_lock);
1057 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1062 read_unlock(&chan_list_lock);
/* Initiate an outgoing connection for @chan: route to an hdev, create the
 * HCI link (LE when dcid is the LE data CID, else ACL), attach the
 * channel and either complete immediately (link already up) or wait in
 * BT_CONNECT. Error paths (IS_ERR(hcon), conn NULL → hci_conn_put) and
 * the final return are missing from this listing. */
1067 int l2cap_chan_connect(struct l2cap_chan *chan)
1069 struct sock *sk = chan->sk;
1070 bdaddr_t *src = &bt_sk(sk)->src;
1071 bdaddr_t *dst = &bt_sk(sk)->dst;
1072 struct l2cap_conn *conn;
1073 struct hci_conn *hcon;
1074 struct hci_dev *hdev;
1078 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1081 hdev = hci_get_route(dst, src);
1083 return -EHOSTUNREACH;
1085 hci_dev_lock_bh(hdev);
1087 auth_type = l2cap_get_auth_type(chan);
1089 if (chan->dcid == L2CAP_CID_LE_DATA)
1090 hcon = hci_connect(hdev, LE_LINK, dst,
1091 chan->sec_level, auth_type);
1093 hcon = hci_connect(hdev, ACL_LINK, dst,
1094 chan->sec_level, auth_type);
1097 err = PTR_ERR(hcon);
1101 conn = l2cap_conn_add(hcon, 0);
1108 /* Update source addr of the socket */
1109 bacpy(src, conn->src);
1111 l2cap_chan_add(conn, chan);
1113 l2cap_state_change(chan, BT_CONNECT);
1114 __set_chan_timer(chan, sk->sk_sndtimeo);
1116 if (hcon->state == BT_CONNECTED) {
1117 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1118 __clear_chan_timer(chan);
1119 if (l2cap_check_security(chan))
1120 l2cap_state_change(chan, BT_CONNECTED);
1122 l2cap_do_start(chan);
1128 hci_dev_unlock_bh(hdev);
/* Sleep (interruptibly) until every outstanding ERTM frame is acked or
 * the connection drops. timeo initialisation, the socket unlock/lock
 * around schedule_timeout(), and the timeout-expired check are missing
 * from this listing. */
1133 int __l2cap_wait_ack(struct sock *sk)
1135 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1136 DECLARE_WAITQUEUE(wait, current);
1140 add_wait_queue(sk_sleep(sk), &wait);
1141 while ((chan->unacked_frames > 0 && chan->conn)) {
1142 set_current_state(TASK_INTERRUPTIBLE);
1147 if (signal_pending(current)) {
1148 err = sock_intr_errno(timeo);
1153 timeo = schedule_timeout(timeo);
1156 err = sock_error(sk);
1160 set_current_state(TASK_RUNNING);
1161 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: disconnect once retry_count reaches remote_max_tx,
 * otherwise re-arm and poll the peer with an RR(P=1). Socket bh_lock/
 * bh_unlock lines are missing from this listing. */
1165 static void l2cap_monitor_timeout(unsigned long arg)
1167 struct l2cap_chan *chan = (void *) arg;
1168 struct sock *sk = chan->sk;
1170 BT_DBG("chan %p", chan);
1173 if (chan->retry_count >= chan->remote_max_tx) {
1174 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1179 chan->retry_count++;
1180 __set_monitor_timer(chan);
1182 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer: start the monitor cycle (retry 1), record
 * that we are waiting for an F-bit, and poll with RR(P=1). */
1186 static void l2cap_retrans_timeout(unsigned long arg)
1188 struct l2cap_chan *chan = (void *) arg;
1189 struct sock *sk = chan->sk;
1191 BT_DBG("chan %p", chan);
1194 chan->retry_count = 1;
1195 __set_monitor_timer(chan);
1197 chan->conn_state |= L2CAP_CONN_WAIT_F;
1199 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Drop acknowledged frames from the head of tx_q until the frame with
 * expected_ack_seq is reached; stop the retransmit timer once nothing is
 * unacked. The break after the seq match and the kfree_skb are missing
 * from this listing. */
1203 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1205 struct sk_buff *skb;
1207 while ((skb = skb_peek(&chan->tx_q)) &&
1208 chan->unacked_frames) {
1209 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1212 skb = skb_dequeue(&chan->tx_q);
1215 chan->unacked_frames--;
1218 if (!chan->unacked_frames)
1219 __clear_retrans_timer(chan);
/* Push one fully-built L2CAP PDU over the ACL link, requesting
 * non-flushable packets for non-flushable channels when the controller
 * supports it. The flags fallback (ACL_START) is missing from this
 * listing. */
1222 void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1224 struct hci_conn *hcon = chan->conn->hcon;
1227 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1229 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1230 flags = ACL_START_NO_FLUSH;
1234 bt_cb(skb)->force_active = chan->force_active;
1235 hci_send_acl(hcon, skb, flags);
/* Streaming mode: drain tx_q, stamping each frame's TxSeq into its control
 * field (mod-64 sequence) and recomputing the trailing CRC16 FCS in place
 * before transmission. No retransmission state is kept. */
1238 void l2cap_streaming_send(struct l2cap_chan *chan)
1240 struct sk_buff *skb;
1243 while ((skb = skb_dequeue(&chan->tx_q))) {
1244 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1245 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1246 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1248 if (chan->fcs == L2CAP_FCS_CRC16) {
/* FCS covers everything except the 2 FCS bytes themselves. */
1249 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1250 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1253 l2cap_do_send(chan, skb);
1255 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* Retransmit the queued I-frame whose TxSeq equals @tx_seq: locate it in
 * tx_q, give up (disconnect) if it already hit remote_max_tx retries,
 * otherwise clone it, rebuild control (keep SAR, add pending F-bit,
 * current ReqSeq, original TxSeq), refresh the FCS and send the clone.
 * NOTE: writing tx_skb->data also updates the original — skb_clone()
 * shares the data buffer. Early returns on "not found" are missing. */
1259 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1261 struct sk_buff *skb, *tx_skb;
1264 skb = skb_peek(&chan->tx_q);
1269 if (bt_cb(skb)->tx_seq == tx_seq)
1272 if (skb_queue_is_last(&chan->tx_q, skb))
1275 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1277 if (chan->remote_max_tx &&
1278 bt_cb(skb)->retries == chan->remote_max_tx) {
1279 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1283 tx_skb = skb_clone(skb, GFP_ATOMIC);
1284 bt_cb(skb)->retries++;
1285 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
/* Preserve only the SAR bits from the stored control field. */
1286 control &= L2CAP_CTRL_SAR;
1288 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1289 control |= L2CAP_CTRL_FINAL;
1290 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1293 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1294 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1296 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1298 if (chan->fcs == L2CAP_FCS_CRC16) {
1299 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1300 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1303 l2cap_do_send(chan, tx_skb);
/* Transmit queued I-frames from tx_send_head while the tx window is open:
 * clone each skb, stamp control (SAR preserved, pending F-bit, ReqSeq,
 * TxSeq), refresh the FCS, send, arm the retransmit timer and advance the
 * mod-64 sequence. Returns the number of frames sent (return line missing
 * from this listing). NOTE: the FCS is computed/written via skb->data
 * rather than tx_skb->data — harmless only because skb_clone() shares the
 * data buffer; verify against upstream. */
1306 int l2cap_ertm_send(struct l2cap_chan *chan)
1308 struct sk_buff *skb, *tx_skb;
1312 if (chan->state != BT_CONNECTED)
1315 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1317 if (chan->remote_max_tx &&
1318 bt_cb(skb)->retries == chan->remote_max_tx) {
1319 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1323 tx_skb = skb_clone(skb, GFP_ATOMIC);
1325 bt_cb(skb)->retries++;
1327 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1328 control &= L2CAP_CTRL_SAR;
1330 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1331 control |= L2CAP_CTRL_FINAL;
1332 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1334 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1335 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1336 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1339 if (chan->fcs == L2CAP_FCS_CRC16) {
1340 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1341 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1344 l2cap_do_send(chan, tx_skb);
1346 __set_retrans_timer(chan);
1348 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1349 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* First transmission (retries just became 1) counts as a new unacked
 * frame; retransmissions do not. */
1351 if (bt_cb(skb)->retries == 1)
1352 chan->unacked_frames++;
1354 chan->frames_sent++;
1356 if (skb_queue_is_last(&chan->tx_q, skb))
1357 chan->tx_send_head = NULL;
1359 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the start of tx_q, reset next_tx_seq to the
 * last acked sequence and resend everything via l2cap_ertm_send().
 * Return of ret is missing from this listing. */
1367 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1371 if (!skb_queue_empty(&chan->tx_q))
1372 chan->tx_send_head = chan->tx_q.next;
1374 chan->next_tx_seq = chan->expected_ack_seq;
1375 ret = l2cap_ertm_send(chan);
/* Acknowledge received data: send RNR while locally busy; otherwise try
 * to piggyback the ack on pending I-frames (l2cap_ertm_send > 0 means
 * frames went out carrying ReqSeq) and only fall back to an explicit RR.
 * The control initialisation and the return after the piggyback are
 * missing from this listing. */
1379 static void l2cap_send_ack(struct l2cap_chan *chan)
1383 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1385 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1386 control |= L2CAP_SUPER_RCV_NOT_READY;
1387 chan->conn_state |= L2CAP_CONN_RNR_SENT;
1388 l2cap_send_sframe(chan, control);
1392 if (l2cap_ertm_send(chan) > 0)
1395 control |= L2CAP_SUPER_RCV_READY;
1396 l2cap_send_sframe(chan, control);
/* Send a SREJ S-frame with the F-bit set for the last entry on the SREJ
 * list (its tx_seq goes into the ReqSeq field). */
1399 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1401 struct srej_list *tail;
1404 control = L2CAP_SUPER_SELECT_REJECT;
1405 control |= L2CAP_CTRL_FINAL;
1407 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1408 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1410 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user iovec data into @skb, splitting any overflow
 * into MTU-sized continuation fragments chained on frag_list. Error
 * returns (-EFAULT on copy failure, alloc failure handling), the len
 * bookkeeping and the final return are missing from this listing. */
1413 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1415 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1416 struct sk_buff **frag;
1419 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1425 /* Continuation fragments (no L2CAP header) */
1426 frag = &skb_shinfo(skb)->frag_list;
1428 count = min_t(unsigned int, conn->mtu, len);
1430 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1433 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1439 frag = &(*frag)->next;
/* Build a connectionless-channel PDU: L2CAP header plus the 2-byte PSM,
 * then the user payload via l2cap_skbuff_fromiovec(). Returns the skb or
 * ERR_PTR. The skb NULL check, kfree_skb on error, and final return are
 * missing from this listing. */
1445 struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1447 struct sock *sk = chan->sk;
1448 struct l2cap_conn *conn = chan->conn;
1449 struct sk_buff *skb;
/* hlen = L2CAP header + 2 bytes for the PSM field. */
1450 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1451 struct l2cap_hdr *lh;
1453 BT_DBG("sk %p len %d", sk, (int)len);
1455 count = min_t(unsigned int, (conn->mtu - hlen), len);
1456 skb = bt_skb_send_alloc(sk, count + hlen,
1457 msg->msg_flags & MSG_DONTWAIT, &err);
1459 return ERR_PTR(err);
1461 /* Create L2CAP header */
1462 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1463 lh->cid = cpu_to_le16(chan->dcid);
1464 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1465 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1467 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1468 if (unlikely(err < 0)) {
1470 return ERR_PTR(err);
/* Build a basic-mode PDU: plain L2CAP header followed by the user
 * payload. Returns the skb or ERR_PTR; kfree_skb on error and the final
 * return are missing from this listing. */
1475 struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1477 struct sock *sk = chan->sk;
1478 struct l2cap_conn *conn = chan->conn;
1479 struct sk_buff *skb;
1480 int err, count, hlen = L2CAP_HDR_SIZE;
1481 struct l2cap_hdr *lh;
1483 BT_DBG("sk %p len %d", sk, (int)len);
1485 count = min_t(unsigned int, (conn->mtu - hlen), len);
1486 skb = bt_skb_send_alloc(sk, count + hlen,
1487 msg->msg_flags & MSG_DONTWAIT, &err);
1489 return ERR_PTR(err);
1491 /* Create L2CAP header */
1492 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1493 lh->cid = cpu_to_le16(chan->dcid);
1494 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1496 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1497 if (unlikely(err < 0)) {
1499 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame: header + 2-byte control, optional
 * 2-byte SDU length (the sdulen condition line is missing), payload, and
 * a zero FCS placeholder (filled in at send time). retries starts at 0.
 * Connection/NULL checks, hlen adjustments and the final return are
 * missing from this listing. */
1504 struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1506 struct sock *sk = chan->sk;
1507 struct l2cap_conn *conn = chan->conn;
1508 struct sk_buff *skb;
1509 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1510 struct l2cap_hdr *lh;
1512 BT_DBG("sk %p len %d", sk, (int)len);
1515 return ERR_PTR(-ENOTCONN);
1520 if (chan->fcs == L2CAP_FCS_CRC16)
1523 count = min_t(unsigned int, (conn->mtu - hlen), len);
1524 skb = bt_skb_send_alloc(sk, count + hlen,
1525 msg->msg_flags & MSG_DONTWAIT, &err);
1527 return ERR_PTR(err);
1529 /* Create L2CAP header */
1530 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1531 lh->cid = cpu_to_le16(chan->dcid);
1532 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1533 put_unaligned_le16(control, skb_put(skb, 2));
1535 put_unaligned_le16(sdulen, skb_put(skb, 2));
1537 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1538 if (unlikely(err < 0)) {
1540 return ERR_PTR(err);
/* FCS placeholder; real checksum is written just before transmission. */
1543 if (chan->fcs == L2CAP_FCS_CRC16)
1544 put_unaligned_le16(0, skb_put(skb, 2));
1546 bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than the remote MPS into a START / CONTINUE... /
 * END sequence of I-frames, queue them on a local list, then splice the
 * whole list onto the channel's tx queue in one step so a partial failure
 * never leaves a half-segmented SDU queued.
 */
1550 int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1552 struct sk_buff *skb;
1553 struct sk_buff_head sar_queue;
1557 skb_queue_head_init(&sar_queue);
1558 control = L2CAP_SDU_START;
/* START frame carries the total SDU length as the sdulen argument */
1559 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1561 return PTR_ERR(skb);
1563 __skb_queue_tail(&sar_queue, skb);
1564 len -= chan->remote_mps;
1565 size += chan->remote_mps;
/* Middle fragments are full MPS-sized CONTINUE frames; the final
 * (possibly short) fragment is an END frame. */
1570 if (len > chan->remote_mps) {
1571 control = L2CAP_SDU_CONTINUE;
1572 buflen = chan->remote_mps;
1574 control = L2CAP_SDU_END;
1578 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* On any failure drop everything segmented so far */
1580 skb_queue_purge(&sar_queue);
1581 return PTR_ERR(skb);
1584 __skb_queue_tail(&sar_queue, skb);
1588 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
/* If nothing was pending, transmission starts from the first new frame */
1589 if (chan->tx_send_head == NULL)
1590 chan->tx_send_head = sar_queue.next;
/*
 * Top-level send path for a channel: dispatch on channel type and mode.
 * Connectionless channels send one connless PDU; Basic mode sends a single
 * PDU (after an outgoing-MTU check); ERTM/Streaming either send one
 * unsegmented I-frame or SAR-segment the SDU, then kick the mode-specific
 * transmit engine.  Returns bytes sent or a negative errno.
 */
1595 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1597 struct sk_buff *skb;
1601 /* Connectionless channel */
1602 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1603 skb = l2cap_create_connless_pdu(chan, msg, len);
1605 return PTR_ERR(skb);
1607 l2cap_do_send(chan, skb);
1611 switch (chan->mode) {
1612 case L2CAP_MODE_BASIC:
1613 /* Check outgoing MTU */
1614 if (len > chan->omtu)
1617 /* Create a basic PDU */
1618 skb = l2cap_create_basic_pdu(chan, msg, len);
1620 return PTR_ERR(skb);
1622 l2cap_do_send(chan, skb);
1626 case L2CAP_MODE_ERTM:
1627 case L2CAP_MODE_STREAMING:
1628 /* Entire SDU fits into one PDU */
1629 if (len <= chan->remote_mps) {
1630 control = L2CAP_SDU_UNSEGMENTED;
1631 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1634 return PTR_ERR(skb);
1636 __skb_queue_tail(&chan->tx_q, skb);
1638 if (chan->tx_send_head == NULL)
1639 chan->tx_send_head = skb;
1642 /* Segment SDU into multiples PDUs */
1643 err = l2cap_sar_segment_sdu(chan, msg, len);
/* Streaming mode transmits immediately; no acknowledgements */
1648 if (chan->mode == L2CAP_MODE_STREAMING) {
1649 l2cap_streaming_send(chan);
/* In ERTM, hold off while the peer is busy or we await an F-bit */
1654 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
1655 (chan->conn_state & L2CAP_CONN_WAIT_F)) {
1660 err = l2cap_ertm_send(chan);
1667 BT_DBG("bad state %1.1x", chan->mode);
/*
 * Mark a channel fully configured and connected; wake whichever socket is
 * waiting — the connect()ing socket for outgoing channels, or the listening
 * parent for incoming ones (distinguished by bt_sk(sk)->parent).
 */
1674 static void l2cap_chan_ready(struct sock *sk)
1676 struct sock *parent = bt_sk(sk)->parent;
1677 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1679 BT_DBG("sk %p, parent %p", sk, parent);
/* Configuration is complete: drop conf flags and the setup timer */
1681 chan->conf_state = 0;
1682 __clear_chan_timer(chan);
1685 /* Outgoing channel.
1686 * Wake up socket sleeping on connect.
1688 l2cap_state_change(chan, BT_CONNECTED);
1689 sk->sk_state_change(sk);
1691 /* Incoming channel.
1692 * Wake up socket sleeping on accept.
1694 parent->sk_data_ready(parent, 0);
1698 /* Copy frame to all raw sockets on that connection */
1699 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1701 struct sk_buff *nskb;
1702 struct l2cap_chan *chan;
1704 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under the read lock and hand a
 * clone of the frame to every RAW-type channel. */
1706 read_lock(&conn->chan_lock);
1707 list_for_each_entry(chan, &conn->chan_l, list) {
1708 struct sock *sk = chan->sk;
1709 if (chan->chan_type != L2CAP_CHAN_RAW)
1712 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: may run in softirq context */
1715 nskb = skb_clone(skb, GFP_ATOMIC);
/* recv callback consumes nskb on success; non-zero means it was refused
 * (freeing not visible in this extract) */
1719 if (chan->ops->recv(chan->data, nskb))
1722 read_unlock(&conn->chan_lock);
1725 /* ---- L2CAP signalling commands ---- */
/*
 * Allocate and fill a signalling-command skb: L2CAP header (CID 0x0001 for
 * BR/EDR or 0x0005 for LE) + command header + @dlen bytes of @data.
 * Payloads larger than the ACL MTU are spread over frag_list continuation
 * skbs.  Returns the skb, or NULL on allocation failure (failure branches
 * not visible in this extract).
 */
1726 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1727 u8 code, u8 ident, u16 dlen, void *data)
1729 struct sk_buff *skb, **frag;
1730 struct l2cap_cmd_hdr *cmd;
1731 struct l2cap_hdr *lh;
1734 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1735 conn, code, ident, dlen);
1737 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1738 count = min_t(unsigned int, conn->mtu, len);
1740 skb = bt_skb_alloc(count, GFP_ATOMIC);
1744 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1745 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use the dedicated LE signalling channel */
1747 if (conn->hcon->type == LE_LINK)
1748 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1750 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1752 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1755 cmd->len = cpu_to_le16(dlen);
/* Copy as much of @data as fits in the first fragment */
1758 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1759 memcpy(skb_put(skb, count), data, count);
1765 /* Continuation fragments (no L2CAP header) */
1766 frag = &skb_shinfo(skb)->frag_list;
1768 count = min_t(unsigned int, conn->mtu, len);
1770 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1774 memcpy(skb_put(*frag, count), data, count);
1779 frag = &(*frag)->next;
/*
 * Decode one configuration option at *ptr: return its total encoded length
 * (header + value) so callers can walk the option list, and deliver the
 * type, value length and value through the out-parameters.  Values of
 * 1/2/4 bytes are decoded inline; anything else is returned as a pointer
 * (cast into the unsigned long) into the original buffer.
 */
1789 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1791 struct l2cap_conf_opt *opt = *ptr;
1794 len = L2CAP_CONF_OPT_SIZE + opt->len;
1802 *val = *((u8 *) opt->val);
1806 *val = get_unaligned_le16(opt->val);
1810 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer, not a copy */
1814 *val = (unsigned long) opt->val;
1818 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option (type/len/value) at *ptr, encoding 1/2/4
 * byte values little-endian in place and memcpy'ing larger values from the
 * pointer packed into @val; advances *ptr past the encoded option.
 * Inverse of l2cap_get_conf_opt().
 */
1822 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1824 struct l2cap_conf_opt *opt = *ptr;
1826 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1833 *((u8 *) opt->val) = val;
1837 put_unaligned_le16(val, opt->val);
1841 put_unaligned_le32(val, opt->val);
/* Larger options: @val carries a pointer to the raw bytes */
1845 memcpy(opt->val, (void *) val, len);
1849 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * ERTM acknowledgement timer callback: when the ack timer fires, send a
 * pending acknowledgement for received I-frames.  Runs in timer (softirq)
 * context, hence the bh socket lock.
 */
1852 static void l2cap_ack_timeout(unsigned long arg)
1854 struct l2cap_chan *chan = (void *) arg;
1856 bh_lock_sock(chan->sk);
1857 l2cap_send_ack(chan);
1858 bh_unlock_sock(chan->sk);
/*
 * Initialise all per-channel ERTM state once configuration completes:
 * sequence counters, the retransmission/monitor/ack timers, the SREJ and
 * busy queues, and the deferred busy-work handler.  Also redirects the
 * socket backlog receive path to the ERTM data handler.
 */
1861 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1863 struct sock *sk = chan->sk;
1865 chan->expected_ack_seq = 0;
1866 chan->unacked_frames = 0;
1867 chan->buffer_seq = 0;
1868 chan->num_acked = 0;
1869 chan->frames_sent = 0;
1871 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1872 (unsigned long) chan);
1873 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1874 (unsigned long) chan);
1875 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1877 skb_queue_head_init(&chan->srej_q);
1878 skb_queue_head_init(&chan->busy_q);
1880 INIT_LIST_HEAD(&chan->srej_l);
1882 INIT_WORK(&chan->busy_work, l2cap_busy_work);
/* ERTM frames queued on the backlog must go through the ERTM parser */
1884 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/*
 * Pick the channel mode to actually use: keep ERTM/Streaming only if the
 * remote's feature mask supports it, otherwise fall back to Basic mode.
 */
1887 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1890 case L2CAP_MODE_STREAMING:
1891 case L2CAP_MODE_ERTM:
1892 if (l2cap_mode_supported(mode, remote_feat_mask))
1896 return L2CAP_MODE_BASIC;
/*
 * Build an outgoing Configuration Request for @chan into @data: MTU option
 * (if non-default), an RFC option describing the desired mode, and an FCS
 * option when CRC16 can be disabled.  On the first request, ERTM/Streaming
 * may be downgraded via l2cap_select_mode() unless the mode is pinned by
 * L2CAP_CONF_STATE2_DEVICE.  Returns the encoded request length
 * (return statement not visible in this extract).
 */
1900 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1902 struct l2cap_conf_req *req = data;
1903 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1904 void *ptr = req->data;
1906 BT_DBG("chan %p", chan);
/* Mode (re)negotiation only happens before any conf exchange */
1908 if (chan->num_conf_req || chan->num_conf_rsp)
1911 switch (chan->mode) {
1912 case L2CAP_MODE_STREAMING:
1913 case L2CAP_MODE_ERTM:
1914 if (chan->conf_state & L2CAP_CONF_STATE2_DEVICE)
1919 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
1924 if (chan->imtu != L2CAP_DEFAULT_MTU)
1925 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1927 switch (chan->mode) {
1928 case L2CAP_MODE_BASIC:
/* If the peer supports neither ERTM nor Streaming there is no point
 * sending an RFC option at all */
1929 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1930 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1933 rfc.mode = L2CAP_MODE_BASIC;
1935 rfc.max_transmit = 0;
1936 rfc.retrans_timeout = 0;
1937 rfc.monitor_timeout = 0;
1938 rfc.max_pdu_size = 0;
1940 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1941 (unsigned long) &rfc);
1944 case L2CAP_MODE_ERTM:
1945 rfc.mode = L2CAP_MODE_ERTM;
1946 rfc.txwin_size = chan->tx_win;
1947 rfc.max_transmit = chan->max_tx;
1948 rfc.retrans_timeout = 0;
1949 rfc.monitor_timeout = 0;
/* Cap the PDU size so header+control+FCS still fit in the ACL MTU
 * (10 = L2CAP hdr + control + sdulen + FCS) */
1950 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1951 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1952 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1954 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1955 (unsigned long) &rfc);
1957 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Offer to disable the FCS if we don't need it or the peer asked */
1960 if (chan->fcs == L2CAP_FCS_NONE ||
1961 chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1962 chan->fcs = L2CAP_FCS_NONE;
1963 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1967 case L2CAP_MODE_STREAMING:
1968 rfc.mode = L2CAP_MODE_STREAMING;
1970 rfc.max_transmit = 0;
1971 rfc.retrans_timeout = 0;
1972 rfc.monitor_timeout = 0;
1973 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1974 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1975 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1977 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1978 (unsigned long) &rfc);
1980 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1983 if (chan->fcs == L2CAP_FCS_NONE ||
1984 chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1985 chan->fcs = L2CAP_FCS_NONE;
1986 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1991 req->dcid = cpu_to_le16(chan->dcid);
1992 req->flags = cpu_to_le16(0);
/*
 * Parse the peer's accumulated Configuration Request (chan->conf_req /
 * conf_len) and build our Configuration Response into @data.  Walks the
 * option list, records MTU/flush-timeout/RFC/FCS, echoes unknown non-hint
 * options back with L2CAP_CONF_UNKNOWN, resolves the mode (possibly
 * refusing with -ECONNREFUSED), and on success emits the negotiated
 * output options.  Returns the encoded response length (return statement
 * not visible in this extract).
 *
 * FIX: rfc.retrans_timeout / rfc.monitor_timeout are little-endian wire
 * fields (__le16) being filled from host-order constants, so the
 * conversion must be cpu_to_le16(), not le16_to_cpu().  The two are
 * identical on little-endian hosts, which hid the bug; on big-endian
 * hosts the peer received byte-swapped timeout values.
 */
1997 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1999 struct l2cap_conf_rsp *rsp = data;
2000 void *ptr = rsp->data;
2001 void *req = chan->conf_req;
2002 int len = chan->conf_len;
2003 int type, hint, olen;
2005 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2006 u16 mtu = L2CAP_DEFAULT_MTU;
2007 u16 result = L2CAP_CONF_SUCCESS;
2009 BT_DBG("chan %p", chan);
2011 while (len >= L2CAP_CONF_OPT_SIZE) {
2012 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; non-hint unknowns must be rejected */
2014 hint = type & L2CAP_CONF_HINT;
2015 type &= L2CAP_CONF_MASK;
2018 case L2CAP_CONF_MTU:
2022 case L2CAP_CONF_FLUSH_TO:
2023 chan->flush_to = val;
2026 case L2CAP_CONF_QOS:
2029 case L2CAP_CONF_RFC:
2030 if (olen == sizeof(rfc))
2031 memcpy(&rfc, (void *) val, olen);
2034 case L2CAP_CONF_FCS:
2035 if (val == L2CAP_FCS_NONE)
2036 chan->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: echo its type back as unaccepted */
2044 result = L2CAP_CONF_UNKNOWN;
2045 *((u8 *) ptr++) = type;
2050 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2053 switch (chan->mode) {
2054 case L2CAP_MODE_STREAMING:
2055 case L2CAP_MODE_ERTM:
2056 if (!(chan->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2057 chan->mode = l2cap_select_mode(rfc.mode,
2058 chan->conn->feat_mask);
/* Mode is pinned by the device; mismatch is fatal */
2062 if (chan->mode != rfc.mode)
2063 return -ECONNREFUSED;
2069 if (chan->mode != rfc.mode) {
2070 result = L2CAP_CONF_UNACCEPT;
2071 rfc.mode = chan->mode;
/* Only one mode-renegotiation round is allowed */
2073 if (chan->num_conf_rsp == 1)
2074 return -ECONNREFUSED;
2076 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2077 sizeof(rfc), (unsigned long) &rfc);
2081 if (result == L2CAP_CONF_SUCCESS) {
2082 /* Configure output options and let the other side know
2083 * which ones we don't like. */
2085 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2086 result = L2CAP_CONF_UNACCEPT;
2089 chan->conf_state |= L2CAP_CONF_MTU_DONE;
2091 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2094 case L2CAP_MODE_BASIC:
2095 chan->fcs = L2CAP_FCS_NONE;
2096 chan->conf_state |= L2CAP_CONF_MODE_DONE;
2099 case L2CAP_MODE_ERTM:
2100 chan->remote_tx_win = rfc.txwin_size;
2101 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's PDU size to what fits in our ACL MTU */
2103 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2104 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2106 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* We (the receiver) dictate the timeouts; encode them LE for the wire */
2108 rfc.retrans_timeout =
2109 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2110 rfc.monitor_timeout =
2111 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2113 chan->conf_state |= L2CAP_CONF_MODE_DONE;
2115 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2116 sizeof(rfc), (unsigned long) &rfc);
2120 case L2CAP_MODE_STREAMING:
2121 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2122 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2124 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2126 chan->conf_state |= L2CAP_CONF_MODE_DONE;
2128 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2129 sizeof(rfc), (unsigned long) &rfc);
2134 result = L2CAP_CONF_UNACCEPT;
2136 memset(&rfc, 0, sizeof(rfc));
2137 rfc.mode = chan->mode;
2140 if (result == L2CAP_CONF_SUCCESS)
2141 chan->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2143 rsp->scid = cpu_to_le16(chan->dcid);
2144 rsp->result = cpu_to_le16(result);
2145 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse the peer's Configuration Response @rsp and build our follow-up
 * Configuration Request into @data, adopting the values the peer proposed
 * (MTU floor-clamped to the minimum, flush timeout, RFC parameters).
 * May flip *result to UNACCEPT or return -ECONNREFUSED on an impossible
 * mode.  Returns the encoded request length (return statement not visible
 * in this extract).
 */
2150 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2152 struct l2cap_conf_req *req = data;
2153 void *ptr = req->data;
2156 struct l2cap_conf_rfc rfc;
2158 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2160 while (len >= L2CAP_CONF_OPT_SIZE) {
2161 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2164 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: refuse but counter
 * with the smallest legal value */
2165 if (val < L2CAP_DEFAULT_MIN_MTU) {
2166 *result = L2CAP_CONF_UNACCEPT;
2167 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2170 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2173 case L2CAP_CONF_FLUSH_TO:
2174 chan->flush_to = val;
2175 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2179 case L2CAP_CONF_RFC:
2180 if (olen == sizeof(rfc))
2181 memcpy(&rfc, (void *)val, olen);
/* A pinned mode cannot be renegotiated by the peer */
2183 if ((chan->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2184 rfc.mode != chan->mode)
2185 return -ECONNREFUSED;
2189 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2190 sizeof(rfc), (unsigned long) &rfc);
/* Basic mode cannot be upgraded by the responder */
2195 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2196 return -ECONNREFUSED;
2198 chan->mode = rfc.mode;
2200 if (*result == L2CAP_CONF_SUCCESS) {
2202 case L2CAP_MODE_ERTM:
2203 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2204 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2205 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2207 case L2CAP_MODE_STREAMING:
2208 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2212 req->dcid = cpu_to_le16(chan->dcid);
2213 req->flags = cpu_to_le16(0x0000);
/*
 * Build a minimal (option-free) Configuration Response with the given
 * @result and continuation @flags.  Returns the encoded length (return
 * statement not visible in this extract).
 */
2218 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2220 struct l2cap_conf_rsp *rsp = data;
2221 void *ptr = rsp->data;
2223 BT_DBG("chan %p", chan);
2225 rsp->scid = cpu_to_le16(chan->dcid);
2226 rsp->result = cpu_to_le16(result);
2227 rsp->flags = cpu_to_le16(flags);
/*
 * Complete a connection whose accept was deferred (defer_setup): send the
 * pending successful Connect Response using the ident saved from the
 * request, then kick off configuration with our first Configuration
 * Request (unless one was already sent).
 */
2232 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2234 struct l2cap_conn_rsp rsp;
2235 struct l2cap_conn *conn = chan->conn;
2238 rsp.scid = cpu_to_le16(chan->dcid);
2239 rsp.dcid = cpu_to_le16(chan->scid);
2240 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2241 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2242 l2cap_send_cmd(conn, chan->ident,
2243 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2245 if (chan->conf_state & L2CAP_CONF_REQ_SENT)
2248 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2249 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2250 l2cap_build_conf_req(chan, buf), buf);
2251 chan->num_conf_req++;
/*
 * Extract the RFC option from a successful Configuration Response and copy
 * the negotiated ERTM/Streaming parameters (timeouts, MPS) into the
 * channel.  No-op for Basic-mode channels.
 */
2254 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2258 struct l2cap_conf_rfc rfc;
2260 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2262 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2265 while (len >= L2CAP_CONF_OPT_SIZE) {
2266 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2269 case L2CAP_CONF_RFC:
2270 if (olen == sizeof(rfc))
2271 memcpy(&rfc, (void *)val, olen);
2278 case L2CAP_MODE_ERTM:
2279 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2280 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2281 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2283 case L2CAP_MODE_STREAMING:
2284 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle an incoming Command Reject.  If it rejects our outstanding
 * Information Request (command-not-understood, matching ident), treat the
 * info exchange as finished so pending channel setups can proceed.
 */
2288 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2290 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 = "command not understood"; ignore other reject reasons */
2292 if (rej->reason != 0x0000)
2295 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2296 cmd->ident == conn->info_ident) {
2297 del_timer(&conn->info_timer);
2299 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2300 conn->info_ident = 0;
2302 l2cap_conn_start(conn);
/*
 * Handle an incoming Connection Request: find a listening channel for the
 * PSM, enforce link security (except for SDP), check the accept backlog,
 * create the child channel, guard against duplicate source CIDs, and reply
 * with success / pending / reject.  If the peer's features are still
 * unknown, answer "pending" and fire off an Information Request first.
 * NOTE(review): several error-path lines (goto targets, unlock paths) are
 * missing from this extract.
 */
2308 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2310 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2311 struct l2cap_conn_rsp rsp;
2312 struct l2cap_chan *chan = NULL, *pchan;
2313 struct sock *parent, *sk = NULL;
2314 int result, status = L2CAP_CS_NO_INFO;
2316 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2317 __le16 psm = req->psm;
2319 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2321 /* Check if we have socket listening on psm */
2322 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2324 result = L2CAP_CR_BAD_PSM;
2330 bh_lock_sock(parent);
2332 /* Check if the ACL is secure enough (if not SDP) */
2333 if (psm != cpu_to_le16(0x0001) &&
2334 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05 = authentication failure, reported at disconnect */
2335 conn->disc_reason = 0x05;
2336 result = L2CAP_CR_SEC_BLOCK;
2340 result = L2CAP_CR_NO_MEM;
2342 /* Check for backlog size */
2343 if (sk_acceptq_is_full(parent)) {
2344 BT_DBG("backlog full %d", parent->sk_ack_backlog)
2348 chan = pchan->ops->new_connection(pchan->data);
2354 write_lock_bh(&conn->chan_lock);
2356 /* Check if we already have channel with that dcid */
2357 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2358 write_unlock_bh(&conn->chan_lock);
2359 sock_set_flag(sk, SOCK_ZAPPED);
2360 chan->ops->close(chan->data);
2364 hci_conn_hold(conn->hcon);
2366 bacpy(&bt_sk(sk)->src, conn->src);
2367 bacpy(&bt_sk(sk)->dst, conn->dst);
2371 bt_accept_enqueue(parent, sk);
2373 __l2cap_chan_add(conn, chan);
2377 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Save ident so a deferred accept can answer with the right one */
2379 chan->ident = cmd->ident;
2381 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2382 if (l2cap_check_security(chan)) {
2383 if (bt_sk(sk)->defer_setup) {
2384 l2cap_state_change(chan, BT_CONNECT2);
2385 result = L2CAP_CR_PEND;
2386 status = L2CAP_CS_AUTHOR_PEND;
2387 parent->sk_data_ready(parent, 0);
2389 l2cap_state_change(chan, BT_CONFIG);
2390 result = L2CAP_CR_SUCCESS;
2391 status = L2CAP_CS_NO_INFO;
2394 l2cap_state_change(chan, BT_CONNECT2);
2395 result = L2CAP_CR_PEND;
2396 status = L2CAP_CS_AUTHEN_PEND;
2399 l2cap_state_change(chan, BT_CONNECT2);
2400 result = L2CAP_CR_PEND;
2401 status = L2CAP_CS_NO_INFO;
2404 write_unlock_bh(&conn->chan_lock);
2407 bh_unlock_sock(parent);
2410 rsp.scid = cpu_to_le16(scid);
2411 rsp.dcid = cpu_to_le16(dcid);
2412 rsp.result = cpu_to_le16(result);
2413 rsp.status = cpu_to_le16(status);
2414 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Features still unknown: query them before configuration can start */
2416 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2417 struct l2cap_info_req info;
2418 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2420 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2421 conn->info_ident = l2cap_get_ident(conn);
2423 mod_timer(&conn->info_timer, jiffies +
2424 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2426 l2cap_send_cmd(conn, conn->info_ident,
2427 L2CAP_INFO_REQ, sizeof(info), &info);
2430 if (chan && !(chan->conf_state & L2CAP_CONF_REQ_SENT) &&
2431 result == L2CAP_CR_SUCCESS) {
2433 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2434 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2435 l2cap_build_conf_req(chan, buf), buf);
2436 chan->num_conf_req++;
/*
 * Handle an incoming Connection Response: look up the channel by source
 * CID (or by command ident while the CID is not yet assigned), then act on
 * the result — success moves to BT_CONFIG and sends the first
 * Configuration Request, pending just sets CONNECT_PEND, anything else
 * tears the channel down (deferred via a short timer when the socket is
 * locked by userspace).
 */
2442 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2444 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2445 u16 scid, dcid, result, status;
2446 struct l2cap_chan *chan;
2450 scid = __le16_to_cpu(rsp->scid);
2451 dcid = __le16_to_cpu(rsp->dcid);
2452 result = __le16_to_cpu(rsp->result);
2453 status = __le16_to_cpu(rsp->status);
2455 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2458 chan = l2cap_get_chan_by_scid(conn, scid);
/* No scid yet (pending response carried scid 0): match by ident */
2462 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2470 case L2CAP_CR_SUCCESS:
2471 l2cap_state_change(chan, BT_CONFIG);
2474 chan->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2476 if (chan->conf_state & L2CAP_CONF_REQ_SENT)
2479 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2481 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2482 l2cap_build_conf_req(chan, req), req);
2483 chan->num_conf_req++;
2487 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
2491 /* don't delete l2cap channel if sk is owned by user */
2492 if (sock_owned_by_user(sk)) {
2493 l2cap_state_change(chan, BT_DISCONN);
2494 __clear_chan_timer(chan);
/* retry deletion shortly, once userspace releases the socket */
2495 __set_chan_timer(chan, HZ / 5);
2499 l2cap_chan_del(chan, ECONNREFUSED);
/*
 * Choose the channel's FCS setting once configuration is done: no FCS
 * outside ERTM/Streaming; CRC16 in those modes unless the peer explicitly
 * asked to disable it (L2CAP_CONF_NO_FCS_RECV).
 */
2507 static inline void set_default_fcs(struct l2cap_chan *chan)
2509 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2511 /* FCS is enabled only in ERTM or streaming mode, if one or both
2514 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2515 chan->fcs = L2CAP_FCS_NONE;
2516 else if (!(pi->chan->conf_state & L2CAP_CONF_NO_FCS_RECV))
2517 chan->fcs = L2CAP_FCS_CRC16;
/*
 * Handle an incoming Configuration Request.  Rejects requests for channels
 * not in BT_CONFIG (invalid-CID command reject), accumulates multi-command
 * requests in chan->conf_req (rejecting overflows), and on the final
 * fragment parses the options, replies, and — when both directions are
 * done — initialises ERTM state and marks the channel ready.
 */
2520 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2522 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2525 struct l2cap_chan *chan;
2529 dcid = __le16_to_cpu(req->dcid);
2530 flags = __le16_to_cpu(req->flags);
2532 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2534 chan = l2cap_get_chan_by_scid(conn, dcid);
2540 if (chan->state != BT_CONFIG) {
2541 struct l2cap_cmd_rej rej;
/* 0x0002 = invalid CID in request */
2543 rej.reason = cpu_to_le16(0x0002);
2544 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2549 /* Reject if config buffer is too small. */
2550 len = cmd_len - sizeof(*req);
2551 if (chan->conf_len + len > sizeof(chan->conf_req)) {
2552 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2553 l2cap_build_conf_rsp(chan, rsp,
2554 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate option bytes across continuation commands */
2559 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2560 chan->conf_len += len;
2562 if (flags & 0x0001) {
2563 /* Incomplete config. Send empty response. */
2564 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2565 l2cap_build_conf_rsp(chan, rsp,
2566 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2570 /* Complete config. */
2571 len = l2cap_parse_conf_req(chan, rsp);
2573 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2577 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2578 chan->num_conf_rsp++;
2580 /* Reset config buffer. */
2583 if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both sides configured: finalise FCS/mode state and go connected */
2586 if (chan->conf_state & L2CAP_CONF_INPUT_DONE) {
2587 set_default_fcs(chan);
2589 l2cap_state_change(chan, BT_CONNECTED);
2591 chan->next_tx_seq = 0;
2592 chan->expected_tx_seq = 0;
2593 skb_queue_head_init(&chan->tx_q);
2594 if (chan->mode == L2CAP_MODE_ERTM)
2595 l2cap_ertm_init(chan);
2597 l2cap_chan_ready(sk);
2601 if (!(chan->conf_state & L2CAP_CONF_REQ_SENT)) {
2603 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2604 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2605 l2cap_build_conf_req(chan, buf), buf);
2606 chan->num_conf_req++;
/*
 * Handle an incoming Configuration Response.  On success, adopt the
 * negotiated RFC parameters; on UNACCEPT, re-parse and send a revised
 * Configuration Request (bounded by L2CAP_CONF_MAX_CONF_RSP rounds);
 * otherwise disconnect.  When our side is also done, finalise FCS,
 * initialise ERTM if needed and mark the channel ready.
 *
 * FIX: cmd->len is a little-endian wire field (__le16) and must go through
 * le16_to_cpu() before arithmetic — the sibling dispatcher
 * l2cap_sig_channel() already converts cmd.len this way.  Using it raw
 * yields a wrong payload length on big-endian hosts.
 */
2614 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2616 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2617 u16 scid, flags, result;
2618 struct l2cap_chan *chan;
2620 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
2622 scid = __le16_to_cpu(rsp->scid);
2623 flags = __le16_to_cpu(rsp->flags);
2624 result = __le16_to_cpu(rsp->result);
2626 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2627 scid, flags, result);
2629 chan = l2cap_get_chan_by_scid(conn, scid);
2636 case L2CAP_CONF_SUCCESS:
2637 l2cap_conf_rfc_get(chan, rsp->data, len);
2640 case L2CAP_CONF_UNACCEPT:
/* Retry with adjusted options, but only a bounded number of times */
2641 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2644 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2645 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2649 /* throw out any old stored conf requests */
2650 result = L2CAP_CONF_SUCCESS;
2651 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2654 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2658 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2659 L2CAP_CONF_REQ, len, req);
2660 chan->num_conf_req++;
2661 if (result != L2CAP_CONF_SUCCESS)
/* Reject or too many rounds: give up and disconnect */
2667 sk->sk_err = ECONNRESET;
2668 __set_chan_timer(chan, HZ * 5);
2669 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2676 chan->conf_state |= L2CAP_CONF_INPUT_DONE;
/* Both directions configured: channel becomes usable */
2678 if (chan->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2679 set_default_fcs(chan);
2681 l2cap_state_change(chan, BT_CONNECTED);
2682 chan->next_tx_seq = 0;
2683 chan->expected_tx_seq = 0;
2684 skb_queue_head_init(&chan->tx_q);
2685 if (chan->mode == L2CAP_MODE_ERTM)
2686 l2cap_ertm_init(chan);
2688 l2cap_chan_ready(sk);
/*
 * Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, shut down the socket, and delete the channel —
 * deferred via a short timer if userspace currently owns the socket lock.
 */
2696 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2698 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2699 struct l2cap_disconn_rsp rsp;
2701 struct l2cap_chan *chan;
2704 scid = __le16_to_cpu(req->scid);
2705 dcid = __le16_to_cpu(req->dcid);
2707 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid; look the channel up by it */
2709 chan = l2cap_get_chan_by_scid(conn, dcid);
2715 rsp.dcid = cpu_to_le16(chan->scid);
2716 rsp.scid = cpu_to_le16(chan->dcid);
2717 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2719 sk->sk_shutdown = SHUTDOWN_MASK;
2721 /* don't delete l2cap channel if sk is owned by user */
2722 if (sock_owned_by_user(sk)) {
2723 l2cap_state_change(chan, BT_DISCONN);
2724 __clear_chan_timer(chan);
/* retry deletion shortly, once userspace releases the socket */
2725 __set_chan_timer(chan, HZ / 5);
2730 l2cap_chan_del(chan, ECONNRESET);
2733 chan->ops->close(chan->data);
/*
 * Handle an incoming Disconnection Response to a request we sent: delete
 * the channel (deferred via a short timer when userspace owns the socket
 * lock) and close it.
 */
2737 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2739 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2741 struct l2cap_chan *chan;
2744 scid = __le16_to_cpu(rsp->scid);
2745 dcid = __le16_to_cpu(rsp->dcid);
2747 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2749 chan = l2cap_get_chan_by_scid(conn, scid);
2755 /* don't delete l2cap channel if sk is owned by user */
2756 if (sock_owned_by_user(sk)) {
2757 l2cap_state_change(chan,BT_DISCONN);
2758 __clear_chan_timer(chan);
2759 __set_chan_timer(chan, HZ / 5);
/* err == 0: this is the clean, expected end of the channel */
2764 l2cap_chan_del(chan, 0);
2767 chan->ops->close(chan->data);
/*
 * Handle an incoming Information Request: answer feature-mask queries with
 * our capabilities (adding ERTM/Streaming to the static mask), fixed-
 * channel queries with the fixed-channel bitmap, and everything else with
 * "not supported".
 */
2771 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2773 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2776 type = __le16_to_cpu(req->type);
2778 BT_DBG("type 0x%4.4x", type);
2780 if (type == L2CAP_IT_FEAT_MASK) {
2782 u32 feat_mask = l2cap_feat_mask;
2783 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2784 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2785 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2787 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2789 put_unaligned_le32(feat_mask, rsp->data);
2790 l2cap_send_cmd(conn, cmd->ident,
2791 L2CAP_INFO_RSP, sizeof(buf), buf);
2792 } else if (type == L2CAP_IT_FIXED_CHAN) {
2794 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2795 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2796 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed channels bitmap follows the 4-byte rsp header */
2797 memcpy(buf + 4, l2cap_fixed_chan, 8);
2798 l2cap_send_cmd(conn, cmd->ident,
2799 L2CAP_INFO_RSP, sizeof(buf), buf);
2801 struct l2cap_info_rsp rsp;
2802 rsp.type = cpu_to_le16(type);
2803 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2804 l2cap_send_cmd(conn, cmd->ident,
2805 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an incoming Information Response to our query.  Ignores stale or
 * duplicate responses (wrong ident or exchange already done).  A feature-
 * mask answer may trigger a follow-up fixed-channel query; once the
 * exchange is finished, pending channel setups are kicked off via
 * l2cap_conn_start().
 */
2811 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2813 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2816 type = __le16_to_cpu(rsp->type);
2817 result = __le16_to_cpu(rsp->result);
2819 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2821 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2822 if (cmd->ident != conn->info_ident ||
2823 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2826 del_timer(&conn->info_timer);
/* Peer refused the query: proceed with what we have */
2828 if (result != L2CAP_IR_SUCCESS) {
2829 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2830 conn->info_ident = 0;
2832 l2cap_conn_start(conn);
2837 if (type == L2CAP_IT_FEAT_MASK) {
2838 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: ask which ones before starting */
2840 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2841 struct l2cap_info_req req;
2842 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2844 conn->info_ident = l2cap_get_ident(conn);
2846 l2cap_send_cmd(conn, conn->info_ident,
2847 L2CAP_INFO_REQ, sizeof(req), &req);
2849 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2850 conn->info_ident = 0;
2852 l2cap_conn_start(conn);
2854 } else if (type == L2CAP_IT_FIXED_CHAN) {
2855 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2856 conn->info_ident = 0;
2858 l2cap_conn_start(conn);
/*
 * Validate LE connection-parameter-update values against the allowed
 * ranges: interval min/max ordering and bounds, supervision-timeout
 * bounds, and the requirement that the timeout comfortably exceeds the
 * effective interval including slave latency.
 */
2864 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2869 if (min > max || min < 6 || max > 3200)
2872 if (to_multiplier < 10 || to_multiplier > 3200)
2875 if (max >= to_multiplier * 8)
2878 max_latency = (to_multiplier * 8 / max) - 1;
2879 if (latency > 499 || latency > max_latency)
/*
 * Handle an LE Connection Parameter Update Request (only valid when we
 * are master): validate the length and the parameters, send an
 * accepted/rejected response, and on acceptance ask the controller to
 * apply the new connection parameters.
 */
2885 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2886 struct l2cap_cmd_hdr *cmd, u8 *data)
2888 struct hci_conn *hcon = conn->hcon;
2889 struct l2cap_conn_param_update_req *req;
2890 struct l2cap_conn_param_update_rsp rsp;
2891 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may act on this request */
2894 if (!(hcon->link_mode & HCI_LM_MASTER))
2897 cmd_len = __le16_to_cpu(cmd->len);
2898 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2901 req = (struct l2cap_conn_param_update_req *) data;
2902 min = __le16_to_cpu(req->min);
2903 max = __le16_to_cpu(req->max);
2904 latency = __le16_to_cpu(req->latency);
2905 to_multiplier = __le16_to_cpu(req->to_multiplier);
2907 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2908 min, max, latency, to_multiplier);
2910 memset(&rsp, 0, sizeof(rsp));
2912 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2914 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2916 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2918 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the new parameters down to the controller */
2922 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/*
 * Dispatch one BR/EDR signalling command to its handler.  Echo requests
 * are answered inline; unknown opcodes are logged (and rejected by the
 * caller via the returned error).
 */
2927 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2928 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2932 switch (cmd->code) {
2933 case L2CAP_COMMAND_REJ:
2934 l2cap_command_rej(conn, cmd, data);
2937 case L2CAP_CONN_REQ:
2938 err = l2cap_connect_req(conn, cmd, data);
2941 case L2CAP_CONN_RSP:
2942 err = l2cap_connect_rsp(conn, cmd, data);
2945 case L2CAP_CONF_REQ:
2946 err = l2cap_config_req(conn, cmd, cmd_len, data);
2949 case L2CAP_CONF_RSP:
2950 err = l2cap_config_rsp(conn, cmd, data);
2953 case L2CAP_DISCONN_REQ:
2954 err = l2cap_disconnect_req(conn, cmd, data);
2957 case L2CAP_DISCONN_RSP:
2958 err = l2cap_disconnect_rsp(conn, cmd, data);
2961 case L2CAP_ECHO_REQ:
/* Echo: reflect the payload straight back */
2962 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2965 case L2CAP_ECHO_RSP:
2968 case L2CAP_INFO_REQ:
2969 err = l2cap_information_req(conn, cmd, data);
2972 case L2CAP_INFO_RSP:
2973 err = l2cap_information_rsp(conn, cmd, data);
2977 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/*
 * Dispatch one LE signalling command.  Only the connection-parameter
 * update request is handled; rejects and update responses are ignored,
 * and unknown opcodes are logged.
 */
2985 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2986 struct l2cap_cmd_hdr *cmd, u8 *data)
2988 switch (cmd->code) {
2989 case L2CAP_COMMAND_REJ:
2992 case L2CAP_CONN_PARAM_UPDATE_REQ:
2993 return l2cap_conn_param_update_req(conn, cmd, data);
2995 case L2CAP_CONN_PARAM_UPDATE_RSP:
2999 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/*
 * Process an skb received on the signalling channel: mirror it to raw
 * sockets, then iterate over the (possibly multiple) commands it carries,
 * validating each length/ident before dispatching to the BR/EDR or LE
 * handler.  A handler error produces a Command Reject back to the peer.
 */
3004 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3005 struct sk_buff *skb)
3007 u8 *data = skb->data;
3009 struct l2cap_cmd_hdr cmd;
3012 l2cap_raw_recv(conn, skb);
3014 while (len >= L2CAP_CMD_HDR_SIZE) {
3016 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3017 data += L2CAP_CMD_HDR_SIZE;
3018 len -= L2CAP_CMD_HDR_SIZE;
3020 cmd_len = le16_to_cpu(cmd.len);
3022 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Length overrunning the skb, or ident 0 (reserved), is malformed */
3024 if (cmd_len > len || !cmd.ident) {
3025 BT_DBG("corrupted command");
3029 if (conn->hcon->type == LE_LINK)
3030 err = l2cap_le_sig_cmd(conn, &cmd, data);
3032 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3035 struct l2cap_cmd_rej rej;
3037 BT_ERR("Wrong link type (%d)", err);
3039 /* FIXME: Map err to a valid reason */
3040 rej.reason = cpu_to_le16(0);
3041 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/*
 * Verify the trailing CRC16 FCS of a received frame when enabled: trim the
 * 2 FCS bytes off skb->len (the bytes remain readable past the new end),
 * recompute the CRC over header+payload, and compare.  Returns non-zero
 * on mismatch (return statements not visible in this extract).
 */
3051 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3053 u16 our_fcs, rcv_fcs;
3054 int hdr_size = L2CAP_HDR_SIZE + 2;
3056 if (chan->fcs == L2CAP_FCS_CRC16) {
3057 skb_trim(skb, skb->len - 2);
/* After the trim, skb->data + skb->len points at the received FCS */
3058 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
/* CRC covers the L2CAP header (before skb->data) plus the payload */
3059 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3061 if (our_fcs != rcv_fcs)
/*
 * Answer a poll: send pending I-frames if possible, otherwise an RR or
 * (when locally busy) an RNR S-frame carrying the current buffer_seq.
 * NOTE(review): the `control` declaration/initialization and some braces
 * appear elided by extraction; numeric prefixes are upstream line numbers.
 */
3067 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3071 chan->frames_sent = 0;
3073 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
/* Locally busy: tell the peer to stop sending (RNR) and remember it. */
3075 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3076 control |= L2CAP_SUPER_RCV_NOT_READY;
3077 l2cap_send_sframe(chan, control);
3078 chan->conn_state |= L2CAP_CONN_RNR_SENT;
3081 if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
3082 l2cap_retransmit_frames(chan);
3084 l2cap_ertm_send(chan);
/* Nothing was transmitted and we are not busy: acknowledge with RR. */
3086 if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3087 chan->frames_sent == 0) {
3088 control |= L2CAP_SUPER_RCV_READY;
3089 l2cap_send_sframe(chan, control);
/*
 * Insert an out-of-order I-frame into the SREJ reorder queue, keeping the
 * queue sorted by tx_seq within the modulo-64 window anchored at
 * buffer_seq.  Duplicated tx_seq values are detected via the early-return
 * path at upstream line 3112 (return value lines appear elided; presumably
 * an error is returned for duplicates -- confirm against the original).
 */
3093 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
3095 struct sk_buff *next_skb;
3096 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequencing metadata in the skb control block for later replay. */
3098 bt_cb(skb)->tx_seq = tx_seq;
3099 bt_cb(skb)->sar = sar;
3101 next_skb = skb_peek(&chan->srej_q);
/* Empty queue: just append. */
3103 __skb_queue_tail(&chan->srej_q, skb);
/* Offsets are computed modulo 64 relative to buffer_seq so wraparound
 * inside the window compares correctly. */
3107 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3108 if (tx_seq_offset < 0)
3109 tx_seq_offset += 64;
3112 if (bt_cb(next_skb)->tx_seq == tx_seq)
3115 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3116 chan->buffer_seq) % 64;
3117 if (next_tx_seq_offset < 0)
3118 next_tx_seq_offset += 64;
/* Found the first queued frame logically after us: insert before it. */
3120 if (next_tx_seq_offset > tx_seq_offset) {
3121 __skb_queue_before(&chan->srej_q, next_skb, skb);
3125 if (skb_queue_is_last(&chan->srej_q, next_skb))
3128 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
/* Largest tx_seq so far: append at the tail. */
3130 __skb_queue_tail(&chan->srej_q, skb);
/*
 * SAR reassembly for ERTM: combine UNSEGMENTED/START/CONTINUE/END frames
 * into a complete SDU and deliver it through chan->ops->recv().  The
 * SAR_RETRY flag supports re-delivery after a local-busy failure without
 * double-counting lengths.
 * NOTE(review): braces, `break`/`goto`/error-return lines and the `err`
 * declaration appear elided by extraction; prefixes are upstream line
 * numbers.  On protocol violations the fall-through at the bottom tears
 * the channel down.
 */
3135 static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3137 struct sk_buff *_skb;
3140 switch (control & L2CAP_CTRL_SAR) {
3141 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented SDU must not arrive mid-reassembly. */
3142 if (chan->conn_state & L2CAP_CONN_SAR_SDU)
3145 return chan->ops->recv(chan->data, skb);
3147 case L2CAP_SDU_START:
3148 if (chan->conn_state & L2CAP_CONN_SAR_SDU)
/* First two payload bytes of a START frame carry the total SDU length. */
3151 chan->sdu_len = get_unaligned_le16(skb->data);
3153 if (chan->sdu_len > chan->imtu)
3156 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3160 /* pull sdu_len bytes only after alloc, because of Local Busy
3161 * condition we have to be sure that this will be executed
3162 * only once, i.e., when alloc does not fail */
3165 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3167 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3168 chan->partial_sdu_len = skb->len;
3171 case L2CAP_SDU_CONTINUE:
3172 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
/* Accumulate; overrunning the announced sdu_len is a violation. */
3178 chan->partial_sdu_len += skb->len;
3179 if (chan->partial_sdu_len > chan->sdu_len)
3182 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
/* END segment: finish the SDU and attempt delivery. */
3187 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
/* On a retry the length bookkeeping was already done the first time. */
3193 if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
3194 chan->partial_sdu_len += skb->len;
3196 if (chan->partial_sdu_len > chan->imtu)
3199 if (chan->partial_sdu_len != chan->sdu_len)
3202 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
/* Deliver a clone so chan->sdu survives a failed recv for retry. */
3205 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3207 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
3211 err = chan->ops->recv(chan->data, _skb);
3214 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
3218 chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3219 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3221 kfree_skb(chan->sdu);
3229 kfree_skb(chan->sdu);
/* Protocol violation path: disconnect the channel. */
3233 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/*
 * Try to drain the local-busy backlog (busy_q) into the reassembly layer.
 * If everything drains, leave local-busy: either poll the peer with RR+P
 * (when we previously sent RNR) or just clear the busy flags.
 * NOTE(review): braces/`return` lines and the `control`/`err` declarations
 * appear elided by extraction; prefixes are upstream line numbers.
 */
3238 static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
3240 struct sk_buff *skb;
3244 while ((skb = skb_dequeue(&chan->busy_q))) {
3245 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3246 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
/* Still busy: put the frame back at the head and give up for now. */
3248 skb_queue_head(&chan->busy_q, skb);
3252 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
/* If we never told the peer we were busy, nothing more to undo. */
3255 if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
/* We sent RNR earlier: poll with RR+P and wait for the F-bit reply. */
3258 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3259 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3260 l2cap_send_sframe(chan, control);
3261 chan->retry_count = 1;
3263 __clear_retrans_timer(chan);
3264 __set_monitor_timer(chan);
3266 chan->conn_state |= L2CAP_CONN_WAIT_F;
3269 chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3270 chan->conn_state &= ~L2CAP_CONN_RNR_SENT;
3272 BT_DBG("chan %p, Exit local busy", chan);
/*
 * Workqueue handler for the local-busy condition: sleep-and-retry pushing
 * the busy_q backlog until it drains, a signal arrives, a socket error
 * occurs, or the retry budget (L2CAP_LOCAL_BUSY_TRIES) is exhausted -- in
 * which case the channel is disconnected with EBUSY.
 * NOTE(review): braces and `break`/jump lines appear elided by extraction.
 */
3277 static void l2cap_busy_work(struct work_struct *work)
3279 DECLARE_WAITQUEUE(wait, current);
3280 struct l2cap_chan *chan =
3281 container_of(work, struct l2cap_chan, busy_work);
3282 struct sock *sk = chan->sk;
3283 int n_tries = 0, timeo = HZ/5, err;
3284 struct sk_buff *skb;
3288 add_wait_queue(sk_sleep(sk), &wait);
3289 while ((skb = skb_peek(&chan->busy_q))) {
3290 set_current_state(TASK_INTERRUPTIBLE);
/* Too many retries: give up and tear the channel down. */
3292 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3294 l2cap_send_disconn_req(chan->conn, chan, EBUSY);
3301 if (signal_pending(current)) {
3302 err = sock_intr_errno(timeo);
/* Sleep for the (possibly reduced) timeout before retrying. */
3307 timeo = schedule_timeout(timeo);
3310 err = sock_error(sk);
/* A successful push means the backlog drained; we are done. */
3314 if (l2cap_try_push_rx_skb(chan) == 0)
3318 set_current_state(TASK_RUNNING);
3319 remove_wait_queue(sk_sleep(sk), &wait);
/*
 * Deliver an in-sequence I-frame to reassembly, or -- if we are (or just
 * became) locally busy -- queue it on busy_q, send RNR and kick the busy
 * worker.  Entering local busy also stops the ack timer.
 * NOTE(review): braces/`return` lines and the `err`/`sctrl` declarations
 * appear elided by extraction; prefixes are upstream line numbers.
 */
3324 static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
/* Already busy: append to the backlog and opportunistically retry. */
3328 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3329 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3330 __skb_queue_tail(&chan->busy_q, skb);
3331 return l2cap_try_push_rx_skb(chan);
3336 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3338 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3342 /* Busy Condition */
3343 BT_DBG("chan %p, Enter local busy", chan);
3345 chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3346 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3347 __skb_queue_tail(&chan->busy_q, skb);
/* Tell the peer to stop sending until we recover. */
3349 sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3350 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3351 l2cap_send_sframe(chan, sctrl);
3353 chan->conn_state |= L2CAP_CONN_RNR_SENT;
3355 __clear_ack_timer(chan);
/* Defer recovery to process context via the busy workqueue. */
3357 queue_work(_busy_wq, &chan->busy_work);
/*
 * SAR reassembly for streaming mode.  Unlike ERTM there is no retry: a
 * stale partial SDU is simply dropped when a new UNSEGMENTED/START frame
 * arrives, and oversized or mismatched SDUs are discarded.
 * NOTE(review): braces, `break`/`goto`/return lines and the `err`
 * declaration appear elided by extraction; prefixes are upstream line
 * numbers.
 */
3362 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3364 struct sk_buff *_skb;
3368 * TODO: We have to notify the userland if some data is lost with the
3372 switch (control & L2CAP_CTRL_SAR) {
3373 case L2CAP_SDU_UNSEGMENTED:
/* Abandon any half-built SDU; this frame is complete on its own. */
3374 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3375 kfree_skb(chan->sdu);
3379 err = chan->ops->recv(chan->data, skb);
3385 case L2CAP_SDU_START:
3386 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3387 kfree_skb(chan->sdu);
/* First two payload bytes announce the total SDU length. */
3391 chan->sdu_len = get_unaligned_le16(skb->data);
3394 if (chan->sdu_len > chan->imtu) {
3399 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3405 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3407 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3408 chan->partial_sdu_len = skb->len;
3412 case L2CAP_SDU_CONTINUE:
3413 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3416 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
/* Overrunning the announced length invalidates the whole SDU. */
3418 chan->partial_sdu_len += skb->len;
3419 if (chan->partial_sdu_len > chan->sdu_len)
3420 kfree_skb(chan->sdu);
/* L2CAP_SDU_END: deliver only if the length adds up exactly. */
3427 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3430 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3432 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3433 chan->partial_sdu_len += skb->len;
3435 if (chan->partial_sdu_len > chan->imtu)
3438 if (chan->partial_sdu_len == chan->sdu_len) {
3439 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3440 err = chan->ops->recv(chan->data, _skb);
3447 kfree_skb(chan->sdu);
/*
 * After a missing frame arrives, release the run of consecutively
 * sequenced frames buffered in srej_q to reassembly, advancing the
 * SREJ-era buffer sequence counter along the way.
 */
3455 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3457 struct sk_buff *skb;
3460 while ((skb = skb_peek(&chan->srej_q))) {
/* Stop at the first gap: that frame is still outstanding. */
3461 if (bt_cb(skb)->tx_seq != tx_seq)
3464 skb = skb_dequeue(&chan->srej_q);
3465 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3466 l2cap_ertm_reassembly_sdu(chan, skb, control);
3467 chan->buffer_seq_srej =
3468 (chan->buffer_seq_srej + 1) % 64;
3469 tx_seq = (tx_seq + 1) % 64;
/*
 * Re-send the SREJ frames for every outstanding entry up to and including
 * tx_seq: the matching entry is consumed (freed), earlier entries are
 * re-queued at the tail after their SREJ is retransmitted.
 * NOTE(review): braces, the `control` declaration, list_del/kfree and
 * return lines appear elided by extraction.
 */
3473 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3475 struct srej_list *l, *tmp;
3478 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Found the frame that just arrived: its SREJ entry is retired. */
3479 if (l->tx_seq == tx_seq) {
3484 control = L2CAP_SUPER_SELECT_REJECT;
3485 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3486 l2cap_send_sframe(chan, control);
/* Move the still-missing entry to the back of the list. */
3488 list_add_tail(&l->list, &chan->srej_l);
3492 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3494 struct srej_list *new;
3497 while (tx_seq != chan->expected_tx_seq) {
3498 control = L2CAP_SUPER_SELECT_REJECT;
3499 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3500 l2cap_send_sframe(chan, control);
3502 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3503 new->tx_seq = chan->expected_tx_seq;
3504 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3505 list_add_tail(&new->list, &chan->srej_l);
3507 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/*
 * Core ERTM receive path for I-frames: acknowledge via req_seq, detect
 * out-of-window / out-of-order / duplicate tx_seq, run the SREJ recovery
 * state machine, and pass in-sequence frames to l2cap_push_rx_skb().
 * Acks are batched: one RR per ~tx_win/6 frames, otherwise the ack timer
 * fires.
 * NOTE(review): braces, `goto`/`return`/`break` lines and the `err`
 * declaration appear elided by extraction; prefixes are upstream line
 * numbers.
 */
3510 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3512 u8 tx_seq = __get_txseq(rx_control);
3513 u8 req_seq = __get_reqseq(rx_control);
3514 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3515 int tx_seq_offset, expected_tx_seq_offset;
/* Batch acknowledgements: one explicit ack per ~tx_win/6 frames. */
3516 int num_to_ack = (chan->tx_win/6) + 1;
3519 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3520 tx_seq, rx_control);
/* F-bit answering our poll ends the WAIT_F monitor period. */
3522 if (L2CAP_CTRL_FINAL & rx_control &&
3523 chan->conn_state & L2CAP_CONN_WAIT_F) {
3524 __clear_monitor_timer(chan);
3525 if (chan->unacked_frames > 0)
3526 __set_retrans_timer(chan);
3527 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
/* The piggybacked req_seq acknowledges our transmitted frames. */
3530 chan->expected_ack_seq = req_seq;
3531 l2cap_drop_acked_frames(chan);
3533 if (tx_seq == chan->expected_tx_seq)
3536 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3537 if (tx_seq_offset < 0)
3538 tx_seq_offset += 64;
3540 /* invalid tx_seq */
3541 if (tx_seq_offset >= chan->tx_win) {
3542 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3546 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY)
/* Already in SREJ recovery: slot the frame into the reorder queue. */
3549 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3550 struct srej_list *first;
3552 first = list_first_entry(&chan->srej_l,
3553 struct srej_list, list);
3554 if (tx_seq == first->tx_seq) {
3555 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3556 l2cap_check_srej_gap(chan, tx_seq);
3558 list_del(&first->list);
/* All gaps filled: leave SREJ_SENT and resync buffer_seq. */
3561 if (list_empty(&chan->srej_l)) {
3562 chan->buffer_seq = chan->buffer_seq_srej;
3563 chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3564 l2cap_send_ack(chan);
3565 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3568 struct srej_list *l;
3570 /* duplicated tx_seq */
3571 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3574 list_for_each_entry(l, &chan->srej_l, list) {
3575 if (l->tx_seq == tx_seq) {
3576 l2cap_resend_srejframe(chan, tx_seq);
3580 l2cap_send_srejframe(chan, tx_seq);
3583 expected_tx_seq_offset =
3584 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3585 if (expected_tx_seq_offset < 0)
3586 expected_tx_seq_offset += 64;
3588 /* duplicated tx_seq */
3589 if (tx_seq_offset < expected_tx_seq_offset)
/* First out-of-sequence frame: enter SREJ recovery. */
3592 chan->conn_state |= L2CAP_CONN_SREJ_SENT;
3594 BT_DBG("chan %p, Enter SREJ", chan);
3596 INIT_LIST_HEAD(&chan->srej_l);
3597 chan->buffer_seq_srej = chan->buffer_seq;
3599 __skb_queue_head_init(&chan->srej_q);
3600 __skb_queue_head_init(&chan->busy_q);
3601 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3603 chan->conn_state |= L2CAP_CONN_SEND_PBIT;
3605 l2cap_send_srejframe(chan, tx_seq);
3607 __clear_ack_timer(chan);
/* In-sequence frame: advance the window and deliver. */
3612 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3614 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3615 bt_cb(skb)->tx_seq = tx_seq;
3616 bt_cb(skb)->sar = sar;
3617 __skb_queue_tail(&chan->srej_q, skb);
3621 err = l2cap_push_rx_skb(chan, skb, rx_control);
3625 if (rx_control & L2CAP_CTRL_FINAL) {
3626 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3627 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3629 l2cap_retransmit_frames(chan);
3632 __set_ack_timer(chan);
3634 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3635 if (chan->num_acked == num_to_ack - 1)
3636 l2cap_send_ack(chan);
/*
 * Handle a Receiver Ready S-frame: take the piggybacked ack, then react
 * to the P/F bits -- a poll demands an F-bit answer, a final bit may
 * trigger retransmission, otherwise resume normal transmission.
 * NOTE(review): braces and `else` lines appear elided by extraction.
 */
3645 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3647 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3650 chan->expected_ack_seq = __get_reqseq(rx_control);
3651 l2cap_drop_acked_frames(chan);
/* Poll: we must answer with the F-bit set. */
3653 if (rx_control & L2CAP_CTRL_POLL) {
3654 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3655 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3656 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3657 (chan->unacked_frames > 0))
3658 __set_retrans_timer(chan);
3660 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3661 l2cap_send_srejtail(chan);
3663 l2cap_send_i_or_rr_or_rnr(chan);
/* Final: the peer answered our poll. */
3666 } else if (rx_control & L2CAP_CTRL_FINAL) {
3667 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3669 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3670 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3672 l2cap_retransmit_frames(chan);
/* Plain RR: remote is no longer busy; keep the pipe full. */
3675 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3676 (chan->unacked_frames > 0))
3677 __set_retrans_timer(chan);
3679 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3680 if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
3681 l2cap_send_ack(chan);
3683 l2cap_ertm_send(chan);
/*
 * Handle a Reject S-frame: drop acked frames up to req_seq and
 * retransmit from there.  REJ_ACT suppresses a duplicate retransmission
 * when an F-bit REJ confirms an outstanding poll.
 * NOTE(review): braces and `else` lines appear elided by extraction.
 */
3687 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3689 u8 tx_seq = __get_reqseq(rx_control);
3691 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3693 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3695 chan->expected_ack_seq = tx_seq;
3696 l2cap_drop_acked_frames(chan);
3698 if (rx_control & L2CAP_CTRL_FINAL) {
/* REJ_ACT set means we already retransmitted for this poll round. */
3699 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3700 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3702 l2cap_retransmit_frames(chan);
3704 l2cap_retransmit_frames(chan);
3706 if (chan->conn_state & L2CAP_CONN_WAIT_F)
3707 chan->conn_state |= L2CAP_CONN_REJ_ACT;
/*
 * Handle a Selective Reject S-frame: retransmit the single requested
 * frame.  A poll additionally demands an F-bit answer and resumes
 * transmission; SREJ_ACT/srej_save_reqseq avoid retransmitting twice for
 * the same poll round.
 * NOTE(review): braces and `else` lines appear elided by extraction.
 */
3710 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3712 u8 tx_seq = __get_reqseq(rx_control);
3714 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3716 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3718 if (rx_control & L2CAP_CTRL_POLL) {
3719 chan->expected_ack_seq = tx_seq;
3720 l2cap_drop_acked_frames(chan);
3722 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3723 l2cap_retransmit_one_frame(chan, tx_seq);
3725 l2cap_ertm_send(chan);
/* Remember which seq this poll asked for, to match the F-bit later. */
3727 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3728 chan->srej_save_reqseq = tx_seq;
3729 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
3731 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* Skip the retransmit if this F-bit matches one we already served. */
3732 if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
3733 chan->srej_save_reqseq == tx_seq)
3734 chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3736 l2cap_retransmit_one_frame(chan, tx_seq);
3738 l2cap_retransmit_one_frame(chan, tx_seq);
3739 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3740 chan->srej_save_reqseq = tx_seq;
3741 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
/*
 * Handle a Receiver Not Ready S-frame: mark the peer busy, stop the
 * retransmission timer (unless we are in SREJ recovery) and answer a
 * poll with RR+F or the SREJ tail as appropriate.
 * NOTE(review): braces and `else` lines appear elided by extraction.
 */
3746 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3748 u8 tx_seq = __get_reqseq(rx_control);
3750 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3752 chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3753 chan->expected_ack_seq = tx_seq;
3754 l2cap_drop_acked_frames(chan);
3756 if (rx_control & L2CAP_CTRL_POLL)
3757 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3759 if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
3760 __clear_retrans_timer(chan);
3761 if (rx_control & L2CAP_CTRL_POLL)
3762 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
/* In SREJ recovery: polled -> resend SREJ tail, otherwise plain RR. */
3766 if (rx_control & L2CAP_CTRL_POLL)
3767 l2cap_send_srejtail(chan);
3769 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
3772 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3774 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3776 if (L2CAP_CTRL_FINAL & rx_control &&
3777 chan->conn_state & L2CAP_CONN_WAIT_F) {
3778 __clear_monitor_timer(chan);
3779 if (chan->unacked_frames > 0)
3780 __set_retrans_timer(chan);
3781 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3784 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3785 case L2CAP_SUPER_RCV_READY:
3786 l2cap_data_channel_rrframe(chan, rx_control);
3789 case L2CAP_SUPER_REJECT:
3790 l2cap_data_channel_rejframe(chan, rx_control);
3793 case L2CAP_SUPER_SELECT_REJECT:
3794 l2cap_data_channel_srejframe(chan, rx_control);
3797 case L2CAP_SUPER_RCV_NOT_READY:
3798 l2cap_data_channel_rnrframe(chan, rx_control);
/*
 * Entry point for ERTM data on a channel: validate FCS and req_seq, then
 * route to the I-frame or S-frame handler.  Malformed frames either get
 * silently dropped (bad FCS) or tear the channel down (bad lengths or
 * sequence numbers).
 * NOTE(review): braces, skb_pull/len computations and `goto drop`/return
 * lines appear elided by extraction; prefixes are upstream line numbers.
 */
3806 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3808 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3811 int len, next_tx_seq_offset, req_seq_offset;
3813 control = get_unaligned_le16(skb->data);
3818 * We can just drop the corrupted I-frame here.
3819 * Receiver will miss it and start proper recovery
3820 * procedures and ask retransmission.
3822 if (l2cap_check_fcs(chan, skb))
3825 if (__is_sar_start(control) && __is_iframe(control))
3828 if (chan->fcs == L2CAP_FCS_CRC16)
3831 if (len > chan->mps) {
3832 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* req_seq must lie inside [expected_ack_seq, next_tx_seq] (mod 64). */
3836 req_seq = __get_reqseq(control);
3837 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3838 if (req_seq_offset < 0)
3839 req_seq_offset += 64;
3841 next_tx_seq_offset =
3842 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3843 if (next_tx_seq_offset < 0)
3844 next_tx_seq_offset += 64;
3846 /* check for invalid req-seq */
3847 if (req_seq_offset > next_tx_seq_offset) {
3848 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3852 if (__is_iframe(control)) {
/* I-frames must carry payload; empty ones are a violation. */
3854 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3858 l2cap_data_channel_iframe(chan, control, skb);
/* S-frames must carry no payload beyond control (+FCS). */
3862 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3866 l2cap_data_channel_sframe(chan, control, skb);
/*
 * Route a data frame to the channel identified by its source CID and
 * process it according to the channel mode (basic / ERTM / streaming).
 * NOTE(review): braces, `goto drop`/`goto done`, the local declarations
 * (control, tx_seq, len) and skb_pull lines appear elided by extraction.
 */
3876 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3878 struct l2cap_chan *chan;
3879 struct sock *sk = NULL;
3884 chan = l2cap_get_chan_by_scid(conn, cid);
3886 BT_DBG("unknown cid 0x%4.4x", cid);
3892 BT_DBG("chan %p, len %d", chan, skb->len);
3894 if (chan->state != BT_CONNECTED)
3897 switch (chan->mode) {
3898 case L2CAP_MODE_BASIC:
3899 /* If socket recv buffers overflows we drop data here
3900 * which is *bad* because L2CAP has to be reliable.
3901 * But we don't have any other choice. L2CAP doesn't
3902 * provide flow control mechanism. */
3904 if (chan->imtu < skb->len)
3907 if (!chan->ops->recv(chan->data, skb))
3911 case L2CAP_MODE_ERTM:
/* Process directly if the socket is unlocked, else via the backlog. */
3912 if (!sock_owned_by_user(sk)) {
3913 l2cap_ertm_data_rcv(sk, skb);
3915 if (sk_add_backlog(sk, skb))
3921 case L2CAP_MODE_STREAMING:
3922 control = get_unaligned_le16(skb->data);
3926 if (l2cap_check_fcs(chan, skb))
3929 if (__is_sar_start(control))
3932 if (chan->fcs == L2CAP_FCS_CRC16)
3935 if (len > chan->mps || len < 0 || __is_sframe(control))
3938 tx_seq = __get_txseq(control);
/* Streaming tolerates loss: just resynchronize expected_tx_seq. */
3940 if (chan->expected_tx_seq == tx_seq)
3941 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3943 chan->expected_tx_seq = (tx_seq + 1) % 64;
3945 l2cap_streaming_reassembly_sdu(chan, skb, control);
3950 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/*
 * Deliver a connectionless-channel frame to the socket bound to the PSM
 * on our local address; dropped when no listener, wrong state, or the
 * frame exceeds the channel's receive MTU.
 * NOTE(review): braces, `goto drop`/`done` paths and kfree_skb lines
 * appear elided by extraction; prefixes are upstream line numbers.
 */
3964 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3966 struct sock *sk = NULL;
3967 struct l2cap_chan *chan;
3969 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3977 BT_DBG("sk %p, len %d", sk, skb->len);
3979 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3982 if (l2cap_pi(sk)->chan->imtu < skb->len)
3985 if (!chan->ops->recv(chan->data, skb))
/*
 * Deliver an ATT (LE data) channel frame to the socket bound to the fixed
 * CID on our local address; same drop conditions as the connectionless
 * path.
 * NOTE(review): braces, `goto drop`/`done` paths and kfree_skb lines
 * appear elided by extraction; prefixes are upstream line numbers.
 */
3997 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3999 struct sock *sk = NULL;
4000 struct l2cap_chan *chan;
4002 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4010 BT_DBG("sk %p, len %d", sk, skb->len);
4012 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4015 if (l2cap_pi(sk)->chan->imtu < skb->len)
4018 if (!chan->ops->recv(chan->data, skb))
4030 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4032 struct l2cap_hdr *lh = (void *) skb->data;
4036 skb_pull(skb, L2CAP_HDR_SIZE);
4037 cid = __le16_to_cpu(lh->cid);
4038 len = __le16_to_cpu(lh->len);
4040 if (len != skb->len) {
4045 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4048 case L2CAP_CID_LE_SIGNALING:
4049 case L2CAP_CID_SIGNALING:
4050 l2cap_sig_channel(conn, skb);
4053 case L2CAP_CID_CONN_LESS:
4054 psm = get_unaligned_le16(skb->data);
4056 l2cap_conless_channel(conn, psm, skb);
4059 case L2CAP_CID_LE_DATA:
4060 l2cap_att_channel(conn, cid, skb);
4064 l2cap_data_channel(conn, cid, skb);
4069 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * HCI callback for an incoming ACL connection request: scan listening
 * channels and report whether to accept (and whether to stay master).
 * lm1 collects exact local-address matches, lm2 wildcard (BDADDR_ANY)
 * matches; an exact match takes precedence.
 * NOTE(review): the `exact++`/role-switch condition lines and braces
 * appear elided by extraction; prefixes are upstream line numbers.
 */
4071 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4073 int exact = 0, lm1 = 0, lm2 = 0;
4074 struct l2cap_chan *c;
4076 if (type != ACL_LINK)
4079 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4081 /* Find listening sockets and check their link_mode */
4082 read_lock(&chan_list_lock);
4083 list_for_each_entry(c, &chan_list, global_l) {
4084 struct sock *sk = c->sk;
4086 if (c->state != BT_LISTEN)
/* Listener bound to this adapter's own address. */
4089 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4090 lm1 |= HCI_LM_ACCEPT;
4092 lm1 |= HCI_LM_MASTER;
/* Wildcard listener. */
4094 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4095 lm2 |= HCI_LM_ACCEPT;
4097 lm2 |= HCI_LM_MASTER;
4100 read_unlock(&chan_list_lock);
4102 return exact ? lm1 : lm2;
4105 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4107 struct l2cap_conn *conn;
4109 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4111 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4115 conn = l2cap_conn_add(hcon, status);
4117 l2cap_conn_ready(conn);
4119 l2cap_conn_del(hcon, bt_err(status));
4124 static int l2cap_disconn_ind(struct hci_conn *hcon)
4126 struct l2cap_conn *conn = hcon->l2cap_data;
4128 BT_DBG("hcon %p", hcon);
4130 if (hcon->type != ACL_LINK || !conn)
4133 return conn->disc_reason;
4136 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4138 BT_DBG("hcon %p reason %d", hcon, reason);
4140 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4143 l2cap_conn_del(hcon, bt_err(reason));
4148 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4150 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4153 if (encrypt == 0x00) {
4154 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4155 __clear_chan_timer(chan);
4156 __set_chan_timer(chan, HZ * 5);
4157 } else if (chan->sec_level == BT_SECURITY_HIGH)
4158 l2cap_chan_close(chan, ECONNREFUSED);
4160 if (chan->sec_level == BT_SECURITY_MEDIUM)
4161 __clear_chan_timer(chan);
/*
 * HCI callback for authentication/encryption results: walk every channel
 * on the connection and advance its state machine -- established channels
 * get l2cap_check_encryption(), BT_CONNECT channels (re)send the
 * Connection Request, BT_CONNECT2 channels answer the pending Connection
 * Response with success or security-block.
 * NOTE(review): braces, bh_lock/unlock_sock pairs, `continue` lines and
 * some status checks appear elided by extraction; prefixes are upstream
 * line numbers.
 */
4165 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4167 struct l2cap_conn *conn = hcon->l2cap_data;
4168 struct l2cap_chan *chan;
4173 BT_DBG("conn %p", conn);
4175 read_lock(&conn->chan_lock);
4177 list_for_each_entry(chan, &conn->chan_l, list) {
4178 struct sock *sk = chan->sk;
/* A pending security request is being answered elsewhere; skip. */
4182 if (chan->conf_state & L2CAP_CONF_CONNECT_PEND) {
4187 if (!status && (chan->state == BT_CONNECTED ||
4188 chan->state == BT_CONFIG)) {
4189 l2cap_check_encryption(chan, encrypt);
/* Security completed for an outgoing connection: send the request. */
4194 if (chan->state == BT_CONNECT) {
4196 struct l2cap_conn_req req;
4197 req.scid = cpu_to_le16(chan->scid);
4198 req.psm = chan->psm;
4200 chan->ident = l2cap_get_ident(conn);
4201 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
4203 l2cap_send_cmd(conn, chan->ident,
4204 L2CAP_CONN_REQ, sizeof(req), &req);
4206 __clear_chan_timer(chan);
4207 __set_chan_timer(chan, HZ / 10);
/* Incoming connection waiting on security: answer now. */
4209 } else if (chan->state == BT_CONNECT2) {
4210 struct l2cap_conn_rsp rsp;
4214 l2cap_state_change(chan, BT_CONFIG);
4215 result = L2CAP_CR_SUCCESS;
4217 l2cap_state_change(chan, BT_DISCONN);
4218 __set_chan_timer(chan, HZ / 10);
4219 result = L2CAP_CR_SEC_BLOCK;
4222 rsp.scid = cpu_to_le16(chan->dcid);
4223 rsp.dcid = cpu_to_le16(chan->scid);
4224 rsp.result = cpu_to_le16(result);
4225 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4226 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4233 read_unlock(&conn->chan_lock);
/*
 * HCI callback feeding ACL data into L2CAP: reassemble fragmented frames
 * (ACL start/continuation packets) into conn->rx_skb and hand complete
 * L2CAP frames to l2cap_recv_frame().  Length violations mark the
 * connection unreliable (ECOMM) and drop the fragment.
 * NOTE(review): braces, `goto drop`, the local len/cid declarations,
 * allocation-failure handling and the trailing kfree_skb/return appear
 * elided by extraction; prefixes are upstream line numbers.
 */
4238 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4240 struct l2cap_conn *conn = hcon->l2cap_data;
4243 conn = l2cap_conn_add(hcon, 0);
4248 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4250 if (!(flags & ACL_CONT)) {
4251 struct l2cap_hdr *hdr;
4252 struct l2cap_chan *chan;
/* A new start fragment while one is pending means we lost data. */
4257 BT_ERR("Unexpected start frame (len %d)", skb->len);
4258 kfree_skb(conn->rx_skb);
4259 conn->rx_skb = NULL;
4261 l2cap_conn_unreliable(conn, ECOMM);
4264 /* Start fragment always begin with Basic L2CAP header */
4265 if (skb->len < L2CAP_HDR_SIZE) {
4266 BT_ERR("Frame is too short (len %d)", skb->len);
4267 l2cap_conn_unreliable(conn, ECOMM);
4271 hdr = (struct l2cap_hdr *) skb->data;
4272 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4273 cid = __le16_to_cpu(hdr->cid);
4275 if (len == skb->len) {
4276 /* Complete frame received */
4277 l2cap_recv_frame(conn, skb);
4281 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4283 if (skb->len > len) {
4284 BT_ERR("Frame is too long (len %d, expected len %d)",
4286 l2cap_conn_unreliable(conn, ECOMM);
/* Early MTU check so we never buffer a frame the channel rejects. */
4290 chan = l2cap_get_chan_by_scid(conn, cid);
4292 if (chan && chan->sk) {
4293 struct sock *sk = chan->sk;
4295 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4296 BT_ERR("Frame exceeding recv MTU (len %d, "
4300 l2cap_conn_unreliable(conn, ECOMM);
4306 /* Allocate skb for the complete frame (with header) */
4307 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4311 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4313 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4315 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4317 if (!conn->rx_len) {
4318 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4319 l2cap_conn_unreliable(conn, ECOMM);
4323 if (skb->len > conn->rx_len) {
4324 BT_ERR("Fragment is too long (len %d, expected %d)",
4325 skb->len, conn->rx_len);
4326 kfree_skb(conn->rx_skb);
4327 conn->rx_skb = NULL;
4329 l2cap_conn_unreliable(conn, ECOMM);
4333 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4335 conn->rx_len -= skb->len;
4337 if (!conn->rx_len) {
4338 /* Complete frame received */
4339 l2cap_recv_frame(conn, conn->rx_skb);
4340 conn->rx_skb = NULL;
/*
 * seq_file show callback: dump one line per known L2CAP channel
 * (addresses, state, PSM, CIDs, MTUs, security level, mode) under the
 * chan_list lock.
 */
4349 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4351 struct l2cap_chan *c;
4353 read_lock_bh(&chan_list_lock);
4355 list_for_each_entry(c, &chan_list, global_l) {
4356 struct sock *sk = c->sk;
4358 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4359 batostr(&bt_sk(sk)->src),
4360 batostr(&bt_sk(sk)->dst),
4361 c->state, __le16_to_cpu(c->psm),
4362 c->scid, c->dcid, c->imtu, c->omtu,
4363 c->sec_level, c->mode),
4366 read_unlock_bh(&chan_list_lock);
/* debugfs open callback: bind the seq_file single-shot show handler. */
4371 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4373 return single_open(file, l2cap_debugfs_show, inode->i_private);
/*
 * File operations for the l2cap debugfs entry.
 * NOTE(review): the `.read = seq_read,` member appears to have been
 * dropped by extraction -- confirm against the original file.
 */
4376 static const struct file_operations l2cap_debugfs_fops = {
4377 .open = l2cap_debugfs_open,
4379 .llseek = seq_lseek,
4380 .release = single_release,
/* debugfs dentry for the "l2cap" file; NULL until l2cap_init() creates it. */
4383 static struct dentry *l2cap_debugfs;
/*
 * Registration record hooking L2CAP into the HCI core's protocol
 * callbacks.
 * NOTE(review): the `.name` member line appears to have been dropped by
 * extraction -- confirm against the original file.
 */
4385 static struct hci_proto l2cap_hci_proto = {
4387 .id = HCI_PROTO_L2CAP,
4388 .connect_ind = l2cap_connect_ind,
4389 .connect_cfm = l2cap_connect_cfm,
4390 .disconn_ind = l2cap_disconn_ind,
4391 .disconn_cfm = l2cap_disconn_cfm,
4392 .security_cfm = l2cap_security_cfm,
4393 .recv_acldata = l2cap_recv_acldata
/*
 * Module init: register the socket family, create the local-busy
 * workqueue, hook into HCI, and create the debugfs file.  Unwinds in
 * reverse order on failure.
 * NOTE(review): the `int err;` declaration, error-check braces and the
 * goto-cleanup labels appear elided by extraction; prefixes are upstream
 * line numbers.
 */
4396 int __init l2cap_init(void)
4400 err = l2cap_init_sockets();
4404 _busy_wq = create_singlethread_workqueue("l2cap");
4410 err = hci_register_proto(&l2cap_hci_proto);
4412 BT_ERR("L2CAP protocol registration failed");
4413 bt_sock_unregister(BTPROTO_L2CAP);
/* debugfs failure is non-fatal: only log it. */
4418 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4419 bt_debugfs, NULL, &l2cap_debugfs_fops);
4421 BT_ERR("Failed to create L2CAP debug file");
4427 destroy_workqueue(_busy_wq);
4428 l2cap_cleanup_sockets();
4432 void l2cap_exit(void)
4434 debugfs_remove(l2cap_debugfs);
4436 flush_workqueue(_busy_wq);
4437 destroy_workqueue(_busy_wq);
4439 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4440 BT_ERR("L2CAP protocol unregistration failed");
4442 l2cap_cleanup_sockets();
/* Module parameter: allow disabling ERTM (0644 = root-writable sysfs knob). */
4445 module_param(disable_ertm, bool, 0644);
4446 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");