2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
64 static struct workqueue_struct *_busy_wq;
66 static LIST_HEAD(chan_list);
67 static DEFINE_RWLOCK(chan_list_lock);
69 static void l2cap_busy_work(struct work_struct *work);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
73 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
75 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
76 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
77 struct l2cap_chan *chan, int err);
79 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
81 /* ---- L2CAP channels ---- */
/* NOTE(review): this excerpt elides interior lines (braces, returns);
 * comments only added, code untouched. */

/* Take a reference on the channel; paired with chan_put(). */
83 static inline void chan_hold(struct l2cap_chan *c)
85 	atomic_inc(&c->refcnt);

/* Drop a reference; the channel is released when the count reaches
 * zero (free path elided from this excerpt). */
88 static inline void chan_put(struct l2cap_chan *c)
90 	if (atomic_dec_and_test(&c->refcnt))

/* Find a channel on @conn by destination CID.
 * Caller must hold conn->chan_lock. */
94 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
98 	list_for_each_entry(c, &conn->chan_l, list) {

/* Find a channel on @conn by source CID.
 * Caller must hold conn->chan_lock. */
106 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
108 	struct l2cap_chan *c;
110 	list_for_each_entry(c, &conn->chan_l, list) {

117 /* Find channel with given SCID.
118  * Returns locked socket */
119 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
121 	struct l2cap_chan *c;
123 	read_lock(&conn->chan_lock);
124 	c = __l2cap_get_chan_by_scid(conn, cid);
127 	read_unlock(&conn->chan_lock);

/* Find a channel by the signalling-command identifier it last used.
 * Caller must hold conn->chan_lock. */
131 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
133 	struct l2cap_chan *c;
135 	list_for_each_entry(c, &conn->chan_l, list) {
136 		if (c->ident == ident)

/* Locked wrapper around __l2cap_get_chan_by_ident(). */
142 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
144 	struct l2cap_chan *c;
146 	read_lock(&conn->chan_lock);
147 	c = __l2cap_get_chan_by_ident(conn, ident);
150 	read_unlock(&conn->chan_lock);
/* Find a channel bound to (@psm, @src) on the global channel list.
 * Caller must hold chan_list_lock. Compares against c->sport, i.e.
 * the locally bound source PSM. */
154 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
156 	struct l2cap_chan *c;
158 	list_for_each_entry(c, &chan_list, global_l) {
159 		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))

/* Bind @chan to @psm on address @src. If @psm is non-zero it must be
 * free; with psm == 0 a dynamic PSM is picked from 0x1001-0x10ff.
 * Only odd values are tried (+= 2), as L2CAP PSMs must have the least
 * significant bit of the low octet set. */
168 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
172 	write_lock_bh(&chan_list_lock);
174 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
187 		for (p = 0x1001; p < 0x1100; p += 2)
188 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
189 				chan->psm = cpu_to_le16(p);
190 				chan->sport = cpu_to_le16(p);
197 	write_unlock_bh(&chan_list_lock);

/* Assign a fixed source CID to @chan (body elided from this excerpt);
 * serialized against the global channel list. */
201 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
203 	write_lock_bh(&chan_list_lock);
207 	write_unlock_bh(&chan_list_lock);

/* Pick the first free dynamic CID on @conn by scanning the
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END) range. */
212 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
214 	u16 cid = L2CAP_CID_DYN_START;
216 	for (; cid < L2CAP_CID_DYN_END; cid++) {
217 		if (!__l2cap_get_chan_by_scid(conn, cid))
/* (Re)arm a channel timer for @timeout milliseconds.
 * NOTE(review): the debug format says "chan %p" but passes chan->sk —
 * looks like it should pass chan; confirm against upstream. */
224 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
226 	BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
228 	if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))

/* Cancel a pending channel timer (reference drop on success elided
 * from this excerpt). */
232 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
234 	BT_DBG("chan %p state %d", chan, chan->state);
236 	if (timer_pending(timer) && del_timer(timer))

/* Move the channel to @state and notify the owner via the ops
 * state_change callback. */
240 static void l2cap_state_change(struct l2cap_chan *chan, int state)
243 	chan->ops->state_change(chan->data, state);

/* Channel timer expiry: tears the channel down with a reason derived
 * from its current state. Runs in timer (softirq) context. */
246 static void l2cap_chan_timeout(unsigned long arg)
248 	struct l2cap_chan *chan = (struct l2cap_chan *) arg;
249 	struct sock *sk = chan->sk;
252 	BT_DBG("chan %p state %d", chan, chan->state);
256 	if (sock_owned_by_user(sk)) {
257 		/* sk is owned by user. Try again later */
258 		__set_chan_timer(chan, HZ / 5);
264 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
265 		reason = ECONNREFUSED;
266 	else if (chan->state == BT_CONNECT &&
267 					chan->sec_level != BT_SECURITY_SDP)
268 		reason = ECONNREFUSED;
272 	l2cap_chan_close(chan, reason);
276 	chan->ops->close(chan->data);
/* Allocate a new channel for @sk, link it on the global channel list,
 * arm its timer and initialize state/refcount. Returns NULL-check
 * path elided from this excerpt. Uses GFP_ATOMIC: may be called from
 * non-sleeping context. */
280 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
282 	struct l2cap_chan *chan;
284 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
290 	write_lock_bh(&chan_list_lock);
291 	list_add(&chan->global_l, &chan_list);
292 	write_unlock_bh(&chan_list_lock);
294 	setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
296 	chan->state = BT_OPEN;
298 	atomic_set(&chan->refcnt, 1);

/* Unlink the channel from the global list (final free elided from
 * this excerpt). */
303 void l2cap_chan_destroy(struct l2cap_chan *chan)
305 	write_lock_bh(&chan_list_lock);
306 	list_del(&chan->global_l);
307 	write_unlock_bh(&chan_list_lock);

/* Attach @chan to @conn: assign CIDs/MTU per channel type and add it
 * to the connection's channel list.
 * Caller must hold conn->chan_lock. 0x13 = "remote user terminated
 * connection", the default HCI disconnect reason. */
312 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
314 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
315 			chan->psm, chan->dcid);
317 	conn->disc_reason = 0x13;
321 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
322 		if (conn->hcon->type == LE_LINK) {
324 			chan->omtu = L2CAP_LE_DEFAULT_MTU;
325 			chan->scid = L2CAP_CID_LE_DATA;
326 			chan->dcid = L2CAP_CID_LE_DATA;
328 			/* Alloc CID for connection-oriented socket */
329 			chan->scid = l2cap_alloc_cid(conn);
330 			chan->omtu = L2CAP_DEFAULT_MTU;
332 	} else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
333 		/* Connectionless socket */
334 		chan->scid = L2CAP_CID_CONN_LESS;
335 		chan->dcid = L2CAP_CID_CONN_LESS;
336 		chan->omtu = L2CAP_DEFAULT_MTU;
338 		/* Raw socket can send/recv signalling messages only */
339 		chan->scid = L2CAP_CID_SIGNALING;
340 		chan->dcid = L2CAP_CID_SIGNALING;
341 		chan->omtu = L2CAP_DEFAULT_MTU;
346 	list_add(&chan->list, &conn->chan_l);
350  * Must be called on the locked socket. */
/* Detach @chan from its connection: stop timers, unlink from the
 * conn list, drop the hcon reference, zap the socket and purge any
 * ERTM queues/SREJ state. @err is the error reported to the owner. */
351 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
353 	struct sock *sk = chan->sk;
354 	struct l2cap_conn *conn = chan->conn;
355 	struct sock *parent = bt_sk(sk)->parent;
357 	__clear_chan_timer(chan);
359 	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
362 		/* Delete from channel list */
363 		write_lock_bh(&conn->chan_lock);
364 		list_del(&chan->list);
365 		write_unlock_bh(&conn->chan_lock);
369 		hci_conn_put(conn->hcon);
372 	l2cap_state_change(chan, BT_CLOSED);
373 	sock_set_flag(sk, SOCK_ZAPPED);
		/* Wake a pending accept() on the parent, if any */
379 		bt_accept_unlink(sk);
380 		parent->sk_data_ready(parent, 0);
382 		sk->sk_state_change(sk);
384 	if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
385 			test_bit(CONF_INPUT_DONE, &chan->conf_state)))
388 	skb_queue_purge(&chan->tx_q);
390 	if (chan->mode == L2CAP_MODE_ERTM) {
391 		struct srej_list *l, *tmp;
393 		__clear_retrans_timer(chan);
394 		__clear_monitor_timer(chan);
395 		__clear_ack_timer(chan);
397 		skb_queue_purge(&chan->srej_q);
398 		skb_queue_purge(&chan->busy_q);
400 		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {

/* Close (with ECONNRESET) every not-yet-accepted child channel queued
 * on a listening @parent socket. */
407 static void l2cap_chan_cleanup_listen(struct sock *parent)
411 	BT_DBG("parent %p", parent);
413 	/* Close not yet accepted channels */
414 	while ((sk = bt_accept_dequeue(parent, NULL))) {
415 		struct l2cap_chan *chan = l2cap_pi(sk)->chan;
416 		__clear_chan_timer(chan);
418 		l2cap_chan_close(chan, ECONNRESET);
420 		chan->ops->close(chan->data);

/* State-dependent channel shutdown: listening sockets clean their
 * accept queue; connected ACL channels send a Disconnect Request;
 * half-open (BT_CONNECT2) ACL channels answer the pending Connect
 * Request with a reject; everything else is just deleted. */
424 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
426 	struct l2cap_conn *conn = chan->conn;
427 	struct sock *sk = chan->sk;
429 	BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
431 	switch (chan->state) {
433 		l2cap_chan_cleanup_listen(sk);
435 		l2cap_state_change(chan, BT_CLOSED);
436 		sock_set_flag(sk, SOCK_ZAPPED);
441 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
442 					conn->hcon->type == ACL_LINK) {
443 			__clear_chan_timer(chan);
444 			__set_chan_timer(chan, sk->sk_sndtimeo);
445 			l2cap_send_disconn_req(conn, chan, reason);
447 			l2cap_chan_del(chan, reason);
451 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
452 					conn->hcon->type == ACL_LINK) {
453 			struct l2cap_conn_rsp rsp;
			/* Security block if the connection was deferred,
			 * otherwise report an unknown PSM */
456 			if (bt_sk(sk)->defer_setup)
457 				result = L2CAP_CR_SEC_BLOCK;
459 				result = L2CAP_CR_BAD_PSM;
460 			l2cap_state_change(chan, BT_DISCONN);
462 			rsp.scid = cpu_to_le16(chan->dcid);
463 			rsp.dcid = cpu_to_le16(chan->scid);
464 			rsp.result = cpu_to_le16(result);
465 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
466 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
470 		l2cap_chan_del(chan, reason);
475 		l2cap_chan_del(chan, reason);
479 		sock_set_flag(sk, SOCK_ZAPPED);
/* Map the channel type + security level to the HCI authentication
 * requirement. Raw channels ask for dedicated bonding; PSM 0x0001
 * (SDP) never bonds and is downgraded to BT_SECURITY_SDP; everything
 * else uses general bonding. */
484 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
486 	if (chan->chan_type == L2CAP_CHAN_RAW) {
487 		switch (chan->sec_level) {
488 		case BT_SECURITY_HIGH:
489 			return HCI_AT_DEDICATED_BONDING_MITM;
490 		case BT_SECURITY_MEDIUM:
491 			return HCI_AT_DEDICATED_BONDING;
493 			return HCI_AT_NO_BONDING;
495 	} else if (chan->psm == cpu_to_le16(0x0001)) {
496 		if (chan->sec_level == BT_SECURITY_LOW)
497 			chan->sec_level = BT_SECURITY_SDP;
499 		if (chan->sec_level == BT_SECURITY_HIGH)
500 			return HCI_AT_NO_BONDING_MITM;
502 			return HCI_AT_NO_BONDING;
504 		switch (chan->sec_level) {
505 		case BT_SECURITY_HIGH:
506 			return HCI_AT_GENERAL_BONDING_MITM;
507 		case BT_SECURITY_MEDIUM:
508 			return HCI_AT_GENERAL_BONDING;
510 			return HCI_AT_NO_BONDING;

515 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level on the
 * underlying link. */
516 static inline int l2cap_check_security(struct l2cap_chan *chan)
518 	struct l2cap_conn *conn = chan->conn;
521 	auth_type = l2cap_get_auth_type(chan);
523 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);

/* Allocate the next signalling command identifier for @conn. */
526 static u8 l2cap_get_ident(struct l2cap_conn *conn)
530 	/* Get next available identificator.
531 	 *    1 - 128 are used by kernel.
532 	 *  129 - 199 are reserved.
533 	 *  200 - 254 are used by utilities like l2ping, etc.
	 */
536 	spin_lock_bh(&conn->lock);
	/* Wrap within the kernel range (reset path elided here) */
538 	if (++conn->tx_ident > 128)
543 	spin_unlock_bh(&conn->lock);
/* Build and transmit one L2CAP signalling command on @conn.
 * Non-flushable ACL start is requested when the controller supports
 * it; signalling is always sent with force-active power setting. */
548 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
550 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
553 	BT_DBG("code 0x%2.2x", code);
558 	if (lmp_no_flush_capable(conn->hcon->hdev))
559 		flags = ACL_START_NO_FLUSH;
563 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
565 	hci_send_acl(conn->hcon, skb, flags);

/* Build and send an ERTM S-frame carrying @control. Adds the F-bit
 * and/or P-bit if pending on the channel, and an FCS trailer when
 * CRC16 is negotiated. Silently does nothing unless BT_CONNECTED. */
568 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
571 	struct l2cap_hdr *lh;
572 	struct l2cap_conn *conn = chan->conn;
573 	int count, hlen = L2CAP_HDR_SIZE + 2;
576 	if (chan->state != BT_CONNECTED)
		/* Room for the 2-byte FCS trailer (hlen += 2 elided) */
579 	if (chan->fcs == L2CAP_FCS_CRC16)
582 	BT_DBG("chan %p, control 0x%2.2x", chan, control);
584 	count = min_t(unsigned int, conn->mtu, hlen);
585 	control |= L2CAP_CTRL_FRAME_TYPE;
587 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
588 		control |= L2CAP_CTRL_FINAL;
590 	if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
591 		control |= L2CAP_CTRL_POLL;
593 	skb = bt_skb_alloc(count, GFP_ATOMIC);
597 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
598 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
599 	lh->cid = cpu_to_le16(chan->dcid);
600 	put_unaligned_le16(control, skb_put(skb, 2));
602 	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers header + control, excludes the FCS field */
603 		u16 fcs = crc16(0, (u8 *)lh, count - 2);
604 		put_unaligned_le16(fcs, skb_put(skb, 2));
607 	if (lmp_no_flush_capable(conn->hcon->hdev))
608 		flags = ACL_START_NO_FLUSH;
612 	bt_cb(skb)->force_active = chan->force_active;
614 	hci_send_acl(chan->conn->hcon, skb, flags);

/* Send RR, or RNR when the local receiver is busy, acknowledging the
 * current buffer_seq. */
617 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
619 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
620 		control |= L2CAP_SUPER_RCV_NOT_READY;
621 		set_bit(CONN_RNR_SENT, &chan->conn_state);
623 		control |= L2CAP_SUPER_RCV_READY;
625 	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
627 	l2cap_send_sframe(chan, control);

/* True when no Connect Request is outstanding on this channel. */
630 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
632 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Kick off channel establishment. If the remote feature mask is
 * already known (or being fetched), send a Connect Request once
 * security allows; otherwise first issue an Information Request for
 * the feature mask and arm the info timer. */
635 static void l2cap_do_start(struct l2cap_chan *chan)
637 	struct l2cap_conn *conn = chan->conn;
639 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
640 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
643 		if (l2cap_check_security(chan) &&
644 				__l2cap_no_conn_pending(chan)) {
645 			struct l2cap_conn_req req;
646 			req.scid = cpu_to_le16(chan->scid);
649 			chan->ident = l2cap_get_ident(conn);
650 			set_bit(CONF_CONNECT_PEND, &chan->conf_state);
652 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
656 		struct l2cap_info_req req;
657 		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
659 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
660 		conn->info_ident = l2cap_get_ident(conn);
662 		mod_timer(&conn->info_timer, jiffies +
663 					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
665 		l2cap_send_cmd(conn, conn->info_ident,
666 					L2CAP_INFO_REQ, sizeof(req), &req);

/* Check whether @mode is supported by both the local stack and the
 * remote feature mask @feat_mask (ERTM/streaming gating elided
 * partially in this excerpt). */
670 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
672 	u32 local_feat_mask = l2cap_feat_mask;
674 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
677 	case L2CAP_MODE_ERTM:
678 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
679 	case L2CAP_MODE_STREAMING:
680 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;

/* Send a Disconnect Request for @chan, stopping all ERTM timers
 * first, and move the channel to BT_DISCONN. */
686 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
689 	struct l2cap_disconn_req req;
696 	if (chan->mode == L2CAP_MODE_ERTM) {
697 		__clear_retrans_timer(chan);
698 		__clear_monitor_timer(chan);
699 		__clear_ack_timer(chan);
702 	req.dcid = cpu_to_le16(chan->dcid);
703 	req.scid = cpu_to_le16(chan->scid);
704 	l2cap_send_cmd(conn, l2cap_get_ident(conn),
705 			L2CAP_DISCONN_REQ, sizeof(req), &req);
707 	l2cap_state_change(chan, BT_DISCONN);
711 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn once the feature exchange is done:
 * BT_CONNECT channels get their Connect Request sent (or are closed
 * if their mode is unsupported); BT_CONNECT2 channels get a Connect
 * Response whose result depends on security and defer_setup. */
712 static void l2cap_conn_start(struct l2cap_conn *conn)
714 	struct l2cap_chan *chan, *tmp;
716 	BT_DBG("conn %p", conn);
718 	read_lock(&conn->chan_lock);
720 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
721 		struct sock *sk = chan->sk;
725 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
730 		if (chan->state == BT_CONNECT) {
731 			struct l2cap_conn_req req;
733 			if (!l2cap_check_security(chan) ||
734 					!__l2cap_no_conn_pending(chan)) {
739 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
740 					&& test_bit(CONF_STATE2_DEVICE,
741 					&chan->conf_state)) {
742 				/* l2cap_chan_close() calls list_del(chan)
743 				 * so release the lock */
744 				read_unlock(&conn->chan_lock);
745 				l2cap_chan_close(chan, ECONNRESET);
746 				read_lock(&conn->chan_lock);
751 			req.scid = cpu_to_le16(chan->scid);
754 			chan->ident = l2cap_get_ident(conn);
755 			set_bit(CONF_CONNECT_PEND, &chan->conf_state);
757 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
760 		} else if (chan->state == BT_CONNECT2) {
761 			struct l2cap_conn_rsp rsp;
763 			rsp.scid = cpu_to_le16(chan->dcid);
764 			rsp.dcid = cpu_to_le16(chan->scid);
766 			if (l2cap_check_security(chan)) {
767 				if (bt_sk(sk)->defer_setup) {
768 					struct sock *parent = bt_sk(sk)->parent;
769 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
770 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
771 					parent->sk_data_ready(parent, 0);
774 					l2cap_state_change(chan, BT_CONFIG);
775 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
776 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
779 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
780 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
783 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
			/* Only follow up with a Config Request on a fresh
			 * successful response */
786 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
787 					rsp.result != L2CAP_CR_SUCCESS) {
792 			set_bit(CONF_REQ_SENT, &chan->conf_state);
793 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
794 						l2cap_build_conf_req(chan, buf), buf);
795 			chan->num_conf_req++;
801 	read_unlock(&conn->chan_lock);

804 /* Find socket with cid and source bdaddr.
805  * Returns closest match, locked.
 */
/* Exact source-address match wins; a BDADDR_ANY binding is kept as a
 * fallback (returned via c1, return path elided in this excerpt). */
807 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
809 	struct l2cap_chan *c, *c1 = NULL;
811 	read_lock(&chan_list_lock);
813 	list_for_each_entry(c, &chan_list, global_l) {
814 		struct sock *sk = c->sk;
816 		if (state && c->state != state)
819 		if (c->scid == cid) {
821 			if (!bacmp(&bt_sk(sk)->src, src)) {
822 				read_unlock(&chan_list_lock);
827 			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
832 	read_unlock(&chan_list_lock);
/* Incoming LE connection: find the listener on the LE data CID,
 * spawn a child channel, queue it on the parent's accept queue and
 * mark it connected. Runs with the parent socket bh-locked. */
837 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
839 	struct sock *parent, *sk;
840 	struct l2cap_chan *chan, *pchan;
844 	/* Check if we have socket listening on cid */
845 	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
852 	bh_lock_sock(parent);
854 	/* Check for backlog size */
855 	if (sk_acceptq_is_full(parent)) {
856 		BT_DBG("backlog full %d", parent->sk_ack_backlog);
860 	chan = pchan->ops->new_connection(pchan->data);
866 	write_lock_bh(&conn->chan_lock);
868 	hci_conn_hold(conn->hcon);
870 	bacpy(&bt_sk(sk)->src, conn->src);
871 	bacpy(&bt_sk(sk)->dst, conn->dst);
873 	bt_accept_enqueue(parent, sk);
875 	__l2cap_chan_add(conn, chan);
877 	__set_chan_timer(chan, sk->sk_sndtimeo);
879 	l2cap_state_change(chan, BT_CONNECTED);
880 	parent->sk_data_ready(parent, 0);
882 	write_unlock_bh(&conn->chan_lock);
885 	bh_unlock_sock(parent);

/* Configuration finished: reset conf state, stop the channel timer
 * and move to BT_CONNECTED, waking the (parent) socket. */
888 static void l2cap_chan_ready(struct sock *sk)
890 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
891 	struct sock *parent = bt_sk(sk)->parent;
893 	BT_DBG("sk %p, parent %p", sk, parent);
895 	chan->conf_state = 0;
896 	__clear_chan_timer(chan);
898 	l2cap_state_change(chan, BT_CONNECTED);
899 	sk->sk_state_change(sk);
902 		parent->sk_data_ready(parent, 0);

/* The HCI link came up: handle incoming LE listeners, then for each
 * channel either complete SMP security (LE), mark connectionless/raw
 * channels connected, or start the connect procedure. */
905 static void l2cap_conn_ready(struct l2cap_conn *conn)
907 	struct l2cap_chan *chan;
909 	BT_DBG("conn %p", conn);
911 	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
912 		l2cap_le_conn_ready(conn);
914 	read_lock(&conn->chan_lock);
916 	list_for_each_entry(chan, &conn->chan_l, list) {
917 		struct sock *sk = chan->sk;
921 		if (conn->hcon->type == LE_LINK) {
922 			if (smp_conn_security(conn, chan->sec_level))
923 				l2cap_chan_ready(sk);
925 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
926 			__clear_chan_timer(chan);
927 			l2cap_state_change(chan, BT_CONNECTED);
928 			sk->sk_state_change(sk);
930 		} else if (chan->state == BT_CONNECT)
931 			l2cap_do_start(chan);
936 	read_unlock(&conn->chan_lock);

939 /* Notify sockets that we cannot guaranty reliability anymore */
940 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
942 	struct l2cap_chan *chan;
944 	BT_DBG("conn %p", conn);
946 	read_lock(&conn->chan_lock);
948 	list_for_each_entry(chan, &conn->chan_l, list) {
949 		struct sock *sk = chan->sk;
		/* Only channels that demanded reliability are notified
		 * (sk_err assignment elided from this excerpt) */
951 		if (chan->force_reliable)
955 	read_unlock(&conn->chan_lock);

/* Information Request timed out: give up on the feature mask and
 * proceed with connection setup anyway. */
958 static void l2cap_info_timeout(unsigned long arg)
960 	struct l2cap_conn *conn = (void *) arg;
962 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
963 	conn->info_ident = 0;
965 	l2cap_conn_start(conn);
/* Tear down the L2CAP connection attached to @hcon: free any partial
 * reassembly skb, delete every channel with @err, stop the info/
 * security timers and detach from the HCI connection. */
968 static void l2cap_conn_del(struct hci_conn *hcon, int err)
970 	struct l2cap_conn *conn = hcon->l2cap_data;
971 	struct l2cap_chan *chan, *l;
977 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
979 	kfree_skb(conn->rx_skb);
982 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
985 		l2cap_chan_del(chan, err);
987 		chan->ops->close(chan->data);
990 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
991 		del_timer_sync(&conn->info_timer);
993 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
994 		del_timer(&conn->security_timer);
996 	hcon->l2cap_data = NULL;

/* LE security procedure timed out: drop the whole connection. */
1000 static void security_timeout(unsigned long arg)
1002 	struct l2cap_conn *conn = (void *) arg;
1004 	l2cap_conn_del(conn->hcon, ETIMEDOUT);

/* Get or create the l2cap_conn for @hcon. Picks the LE or ACL MTU
 * from the controller and arms the security timer (LE) or the info
 * timer (ACL). GFP_ATOMIC: callable from bh context. */
1007 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1009 	struct l2cap_conn *conn = hcon->l2cap_data;
1014 	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1018 	hcon->l2cap_data = conn;
1021 	BT_DBG("hcon %p conn %p", hcon, conn);
1023 	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1024 		conn->mtu = hcon->hdev->le_mtu;
1026 		conn->mtu = hcon->hdev->acl_mtu;
1028 	conn->src = &hcon->hdev->bdaddr;
1029 	conn->dst = &hcon->dst;
1031 	conn->feat_mask = 0;
1033 	spin_lock_init(&conn->lock);
1034 	rwlock_init(&conn->chan_lock);
1036 	INIT_LIST_HEAD(&conn->chan_l);
1038 	if (hcon->type == LE_LINK)
1039 		setup_timer(&conn->security_timer, security_timeout,
1040 						(unsigned long) conn);
1042 		setup_timer(&conn->info_timer, l2cap_info_timeout,
1043 						(unsigned long) conn);
	/* 0x13: remote user terminated connection (default reason) */
1045 	conn->disc_reason = 0x13;

/* Locked wrapper around __l2cap_chan_add(). */
1050 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1052 	write_lock_bh(&conn->chan_lock);
1053 	__l2cap_chan_add(conn, chan);
1054 	write_unlock_bh(&conn->chan_lock);
1057 /* ---- Socket interface ---- */
1059 /* Find socket with psm and source bdaddr.
1060  * Returns closest match.
 */
/* Exact source-address match wins; a BDADDR_ANY binding is kept as a
 * fallback in c1 (return path elided in this excerpt). */
1062 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1064 	struct l2cap_chan *c, *c1 = NULL;
1066 	read_lock(&chan_list_lock);
1068 	list_for_each_entry(c, &chan_list, global_l) {
1069 		struct sock *sk = c->sk;
1071 		if (state && c->state != state)
1074 		if (c->psm == psm) {
1076 			if (!bacmp(&bt_sk(sk)->src, src)) {
1077 				read_unlock(&chan_list_lock);
1082 			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1087 	read_unlock(&chan_list_lock);

/* Initiate an outgoing connection for @chan: route to a local HCI
 * device, create the LE or ACL link with the required security, bind
 * the channel to the resulting l2cap_conn and either proceed
 * immediately (link already up) or wait for l2cap_conn_ready(). */
1092 int l2cap_chan_connect(struct l2cap_chan *chan)
1094 	struct sock *sk = chan->sk;
1095 	bdaddr_t *src = &bt_sk(sk)->src;
1096 	bdaddr_t *dst = &bt_sk(sk)->dst;
1097 	struct l2cap_conn *conn;
1098 	struct hci_conn *hcon;
1099 	struct hci_dev *hdev;
1103 	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1106 	hdev = hci_get_route(dst, src);
1108 		return -EHOSTUNREACH;
1110 	hci_dev_lock_bh(hdev);
1112 	auth_type = l2cap_get_auth_type(chan);
	/* The LE data CID selects an LE link; everything else is ACL */
1114 	if (chan->dcid == L2CAP_CID_LE_DATA)
1115 		hcon = hci_connect(hdev, LE_LINK, dst,
1116 					chan->sec_level, auth_type);
1118 		hcon = hci_connect(hdev, ACL_LINK, dst,
1119 					chan->sec_level, auth_type);
1122 		err = PTR_ERR(hcon);
1126 	conn = l2cap_conn_add(hcon, 0);
1133 	/* Update source addr of the socket */
1134 	bacpy(src, conn->src);
1136 	l2cap_chan_add(conn, chan);
1138 	l2cap_state_change(chan, BT_CONNECT);
1139 	__set_chan_timer(chan, sk->sk_sndtimeo);
1141 	if (hcon->state == BT_CONNECTED) {
1142 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1143 			__clear_chan_timer(chan);
1144 			if (l2cap_check_security(chan))
1145 				l2cap_state_change(chan, BT_CONNECTED);
1147 			l2cap_do_start(chan);
1153 	hci_dev_unlock_bh(hdev);
/* Block (interruptibly) until every transmitted ERTM frame has been
 * acknowledged, or until a signal/socket error/timeout interrupts.
 * Returns 0 on success or a negative errno. */
1158 int __l2cap_wait_ack(struct sock *sk)
1160 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1161 	DECLARE_WAITQUEUE(wait, current);
1165 	add_wait_queue(sk_sleep(sk), &wait);
1166 	while ((chan->unacked_frames > 0 && chan->conn)) {
1167 		set_current_state(TASK_INTERRUPTIBLE);
1172 		if (signal_pending(current)) {
1173 			err = sock_intr_errno(timeo);
1178 		timeo = schedule_timeout(timeo);
1181 		err = sock_error(sk);
1185 	set_current_state(TASK_RUNNING);
1186 	remove_wait_queue(sk_sleep(sk), &wait);

/* ERTM monitor timer: the peer has not answered our poll. Give up and
 * disconnect after remote_max_tx retries, otherwise poll again. */
1190 static void l2cap_monitor_timeout(unsigned long arg)
1192 	struct l2cap_chan *chan = (void *) arg;
1193 	struct sock *sk = chan->sk;
1195 	BT_DBG("chan %p", chan);
1198 	if (chan->retry_count >= chan->remote_max_tx) {
1199 		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1204 	chan->retry_count++;
1205 	__set_monitor_timer(chan);
1207 	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);

/* ERTM retransmission timer: start the poll procedure (WAIT_F) and
 * send an RR/RNR with the P-bit set. */
1211 static void l2cap_retrans_timeout(unsigned long arg)
1213 	struct l2cap_chan *chan = (void *) arg;
1214 	struct sock *sk = chan->sk;
1216 	BT_DBG("chan %p", chan);
1219 	chan->retry_count = 1;
1220 	__set_monitor_timer(chan);
1222 	set_bit(CONN_WAIT_F, &chan->conn_state);
1224 	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);

/* Free frames at the head of tx_q up to (not including) the expected
 * ack sequence; stop the retransmission timer when nothing remains
 * unacked. */
1228 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1230 	struct sk_buff *skb;
1232 	while ((skb = skb_peek(&chan->tx_q)) &&
1233 			chan->unacked_frames) {
1234 		if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1237 		skb = skb_dequeue(&chan->tx_q);
1240 		chan->unacked_frames--;
1243 	if (!chan->unacked_frames)
1244 		__clear_retrans_timer(chan);
/* Hand a fully built L2CAP frame to the HCI layer, choosing the
 * non-flushable ACL start flag for non-flushable channels on capable
 * controllers. */
1247 void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1249 	struct hci_conn *hcon = chan->conn->hcon;
1252 	BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1254 	if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1255 		flags = ACL_START_NO_FLUSH;
1259 	bt_cb(skb)->force_active = chan->force_active;
1260 	hci_send_acl(hcon, skb, flags);

/* Streaming mode: drain tx_q, stamping each frame with the next TX
 * sequence number (mod-64) and an FCS trailer when negotiated. No
 * retransmission - frames leave the queue permanently. */
1263 void l2cap_streaming_send(struct l2cap_chan *chan)
1265 	struct sk_buff *skb;
1268 	while ((skb = skb_dequeue(&chan->tx_q))) {
1269 		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1270 		control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1271 		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1273 		if (chan->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers everything but its own 2 bytes */
1274 			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1275 			put_unaligned_le16(fcs, skb->data + skb->len - 2);
1278 		l2cap_do_send(chan, skb);
1280 		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;

/* Retransmit the single queued I-frame with tx_seq == @tx_seq (after
 * an SREJ). Disconnects if the frame already hit remote_max_tx
 * retries. The clone shares the data buffer with the queued skb, so
 * the control/FCS rewrite updates both. */
1284 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1286 	struct sk_buff *skb, *tx_skb;
1289 	skb = skb_peek(&chan->tx_q);
1294 		if (bt_cb(skb)->tx_seq == tx_seq)
1297 		if (skb_queue_is_last(&chan->tx_q, skb))
1300 	} while ((skb = skb_queue_next(&chan->tx_q, skb)));
1302 	if (chan->remote_max_tx &&
1303 			bt_cb(skb)->retries == chan->remote_max_tx) {
1304 		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1308 	tx_skb = skb_clone(skb, GFP_ATOMIC);
1309 	bt_cb(skb)->retries++;
1310 	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
	/* Keep only the SAR bits; rebuild ReqSeq/TxSeq/F below */
1311 	control &= L2CAP_CTRL_SAR;
1313 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1314 		control |= L2CAP_CTRL_FINAL;
1316 	control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1317 			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1319 	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1321 	if (chan->fcs == L2CAP_FCS_CRC16) {
1322 		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1323 		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1326 	l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send clones of frames from tx_send_head while
 * the TX window is open, arming the retransmission timer and
 * counting unacked frames. Returns the number of frames sent (return
 * statement elided from this excerpt).
 * NOTE(review): the FCS below is computed/written via skb->data while
 * the control field went into tx_skb->data — harmless only because
 * skb_clone() shares the data buffer, but inconsistent with
 * l2cap_retransmit_one_frame(); worth unifying upstream. */
1329 int l2cap_ertm_send(struct l2cap_chan *chan)
1331 	struct sk_buff *skb, *tx_skb;
1335 	if (chan->state != BT_CONNECTED)
1338 	while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1340 		if (chan->remote_max_tx &&
1341 				bt_cb(skb)->retries == chan->remote_max_tx) {
1342 			l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1346 		tx_skb = skb_clone(skb, GFP_ATOMIC);
1348 		bt_cb(skb)->retries++;
1350 		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
		/* Preserve SAR bits; rebuild the rest of the control field */
1351 		control &= L2CAP_CTRL_SAR;
1353 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1354 			control |= L2CAP_CTRL_FINAL;
1356 		control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1357 				| (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1358 		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1361 		if (chan->fcs == L2CAP_FCS_CRC16) {
1362 			fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1363 			put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1366 		l2cap_do_send(chan, tx_skb);
1368 		__set_retrans_timer(chan);
1370 		bt_cb(skb)->tx_seq = chan->next_tx_seq;
1371 		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
		/* Count each frame as unacked only on its first send */
1373 		if (bt_cb(skb)->retries == 1)
1374 			chan->unacked_frames++;
1376 		chan->frames_sent++;
1378 		if (skb_queue_is_last(&chan->tx_q, skb))
1379 			chan->tx_send_head = NULL;
1381 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

/* Rewind tx_send_head to the start of tx_q and resend everything
 * from the expected ack sequence onward. */
1389 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1393 	if (!skb_queue_empty(&chan->tx_q))
1394 		chan->tx_send_head = chan->tx_q.next;
1396 	chan->next_tx_seq = chan->expected_ack_seq;
1397 	ret = l2cap_ertm_send(chan);

/* Acknowledge received frames: RNR when locally busy, otherwise try
 * piggy-backing the ack on pending I-frames and fall back to an RR. */
1401 static void l2cap_send_ack(struct l2cap_chan *chan)
1405 	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1407 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1408 		control |= L2CAP_SUPER_RCV_NOT_READY;
1409 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1410 		l2cap_send_sframe(chan, control);
	/* I-frames carry the ack implicitly; no S-frame needed then */
1414 	if (l2cap_ertm_send(chan) > 0)
1417 	control |= L2CAP_SUPER_RCV_READY;
1418 	l2cap_send_sframe(chan, control);

/* Send an SREJ S-frame (with F-bit) for the last entry on the SREJ
 * list. */
1421 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1423 	struct srej_list *tail;
1426 	control = L2CAP_SUPER_SELECT_REJECT;
1427 	control |= L2CAP_CTRL_FINAL;
1429 	tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1430 	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1432 	l2cap_send_sframe(chan, control);
1435 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1437 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1438 struct sk_buff **frag;
1441 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1447 /* Continuation fragments (no L2CAP header) */
1448 frag = &skb_shinfo(skb)->frag_list;
1450 count = min_t(unsigned int, conn->mtu, len);
1452 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1455 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1461 frag = &(*frag)->next;
1467 struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1469 struct sock *sk = chan->sk;
1470 struct l2cap_conn *conn = chan->conn;
1471 struct sk_buff *skb;
1472 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1473 struct l2cap_hdr *lh;
1475 BT_DBG("sk %p len %d", sk, (int)len);
1477 count = min_t(unsigned int, (conn->mtu - hlen), len);
1478 skb = bt_skb_send_alloc(sk, count + hlen,
1479 msg->msg_flags & MSG_DONTWAIT, &err);
1481 return ERR_PTR(err);
1483 /* Create L2CAP header */
1484 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1485 lh->cid = cpu_to_le16(chan->dcid);
1486 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1487 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1489 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1490 if (unlikely(err < 0)) {
1492 return ERR_PTR(err);
1497 struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1499 struct sock *sk = chan->sk;
1500 struct l2cap_conn *conn = chan->conn;
1501 struct sk_buff *skb;
1502 int err, count, hlen = L2CAP_HDR_SIZE;
1503 struct l2cap_hdr *lh;
1505 BT_DBG("sk %p len %d", sk, (int)len);
1507 count = min_t(unsigned int, (conn->mtu - hlen), len);
1508 skb = bt_skb_send_alloc(sk, count + hlen,
1509 msg->msg_flags & MSG_DONTWAIT, &err);
1511 return ERR_PTR(err);
1513 /* Create L2CAP header */
1514 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1515 lh->cid = cpu_to_le16(chan->dcid);
1516 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1518 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1519 if (unlikely(err < 0)) {
1521 return ERR_PTR(err);
/*
 * Build an ERTM/streaming I-frame PDU.
 * Layout: L2CAP header, 16-bit control field, optional 16-bit SDU length
 * (only for the first segment of a segmented SDU, i.e. sdulen != 0),
 * payload, and a zero FCS placeholder when CRC16 is enabled (the real
 * FCS is computed at transmit time).  Returns the skb or an ERR_PTR.
 */
1526 struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1528 struct sock *sk = chan->sk;
1529 struct l2cap_conn *conn = chan->conn;
1530 struct sk_buff *skb;
/* hlen = basic header + 2-byte control; extended below for SDU len/FCS */
1531 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1532 struct l2cap_hdr *lh;
1534 BT_DBG("sk %p len %d", sk, (int)len);
1537 return ERR_PTR(-ENOTCONN);
/* reserve room for the trailing 2-byte FCS when CRC16 is in use */
1542 if (chan->fcs == L2CAP_FCS_CRC16)
1545 count = min_t(unsigned int, (conn->mtu - hlen), len);
1546 skb = bt_skb_send_alloc(sk, count + hlen,
1547 msg->msg_flags & MSG_DONTWAIT, &err);
1549 return ERR_PTR(err);
1551 /* Create L2CAP header */
1552 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1553 lh->cid = cpu_to_le16(chan->dcid);
1554 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1555 put_unaligned_le16(control, skb_put(skb, 2));
/* SDU length word is present only on SAR "start" frames (sdulen != 0) */
1557 put_unaligned_le16(sdulen, skb_put(skb, 2));
1559 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1560 if (unlikely(err < 0)) {
1562 return ERR_PTR(err);
/* FCS placeholder; actual CRC16 is filled in before transmission */
1565 if (chan->fcs == L2CAP_FCS_CRC16)
1566 put_unaligned_le16(0, skb_put(skb, 2));
/* fresh frame: no retransmissions yet */
1568 bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than the remote MPS into a chain of I-frames
 * (SAR: one START frame carrying the total SDU length, zero or more
 * CONTINUE frames, one END frame) and splice them onto the channel's
 * tx queue.  On any allocation failure the partially-built queue is
 * purged and the PTR_ERR is returned.
 */
1572 int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1574 struct sk_buff *skb;
1575 struct sk_buff_head sar_queue;
1579 skb_queue_head_init(&sar_queue);
/* first segment: START frame, sdulen field = total SDU length */
1580 control = L2CAP_SDU_START;
1581 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1583 return PTR_ERR(skb);
1585 __skb_queue_tail(&sar_queue, skb);
1586 len -= chan->remote_mps;
1587 size += chan->remote_mps;
/* middle segments use CONTINUE; the final (<= MPS) one uses END */
1592 if (len > chan->remote_mps) {
1593 control = L2CAP_SDU_CONTINUE;
1594 buflen = chan->remote_mps;
1596 control = L2CAP_SDU_END;
1600 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* undo everything queued so far on failure */
1602 skb_queue_purge(&sar_queue);
1603 return PTR_ERR(skb);
1606 __skb_queue_tail(&sar_queue, skb);
1610 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
/* if nothing was pending, transmission starts from our first segment */
1611 if (chan->tx_send_head == NULL)
1612 chan->tx_send_head = sar_queue.next;
/*
 * Entry point for sending user data on a channel.  Dispatches on the
 * channel type/mode: connectionless channels and basic mode send a
 * single PDU immediately; ERTM/streaming queue one I-frame (or a SAR
 * chain) and then kick the appropriate transmit engine.
 */
1617 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1619 struct sk_buff *skb;
1623 /* Connectionless channel */
1624 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1625 skb = l2cap_create_connless_pdu(chan, msg, len);
1627 return PTR_ERR(skb);
1629 l2cap_do_send(chan, skb);
1633 switch (chan->mode) {
1634 case L2CAP_MODE_BASIC:
1635 /* Check outgoing MTU */
1636 if (len > chan->omtu)
1639 /* Create a basic PDU */
1640 skb = l2cap_create_basic_pdu(chan, msg, len);
1642 return PTR_ERR(skb);
1644 l2cap_do_send(chan, skb);
1648 case L2CAP_MODE_ERTM:
1649 case L2CAP_MODE_STREAMING:
1650 /* Entire SDU fits into one PDU */
1651 if (len <= chan->remote_mps) {
1652 control = L2CAP_SDU_UNSEGMENTED;
1653 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1656 return PTR_ERR(skb);
1658 __skb_queue_tail(&chan->tx_q, skb);
1660 if (chan->tx_send_head == NULL)
1661 chan->tx_send_head = skb;
1664 /* Segment SDU into multiples PDUs */
1665 err = l2cap_sar_segment_sdu(chan, msg, len);
/* streaming mode has no retransmission: push frames out right away */
1670 if (chan->mode == L2CAP_MODE_STREAMING) {
1671 l2cap_streaming_send(chan);
/* in ERTM, hold off while remote is busy or we await an F-bit reply */
1676 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1677 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1682 err = l2cap_ertm_send(chan);
/* NOTE(review): message says "bad state" but prints chan->mode — confirm */
1689 BT_DBG("bad state %1.1x", chan->mode);
1696 /* Copy frame to all raw sockets on that connection */
1697 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1699 struct sk_buff *nskb;
1700 struct l2cap_chan *chan;
1702 BT_DBG("conn %p", conn);
/* walk the connection's channel list under the read lock and hand a
 * clone of the frame to every raw channel's recv callback */
1704 read_lock(&conn->chan_lock);
1705 list_for_each_entry(chan, &conn->chan_l, list) {
1706 struct sock *sk = chan->sk;
1707 if (chan->chan_type != L2CAP_CHAN_RAW)
1710 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: we are inside a read_lock, cannot sleep */
1713 nskb = skb_clone(skb, GFP_ATOMIC);
/* recv callback takes ownership of nskb on success; on failure the
 * clone is presumably freed — TODO confirm against ops->recv contract */
1717 if (chan->ops->recv(chan->data, nskb))
1720 read_unlock(&conn->chan_lock);
1723 /* ---- L2CAP signalling commands ---- */
/*
 * Build a signalling command skb: L2CAP header (signalling CID — LE or
 * BR/EDR depending on link type), command header, then the payload,
 * fragmented across the frag_list when it exceeds the connection MTU.
 * Returns the skb or NULL on allocation failure.
 */
1724 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1725 u8 code, u8 ident, u16 dlen, void *data)
1727 struct sk_buff *skb, **frag;
1728 struct l2cap_cmd_hdr *cmd;
1729 struct l2cap_hdr *lh;
1732 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1733 conn, code, ident, dlen);
1735 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1736 count = min_t(unsigned int, conn->mtu, len);
1738 skb = bt_skb_alloc(count, GFP_ATOMIC);
1742 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1743 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use the LE signalling channel, BR/EDR the classic one */
1745 if (conn->hcon->type == LE_LINK)
1746 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1748 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1750 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1753 cmd->len = cpu_to_le16(dlen);
/* copy as much payload as fits in the first skb */
1756 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1757 memcpy(skb_put(skb, count), data, count);
1763 /* Continuation fragments (no L2CAP header) */
1764 frag = &skb_shinfo(skb)->frag_list;
1766 count = min_t(unsigned int, conn->mtu, len);
1768 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1772 memcpy(skb_put(*frag, count), data, count);
1777 frag = &(*frag)->next;
/*
 * Decode one configuration option from *ptr, returning its type, its
 * length (olen) and its value widened into *val (1/2/4-byte options are
 * read by size; anything else is passed as a pointer).  Returns the
 * total bytes consumed so the caller can advance through the buffer.
 */
1787 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1789 struct l2cap_conf_opt *opt = *ptr;
1792 len = L2CAP_CONF_OPT_SIZE + opt->len;
1800 *val = *((u8 *) opt->val);
1804 *val = get_unaligned_le16(opt->val);
1808 *val = get_unaligned_le32(opt->val);
/* larger options: hand back a pointer to the raw bytes */
1812 *val = (unsigned long) opt->val;
1816 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option (type/len/value) at *ptr and advance
 * *ptr past it.  Inverse of l2cap_get_conf_opt(): 1/2/4-byte values are
 * stored by size, larger values are memcpy'd from the pointer in val.
 */
1820 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1822 struct l2cap_conf_opt *opt = *ptr;
1824 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1831 *((u8 *) opt->val) = val;
1835 put_unaligned_le16(val, opt->val);
1839 put_unaligned_le32(val, opt->val);
/* len > 4: val is actually a pointer to the option payload */
1843 memcpy(opt->val, (void *) val, len);
1847 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * ERTM ack timer callback: when the timer fires, send any pending
 * acknowledgement under the socket lock.  arg is the l2cap_chan.
 */
1850 static void l2cap_ack_timeout(unsigned long arg)
1852 struct l2cap_chan *chan = (void *) arg;
1854 bh_lock_sock(chan->sk);
1855 l2cap_send_ack(chan);
1856 bh_unlock_sock(chan->sk);
/*
 * Initialise per-channel ERTM state once the channel enters ERTM mode:
 * reset sequence counters, arm the retransmission/monitor/ack timers,
 * set up the SREJ and busy queues, and install the ERTM backlog
 * receive handler on the socket.
 */
1859 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1861 struct sock *sk = chan->sk;
1863 chan->expected_ack_seq = 0;
1864 chan->unacked_frames = 0;
1865 chan->buffer_seq = 0;
1866 chan->num_acked = 0;
1867 chan->frames_sent = 0;
1869 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1870 (unsigned long) chan);
1871 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1872 (unsigned long) chan);
1873 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
/* srej_q holds out-of-order frames awaiting selective-reject recovery */
1875 skb_queue_head_init(&chan->srej_q);
1876 skb_queue_head_init(&chan->busy_q);
1878 INIT_LIST_HEAD(&chan->srej_l);
1880 INIT_WORK(&chan->busy_work, l2cap_busy_work);
1882 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/*
 * Pick the channel mode to actually use: keep the requested
 * ERTM/streaming mode only if the remote's feature mask supports it,
 * otherwise fall back to basic mode.
 */
1885 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1888 case L2CAP_MODE_STREAMING:
1889 case L2CAP_MODE_ERTM:
1890 if (l2cap_mode_supported(mode, remote_feat_mask))
1894 return L2CAP_MODE_BASIC;
/*
 * Build an outgoing Configure Request for the channel into `data`:
 * MTU option (if non-default), then an RFC option describing the
 * requested mode (basic / ERTM / streaming), then optionally an FCS
 * option when CRC16 can be disabled.  Returns the request length.
 */
1898 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1900 struct l2cap_conf_req *req = data;
1901 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1902 void *ptr = req->data;
1904 BT_DBG("chan %p", chan);
/* only downgrade the mode on the very first config exchange */
1906 if (chan->num_conf_req || chan->num_conf_rsp)
1909 switch (chan->mode) {
1910 case L2CAP_MODE_STREAMING:
1911 case L2CAP_MODE_ERTM:
/* a mode forced by the user (STATE2_DEVICE) is never renegotiated */
1912 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
1917 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
1922 if (chan->imtu != L2CAP_DEFAULT_MTU)
1923 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1925 switch (chan->mode) {
1926 case L2CAP_MODE_BASIC:
/* no RFC option needed unless the peer understands ERTM/streaming */
1927 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1928 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1931 rfc.mode = L2CAP_MODE_BASIC;
1933 rfc.max_transmit = 0;
1934 rfc.retrans_timeout = 0;
1935 rfc.monitor_timeout = 0;
1936 rfc.max_pdu_size = 0;
1938 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1939 (unsigned long) &rfc);
1942 case L2CAP_MODE_ERTM:
1943 rfc.mode = L2CAP_MODE_ERTM;
1944 rfc.txwin_size = chan->tx_win;
1945 rfc.max_transmit = chan->max_tx;
/* timeouts are set by the responder; request 0 here */
1946 rfc.retrans_timeout = 0;
1947 rfc.monitor_timeout = 0;
1948 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* clamp PDU size so frame + overhead (10 bytes) fits the ACL MTU */
1949 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1950 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1952 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1953 (unsigned long) &rfc);
1955 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* advertise FCS off when we don't need it and the peer agreed */
1958 if (chan->fcs == L2CAP_FCS_NONE ||
1959 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1960 chan->fcs = L2CAP_FCS_NONE;
1961 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1965 case L2CAP_MODE_STREAMING:
1966 rfc.mode = L2CAP_MODE_STREAMING;
1968 rfc.max_transmit = 0;
1969 rfc.retrans_timeout = 0;
1970 rfc.monitor_timeout = 0;
1971 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1972 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1973 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1975 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1976 (unsigned long) &rfc);
1978 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1981 if (chan->fcs == L2CAP_FCS_NONE ||
1982 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1983 chan->fcs = L2CAP_FCS_NONE;
1984 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1989 req->dcid = cpu_to_le16(chan->dcid);
1990 req->flags = cpu_to_le16(0);
/*
 * Parse the peer's accumulated Configure Request (chan->conf_req) and
 * build our Configure Response into `data`.  Walks the option list,
 * validates MTU/RFC/FCS, resolves the channel mode against what the
 * peer proposed, and records the negotiated remote parameters.
 * Returns the response length or -ECONNREFUSED when negotiation fails.
 */
1995 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1997 struct l2cap_conf_rsp *rsp = data;
1998 void *ptr = rsp->data;
1999 void *req = chan->conf_req;
2000 int len = chan->conf_len;
2001 int type, hint, olen;
2003 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2004 u16 mtu = L2CAP_DEFAULT_MTU;
2005 u16 result = L2CAP_CONF_SUCCESS;
2007 BT_DBG("chan %p", chan);
2009 while (len >= L2CAP_CONF_OPT_SIZE) {
2010 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* hint bit set = option may be ignored; clear = must be understood */
2012 hint = type & L2CAP_CONF_HINT;
2013 type &= L2CAP_CONF_MASK;
2016 case L2CAP_CONF_MTU:
2020 case L2CAP_CONF_FLUSH_TO:
2021 chan->flush_to = val;
2024 case L2CAP_CONF_QOS:
2027 case L2CAP_CONF_RFC:
2028 if (olen == sizeof(rfc))
2029 memcpy(&rfc, (void *) val, olen);
2032 case L2CAP_CONF_FCS:
2033 if (val == L2CAP_FCS_NONE)
2034 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
/* unknown non-hint option: reject, echoing the offending type */
2042 result = L2CAP_CONF_UNKNOWN;
2043 *((u8 *) ptr++) = type;
2048 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2051 switch (chan->mode) {
2052 case L2CAP_MODE_STREAMING:
2053 case L2CAP_MODE_ERTM:
2054 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2055 chan->mode = l2cap_select_mode(rfc.mode,
2056 chan->conn->feat_mask);
2060 if (chan->mode != rfc.mode)
2061 return -ECONNREFUSED;
/* mode mismatch: propose our mode back once, then give up */
2067 if (chan->mode != rfc.mode) {
2068 result = L2CAP_CONF_UNACCEPT;
2069 rfc.mode = chan->mode;
2071 if (chan->num_conf_rsp == 1)
2072 return -ECONNREFUSED;
2074 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2075 sizeof(rfc), (unsigned long) &rfc);
2079 if (result == L2CAP_CONF_SUCCESS) {
2080 /* Configure output options and let the other side know
2081 * which ones we don't like. */
2083 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2084 result = L2CAP_CONF_UNACCEPT;
2087 set_bit(CONF_MTU_DONE, &chan->conf_state);
2089 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2092 case L2CAP_MODE_BASIC:
2093 chan->fcs = L2CAP_FCS_NONE;
2094 set_bit(CONF_MODE_DONE, &chan->conf_state);
2097 case L2CAP_MODE_ERTM:
2098 chan->remote_tx_win = rfc.txwin_size;
2099 chan->remote_max_tx = rfc.max_transmit;
2101 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2102 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2104 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* NOTE(review): rfc fields are little-endian on the wire, so these
 * look like they should be cpu_to_le16(), not le16_to_cpu() — the two
 * differ on big-endian hosts.  TODO confirm against upstream fix. */
2106 rfc.retrans_timeout =
2107 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2108 rfc.monitor_timeout =
2109 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2111 set_bit(CONF_MODE_DONE, &chan->conf_state);
2113 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2114 sizeof(rfc), (unsigned long) &rfc);
2118 case L2CAP_MODE_STREAMING:
2119 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2120 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2122 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2124 set_bit(CONF_MODE_DONE, &chan->conf_state);
2126 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2127 sizeof(rfc), (unsigned long) &rfc);
2132 result = L2CAP_CONF_UNACCEPT;
2134 memset(&rfc, 0, sizeof(rfc));
2135 rfc.mode = chan->mode;
2138 if (result == L2CAP_CONF_SUCCESS)
2139 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2141 rsp->scid = cpu_to_le16(chan->dcid);
2142 rsp->result = cpu_to_le16(result);
2143 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse a Configure Response from the peer and build the follow-up
 * Configure Request into `data`, adjusting our MTU/flush-timeout/RFC
 * proposals to values the peer will accept.  On success records the
 * negotiated ERTM/streaming timeouts and MPS.  Returns the new request
 * length or -ECONNREFUSED when the peer insists on an unusable mode.
 */
2148 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2150 struct l2cap_conf_req *req = data;
2151 void *ptr = req->data;
2154 struct l2cap_conf_rfc rfc;
2156 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2158 while (len >= L2CAP_CONF_OPT_SIZE) {
2159 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2162 case L2CAP_CONF_MTU:
/* peer rejected our MTU: fall back to the minimum and retry */
2163 if (val < L2CAP_DEFAULT_MIN_MTU) {
2164 *result = L2CAP_CONF_UNACCEPT;
2165 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2168 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2171 case L2CAP_CONF_FLUSH_TO:
2172 chan->flush_to = val;
2173 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2177 case L2CAP_CONF_RFC:
2178 if (olen == sizeof(rfc))
2179 memcpy(&rfc, (void *)val, olen);
/* a user-forced mode cannot be changed by the peer */
2181 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2182 rfc.mode != chan->mode)
2183 return -ECONNREFUSED;
2187 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2188 sizeof(rfc), (unsigned long) &rfc);
2193 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2194 return -ECONNREFUSED;
2196 chan->mode = rfc.mode;
2198 if (*result == L2CAP_CONF_SUCCESS) {
2200 case L2CAP_MODE_ERTM:
2201 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2202 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2203 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2205 case L2CAP_MODE_STREAMING:
2206 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2210 req->dcid = cpu_to_le16(chan->dcid);
2211 req->flags = cpu_to_le16(0x0000);
/*
 * Fill in a bare Configure Response header (scid/result/flags) with no
 * options; used for empty, reject and "continue" responses.  Returns
 * the response length.
 */
2216 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2218 struct l2cap_conf_rsp *rsp = data;
2219 void *ptr = rsp->data;
2221 BT_DBG("chan %p", chan);
2223 rsp->scid = cpu_to_le16(chan->dcid);
2224 rsp->result = cpu_to_le16(result);
2225 rsp->flags = cpu_to_le16(flags);
/*
 * Complete a deferred connection accept: send the pending successful
 * Connect Response (using the ident saved from the original request)
 * and, if we have not done so yet, kick off configuration by sending
 * our first Configure Request.
 */
2230 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2232 struct l2cap_conn_rsp rsp;
2233 struct l2cap_conn *conn = chan->conn;
2236 rsp.scid = cpu_to_le16(chan->dcid);
2237 rsp.dcid = cpu_to_le16(chan->scid);
2238 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2239 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2240 l2cap_send_cmd(conn, chan->ident,
2241 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* only the first caller past this test sends the config request */
2243 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2246 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2247 l2cap_build_conf_req(chan, buf), buf);
2248 chan->num_conf_req++;
/*
 * Extract the RFC option from a successful Configure Response and
 * store the agreed ERTM/streaming parameters (retransmission timeout,
 * monitor timeout, MPS) on the channel.  No-op for basic mode.
 */
2251 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2255 struct l2cap_conf_rfc rfc;
2257 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2259 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2262 while (len >= L2CAP_CONF_OPT_SIZE) {
2263 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2266 case L2CAP_CONF_RFC:
2267 if (olen == sizeof(rfc))
2268 memcpy(&rfc, (void *)val, olen);
2275 case L2CAP_MODE_ERTM:
2276 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2277 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2278 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2280 case L2CAP_MODE_STREAMING:
2281 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle an incoming Command Reject.  If it rejects our outstanding
 * Information Request (matching ident), treat the feature-mask exchange
 * as done and let pending channels proceed via l2cap_conn_start().
 */
2285 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2287 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 = "command not understood"; ignore other reject reasons */
2289 if (rej->reason != 0x0000)
2292 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2293 cmd->ident == conn->info_ident) {
2294 del_timer(&conn->info_timer);
2296 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2297 conn->info_ident = 0;
2299 l2cap_conn_start(conn);
/*
 * Handle an incoming Connect Request: find a listening channel for the
 * PSM, run security and backlog checks, create the child channel, add
 * it to the connection, and reply with success / pending / error.  May
 * also trigger the initial Information Request exchange or the first
 * Configure Request, depending on negotiation state.
 */
2305 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2307 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2308 struct l2cap_conn_rsp rsp;
2309 struct l2cap_chan *chan = NULL, *pchan;
2310 struct sock *parent, *sk = NULL;
2311 int result, status = L2CAP_CS_NO_INFO;
2313 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2314 __le16 psm = req->psm;
2316 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2318 /* Check if we have socket listening on psm */
2319 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2321 result = L2CAP_CR_BAD_PSM;
2327 bh_lock_sock(parent);
2329 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check */
2330 if (psm != cpu_to_le16(0x0001) &&
2331 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05 = authentication failure disconnect reason */
2332 conn->disc_reason = 0x05;
2333 result = L2CAP_CR_SEC_BLOCK;
2337 result = L2CAP_CR_NO_MEM;
2339 /* Check for backlog size */
2340 if (sk_acceptq_is_full(parent)) {
2341 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2345 chan = pchan->ops->new_connection(pchan->data);
2351 write_lock_bh(&conn->chan_lock);
2353 /* Check if we already have channel with that dcid */
2354 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2355 write_unlock_bh(&conn->chan_lock);
2356 sock_set_flag(sk, SOCK_ZAPPED);
2357 chan->ops->close(chan->data);
2361 hci_conn_hold(conn->hcon);
2363 bacpy(&bt_sk(sk)->src, conn->src);
2364 bacpy(&bt_sk(sk)->dst, conn->dst);
2368 bt_accept_enqueue(parent, sk);
2370 __l2cap_chan_add(conn, chan);
2374 __set_chan_timer(chan, sk->sk_sndtimeo);
/* remember the request ident for a possibly deferred response */
2376 chan->ident = cmd->ident;
2378 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2379 if (l2cap_check_security(chan)) {
/* userspace asked to defer accept(): answer "pending" for now */
2380 if (bt_sk(sk)->defer_setup) {
2381 l2cap_state_change(chan, BT_CONNECT2);
2382 result = L2CAP_CR_PEND;
2383 status = L2CAP_CS_AUTHOR_PEND;
2384 parent->sk_data_ready(parent, 0);
2386 l2cap_state_change(chan, BT_CONFIG);
2387 result = L2CAP_CR_SUCCESS;
2388 status = L2CAP_CS_NO_INFO;
2391 l2cap_state_change(chan, BT_CONNECT2);
2392 result = L2CAP_CR_PEND;
2393 status = L2CAP_CS_AUTHEN_PEND;
2396 l2cap_state_change(chan, BT_CONNECT2);
2397 result = L2CAP_CR_PEND;
2398 status = L2CAP_CS_NO_INFO;
2401 write_unlock_bh(&conn->chan_lock);
2404 bh_unlock_sock(parent);
2407 rsp.scid = cpu_to_le16(scid);
2408 rsp.dcid = cpu_to_le16(dcid);
2409 rsp.result = cpu_to_le16(result);
2410 rsp.status = cpu_to_le16(status);
2411 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* feature exchange not done yet: start it now, with its timeout */
2413 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2414 struct l2cap_info_req info;
2415 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2417 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2418 conn->info_ident = l2cap_get_ident(conn);
2420 mod_timer(&conn->info_timer, jiffies +
2421 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2423 l2cap_send_cmd(conn, conn->info_ident,
2424 L2CAP_INFO_REQ, sizeof(info), &info);
/* accepted immediately: send our first Configure Request */
2427 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2428 result == L2CAP_CR_SUCCESS) {
2430 set_bit(CONF_REQ_SENT, &chan->conf_state);
2431 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2432 l2cap_build_conf_req(chan, buf), buf);
2433 chan->num_conf_req++;
/*
 * Handle an incoming Connect Response.  Locate the channel by scid (or
 * by ident when no dcid was assigned yet), then act on the result:
 * success moves to BT_CONFIG and sends a Configure Request, pending
 * just marks the channel, anything else tears the channel down.
 */
2439 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2441 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2442 u16 scid, dcid, result, status;
2443 struct l2cap_chan *chan;
2447 scid = __le16_to_cpu(rsp->scid);
2448 dcid = __le16_to_cpu(rsp->dcid);
2449 result = __le16_to_cpu(rsp->result);
2450 status = __le16_to_cpu(rsp->status);
2452 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2455 chan = l2cap_get_chan_by_scid(conn, scid);
/* scid == 0: response to a request the peer answered by ident only */
2459 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2467 case L2CAP_CR_SUCCESS:
2468 l2cap_state_change(chan, BT_CONFIG);
2471 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2473 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2476 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2477 l2cap_build_conf_req(chan, req), req);
2478 chan->num_conf_req++;
2482 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2486 /* don't delete l2cap channel if sk is owned by user */
2487 if (sock_owned_by_user(sk)) {
2488 l2cap_state_change(chan, BT_DISCONN);
2489 __clear_chan_timer(chan);
/* retry teardown shortly (HZ/5 = 200ms) once the user releases sk */
2490 __set_chan_timer(chan, HZ / 5);
2494 l2cap_chan_del(chan, ECONNREFUSED);
/*
 * Apply the FCS default after configuration completes: FCS is only
 * meaningful in ERTM/streaming mode, and defaults to CRC16 there
 * unless the peer explicitly disabled it during configuration.
 */
2502 static inline void set_default_fcs(struct l2cap_chan *chan)
2504 /* FCS is enabled only in ERTM or streaming mode, if one or both
2507 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2508 chan->fcs = L2CAP_FCS_NONE;
2509 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2510 chan->fcs = L2CAP_FCS_CRC16;
/*
 * Handle an incoming Configure Request.  Accumulates option data in
 * chan->conf_req across "continuation" fragments (flags bit 0), and on
 * the final fragment parses the full request, replies, and — once both
 * directions have finished configuring — brings the channel up
 * (initialising ERTM state if that mode was negotiated).
 */
2513 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2515 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2518 struct l2cap_chan *chan;
2522 dcid = __le16_to_cpu(req->dcid);
2523 flags = __le16_to_cpu(req->flags);
2525 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2527 chan = l2cap_get_chan_by_scid(conn, dcid);
/* config only valid in BT_CONFIG; otherwise reject (invalid CID) */
2533 if (chan->state != BT_CONFIG) {
2534 struct l2cap_cmd_rej rej;
2536 rej.reason = cpu_to_le16(0x0002);
2537 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2542 /* Reject if config buffer is too small. */
2543 len = cmd_len - sizeof(*req);
2544 if (chan->conf_len + len > sizeof(chan->conf_req)) {
2545 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2546 l2cap_build_conf_rsp(chan, rsp,
2547 L2CAP_CONF_REJECT, flags), rsp);
/* accumulate this fragment's options into the per-channel buffer */
2552 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2553 chan->conf_len += len;
2555 if (flags & 0x0001) {
2556 /* Incomplete config. Send empty response. */
2557 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2558 l2cap_build_conf_rsp(chan, rsp,
2559 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2563 /* Complete config. */
2564 len = l2cap_parse_conf_req(chan, rsp);
2566 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2570 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2571 chan->num_conf_rsp++;
2573 /* Reset config buffer. */
2576 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* both sides configured: channel becomes operational */
2579 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2580 set_default_fcs(chan);
2582 l2cap_state_change(chan, BT_CONNECTED);
2584 chan->next_tx_seq = 0;
2585 chan->expected_tx_seq = 0;
2586 skb_queue_head_init(&chan->tx_q);
2587 if (chan->mode == L2CAP_MODE_ERTM)
2588 l2cap_ertm_init(chan);
2590 l2cap_chan_ready(sk);
/* we answered their request but never sent ours: do it now */
2594 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2596 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2597 l2cap_build_conf_req(chan, buf), buf);
2598 chan->num_conf_req++;
/*
 * Handle an incoming Configure Response.  On success records the
 * agreed RFC parameters; on "unaccept" re-negotiates (bounded by
 * L2CAP_CONF_MAX_CONF_RSP attempts); otherwise disconnects.  When both
 * directions have finished configuring, brings the channel up.
 */
2606 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2608 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2609 u16 scid, flags, result;
2610 struct l2cap_chan *chan;
/* NOTE(review): cmd->len is __le16 — this arithmetic looks like it is
 * missing a __le16_to_cpu() and would be wrong on big-endian; confirm */
2612 int len = cmd->len - sizeof(*rsp);
2614 scid = __le16_to_cpu(rsp->scid);
2615 flags = __le16_to_cpu(rsp->flags);
2616 result = __le16_to_cpu(rsp->result);
2618 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2619 scid, flags, result);
2621 chan = l2cap_get_chan_by_scid(conn, scid);
2628 case L2CAP_CONF_SUCCESS:
2629 l2cap_conf_rfc_get(chan, rsp->data, len);
2632 case L2CAP_CONF_UNACCEPT:
/* re-propose with the peer's counter-offer, up to the retry limit */
2633 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2636 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2637 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2641 /* throw out any old stored conf requests */
2642 result = L2CAP_CONF_SUCCESS;
2643 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2646 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2650 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2651 L2CAP_CONF_REQ, len, req);
2652 chan->num_conf_req++;
2653 if (result != L2CAP_CONF_SUCCESS)
/* reject/failure path: give up and disconnect the channel */
2659 sk->sk_err = ECONNRESET;
2660 __set_chan_timer(chan, HZ * 5);
2661 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2668 set_bit(CONF_INPUT_DONE, &chan->conf_state);
/* both sides configured: channel becomes operational */
2670 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2671 set_default_fcs(chan);
2673 l2cap_state_change(chan, BT_CONNECTED);
2674 chan->next_tx_seq = 0;
2675 chan->expected_tx_seq = 0;
2676 skb_queue_head_init(&chan->tx_q);
2677 if (chan->mode == L2CAP_MODE_ERTM)
2678 l2cap_ertm_init(chan);
2680 l2cap_chan_ready(sk);
/*
 * Handle an incoming Disconnect Request: acknowledge with a
 * Disconnect Response, shut the socket down, and delete the channel —
 * unless the socket is currently locked by userspace, in which case
 * the teardown is deferred via a short channel timer.
 */
2688 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2690 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2691 struct l2cap_disconn_rsp rsp;
2693 struct l2cap_chan *chan;
2696 scid = __le16_to_cpu(req->scid);
2697 dcid = __le16_to_cpu(req->dcid);
2699 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* the peer's dcid is our scid, hence the lookup by dcid */
2701 chan = l2cap_get_chan_by_scid(conn, dcid);
2707 rsp.dcid = cpu_to_le16(chan->scid);
2708 rsp.scid = cpu_to_le16(chan->dcid);
2709 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2711 sk->sk_shutdown = SHUTDOWN_MASK;
2713 /* don't delete l2cap channel if sk is owned by user */
2714 if (sock_owned_by_user(sk)) {
2715 l2cap_state_change(chan, BT_DISCONN);
2716 __clear_chan_timer(chan);
2717 __set_chan_timer(chan, HZ / 5);
2722 l2cap_chan_del(chan, ECONNRESET);
2725 chan->ops->close(chan->data);
/*
 * Handle an incoming Disconnect Response: our disconnect completed, so
 * delete the channel (deferring via a short timer if the socket is
 * currently locked by userspace).
 */
2729 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2731 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2733 struct l2cap_chan *chan;
2736 scid = __le16_to_cpu(rsp->scid);
2737 dcid = __le16_to_cpu(rsp->dcid);
2739 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2741 chan = l2cap_get_chan_by_scid(conn, scid);
2747 /* don't delete l2cap channel if sk is owned by user */
2748 if (sock_owned_by_user(sk)) {
2749 l2cap_state_change(chan,BT_DISCONN);
2750 __clear_chan_timer(chan);
2751 __set_chan_timer(chan, HZ / 5);
/* err = 0: this is a clean, locally-initiated disconnect */
2756 l2cap_chan_del(chan, 0);
2759 chan->ops->close(chan->data);
/*
 * Handle an incoming Information Request: answer feature-mask and
 * fixed-channels queries from our local capabilities; reply
 * "not supported" to any other type.
 */
2763 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2765 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2768 type = __le16_to_cpu(req->type);
2770 BT_DBG("type 0x%4.4x", type);
2772 if (type == L2CAP_IT_FEAT_MASK) {
2774 u32 feat_mask = l2cap_feat_mask;
2775 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2776 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2777 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* advertise ERTM/streaming (and whatever the elided line adds) */
2779 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2781 put_unaligned_le32(feat_mask, rsp->data);
2782 l2cap_send_cmd(conn, cmd->ident,
2783 L2CAP_INFO_RSP, sizeof(buf), buf);
2784 } else if (type == L2CAP_IT_FIXED_CHAN) {
2786 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2787 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2788 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel bitmap follows the 4-byte rsp header */
2789 memcpy(buf + 4, l2cap_fixed_chan, 8);
2790 l2cap_send_cmd(conn, cmd->ident,
2791 L2CAP_INFO_RSP, sizeof(buf), buf);
2793 struct l2cap_info_rsp rsp;
2794 rsp.type = cpu_to_le16(type);
2795 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2796 l2cap_send_cmd(conn, cmd->ident,
2797 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an incoming Information Response.  After validating the ident
 * against our outstanding request, record the peer's feature mask; if
 * it advertises fixed channels, chain a second Information Request for
 * them, otherwise mark the exchange done and start pending channels.
 */
2803 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2805 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2808 type = __le16_to_cpu(rsp->type);
2809 result = __le16_to_cpu(rsp->result);
2811 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2813 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2814 if (cmd->ident != conn->info_ident ||
2815 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2818 del_timer(&conn->info_timer);
/* peer refused: treat the exchange as complete and move on */
2820 if (result != L2CAP_IR_SUCCESS) {
2821 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2822 conn->info_ident = 0;
2824 l2cap_conn_start(conn);
2829 if (type == L2CAP_IT_FEAT_MASK) {
2830 conn->feat_mask = get_unaligned_le32(rsp->data);
2832 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2833 struct l2cap_info_req req;
2834 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2836 conn->info_ident = l2cap_get_ident(conn);
2838 l2cap_send_cmd(conn, conn->info_ident,
2839 L2CAP_INFO_REQ, sizeof(req), &req);
2841 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2842 conn->info_ident = 0;
2844 l2cap_conn_start(conn);
2846 } else if (type == L2CAP_IT_FIXED_CHAN) {
2847 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2848 conn->info_ident = 0;
2850 l2cap_conn_start(conn);
/*
 * Validate LE connection-parameter-update values against the ranges
 * the spec allows: interval min/max ordering and bounds, supervision
 * timeout bounds, and that the latency leaves room for the timeout.
 */
2856 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
/* interval: 6..3200 units, min must not exceed max */
2861 if (min > max || min < 6 || max > 3200)
2864 if (to_multiplier < 10 || to_multiplier > 3200)
/* timeout must exceed the maximum connection interval */
2867 if (max >= to_multiplier * 8)
2870 max_latency = (to_multiplier * 8 / max) - 1;
2871 if (latency > 499 || latency > max_latency)
/*
 * Handle an LE Connection Parameter Update Request (master role only):
 * validate the requested parameters, send an accept/reject response,
 * and if accepted ask the controller to apply the new parameters.
 */
2877 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2878 struct l2cap_cmd_hdr *cmd, u8 *data)
2880 struct hci_conn *hcon = conn->hcon;
2881 struct l2cap_conn_param_update_req *req;
2882 struct l2cap_conn_param_update_rsp rsp;
2883 u16 min, max, latency, to_multiplier, cmd_len;
/* only the master may act on this request */
2886 if (!(hcon->link_mode & HCI_LM_MASTER))
2889 cmd_len = __le16_to_cpu(cmd->len);
2890 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2893 req = (struct l2cap_conn_param_update_req *) data;
2894 min = __le16_to_cpu(req->min);
2895 max = __le16_to_cpu(req->max);
2896 latency = __le16_to_cpu(req->latency);
2897 to_multiplier = __le16_to_cpu(req->to_multiplier);
2899 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2900 min, max, latency, to_multiplier);
2902 memset(&rsp, 0, sizeof(rsp));
2904 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2906 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2908 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2910 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* accepted: push the new parameters down to the controller */
2914 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/*
 * Dispatch one BR/EDR signalling command to its handler by opcode.
 * Echo requests are answered inline; unknown opcodes are logged and
 * (per the elided tail) reported so the caller can send a reject.
 */
2919 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2920 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2924 switch (cmd->code) {
2925 case L2CAP_COMMAND_REJ:
2926 l2cap_command_rej(conn, cmd, data);
2929 case L2CAP_CONN_REQ:
2930 err = l2cap_connect_req(conn, cmd, data);
2933 case L2CAP_CONN_RSP:
2934 err = l2cap_connect_rsp(conn, cmd, data);
2937 case L2CAP_CONF_REQ:
2938 err = l2cap_config_req(conn, cmd, cmd_len, data);
2941 case L2CAP_CONF_RSP:
2942 err = l2cap_config_rsp(conn, cmd, data);
2945 case L2CAP_DISCONN_REQ:
2946 err = l2cap_disconnect_req(conn, cmd, data);
2949 case L2CAP_DISCONN_RSP:
2950 err = l2cap_disconnect_rsp(conn, cmd, data);
2953 case L2CAP_ECHO_REQ:
/* echo back the request payload verbatim */
2954 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2957 case L2CAP_ECHO_RSP:
2960 case L2CAP_INFO_REQ:
2961 err = l2cap_information_req(conn, cmd, data);
2964 case L2CAP_INFO_RSP:
2965 err = l2cap_information_rsp(conn, cmd, data);
2969 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/*
 * Dispatch one LE signalling command.  Only the connection-parameter
 * update request is handled; rejects and update responses are ignored,
 * and unknown opcodes are logged.
 */
2977 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2978 struct l2cap_cmd_hdr *cmd, u8 *data)
2980 switch (cmd->code) {
2981 case L2CAP_COMMAND_REJ:
2984 case L2CAP_CONN_PARAM_UPDATE_REQ:
2985 return l2cap_conn_param_update_req(conn, cmd, data);
2987 case L2CAP_CONN_PARAM_UPDATE_RSP:
2991 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/*
 * Process a frame received on the signalling channel: mirror it to raw
 * sockets, then iterate over the (possibly multiple) commands it
 * contains, validating each header before dispatching to the LE or
 * BR/EDR handler.  Failed commands are answered with a Command Reject.
 */
2996 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2997 struct sk_buff *skb)
2999 u8 *data = skb->data;
3001 struct l2cap_cmd_hdr cmd;
3004 l2cap_raw_recv(conn, skb);
/* a single signalling frame may pack several commands back to back */
3006 while (len >= L2CAP_CMD_HDR_SIZE) {
3008 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3009 data += L2CAP_CMD_HDR_SIZE;
3010 len -= L2CAP_CMD_HDR_SIZE;
3012 cmd_len = le16_to_cpu(cmd.len);
3014 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* ident 0 is reserved and a length overrun means a corrupt frame */
3016 if (cmd_len > len || !cmd.ident) {
3017 BT_DBG("corrupted command");
3021 if (conn->hcon->type == LE_LINK)
3022 err = l2cap_le_sig_cmd(conn, &cmd, data);
3024 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3027 struct l2cap_cmd_rej rej;
3029 BT_ERR("Wrong link type (%d)", err);
3031 /* FIXME: Map err to a valid reason */
3032 rej.reason = cpu_to_le16(0);
3033 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the 2-byte CRC16 FCS trailer of a received ERTM/streaming
 * frame.  The trailer is trimmed off first, then the CRC is computed
 * over the basic L2CAP header (which sits just before skb->data)
 * plus the remaining payload and compared with the received value.
 * Channels without CRC16 FCS pass unchecked. */
3043 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3045 u16 our_fcs, rcv_fcs;
3046 int hdr_size = L2CAP_HDR_SIZE + 2;
3048 if (chan->fcs == L2CAP_FCS_CRC16) {
/* After the trim, skb->data + skb->len points at the FCS bytes. */
3049 skb_trim(skb, skb->len - 2);
3050 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3051 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3053 if (our_fcs != rcv_fcs)
/* Answer a poll: send pending I-frames if possible, otherwise an RR
 * or, when we are locally busy, an RNR S-frame carrying our current
 * buffer_seq as the ReqSeq acknowledgment. */
3059 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3063 chan->frames_sent = 0;
3065 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
/* Locally busy: advertise Receiver-Not-Ready and remember we did. */
3067 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3068 control |= L2CAP_SUPER_RCV_NOT_READY;
3069 l2cap_send_sframe(chan, control);
3070 set_bit(CONN_RNR_SENT, &chan->conn_state);
3073 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3074 l2cap_retransmit_frames(chan);
3076 l2cap_ertm_send(chan);
/* Nothing was (re)transmitted and we are not busy: a plain RR still
 * has to go out so the F-bit/acknowledgment reaches the peer. */
3078 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3079 chan->frames_sent == 0) {
3080 control |= L2CAP_SUPER_RCV_READY;
3081 l2cap_send_sframe(chan, control);
/* Insert an out-of-order I-frame into the SREJ queue, keeping the
 * queue sorted by TxSeq distance from buffer_seq (modulo-64 sequence
 * space).  A frame whose TxSeq already exists is treated as a
 * duplicate. */
3085 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
3087 struct sk_buff *next_skb;
3088 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequence number and SAR bits in the skb control block for
 * later reassembly. */
3090 bt_cb(skb)->tx_seq = tx_seq;
3091 bt_cb(skb)->sar = sar;
3093 next_skb = skb_peek(&chan->srej_q);
3095 __skb_queue_tail(&chan->srej_q, skb);
/* Distance of the new frame from buffer_seq in the 64-wide
 * sequence space. */
3099 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3100 if (tx_seq_offset < 0)
3101 tx_seq_offset += 64;
/* Same TxSeq already queued: duplicate. */
3104 if (bt_cb(next_skb)->tx_seq == tx_seq)
3107 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3108 chan->buffer_seq) % 64;
3109 if (next_tx_seq_offset < 0)
3110 next_tx_seq_offset += 64;
/* First queued frame that sorts after us: insert before it. */
3112 if (next_tx_seq_offset > tx_seq_offset) {
3113 __skb_queue_before(&chan->srej_q, next_skb, skb);
3117 if (skb_queue_is_last(&chan->srej_q, next_skb))
3120 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
/* Sorts after everything currently queued. */
3122 __skb_queue_tail(&chan->srej_q, skb);
/* Reassemble a SAR-segmented SDU in ERTM mode and deliver it via
 * chan->ops->recv().  The SAR bits in @control select unsegmented
 * delivery, start (with 2-byte SDU length prefix), continuation, or
 * end of an SDU; CONN_SAR_SDU tracks whether a partial SDU is in
 * flight and CONN_SAR_RETRY marks a delivery that must be retried
 * after a local-busy condition. */
3127 static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3129 struct sk_buff *_skb;
3132 switch (control & L2CAP_CTRL_SAR) {
3133 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame while a partial SDU is pending is a
 * protocol violation. */
3134 if (test_bit(CONN_SAR_SDU, &chan->conn_state))
3137 return chan->ops->recv(chan->data, skb);
3139 case L2CAP_SDU_START:
3140 if (test_bit(CONN_SAR_SDU, &chan->conn_state))
/* Start frame carries the total SDU length first. */
3143 chan->sdu_len = get_unaligned_le16(skb->data);
3145 if (chan->sdu_len > chan->imtu)
3148 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3152 /* pull sdu_len bytes only after alloc, because of Local Busy
3153 * condition we have to be sure that this will be executed
3154 * only once, i.e., when alloc does not fail */
3157 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3159 set_bit(CONN_SAR_SDU, &chan->conn_state);
3160 chan->partial_sdu_len = skb->len;
3163 case L2CAP_SDU_CONTINUE:
3164 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
/* Growing past the announced SDU length is an error. */
3170 chan->partial_sdu_len += skb->len;
3171 if (chan->partial_sdu_len > chan->sdu_len)
3174 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
/* L2CAP_SDU_END from here down (case label not visible in this
 * chunk). */
3179 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
/* On a retry after local busy the bytes were already appended;
 * only do the bookkeeping and copy on the first attempt. */
3185 if (!test_bit(CONN_SAR_RETRY, &chan->conn_state)) {
3186 chan->partial_sdu_len += skb->len;
3188 if (chan->partial_sdu_len > chan->imtu)
3191 if (chan->partial_sdu_len != chan->sdu_len)
3194 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
/* Deliver a clone so the assembled SDU survives a failed recv()
 * and can be retried. */
3197 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3199 set_bit(CONN_SAR_RETRY, &chan->conn_state);
3203 err = chan->ops->recv(chan->data, _skb);
3206 set_bit(CONN_SAR_RETRY, &chan->conn_state);
3210 clear_bit(CONN_SAR_RETRY, &chan->conn_state);
3211 clear_bit(CONN_SAR_SDU, &chan->conn_state);
3213 kfree_skb(chan->sdu);
/* Error path: drop the partial SDU and tear the channel down. */
3221 kfree_skb(chan->sdu);
3225 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Try to drain the local-busy queue.  Each queued frame is pushed
 * through ERTM reassembly; on the first failure the frame is put
 * back and a nonzero result tells the caller we are still busy.
 * When the queue empties, exit the local-busy state — if an RNR was
 * sent earlier, poll the peer with an RR+P and start the monitor
 * timer. */
3230 static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
3232 struct sk_buff *skb;
3236 while ((skb = skb_dequeue(&chan->busy_q))) {
3237 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3238 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
/* Still busy: requeue at the head so ordering is preserved. */
3240 skb_queue_head(&chan->busy_q, skb);
3244 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3247 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
/* We advertised RNR before — now poll with RR(P=1) and wait for
 * the peer's F-bit response. */
3250 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3251 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3252 l2cap_send_sframe(chan, control);
3253 chan->retry_count = 1;
3255 __clear_retrans_timer(chan);
3256 __set_monitor_timer(chan);
3258 set_bit(CONN_WAIT_F, &chan->conn_state);
3261 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3262 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3264 BT_DBG("chan %p, Exit local busy", chan);
/* Workqueue handler for the local-busy condition: sleep-poll the
 * busy queue, retrying l2cap_try_push_rx_skb() until it drains.
 * Gives up and disconnects after L2CAP_LOCAL_BUSY_TRIES attempts,
 * and also bails out on signals or socket errors. */
3269 static void l2cap_busy_work(struct work_struct *work)
3271 DECLARE_WAITQUEUE(wait, current);
3272 struct l2cap_chan *chan =
3273 container_of(work, struct l2cap_chan, busy_work);
3274 struct sock *sk = chan->sk;
3275 int n_tries = 0, timeo = HZ/5, err;
3276 struct sk_buff *skb;
3280 add_wait_queue(sk_sleep(sk), &wait);
3281 while ((skb = skb_peek(&chan->busy_q))) {
3282 set_current_state(TASK_INTERRUPTIBLE);
/* Too many retries: the receiver never unblocked — tear down. */
3284 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3286 l2cap_send_disconn_req(chan->conn, chan, EBUSY);
3293 if (signal_pending(current)) {
3294 err = sock_intr_errno(timeo);
/* Sleep between attempts (timeo starts at HZ/5). */
3299 timeo = schedule_timeout(timeo);
3302 err = sock_error(sk);
/* Queue fully drained: done. */
3306 if (l2cap_try_push_rx_skb(chan) == 0)
3310 set_current_state(TASK_RUNNING);
3311 remove_wait_queue(sk_sleep(sk), &wait);
/* Deliver a received I-frame, entering the local-busy state when
 * reassembly cannot accept it: the frame is queued, an RNR S-frame
 * is sent, and the busy workqueue takes over retrying delivery. */
3316 static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
/* Already busy: just append behind the frames waiting in busy_q
 * and attempt a drain. */
3320 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3321 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3322 __skb_queue_tail(&chan->busy_q, skb);
3323 return l2cap_try_push_rx_skb(chan);
3328 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3330 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3334 /* Busy Condition */
3335 BT_DBG("chan %p, Enter local busy", chan);
3337 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3338 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3339 __skb_queue_tail(&chan->busy_q, skb);
/* Tell the peer to stop sending: RNR with our current ReqSeq. */
3341 sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3342 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3343 l2cap_send_sframe(chan, sctrl);
3345 set_bit(CONN_RNR_SENT, &chan->conn_state);
3347 __clear_ack_timer(chan);
/* Retry delivery from process context. */
3349 queue_work(_busy_wq, &chan->busy_work);
/* Reassemble a SAR-segmented SDU in streaming mode.  Unlike ERTM,
 * frames may be lost, so a stale partial SDU is simply dropped when
 * a new unsegmented/start frame arrives out of sequence. */
3354 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3356 struct sk_buff *_skb;
3360 * TODO: We have to notify the userland if some data is lost with the
3364 switch (control & L2CAP_CTRL_SAR) {
3365 case L2CAP_SDU_UNSEGMENTED:
/* A pending partial SDU is abandoned — streaming mode tolerates
 * loss. */
3366 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3367 kfree_skb(chan->sdu);
3371 err = chan->ops->recv(chan->data, skb);
3377 case L2CAP_SDU_START:
3378 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3379 kfree_skb(chan->sdu);
/* Start frame: first two bytes announce the total SDU length. */
3383 chan->sdu_len = get_unaligned_le16(skb->data);
3386 if (chan->sdu_len > chan->imtu) {
3391 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3397 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3399 set_bit(CONN_SAR_SDU, &chan->conn_state);
3400 chan->partial_sdu_len = skb->len;
3404 case L2CAP_SDU_CONTINUE:
3405 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3408 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
/* Oversized reassembly: discard the partial SDU. */
3410 chan->partial_sdu_len += skb->len;
3411 if (chan->partial_sdu_len > chan->sdu_len)
3412 kfree_skb(chan->sdu);
/* L2CAP_SDU_END from here down (case label not visible in this
 * chunk). */
3419 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3422 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3424 clear_bit(CONN_SAR_SDU, &chan->conn_state);
3425 chan->partial_sdu_len += skb->len;
3427 if (chan->partial_sdu_len > chan->imtu)
/* Only deliver when the reassembled length matches the announced
 * SDU length exactly; a clone is handed up. */
3430 if (chan->partial_sdu_len == chan->sdu_len) {
3431 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3432 err = chan->ops->recv(chan->data, _skb);
3439 kfree_skb(chan->sdu);
/* After a missing frame arrives, deliver the run of consecutive
 * frames now available at the head of the SREJ queue, advancing
 * buffer_seq_srej (and the expected tx_seq) modulo 64 for each. */
3447 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3449 struct sk_buff *skb;
3452 while ((skb = skb_peek(&chan->srej_q))) {
/* Stop at the first gap still outstanding. */
3453 if (bt_cb(skb)->tx_seq != tx_seq)
3456 skb = skb_dequeue(&chan->srej_q);
3457 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3458 l2cap_ertm_reassembly_sdu(chan, skb, control);
3459 chan->buffer_seq_srej =
3460 (chan->buffer_seq_srej + 1) % 64;
3461 tx_seq = (tx_seq + 1) % 64;
/* Resend SREJ S-frames for every sequence number still outstanding
 * in the srej_l list.  The entry matching @tx_seq is retired; the
 * others are re-sent and moved to the tail, preserving order. */
3465 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3467 struct srej_list *l, *tmp;
3470 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* This SREJ was satisfied by the received frame — drop it. */
3471 if (l->tx_seq == tx_seq) {
3476 control = L2CAP_SUPER_SELECT_REJECT;
3477 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3478 l2cap_send_sframe(chan, control);
3480 list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ S-frame per missing sequence number between
 * expected_tx_seq and the received @tx_seq, recording each in the
 * srej_l list so the retransmissions can be matched later. */
3484 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3486 struct srej_list *new;
3489 while (tx_seq != chan->expected_tx_seq) {
3490 control = L2CAP_SUPER_SELECT_REJECT;
3491 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3492 l2cap_send_sframe(chan, control);
/* NOTE(review): kzalloc(GFP_ATOMIC) can fail and the result is
 * dereferenced on the next line without a NULL check — later
 * upstream code returns -ENOMEM here; confirm and fix. */
3494 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3495 new->tx_seq = chan->expected_tx_seq;
3496 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3497 list_add_tail(&new->list, &chan->srej_l);
/* Skip past the frame that just arrived. */
3499 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/* Receive-side state machine for an ERTM I-frame: acknowledge the
 * piggybacked ReqSeq, validate TxSeq against the receive window,
 * handle out-of-sequence frames via the SREJ machinery, and deliver
 * in-sequence frames, sending an RR ack every (tx_win/6)+1 frames. */
3502 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3504 u8 tx_seq = __get_txseq(rx_control);
3505 u8 req_seq = __get_reqseq(rx_control);
3506 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3507 int tx_seq_offset, expected_tx_seq_offset;
/* Ack roughly a sixth of the window at a time. */
3508 int num_to_ack = (chan->tx_win/6) + 1;
3511 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3512 tx_seq, rx_control);
/* F-bit answering our poll: stop the monitor timer and resume the
 * retransmission timer if frames are still unacked. */
3514 if (L2CAP_CTRL_FINAL & rx_control &&
3515 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3516 __clear_monitor_timer(chan);
3517 if (chan->unacked_frames > 0)
3518 __set_retrans_timer(chan);
3519 clear_bit(CONN_WAIT_F, &chan->conn_state);
3522 chan->expected_ack_seq = req_seq;
3523 l2cap_drop_acked_frames(chan);
3525 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3526 if (tx_seq_offset < 0)
3527 tx_seq_offset += 64;
3529 /* invalid tx_seq */
3530 if (tx_seq_offset >= chan->tx_win) {
3531 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3535 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3538 if (tx_seq == chan->expected_tx_seq)
/* Out-of-sequence frame while a SREJ recovery is running. */
3541 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3542 struct srej_list *first;
3544 first = list_first_entry(&chan->srej_l,
3545 struct srej_list, list);
/* The frame fills the oldest outstanding gap. */
3546 if (tx_seq == first->tx_seq) {
3547 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3548 l2cap_check_srej_gap(chan, tx_seq);
3550 list_del(&first->list);
/* All gaps filled: leave SREJ_SENT and ack. */
3553 if (list_empty(&chan->srej_l)) {
3554 chan->buffer_seq = chan->buffer_seq_srej;
3555 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3556 l2cap_send_ack(chan);
3557 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3560 struct srej_list *l;
3562 /* duplicated tx_seq */
3563 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
/* If this TxSeq is one we already SREJ'd, resend the outstanding
 * SREJs; otherwise SREJ the newly-discovered gap. */
3566 list_for_each_entry(l, &chan->srej_l, list) {
3567 if (l->tx_seq == tx_seq) {
3568 l2cap_resend_srejframe(chan, tx_seq);
3572 l2cap_send_srejframe(chan, tx_seq);
/* No SREJ recovery in progress: decide between duplicate and a
 * fresh gap. */
3575 expected_tx_seq_offset =
3576 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3577 if (expected_tx_seq_offset < 0)
3578 expected_tx_seq_offset += 64;
3580 /* duplicated tx_seq */
3581 if (tx_seq_offset < expected_tx_seq_offset)
3584 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3586 BT_DBG("chan %p, Enter SREJ", chan);
3588 INIT_LIST_HEAD(&chan->srej_l);
3589 chan->buffer_seq_srej = chan->buffer_seq;
3591 __skb_queue_head_init(&chan->srej_q);
3592 __skb_queue_head_init(&chan->busy_q);
3593 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3595 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3597 l2cap_send_srejframe(chan, tx_seq);
3599 __clear_ack_timer(chan);
/* Expected in-sequence frame (label not visible in this chunk). */
3604 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/* During SREJ recovery even in-sequence frames are buffered so
 * delivery stays ordered. */
3606 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3607 bt_cb(skb)->tx_seq = tx_seq;
3608 bt_cb(skb)->sar = sar;
3609 __skb_queue_tail(&chan->srej_q, skb);
3613 err = l2cap_push_rx_skb(chan, skb, rx_control);
3617 if (rx_control & L2CAP_CTRL_FINAL) {
3618 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3619 l2cap_retransmit_frames(chan);
3622 __set_ack_timer(chan);
/* Periodic acknowledgment to keep the peer's window open. */
3624 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3625 if (chan->num_acked == num_to_ack - 1)
3626 l2cap_send_ack(chan);
/* Handle a Receiver-Ready S-frame: acknowledge frames up to ReqSeq,
 * then react to the P/F bits — answer a poll, retransmit on a final
 * bit that wasn't already handled by a REJ, or resume transmission. */
3635 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3637 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3640 chan->expected_ack_seq = __get_reqseq(rx_control);
3641 l2cap_drop_acked_frames(chan);
/* RR with P=1: we owe the peer an F-bit response. */
3643 if (rx_control & L2CAP_CTRL_POLL) {
3644 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3645 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3646 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3647 (chan->unacked_frames > 0))
3648 __set_retrans_timer(chan);
3650 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3651 l2cap_send_srejtail(chan);
3653 l2cap_send_i_or_rr_or_rnr(chan);
/* RR with F=1: poll answered; retransmit unless a REJ already
 * triggered the retransmission. */
3656 } else if (rx_control & L2CAP_CTRL_FINAL) {
3657 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3659 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3660 l2cap_retransmit_frames(chan);
/* Plain RR: peer is ready again, resume sending. */
3663 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3664 (chan->unacked_frames > 0))
3665 __set_retrans_timer(chan);
3667 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3668 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3669 l2cap_send_ack(chan);
3671 l2cap_ertm_send(chan);
/* Handle a Reject S-frame: everything before ReqSeq is acked, and
 * frames from ReqSeq onward must be retransmitted.  CONN_REJ_ACT
 * records that a REJ-triggered retransmission is in flight so a
 * later F-bit does not retransmit again. */
3675 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3677 u8 tx_seq = __get_reqseq(rx_control);
3679 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3681 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3683 chan->expected_ack_seq = tx_seq;
3684 l2cap_drop_acked_frames(chan);
3686 if (rx_control & L2CAP_CTRL_FINAL) {
3687 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3688 l2cap_retransmit_frames(chan);
3690 l2cap_retransmit_frames(chan);
/* If we are waiting for an F-bit, remember the REJ so the F-bit
 * path skips the duplicate retransmission. */
3692 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3693 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a Selective-Reject S-frame: retransmit exactly the frame
 * the peer asks for.  srej_save_reqseq/CONN_SREJ_ACT pair a
 * SREJ(P=1) with its later F-bit so the same frame is not resent
 * twice. */
3696 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3698 u8 tx_seq = __get_reqseq(rx_control);
3700 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3702 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* SREJ with P=1: ack up to tx_seq, resend it with the F-bit set,
 * then continue normal transmission. */
3704 if (rx_control & L2CAP_CTRL_POLL) {
3705 chan->expected_ack_seq = tx_seq;
3706 l2cap_drop_acked_frames(chan);
3708 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3709 l2cap_retransmit_one_frame(chan, tx_seq);
3711 l2cap_ertm_send(chan);
3713 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3714 chan->srej_save_reqseq = tx_seq;
3715 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* SREJ with F=1: only resend if this isn't the echo of a SREJ we
 * already acted on. */
3717 } else if (rx_control & L2CAP_CTRL_FINAL) {
3718 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3719 chan->srej_save_reqseq == tx_seq)
3720 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3722 l2cap_retransmit_one_frame(chan, tx_seq);
/* Plain SREJ: just resend the requested frame. */
3724 l2cap_retransmit_one_frame(chan, tx_seq);
3725 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3726 chan->srej_save_reqseq = tx_seq;
3727 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a Receiver-Not-Ready S-frame: mark the peer busy, ack up
 * to ReqSeq, and answer a poll with RR(F=1) or the SREJ tail
 * depending on whether SREJ recovery is active. */
3732 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3734 u8 tx_seq = __get_reqseq(rx_control);
3736 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3738 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3739 chan->expected_ack_seq = tx_seq;
3740 l2cap_drop_acked_frames(chan);
3742 if (rx_control & L2CAP_CTRL_POLL)
3743 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* No SREJ recovery running: stop retransmissions while the peer
 * is busy and answer the poll if there was one. */
3745 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3746 __clear_retrans_timer(chan);
3747 if (rx_control & L2CAP_CTRL_POLL)
3748 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3752 if (rx_control & L2CAP_CTRL_POLL)
3753 l2cap_send_srejtail(chan);
3755 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
/* Dispatch a received S-frame by its supervisory function (RR, REJ,
 * SREJ, RNR) after handling an F-bit that answers our outstanding
 * poll. */
3758 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3760 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
/* F-bit response to our poll: swap the monitor timer back for the
 * retransmission timer if frames remain unacked. */
3762 if (L2CAP_CTRL_FINAL & rx_control &&
3763 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3764 __clear_monitor_timer(chan);
3765 if (chan->unacked_frames > 0)
3766 __set_retrans_timer(chan);
3767 clear_bit(CONN_WAIT_F, &chan->conn_state);
3770 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3771 case L2CAP_SUPER_RCV_READY:
3772 l2cap_data_channel_rrframe(chan, rx_control);
3775 case L2CAP_SUPER_REJECT:
3776 l2cap_data_channel_rejframe(chan, rx_control);
3779 case L2CAP_SUPER_SELECT_REJECT:
3780 l2cap_data_channel_srejframe(chan, rx_control);
3783 case L2CAP_SUPER_RCV_NOT_READY:
3784 l2cap_data_channel_rnrframe(chan, rx_control);
/* Entry point for a received ERTM frame on @sk: verify the FCS,
 * sanity-check the length against MPS and the ReqSeq against our
 * send window, then route to the I-frame or S-frame handler.
 * Violations tear the channel down with ECONNRESET. */
3792 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3794 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3797 int len, next_tx_seq_offset, req_seq_offset;
3799 control = get_unaligned_le16(skb->data);
3804 * We can just drop the corrupted I-frame here.
3805 * Receiver will miss it and start proper recovery
3806 * procedures and ask retransmission.
3808 if (l2cap_check_fcs(chan, skb))
/* SAR-start I-frames carry a 2-byte SDU length; FCS adds 2 more
 * bytes — both are excluded from the payload length check. */
3811 if (__is_sar_start(control) && __is_iframe(control))
3814 if (chan->fcs == L2CAP_FCS_CRC16)
3817 if (len > chan->mps) {
3818 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* ReqSeq must ack something inside [expected_ack_seq,
 * next_tx_seq] in modulo-64 arithmetic. */
3822 req_seq = __get_reqseq(control);
3823 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3824 if (req_seq_offset < 0)
3825 req_seq_offset += 64;
3827 next_tx_seq_offset =
3828 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3829 if (next_tx_seq_offset < 0)
3830 next_tx_seq_offset += 64;
3832 /* check for invalid req-seq */
3833 if (req_seq_offset > next_tx_seq_offset) {
3834 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3838 if (__is_iframe(control)) {
3840 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3844 l2cap_data_channel_iframe(chan, control, skb);
3848 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3852 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver a data frame to the channel identified by @cid, handling
 * it according to the channel mode: basic (direct delivery with MTU
 * check), ERTM (full state machine, possibly via the socket
 * backlog), or streaming (FCS + sequence tracking, loss tolerated). */
3862 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3864 struct l2cap_chan *chan;
3865 struct sock *sk = NULL;
3870 chan = l2cap_get_chan_by_scid(conn, cid);
3872 BT_DBG("unknown cid 0x%4.4x", cid);
3878 BT_DBG("chan %p, len %d", chan, skb->len);
3880 if (chan->state != BT_CONNECTED)
3883 switch (chan->mode) {
3884 case L2CAP_MODE_BASIC:
3885 /* If socket recv buffers overflows we drop data here
3886 * which is *bad* because L2CAP has to be reliable.
3887 * But we don't have any other choice. L2CAP doesn't
3888 * provide flow control mechanism. */
3890 if (chan->imtu < skb->len)
3893 if (!chan->ops->recv(chan->data, skb))
3897 case L2CAP_MODE_ERTM:
/* Process directly when the socket is not owned by user context;
 * otherwise defer via the socket backlog. */
3898 if (!sock_owned_by_user(sk)) {
3899 l2cap_ertm_data_rcv(sk, skb);
3901 if (sk_add_backlog(sk, skb))
3907 case L2CAP_MODE_STREAMING:
3908 control = get_unaligned_le16(skb->data);
3912 if (l2cap_check_fcs(chan, skb))
3915 if (__is_sar_start(control))
3918 if (chan->fcs == L2CAP_FCS_CRC16)
/* Streaming mode never carries S-frames; oversize or negative
 * length is also discarded. */
3921 if (len > chan->mps || len < 0 || __is_sframe(control))
3924 tx_seq = __get_txseq(control);
/* Track the sequence; a mismatch just resynchronizes (streaming
 * mode accepts loss). */
3926 if (chan->expected_tx_seq == tx_seq)
3927 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3929 chan->expected_tx_seq = (tx_seq + 1) % 64;
3931 l2cap_streaming_reassembly_sdu(chan, skb, control);
3936 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless-channel frame to the global channel
 * listening on @psm for our source address, enforcing state and
 * incoming-MTU checks before handing the skb up. */
3950 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3952 struct sock *sk = NULL;
3953 struct l2cap_chan *chan;
3955 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3963 BT_DBG("sk %p, len %d", sk, skb->len);
3965 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3968 if (chan->imtu < skb->len)
3971 if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT (LE data) frame to the global channel bound to
 * @cid for our source address; mirrors l2cap_conless_channel but
 * looks up by source CID instead of PSM. */
3983 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3985 struct sock *sk = NULL;
3986 struct l2cap_chan *chan;
3988 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3996 BT_DBG("sk %p, len %d", sk, skb->len);
3998 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4001 if (chan->imtu < skb->len)
4004 if (!chan->ops->recv(chan->data, skb))
/* Route a complete L2CAP frame by destination CID: signaling,
 * connectionless, ATT, SMP, or a connection-oriented data channel.
 * The length in the basic header must match the payload exactly. */
4016 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4018 struct l2cap_hdr *lh = (void *) skb->data;
4022 skb_pull(skb, L2CAP_HDR_SIZE);
4023 cid = __le16_to_cpu(lh->cid);
4024 len = __le16_to_cpu(lh->len);
/* Header length must agree with what actually arrived. */
4026 if (len != skb->len) {
4031 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4034 case L2CAP_CID_LE_SIGNALING:
4035 case L2CAP_CID_SIGNALING:
4036 l2cap_sig_channel(conn, skb);
4039 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM first in the payload. */
4040 psm = get_unaligned_le16(skb->data);
4042 l2cap_conless_channel(conn, psm, skb);
4045 case L2CAP_CID_LE_DATA:
4046 l2cap_att_channel(conn, cid, skb);
/* SMP security-manager CID: a failure drops the whole link. */
4050 if (smp_sig_channel(conn, skb))
4051 l2cap_conn_del(conn->hcon, EACCES);
4055 l2cap_data_channel(conn, cid, skb);
4060 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection
 * from @bdaddr.  Scans listening channels; an exact source-address
 * match (lm1) takes precedence over wildcard listeners (lm2). */
4062 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4064 int exact = 0, lm1 = 0, lm2 = 0;
4065 struct l2cap_chan *c;
/* Only classic ACL links are subject to this policy. */
4067 if (type != ACL_LINK)
4070 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4072 /* Find listening sockets and check their link_mode */
4073 read_lock(&chan_list_lock);
4074 list_for_each_entry(c, &chan_list, global_l) {
4075 struct sock *sk = c->sk;
4077 if (c->state != BT_LISTEN)
/* Listener bound to this adapter's own address. */
4080 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4081 lm1 |= HCI_LM_ACCEPT;
4083 lm1 |= HCI_LM_MASTER;
/* Wildcard listener (BDADDR_ANY). */
4085 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4086 lm2 |= HCI_LM_ACCEPT;
4088 lm2 |= HCI_LM_MASTER;
4091 read_unlock(&chan_list_lock);
4093 return exact ? lm1 : lm2;
/* HCI callback: a connection attempt completed.  On success attach
 * an l2cap_conn to the hci_conn and mark it ready; on failure tear
 * down any existing L2CAP state with the mapped errno. */
4096 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4098 struct l2cap_conn *conn;
4100 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4102 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4106 conn = l2cap_conn_add(hcon, status);
4108 l2cap_conn_ready(conn);
4110 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the reason code to use when this link is
 * disconnected, taken from the L2CAP connection state. */
4115 static int l2cap_disconn_ind(struct hci_conn *hcon)
4117 struct l2cap_conn *conn = hcon->l2cap_data;
4119 BT_DBG("hcon %p", hcon);
4121 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4124 return conn->disc_reason;
/* HCI callback: the link went down — destroy the L2CAP connection
 * with the HCI reason mapped to an errno. */
4127 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4129 BT_DBG("hcon %p reason %d", hcon, reason);
4131 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4134 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to a change of link encryption on a connection-oriented
 * channel.  Losing encryption closes BT_SECURITY_HIGH channels
 * immediately and gives BT_SECURITY_MEDIUM ones a 5-second grace
 * timer; regaining it cancels that timer. */
4139 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4141 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4144 if (encrypt == 0x00) {
4145 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4146 __clear_chan_timer(chan);
4147 __set_chan_timer(chan, HZ * 5);
4148 } else if (chan->sec_level == BT_SECURITY_HIGH)
4149 l2cap_chan_close(chan, ECONNREFUSED);
4151 if (chan->sec_level == BT_SECURITY_MEDIUM)
4152 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption for the link completed
 * with @status.  Walk every channel on the connection and advance
 * its state machine — mark LE channels ready, re-check encryption
 * on established channels, fire the deferred connect request for
 * BT_CONNECT channels, and answer pending incoming connections for
 * BT_CONNECT2 channels. */
4156 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4158 struct l2cap_conn *conn = hcon->l2cap_data;
4159 struct l2cap_chan *chan;
4164 BT_DBG("conn %p", conn);
4166 read_lock(&conn->chan_lock);
4168 list_for_each_entry(chan, &conn->chan_l, list) {
4169 struct sock *sk = chan->sk;
4173 BT_DBG("chan->scid %d", chan->scid);
/* LE data channel: successful encryption promotes the channel's
 * security level and makes it ready. */
4175 if (chan->scid == L2CAP_CID_LE_DATA) {
4176 if (!status && encrypt) {
4177 chan->sec_level = hcon->sec_level;
4178 del_timer(&conn->security_timer);
4179 l2cap_chan_ready(sk);
4186 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4191 if (!status && (chan->state == BT_CONNECTED ||
4192 chan->state == BT_CONFIG)) {
4193 l2cap_check_encryption(chan, encrypt);
/* Outgoing connection that was waiting for security: send the
 * connect request now (or time out quickly on failure). */
4198 if (chan->state == BT_CONNECT) {
4200 struct l2cap_conn_req req;
4201 req.scid = cpu_to_le16(chan->scid);
4202 req.psm = chan->psm;
4204 chan->ident = l2cap_get_ident(conn);
4205 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4207 l2cap_send_cmd(conn, chan->ident,
4208 L2CAP_CONN_REQ, sizeof(req), &req);
4210 __clear_chan_timer(chan);
4211 __set_chan_timer(chan, HZ / 10);
/* Incoming connection waiting for security: answer with pending
 * (defer_setup), success, or security-block. */
4213 } else if (chan->state == BT_CONNECT2) {
4214 struct l2cap_conn_rsp rsp;
4218 if (bt_sk(sk)->defer_setup) {
4219 struct sock *parent = bt_sk(sk)->parent;
4220 res = L2CAP_CR_PEND;
4221 stat = L2CAP_CS_AUTHOR_PEND;
/* Wake the listening parent so it can accept(). */
4222 parent->sk_data_ready(parent, 0);
4224 l2cap_state_change(chan, BT_CONFIG);
4225 res = L2CAP_CR_SUCCESS;
4226 stat = L2CAP_CS_NO_INFO;
4229 l2cap_state_change(chan, BT_DISCONN);
4230 __set_chan_timer(chan, HZ / 10);
4231 res = L2CAP_CR_SEC_BLOCK;
4232 stat = L2CAP_CS_NO_INFO;
4235 rsp.scid = cpu_to_le16(chan->dcid);
4236 rsp.dcid = cpu_to_le16(chan->scid);
4237 rsp.result = cpu_to_le16(res);
4238 rsp.status = cpu_to_le16(stat);
4239 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4246 read_unlock(&conn->chan_lock);
/* HCI callback: reassemble ACL fragments into complete L2CAP frames.
 * A start fragment (no ACL_CONT flag) carries the basic header with
 * the total length; continuation fragments are appended to
 * conn->rx_skb until rx_len reaches zero, then the complete frame is
 * dispatched.  Malformed fragment sequences mark the connection
 * unreliable (ECOMM). */
4251 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4253 struct l2cap_conn *conn = hcon->l2cap_data;
4256 conn = l2cap_conn_add(hcon, 0);
4261 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4263 if (!(flags & ACL_CONT)) {
4264 struct l2cap_hdr *hdr;
4265 struct l2cap_chan *chan;
/* A start fragment while a previous reassembly is pending means
 * the earlier frame was truncated — drop it. */
4270 BT_ERR("Unexpected start frame (len %d)", skb->len);
4271 kfree_skb(conn->rx_skb);
4272 conn->rx_skb = NULL;
4274 l2cap_conn_unreliable(conn, ECOMM);
4277 /* Start fragment always begin with Basic L2CAP header */
4278 if (skb->len < L2CAP_HDR_SIZE) {
4279 BT_ERR("Frame is too short (len %d)", skb->len);
4280 l2cap_conn_unreliable(conn, ECOMM);
4284 hdr = (struct l2cap_hdr *) skb->data;
4285 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4286 cid = __le16_to_cpu(hdr->cid);
4288 if (len == skb->len) {
4289 /* Complete frame received */
4290 l2cap_recv_frame(conn, skb);
4294 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4296 if (skb->len > len) {
4297 BT_ERR("Frame is too long (len %d, expected len %d)",
4299 l2cap_conn_unreliable(conn, ECOMM);
/* Reject early if the announced frame would exceed the target
 * channel's incoming MTU. */
4303 chan = l2cap_get_chan_by_scid(conn, cid);
4305 if (chan && chan->sk) {
4306 struct sock *sk = chan->sk;
4308 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4309 BT_ERR("Frame exceeding recv MTU (len %d, "
4313 l2cap_conn_unreliable(conn, ECOMM);
4319 /* Allocate skb for the complete frame (with header) */
4320 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4324 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4326 conn->rx_len = len - skb->len;
/* Continuation fragment path (ACL_CONT set). */
4328 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4330 if (!conn->rx_len) {
4331 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4332 l2cap_conn_unreliable(conn, ECOMM);
4336 if (skb->len > conn->rx_len) {
4337 BT_ERR("Fragment is too long (len %d, expected %d)",
4338 skb->len, conn->rx_len);
4339 kfree_skb(conn->rx_skb);
4340 conn->rx_skb = NULL;
4342 l2cap_conn_unreliable(conn, ECOMM);
4346 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4348 conn->rx_len -= skb->len;
4350 if (!conn->rx_len) {
4351 /* Complete frame received */
4352 l2cap_recv_frame(conn, conn->rx_skb);
4353 conn->rx_skb = NULL;
/* seq_file show handler for the l2cap debugfs entry: one line per
 * registered channel with addresses, state, PSM, CIDs, MTUs,
 * security level and mode. */
4362 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4364 struct l2cap_chan *c;
4366 read_lock_bh(&chan_list_lock);
4368 list_for_each_entry(c, &chan_list, global_l) {
4369 struct sock *sk = c->sk;
4371 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4372 batostr(&bt_sk(sk)->src),
4373 batostr(&bt_sk(sk)->dst),
4374 c->state, __le16_to_cpu(c->psm),
4375 c->scid, c->dcid, c->imtu, c->omtu,
4376 c->sec_level, c->mode);
4379 read_unlock_bh(&chan_list_lock);
/* debugfs open: bind the seq_file show handler. */
4384 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4386 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs file (single_open-based
 * seq_file). */
4389 static const struct file_operations l2cap_debugfs_fops = {
4390 .open = l2cap_debugfs_open,
4392 .llseek = seq_lseek,
4393 .release = single_release,
/* Dentry of the debugfs file, created in l2cap_init(). */
4396 static struct dentry *l2cap_debugfs;
/* L2CAP's registration with the HCI core: the callbacks invoked for
 * connection, disconnection, security and inbound ACL data events. */
4398 static struct hci_proto l2cap_hci_proto = {
4400 .id = HCI_PROTO_L2CAP,
4401 .connect_ind = l2cap_connect_ind,
4402 .connect_cfm = l2cap_connect_cfm,
4403 .disconn_ind = l2cap_disconn_ind,
4404 .disconn_cfm = l2cap_disconn_cfm,
4405 .security_cfm = l2cap_security_cfm,
4406 .recv_acldata = l2cap_recv_acldata
/* Module init: register the socket layer, create the local-busy
 * workqueue, hook into HCI, and create the debugfs file.  Failures
 * unwind in reverse order. */
4409 int __init l2cap_init(void)
4413 err = l2cap_init_sockets();
4417 _busy_wq = create_singlethread_workqueue("l2cap");
4423 err = hci_register_proto(&l2cap_hci_proto);
4425 BT_ERR("L2CAP protocol registration failed");
4426 bt_sock_unregister(BTPROTO_L2CAP);
/* debugfs failure is non-fatal — only logged. */
4431 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4432 bt_debugfs, NULL, &l2cap_debugfs_fops);
4434 BT_ERR("Failed to create L2CAP debug file");
4440 destroy_workqueue(_busy_wq);
4441 l2cap_cleanup_sockets();
/* Module exit: remove debugfs, drain and destroy the busy workqueue,
 * unregister from HCI, and tear down the socket layer. */
4445 void l2cap_exit(void)
4447 debugfs_remove(l2cap_debugfs)
4449 flush_workqueue(_busy_wq);
4450 destroy_workqueue(_busy_wq);
4452 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4453 BT_ERR("L2CAP protocol unregistration failed");
4455 l2cap_cleanup_sockets();
4458 module_param(disable_ertm, bool, 0644);
4459 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");