2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
/* Module-wide L2CAP state: advertised feature mask, fixed-channel bitmap,
 * the global list of channels and its lock, plus forward declarations for
 * helpers defined later in the file.
 * NOTE(review): this extract is line-sampled; interior lines are missing. */
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Bit 0x02 = L2CAP signalling channel is the only fixed channel supported. */
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
64 static LIST_HEAD(chan_list);
65 static DEFINE_RWLOCK(chan_list_lock);
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
69 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
72 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77 /* ---- L2CAP channels ---- */
/* Take a reference on a channel. */
79 static inline void chan_hold(struct l2cap_chan *c)
81 atomic_inc(&c->refcnt);
/* Drop a reference; frees the channel when the count reaches zero
 * (free path not visible in this extract). */
84 static inline void chan_put(struct l2cap_chan *c)
86 if (atomic_dec_and_test(&c->refcnt))
/* Find a channel on this connection by destination CID.
 * Caller must hold conn->chan_lock. */
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
94 list_for_each_entry(c, &conn->chan_l, list) {
/* Find a channel on this connection by source CID.
 * Caller must hold conn->chan_lock. */
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104 struct l2cap_chan *c;
106 list_for_each_entry(c, &conn->chan_l, list) {
113 /* Find channel with given SCID.
114 * Returns locked socket */
115 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
117 struct l2cap_chan *c;
119 read_lock(&conn->chan_lock);
120 c = __l2cap_get_chan_by_scid(conn, cid);
/* Locking of the found socket happens in the elided lines. */
123 read_unlock(&conn->chan_lock);
/* Find a channel by the signalling-command identifier it is waiting on.
 * Caller must hold conn->chan_lock. */
127 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
129 struct l2cap_chan *c;
131 list_for_each_entry(c, &conn->chan_l, list) {
132 if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
138 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
140 struct l2cap_chan *c;
142 read_lock(&conn->chan_lock);
143 c = __l2cap_get_chan_by_ident(conn, ident);
146 read_unlock(&conn->chan_lock);
/* Find a globally-registered channel bound to the given PSM/source address.
 * Caller must hold chan_list_lock. */
150 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
152 struct l2cap_chan *c;
154 list_for_each_entry(c, &chan_list, global_l) {
155 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a channel to a PSM. If psm is 0, auto-allocate an odd dynamic PSM
 * in the 0x1001..0x10ff range (L2CAP PSMs must have an odd least byte). */
164 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
168 write_lock_bh(&chan_list_lock);
/* Explicit PSM requested: fail if already taken on this address
 * (error path elided in this extract). */
170 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
183 for (p = 0x1001; p < 0x1100; p += 2)
184 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
185 chan->psm = cpu_to_le16(p);
186 chan->sport = cpu_to_le16(p);
193 write_unlock_bh(&chan_list_lock);
/* Bind a channel directly to a fixed source CID (elided body sets chan->scid). */
197 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
199 write_lock_bh(&chan_list_lock);
203 write_unlock_bh(&chan_list_lock);
/* Allocate the first free dynamic source CID on this connection.
 * Caller must hold conn->chan_lock. */
208 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
210 u16 cid = L2CAP_CID_DYN_START;
212 for (; cid < L2CAP_CID_DYN_END; cid++) {
213 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Arm (or re-arm) a channel timer; timeout is in milliseconds. */
220 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
/* NOTE(review): format says "chan %p" but chan->sk is passed — likely a
 * debug-message mismatch; cannot fix code in this extract. */
222 BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
/* mod_timer() returning 0 means the timer was not pending; the elided
 * line presumably takes a channel reference for the new pending timer. */
224 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
/* Cancel a pending channel timer (elided line drops the matching reference). */
228 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
230 BT_DBG("chan %p state %d", chan, chan->state);
232 if (timer_pending(timer) && del_timer(timer))
/* Move the channel to a new state and notify the owner via its ops hook. */
236 static void l2cap_state_change(struct l2cap_chan *chan, int state)
239 chan->ops->state_change(chan->data, state);
/* Channel timer expiry (runs in timer/softirq context): tear the channel
 * down, choosing an errno based on how far the connection had progressed. */
242 static void l2cap_chan_timeout(unsigned long arg)
244 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
245 struct sock *sk = chan->sk;
248 BT_DBG("chan %p state %d", chan, chan->state);
252 if (sock_owned_by_user(sk)) {
253 /* sk is owned by user. Try again later */
254 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
260 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
261 reason = ECONNREFUSED;
262 else if (chan->state == BT_CONNECT &&
263 chan->sec_level != BT_SECURITY_SDP)
264 reason = ECONNREFUSED;
268 l2cap_chan_close(chan, reason);
272 chan->ops->close(chan->data);
/* Allocate and initialise a new channel bound to socket sk, register it on
 * the global channel list, and return it (NULL check elided in extract). */
276 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
278 struct l2cap_chan *chan;
/* GFP_ATOMIC: may be called from non-sleepable (bh) context. */
280 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
286 write_lock_bh(&chan_list_lock);
287 list_add(&chan->global_l, &chan_list);
288 write_unlock_bh(&chan_list_lock);
290 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
292 chan->state = BT_OPEN;
/* Initial reference belongs to the creator. */
294 atomic_set(&chan->refcnt, 1);
/* Unregister a channel from the global list (final put elided). */
299 void l2cap_chan_destroy(struct l2cap_chan *chan)
301 write_lock_bh(&chan_list_lock);
302 list_del(&chan->global_l);
303 write_unlock_bh(&chan_list_lock);
/* Attach a channel to an L2CAP connection and assign its CIDs/MTU according
 * to channel type. Caller must hold conn->chan_lock for writing. */
308 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
310 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
311 chan->psm, chan->dcid);
/* 0x13 = "remote user terminated connection" default disconnect reason. */
313 conn->disc_reason = 0x13;
317 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
/* LE links use the fixed LE data channel rather than a dynamic CID. */
318 if (conn->hcon->type == LE_LINK) {
320 chan->omtu = L2CAP_LE_DEFAULT_MTU;
321 chan->scid = L2CAP_CID_LE_DATA;
322 chan->dcid = L2CAP_CID_LE_DATA;
324 /* Alloc CID for connection-oriented socket */
325 chan->scid = l2cap_alloc_cid(conn);
326 chan->omtu = L2CAP_DEFAULT_MTU;
328 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
329 /* Connectionless socket */
330 chan->scid = L2CAP_CID_CONN_LESS;
331 chan->dcid = L2CAP_CID_CONN_LESS;
332 chan->omtu = L2CAP_DEFAULT_MTU;
334 /* Raw socket can send/recv signalling messages only */
335 chan->scid = L2CAP_CID_SIGNALING;
336 chan->dcid = L2CAP_CID_SIGNALING;
337 chan->omtu = L2CAP_DEFAULT_MTU;
342 list_add(&chan->list, &conn->chan_l);
346 * Must be called on the locked socket. */
/* Detach a channel from its connection and mark the socket closed/zapped,
 * waking any accepting parent. Purges ERTM queues and timers when needed. */
347 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
349 struct sock *sk = chan->sk;
350 struct l2cap_conn *conn = chan->conn;
351 struct sock *parent = bt_sk(sk)->parent;
353 __clear_chan_timer(chan);
355 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
358 /* Delete from channel list */
359 write_lock_bh(&conn->chan_lock);
360 list_del(&chan->list);
361 write_unlock_bh(&conn->chan_lock);
/* Drop the hci_conn reference taken when the channel was added. */
365 hci_conn_put(conn->hcon);
368 l2cap_state_change(chan, BT_CLOSED);
369 sock_set_flag(sk, SOCK_ZAPPED);
/* If still on a listening parent's accept queue, unlink and wake the parent. */
375 bt_accept_unlink(sk);
376 parent->sk_data_ready(parent, 0);
378 sk->sk_state_change(sk);
/* Skip queue teardown unless configuration completed in both directions. */
380 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
381 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
384 skb_queue_purge(&chan->tx_q);
/* ERTM mode: stop all retransmission machinery and free SREJ state. */
386 if (chan->mode == L2CAP_MODE_ERTM) {
387 struct srej_list *l, *tmp;
389 __clear_retrans_timer(chan);
390 __clear_monitor_timer(chan);
391 __clear_ack_timer(chan);
393 skb_queue_purge(&chan->srej_q);
395 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every connection pending on a listening socket's accept queue. */
402 static void l2cap_chan_cleanup_listen(struct sock *parent)
406 BT_DBG("parent %p", parent);
408 /* Close not yet accepted channels */
409 while ((sk = bt_accept_dequeue(parent, NULL))) {
410 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
411 __clear_chan_timer(chan);
413 l2cap_chan_close(chan, ECONNRESET);
415 chan->ops->close(chan->data);
/* Close a channel gracefully according to its current state: listening
 * sockets drain their accept queue; connected ACL channels send a
 * Disconnect Request; half-open incoming channels send a Connect Response
 * rejection; everything else is deleted directly. */
419 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
421 struct l2cap_conn *conn = chan->conn;
422 struct sock *sk = chan->sk;
424 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
426 switch (chan->state) {
/* BT_LISTEN: tear down any not-yet-accepted children first. */
428 l2cap_chan_cleanup_listen(sk);
430 l2cap_state_change(chan, BT_CLOSED);
431 sock_set_flag(sk, SOCK_ZAPPED);
/* BT_CONNECTED/BT_CONFIG on ACL: graceful disconnect with a timeout. */
436 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
437 conn->hcon->type == ACL_LINK) {
438 __clear_chan_timer(chan);
439 __set_chan_timer(chan, sk->sk_sndtimeo);
440 l2cap_send_disconn_req(conn, chan, reason);
442 l2cap_chan_del(chan, reason);
/* BT_CONNECT2: incoming connection not yet accepted — reject it. */
446 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
447 conn->hcon->type == ACL_LINK) {
448 struct l2cap_conn_rsp rsp;
451 if (bt_sk(sk)->defer_setup)
452 result = L2CAP_CR_SEC_BLOCK;
454 result = L2CAP_CR_BAD_PSM;
455 l2cap_state_change(chan, BT_DISCONN);
/* Response carries CIDs swapped relative to our local view. */
457 rsp.scid = cpu_to_le16(chan->dcid);
458 rsp.dcid = cpu_to_le16(chan->scid);
459 rsp.result = cpu_to_le16(result);
460 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
461 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
465 l2cap_chan_del(chan, reason);
/* BT_CONNECT / BT_DISCONN: no signalling needed, just delete. */
470 l2cap_chan_del(chan, reason);
/* default: mark the socket dead. */
474 sock_set_flag(sk, SOCK_ZAPPED);
/* Map a channel's type and security level onto an HCI authentication
 * requirement: raw channels use dedicated bonding, SDP (PSM 0x0001) needs
 * no bonding, everything else uses general bonding. */
479 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
481 if (chan->chan_type == L2CAP_CHAN_RAW) {
482 switch (chan->sec_level) {
483 case BT_SECURITY_HIGH:
484 return HCI_AT_DEDICATED_BONDING_MITM;
485 case BT_SECURITY_MEDIUM:
486 return HCI_AT_DEDICATED_BONDING;
488 return HCI_AT_NO_BONDING;
/* PSM 0x0001 is SDP: never require more than SDP-level security. */
490 } else if (chan->psm == cpu_to_le16(0x0001)) {
491 if (chan->sec_level == BT_SECURITY_LOW)
492 chan->sec_level = BT_SECURITY_SDP;
494 if (chan->sec_level == BT_SECURITY_HIGH)
495 return HCI_AT_NO_BONDING_MITM;
497 return HCI_AT_NO_BONDING;
499 switch (chan->sec_level) {
500 case BT_SECURITY_HIGH:
501 return HCI_AT_GENERAL_BONDING_MITM;
502 case BT_SECURITY_MEDIUM:
503 return HCI_AT_GENERAL_BONDING;
505 return HCI_AT_NO_BONDING;
510 /* Service level security */
511 static inline int l2cap_check_security(struct l2cap_chan *chan)
513 struct l2cap_conn *conn = chan->conn;
516 auth_type = l2cap_get_auth_type(chan);
/* Delegates the actual security enforcement to the HCI layer. */
518 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for this connection. */
521 static u8 l2cap_get_ident(struct l2cap_conn *conn)
525 /* Get next available identificator.
526 * 1 - 128 are used by kernel.
527 * 129 - 199 are reserved.
528 * 200 - 254 are used by utilities like l2ping, etc.
/* Wrap within the kernel's 1..128 identifier range under conn->lock. */
531 spin_lock_bh(&conn->lock);
533 if (++conn->tx_ident > 128)
538 spin_unlock_bh(&conn->lock);
/* Build and transmit one L2CAP signalling command over the ACL link. */
543 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
545 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
548 BT_DBG("code 0x%2.2x", code);
/* Use non-flushable ACL packets when the controller supports them. */
553 if (lmp_no_flush_capable(conn->hcon->hdev))
554 flags = ACL_START_NO_FLUSH;
558 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
560 hci_send_acl(conn->hcon, skb, flags);
/* Build and send an ERTM supervisory (S-) frame carrying the given control
 * field; appends an FCS when the channel negotiated CRC16. */
563 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
566 struct l2cap_hdr *lh;
567 struct l2cap_conn *conn = chan->conn;
/* hlen = L2CAP header + 2-byte control field (FCS added below if enabled). */
568 int count, hlen = L2CAP_HDR_SIZE + 2;
571 if (chan->state != BT_CONNECTED)
574 if (chan->fcs == L2CAP_FCS_CRC16)
577 BT_DBG("chan %p, control 0x%2.2x", chan, control);
579 count = min_t(unsigned int, conn->mtu, hlen);
580 control |= L2CAP_CTRL_FRAME_TYPE;
/* Piggy-back pending F-bit / P-bit requests onto this frame. */
582 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
583 control |= L2CAP_CTRL_FINAL;
585 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
586 control |= L2CAP_CTRL_POLL;
588 skb = bt_skb_alloc(count, GFP_ATOMIC);
592 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
593 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
594 lh->cid = cpu_to_le16(chan->dcid);
595 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers the whole frame except the FCS field itself. */
597 if (chan->fcs == L2CAP_FCS_CRC16) {
598 u16 fcs = crc16(0, (u8 *)lh, count - 2);
599 put_unaligned_le16(fcs, skb_put(skb, 2));
602 if (lmp_no_flush_capable(conn->hcon->hdev))
603 flags = ACL_START_NO_FLUSH;
607 bt_cb(skb)->force_active = chan->force_active;
609 hci_send_acl(chan->conn->hcon, skb, flags);
/* Send RR (receiver ready) or RNR (receiver not ready) depending on the
 * local-busy condition, stamping in the current buffer_seq as ReqSeq. */
612 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
614 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
615 control |= L2CAP_SUPER_RCV_NOT_READY;
616 set_bit(CONN_RNR_SENT, &chan->conn_state);
618 control |= L2CAP_SUPER_RCV_READY;
620 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
622 l2cap_send_sframe(chan, control);
/* True when no Connect Request is currently outstanding for this channel. */
625 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
627 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Kick off channel establishment: if the remote feature mask is already
 * known, send a Connect Request (once security passes); otherwise first
 * issue an Information Request for the feature mask. */
630 static void l2cap_do_start(struct l2cap_chan *chan)
632 struct l2cap_conn *conn = chan->conn;
634 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature-mask exchange still in flight — wait for it to finish. */
635 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
638 if (l2cap_check_security(chan) &&
639 __l2cap_no_conn_pending(chan)) {
640 struct l2cap_conn_req req;
641 req.scid = cpu_to_le16(chan->scid);
644 chan->ident = l2cap_get_ident(conn);
645 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
647 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
/* No feature mask yet: request it and arm the info timer. */
651 struct l2cap_info_req req;
652 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
654 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
655 conn->info_ident = l2cap_get_ident(conn);
657 mod_timer(&conn->info_timer, jiffies +
658 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
660 l2cap_send_cmd(conn, conn->info_ident,
661 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether a channel mode is supported by both the local feature
 * mask (optionally widened by a module parameter, elided here) and the
 * remote feature mask. Basic mode falls through to the default. */
665 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
667 u32 local_feat_mask = l2cap_feat_mask;
669 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
672 case L2CAP_MODE_ERTM:
673 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
674 case L2CAP_MODE_STREAMING:
675 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP Disconnect Request for this channel, first stopping all
 * ERTM timers, then move the channel to BT_DISCONN. */
681 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
684 struct l2cap_disconn_req req;
691 if (chan->mode == L2CAP_MODE_ERTM) {
692 __clear_retrans_timer(chan);
693 __clear_monitor_timer(chan);
694 __clear_ack_timer(chan);
697 req.dcid = cpu_to_le16(chan->dcid);
698 req.scid = cpu_to_le16(chan->scid);
699 l2cap_send_cmd(conn, l2cap_get_ident(conn),
700 L2CAP_DISCONN_REQ, sizeof(req), &req);
702 l2cap_state_change(chan, BT_DISCONN);
706 /* ---- L2CAP connections ---- */
/* Drive all connection-oriented channels on this link forward: channels in
 * BT_CONNECT get a Connect Request (or are closed if their mode is not
 * supported); channels in BT_CONNECT2 get their pending Connect Response. */
707 static void l2cap_conn_start(struct l2cap_conn *conn)
709 struct l2cap_chan *chan, *tmp;
711 BT_DBG("conn %p", conn);
713 read_lock(&conn->chan_lock);
/* _safe variant because l2cap_chan_close() below can unlink entries. */
715 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
716 struct sock *sk = chan->sk;
720 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
725 if (chan->state == BT_CONNECT) {
726 struct l2cap_conn_req req;
728 if (!l2cap_check_security(chan) ||
729 !__l2cap_no_conn_pending(chan)) {
/* State-2 devices close channels whose required mode the remote
 * does not support. */
734 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
735 && test_bit(CONF_STATE2_DEVICE,
736 &chan->conf_state)) {
737 /* l2cap_chan_close() calls list_del(chan)
738 * so release the lock */
739 read_unlock(&conn->chan_lock);
740 l2cap_chan_close(chan, ECONNRESET);
741 read_lock(&conn->chan_lock);
746 req.scid = cpu_to_le16(chan->scid);
749 chan->ident = l2cap_get_ident(conn);
750 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
752 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
755 } else if (chan->state == BT_CONNECT2) {
756 struct l2cap_conn_rsp rsp;
758 rsp.scid = cpu_to_le16(chan->dcid);
759 rsp.dcid = cpu_to_le16(chan->scid);
761 if (l2cap_check_security(chan)) {
/* Deferred setup: report "pending, authorization" and wake the
 * listening parent so userspace can decide. */
762 if (bt_sk(sk)->defer_setup) {
763 struct sock *parent = bt_sk(sk)->parent;
764 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
765 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
767 parent->sk_data_ready(parent, 0);
770 l2cap_state_change(chan, BT_CONFIG);
771 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
772 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
775 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
776 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
779 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only start configuration after a successful response, and only once. */
782 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
783 rsp.result != L2CAP_CR_SUCCESS) {
788 set_bit(CONF_REQ_SENT, &chan->conf_state);
789 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
790 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
791 chan->num_conf_req++;
797 read_unlock(&conn->chan_lock);
800 /* Find socket with cid and source bdaddr.
801 * Returns closest match, locked.
/* Exact source-address match wins; a BDADDR_ANY listener is kept as the
 * closest fallback (c1) and returned if no exact match exists. */
803 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
805 struct l2cap_chan *c, *c1 = NULL;
807 read_lock(&chan_list_lock);
809 list_for_each_entry(c, &chan_list, global_l) {
810 struct sock *sk = c->sk;
812 if (state && c->state != state)
815 if (c->scid == cid) {
817 if (!bacmp(&bt_sk(sk)->src, src)) {
818 read_unlock(&chan_list_lock);
823 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
828 read_unlock(&chan_list_lock);
/* Handle an incoming LE connection: find the listener on the LE data CID,
 * spawn a child channel, attach it to the connection and wake the parent. */
833 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
835 struct sock *parent, *sk;
836 struct l2cap_chan *chan, *pchan;
840 /* Check if we have socket listening on cid */
841 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
848 bh_lock_sock(parent);
850 /* Check for backlog size */
851 if (sk_acceptq_is_full(parent)) {
852 BT_DBG("backlog full %d", parent->sk_ack_backlog);
856 chan = pchan->ops->new_connection(pchan->data);
862 write_lock_bh(&conn->chan_lock);
/* Keep the ACL alive while this channel exists. */
864 hci_conn_hold(conn->hcon);
865 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
867 bacpy(&bt_sk(sk)->src, conn->src);
868 bacpy(&bt_sk(sk)->dst, conn->dst);
870 bt_accept_enqueue(parent, sk);
872 __l2cap_chan_add(conn, chan);
874 __set_chan_timer(chan, sk->sk_sndtimeo);
876 l2cap_state_change(chan, BT_CONNECTED);
877 parent->sk_data_ready(parent, 0);
879 write_unlock_bh(&conn->chan_lock);
882 bh_unlock_sock(parent);
/* Mark a channel fully connected: reset configuration state, stop the
 * setup timer, and notify the socket (and listening parent, if any). */
885 static void l2cap_chan_ready(struct sock *sk)
887 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
888 struct sock *parent = bt_sk(sk)->parent;
890 BT_DBG("sk %p, parent %p", sk, parent);
892 chan->conf_state = 0;
893 __clear_chan_timer(chan);
895 l2cap_state_change(chan, BT_CONNECTED);
896 sk->sk_state_change(sk);
899 parent->sk_data_ready(parent, 0);
/* Called when the underlying HCI link comes up: handle LE incoming/outgoing
 * security, then advance every channel on the connection. */
902 static void l2cap_conn_ready(struct l2cap_conn *conn)
904 struct l2cap_chan *chan;
905 struct hci_conn *hcon = conn->hcon;
907 BT_DBG("conn %p", conn);
/* Incoming LE link: accept via the LE data listener. */
909 if (!hcon->out && hcon->type == LE_LINK)
910 l2cap_le_conn_ready(conn);
/* Outgoing LE link: elevate security via SMP first. */
912 if (hcon->out && hcon->type == LE_LINK)
913 smp_conn_security(hcon, hcon->pending_sec_level);
915 read_lock(&conn->chan_lock);
917 list_for_each_entry(chan, &conn->chan_l, list) {
918 struct sock *sk = chan->sk;
922 if (hcon->type == LE_LINK) {
923 if (smp_conn_security(hcon, chan->sec_level))
924 l2cap_chan_ready(sk);
/* Connectionless/raw channels become connected as soon as the link is up. */
926 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
927 __clear_chan_timer(chan);
928 l2cap_state_change(chan, BT_CONNECTED);
929 sk->sk_state_change(sk);
931 } else if (chan->state == BT_CONNECT)
932 l2cap_do_start(chan);
937 read_unlock(&conn->chan_lock);
940 /* Notify sockets that we cannot guaranty reliability anymore */
941 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
943 struct l2cap_chan *chan;
945 BT_DBG("conn %p", conn);
947 read_lock(&conn->chan_lock);
949 list_for_each_entry(chan, &conn->chan_l, list) {
950 struct sock *sk = chan->sk;
/* Only force-reliable channels are told (error set in elided line). */
952 if (chan->force_reliable)
956 read_unlock(&conn->chan_lock);
/* Information-request timer expiry: give up on the feature-mask exchange
 * and proceed with channel establishment anyway. */
959 static void l2cap_info_timeout(unsigned long arg)
961 struct l2cap_conn *conn = (void *) arg;
963 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
964 conn->info_ident = 0;
966 l2cap_conn_start(conn);
/* Tear down an entire L2CAP connection: delete every channel, cancel
 * timers, destroy SMP state, and detach from the hci_conn. */
969 static void l2cap_conn_del(struct hci_conn *hcon, int err)
971 struct l2cap_conn *conn = hcon->l2cap_data;
972 struct l2cap_chan *chan, *l;
978 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Free any partially reassembled incoming frame. */
980 kfree_skb(conn->rx_skb);
983 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
986 l2cap_chan_del(chan, err);
988 chan->ops->close(chan->data);
991 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
992 del_timer_sync(&conn->info_timer);
994 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
995 del_timer(&conn->security_timer);
996 smp_chan_destroy(conn);
999 hcon->l2cap_data = NULL;
/* SMP security timer expiry: abort the whole connection. */
1003 static void security_timeout(unsigned long arg)
1005 struct l2cap_conn *conn = (void *) arg;
1007 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) L2CAP connection object for an hci_conn,
 * initialising MTU, addresses, locks and the per-type timer. */
1010 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1012 struct l2cap_conn *conn = hcon->l2cap_data;
1017 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1021 hcon->l2cap_data = conn;
1024 BT_DBG("hcon %p conn %p", hcon, conn);
/* LE links may advertise a dedicated (smaller) LE MTU. */
1026 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1027 conn->mtu = hcon->hdev->le_mtu;
1029 conn->mtu = hcon->hdev->acl_mtu;
1031 conn->src = &hcon->hdev->bdaddr;
1032 conn->dst = &hcon->dst;
1034 conn->feat_mask = 0;
1036 spin_lock_init(&conn->lock);
1037 rwlock_init(&conn->chan_lock);
1039 INIT_LIST_HEAD(&conn->chan_l);
/* LE uses the SMP security timer; BR/EDR uses the info-request timer. */
1041 if (hcon->type == LE_LINK)
1042 setup_timer(&conn->security_timer, security_timeout,
1043 (unsigned long) conn);
1045 setup_timer(&conn->info_timer, l2cap_info_timeout,
1046 (unsigned long) conn);
/* 0x13 = "remote user terminated connection" default disconnect reason. */
1048 conn->disc_reason = 0x13;
/* Locked wrapper around __l2cap_chan_add(). */
1053 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1055 write_lock_bh(&conn->chan_lock);
1056 __l2cap_chan_add(conn, chan);
1057 write_unlock_bh(&conn->chan_lock);
1060 /* ---- Socket interface ---- */
1062 /* Find socket with psm and source bdaddr.
1063 * Returns closest match.
/* Same closest-match policy as l2cap_global_chan_by_scid(): exact source
 * address wins immediately; a BDADDR_ANY binding is the fallback (c1). */
1065 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1067 struct l2cap_chan *c, *c1 = NULL;
1069 read_lock(&chan_list_lock);
1071 list_for_each_entry(c, &chan_list, global_l) {
1072 struct sock *sk = c->sk;
1074 if (state && c->state != state)
1077 if (c->psm == psm) {
1079 if (!bacmp(&bt_sk(sk)->src, src)) {
1080 read_unlock(&chan_list_lock);
1085 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1090 read_unlock(&chan_list_lock);
/* Initiate an outgoing channel connection: route to a local adapter, create
 * the ACL or LE baseband link, attach the channel, and start L2CAP setup
 * (immediately if the link is already up). Returns 0 or a negative errno. */
1095 int l2cap_chan_connect(struct l2cap_chan *chan)
1097 struct sock *sk = chan->sk;
1098 bdaddr_t *src = &bt_sk(sk)->src;
1099 bdaddr_t *dst = &bt_sk(sk)->dst;
1100 struct l2cap_conn *conn;
1101 struct hci_conn *hcon;
1102 struct hci_dev *hdev;
1106 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1109 hdev = hci_get_route(dst, src);
1111 return -EHOSTUNREACH;
1113 hci_dev_lock_bh(hdev);
1115 auth_type = l2cap_get_auth_type(chan);
/* A destination CID of the LE data channel selects an LE link. */
1117 if (chan->dcid == L2CAP_CID_LE_DATA)
1118 hcon = hci_connect(hdev, LE_LINK, dst,
1119 chan->sec_level, auth_type);
1121 hcon = hci_connect(hdev, ACL_LINK, dst,
1122 chan->sec_level, auth_type);
1125 err = PTR_ERR(hcon);
1129 conn = l2cap_conn_add(hcon, 0);
1136 /* Update source addr of the socket */
1137 bacpy(src, conn->src);
1139 l2cap_chan_add(conn, chan);
1141 l2cap_state_change(chan, BT_CONNECT);
1142 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Link already up: skip straight to L2CAP-level setup. */
1144 if (hcon->state == BT_CONNECTED) {
1145 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1146 __clear_chan_timer(chan);
1147 if (l2cap_check_security(chan))
1148 l2cap_state_change(chan, BT_CONNECTED);
1150 l2cap_do_start(chan);
1156 hci_dev_unlock_bh(hdev);
/* Sleep until every outstanding ERTM I-frame has been acknowledged (or a
 * signal/error/timeout interrupts the wait). Returns 0 or a negative errno. */
1161 int __l2cap_wait_ack(struct sock *sk)
1163 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1164 DECLARE_WAITQUEUE(wait, current);
1168 add_wait_queue(sk_sleep(sk), &wait);
1169 set_current_state(TASK_INTERRUPTIBLE);
1170 while (chan->unacked_frames > 0 && chan->conn) {
1174 if (signal_pending(current)) {
1175 err = sock_intr_errno(timeo);
1180 timeo = schedule_timeout(timeo);
1182 set_current_state(TASK_INTERRUPTIBLE);
1184 err = sock_error(sk);
1188 set_current_state(TASK_RUNNING);
1189 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: the remote stopped responding to poll frames.
 * Retry with another RR/RNR(P=1) up to remote_max_tx, then disconnect. */
1193 static void l2cap_monitor_timeout(unsigned long arg)
1195 struct l2cap_chan *chan = (void *) arg;
1196 struct sock *sk = chan->sk;
1198 BT_DBG("chan %p", chan);
1201 if (chan->retry_count >= chan->remote_max_tx) {
1202 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1207 chan->retry_count++;
1208 __set_monitor_timer(chan);
1210 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer: poll the peer (P=1) and start waiting for the
 * matching F-bit, switching over to the monitor timer. */
1214 static void l2cap_retrans_timeout(unsigned long arg)
1216 struct l2cap_chan *chan = (void *) arg;
1217 struct sock *sk = chan->sk;
1219 BT_DBG("chan %p", chan);
1222 chan->retry_count = 1;
1223 __set_monitor_timer(chan);
1225 set_bit(CONN_WAIT_F, &chan->conn_state);
1227 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Drop acknowledged frames from the head of the transmit queue, up to (but
 * not including) expected_ack_seq; stop the retrans timer when all acked. */
1231 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1233 struct sk_buff *skb;
1235 while ((skb = skb_peek(&chan->tx_q)) &&
1236 chan->unacked_frames) {
1237 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1240 skb = skb_dequeue(&chan->tx_q);
1243 chan->unacked_frames--;
1246 if (!chan->unacked_frames)
1247 __clear_retrans_timer(chan);
/* Push one assembled frame down to HCI, choosing flush behaviour from the
 * channel's flushable flag and the controller's capabilities. */
1250 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1252 struct hci_conn *hcon = chan->conn->hcon;
1255 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1257 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1258 flags = ACL_START_NO_FLUSH;
1262 bt_cb(skb)->force_active = chan->force_active;
1263 hci_send_acl(hcon, skb, flags);
/* Streaming mode transmit: drain tx_q, stamping each frame's TxSeq into the
 * control field (and recomputing the FCS) before sending. No retransmission. */
1266 static void l2cap_streaming_send(struct l2cap_chan *chan)
1268 struct sk_buff *skb;
1271 while ((skb = skb_dequeue(&chan->tx_q))) {
1272 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1273 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1274 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1276 if (chan->fcs == L2CAP_FCS_CRC16) {
1277 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1278 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1281 l2cap_do_send(chan, skb);
/* TxSeq is a 6-bit sequence number, hence modulo 64. */
1283 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* Retransmit the single I-frame with the given TxSeq (SREJ recovery): find
 * it in tx_q, clone it, rebuild control/FCS, and resend the clone. */
1287 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1289 struct sk_buff *skb, *tx_skb;
1292 skb = skb_peek(&chan->tx_q);
1297 if (bt_cb(skb)->tx_seq == tx_seq)
1300 if (skb_queue_is_last(&chan->tx_q, skb))
1303 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
/* Give up and disconnect after remote_max_tx transmission attempts. */
1305 if (chan->remote_max_tx &&
1306 bt_cb(skb)->retries == chan->remote_max_tx) {
1307 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Clone so the original stays queued for possible further retransmission. */
1311 tx_skb = skb_clone(skb, GFP_ATOMIC);
1312 bt_cb(skb)->retries++;
1313 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
/* Keep only the SAR bits; ReqSeq/TxSeq/F are rebuilt below. */
1314 control &= L2CAP_CTRL_SAR;
1316 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1317 control |= L2CAP_CTRL_FINAL;
1319 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1320 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1322 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1324 if (chan->fcs == L2CAP_FCS_CRC16) {
1325 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1326 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1329 l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send clones of queued I-frames from tx_send_head
 * while the transmit window has room, rebuilding control fields and FCS,
 * arming the retransmission timer, and advancing sequence/ack bookkeeping.
 * Returns the number of frames sent (return elided in this extract). */
1332 static int l2cap_ertm_send(struct l2cap_chan *chan)
1334 struct sk_buff *skb, *tx_skb;
1338 if (chan->state != BT_CONNECTED)
1341 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1343 if (chan->remote_max_tx &&
1344 bt_cb(skb)->retries == chan->remote_max_tx) {
1345 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1349 tx_skb = skb_clone(skb, GFP_ATOMIC);
1351 bt_cb(skb)->retries++;
1353 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1354 control &= L2CAP_CTRL_SAR;
1356 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1357 control |= L2CAP_CTRL_FINAL;
1359 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1360 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1361 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): BUG — FCS is computed over skb->data and written into
 * skb->data, but the rewritten control field lives in tx_skb (the clone
 * being sent). Compare l2cap_retransmit_one_frame(), which correctly
 * uses tx_skb->data for both. This was fixed upstream; cannot change
 * code here because this extract is missing interior lines. */
1364 if (chan->fcs == L2CAP_FCS_CRC16) {
1365 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1366 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1369 l2cap_do_send(chan, tx_skb);
1371 __set_retrans_timer(chan);
1373 bt_cb(skb)->tx_seq = chan->next_tx_seq;
/* 6-bit TxSeq, modulo 64. */
1374 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* Count the frame as unacked only on its first transmission. */
1376 if (bt_cb(skb)->retries == 1)
1377 chan->unacked_frames++;
1379 chan->frames_sent++;
1381 if (skb_queue_is_last(&chan->tx_q, skb))
1382 chan->tx_send_head = NULL;
1384 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the start of tx_q and resend everything from
 * the last acknowledged sequence number (REJ recovery). */
1392 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1396 if (!skb_queue_empty(&chan->tx_q))
1397 chan->tx_send_head = chan->tx_q.next;
1399 chan->next_tx_seq = chan->expected_ack_seq;
1400 ret = l2cap_ertm_send(chan);
/* Acknowledge received I-frames: send RNR if locally busy, otherwise try
 * to piggy-back the ack on pending data, falling back to an explicit RR. */
1404 static void l2cap_send_ack(struct l2cap_chan *chan)
1408 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1410 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1411 control |= L2CAP_SUPER_RCV_NOT_READY;
1412 set_bit(CONN_RNR_SENT, &chan->conn_state);
1413 l2cap_send_sframe(chan, control);
/* Any I-frame we send carries the ack in its ReqSeq, so no RR is needed. */
1417 if (l2cap_ertm_send(chan) > 0)
1420 control |= L2CAP_SUPER_RCV_READY;
1421 l2cap_send_sframe(chan, control);
/* Send an SREJ (F=1) for the last entry on the SREJ list. */
1424 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1426 struct srej_list *tail;
1429 control = L2CAP_SUPER_SELECT_REJECT;
1430 control |= L2CAP_CTRL_FINAL;
1432 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1433 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1435 l2cap_send_sframe(chan, control);
/* Copy user iovec data into skb: the first `count` bytes go into the main
 * skb, the remainder into a chain of fragment skbs sized to the conn MTU.
 * Returns 0 on success or a negative errno (paths elided in extract). */
1438 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1440 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1441 struct sk_buff **frag;
1444 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1450 /* Continuation fragments (no L2CAP header) */
1451 frag = &skb_shinfo(skb)->frag_list;
1453 count = min_t(unsigned int, conn->mtu, len);
1455 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1458 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1464 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM + payload. */
1470 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1472 struct sock *sk = chan->sk;
1473 struct l2cap_conn *conn = chan->conn;
1474 struct sk_buff *skb;
/* hlen = header + 2 bytes for the PSM field. */
1475 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1476 struct l2cap_hdr *lh;
1478 BT_DBG("sk %p len %d", sk, (int)len);
1480 count = min_t(unsigned int, (conn->mtu - hlen), len);
1481 skb = bt_skb_send_alloc(sk, count + hlen,
1482 msg->msg_flags & MSG_DONTWAIT, &err);
1484 return ERR_PTR(err);
1486 /* Create L2CAP header */
1487 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1488 lh->cid = cpu_to_le16(chan->dcid);
1489 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1490 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1492 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1493 if (unlikely(err < 0)) {
1495 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header + payload. */
1500 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1502 struct sock *sk = chan->sk;
1503 struct l2cap_conn *conn = chan->conn;
1504 struct sk_buff *skb;
1505 int err, count, hlen = L2CAP_HDR_SIZE;
1506 struct l2cap_hdr *lh;
1508 BT_DBG("sk %p len %d", sk, (int)len);
1510 count = min_t(unsigned int, (conn->mtu - hlen), len);
1511 skb = bt_skb_send_alloc(sk, count + hlen,
1512 msg->msg_flags & MSG_DONTWAIT, &err);
1514 return ERR_PTR(err);
1516 /* Create L2CAP header */
1517 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1518 lh->cid = cpu_to_le16(chan->dcid);
1519 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1521 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1522 if (unlikely(err < 0)) {
1524 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: header + control field, an optional
 * 2-byte SDU length (when sdulen != 0, i.e. an SDU-start segment), the
 * payload, and a 2-byte FCS placeholder when CRC16 is enabled. */
1529 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1530 struct msghdr *msg, size_t len,
1531 u16 control, u16 sdulen)
1533 struct sock *sk = chan->sk;
1534 struct l2cap_conn *conn = chan->conn;
1535 struct sk_buff *skb;
1536 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1537 struct l2cap_hdr *lh;
1539 BT_DBG("sk %p len %d", sk, (int)len);
1542 return ERR_PTR(-ENOTCONN);
/* hlen grows by 2 for SDU length and/or 2 for FCS in elided lines. */
1547 if (chan->fcs == L2CAP_FCS_CRC16)
1550 count = min_t(unsigned int, (conn->mtu - hlen), len);
1551 skb = bt_skb_send_alloc(sk, count + hlen,
1552 msg->msg_flags & MSG_DONTWAIT, &err);
1554 return ERR_PTR(err);
1556 /* Create L2CAP header */
1557 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1558 lh->cid = cpu_to_le16(chan->dcid);
1559 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1560 put_unaligned_le16(control, skb_put(skb, 2));
1562 put_unaligned_le16(sdulen, skb_put(skb, 2));
1564 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1565 if (unlikely(err < 0)) {
1567 return ERR_PTR(err);
/* FCS placeholder; the real value is stamped in at transmit time. */
1570 if (chan->fcs == L2CAP_FCS_CRC16)
1571 put_unaligned_le16(0, skb_put(skb, 2));
1573 bt_cb(skb)->retries = 0;
1577 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1579 struct sk_buff *skb;
1580 struct sk_buff_head sar_queue;
1584 skb_queue_head_init(&sar_queue);
1585 control = L2CAP_SDU_START;
1586 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1588 return PTR_ERR(skb);
1590 __skb_queue_tail(&sar_queue, skb);
1591 len -= chan->remote_mps;
1592 size += chan->remote_mps;
1597 if (len > chan->remote_mps) {
1598 control = L2CAP_SDU_CONTINUE;
1599 buflen = chan->remote_mps;
1601 control = L2CAP_SDU_END;
1605 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1607 skb_queue_purge(&sar_queue);
1608 return PTR_ERR(skb);
1611 __skb_queue_tail(&sar_queue, skb);
1615 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1616 if (chan->tx_send_head == NULL)
1617 chan->tx_send_head = sar_queue.next;
1622 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1624 struct sk_buff *skb;
1628 /* Connectionless channel */
1629 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1630 skb = l2cap_create_connless_pdu(chan, msg, len);
1632 return PTR_ERR(skb);
1634 l2cap_do_send(chan, skb);
1638 switch (chan->mode) {
1639 case L2CAP_MODE_BASIC:
1640 /* Check outgoing MTU */
1641 if (len > chan->omtu)
1644 /* Create a basic PDU */
1645 skb = l2cap_create_basic_pdu(chan, msg, len);
1647 return PTR_ERR(skb);
1649 l2cap_do_send(chan, skb);
1653 case L2CAP_MODE_ERTM:
1654 case L2CAP_MODE_STREAMING:
1655 /* Entire SDU fits into one PDU */
1656 if (len <= chan->remote_mps) {
1657 control = L2CAP_SDU_UNSEGMENTED;
1658 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1661 return PTR_ERR(skb);
1663 __skb_queue_tail(&chan->tx_q, skb);
1665 if (chan->tx_send_head == NULL)
1666 chan->tx_send_head = skb;
1669 /* Segment SDU into multiples PDUs */
1670 err = l2cap_sar_segment_sdu(chan, msg, len);
1675 if (chan->mode == L2CAP_MODE_STREAMING) {
1676 l2cap_streaming_send(chan);
1681 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1682 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1687 err = l2cap_ertm_send(chan);
1694 BT_DBG("bad state %1.1x", chan->mode);
1701 /* Copy frame to all raw sockets on that connection */
1702 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1704 struct sk_buff *nskb;
1705 struct l2cap_chan *chan;
1707 BT_DBG("conn %p", conn);
1709 read_lock(&conn->chan_lock);
1710 list_for_each_entry(chan, &conn->chan_l, list) {
1711 struct sock *sk = chan->sk;
1712 if (chan->chan_type != L2CAP_CHAN_RAW)
1715 /* Don't send frame to the socket it came from */
1718 nskb = skb_clone(skb, GFP_ATOMIC);
1722 if (chan->ops->recv(chan->data, nskb))
1725 read_unlock(&conn->chan_lock);
1728 /* ---- L2CAP signalling commands ---- */
1729 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1730 u8 code, u8 ident, u16 dlen, void *data)
1732 struct sk_buff *skb, **frag;
1733 struct l2cap_cmd_hdr *cmd;
1734 struct l2cap_hdr *lh;
1737 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1738 conn, code, ident, dlen);
1740 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
1743 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1744 count = min_t(unsigned int, conn->mtu, len);
1746 skb = bt_skb_alloc(count, GFP_ATOMIC);
1750 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1751 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1753 if (conn->hcon->type == LE_LINK)
1754 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1756 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1758 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1761 cmd->len = cpu_to_le16(dlen);
1764 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1765 memcpy(skb_put(skb, count), data, count);
1771 /* Continuation fragments (no L2CAP header) */
1772 frag = &skb_shinfo(skb)->frag_list;
1774 count = min_t(unsigned int, conn->mtu, len);
1776 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1780 memcpy(skb_put(*frag, count), data, count);
1785 frag = &(*frag)->next;
1795 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1797 struct l2cap_conf_opt *opt = *ptr;
1800 len = L2CAP_CONF_OPT_SIZE + opt->len;
1808 *val = *((u8 *) opt->val);
1812 *val = get_unaligned_le16(opt->val);
1816 *val = get_unaligned_le32(opt->val);
1820 *val = (unsigned long) opt->val;
1824 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1828 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
1830 struct l2cap_conf_opt *opt = *ptr;
1832 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1834 if (size < L2CAP_CONF_OPT_SIZE + len)
1842 *((u8 *) opt->val) = val;
1846 put_unaligned_le16(val, opt->val);
1850 put_unaligned_le32(val, opt->val);
1854 memcpy(opt->val, (void *) val, len);
1858 *ptr += L2CAP_CONF_OPT_SIZE + len;
1861 static void l2cap_ack_timeout(unsigned long arg)
1863 struct l2cap_chan *chan = (void *) arg;
1865 bh_lock_sock(chan->sk);
1866 l2cap_send_ack(chan);
1867 bh_unlock_sock(chan->sk);
1870 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1872 struct sock *sk = chan->sk;
1874 chan->expected_ack_seq = 0;
1875 chan->unacked_frames = 0;
1876 chan->buffer_seq = 0;
1877 chan->num_acked = 0;
1878 chan->frames_sent = 0;
1880 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1881 (unsigned long) chan);
1882 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1883 (unsigned long) chan);
1884 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1886 skb_queue_head_init(&chan->srej_q);
1888 INIT_LIST_HEAD(&chan->srej_l);
1891 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1894 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1897 case L2CAP_MODE_STREAMING:
1898 case L2CAP_MODE_ERTM:
1899 if (l2cap_mode_supported(mode, remote_feat_mask))
1903 return L2CAP_MODE_BASIC;
1907 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
1909 struct l2cap_conf_req *req = data;
1910 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1911 void *ptr = req->data;
1912 void *endptr = data + data_size;
1914 BT_DBG("chan %p", chan);
1916 if (chan->num_conf_req || chan->num_conf_rsp)
1919 switch (chan->mode) {
1920 case L2CAP_MODE_STREAMING:
1921 case L2CAP_MODE_ERTM:
1922 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
1927 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
1932 if (chan->imtu != L2CAP_DEFAULT_MTU)
1933 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
1935 switch (chan->mode) {
1936 case L2CAP_MODE_BASIC:
1937 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1938 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1941 rfc.mode = L2CAP_MODE_BASIC;
1943 rfc.max_transmit = 0;
1944 rfc.retrans_timeout = 0;
1945 rfc.monitor_timeout = 0;
1946 rfc.max_pdu_size = 0;
1948 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1949 (unsigned long) &rfc, endptr - ptr);
1952 case L2CAP_MODE_ERTM:
1953 rfc.mode = L2CAP_MODE_ERTM;
1954 rfc.txwin_size = chan->tx_win;
1955 rfc.max_transmit = chan->max_tx;
1956 rfc.retrans_timeout = 0;
1957 rfc.monitor_timeout = 0;
1958 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1959 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1960 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1962 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1963 (unsigned long) &rfc, endptr - ptr);
1965 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1968 if (chan->fcs == L2CAP_FCS_NONE ||
1969 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1970 chan->fcs = L2CAP_FCS_NONE;
1971 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs,
1976 case L2CAP_MODE_STREAMING:
1977 rfc.mode = L2CAP_MODE_STREAMING;
1979 rfc.max_transmit = 0;
1980 rfc.retrans_timeout = 0;
1981 rfc.monitor_timeout = 0;
1982 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1983 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1984 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1986 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1987 (unsigned long) &rfc, endptr - ptr);
1989 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1992 if (chan->fcs == L2CAP_FCS_NONE ||
1993 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1994 chan->fcs = L2CAP_FCS_NONE;
1995 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs,
2001 req->dcid = cpu_to_le16(chan->dcid);
2002 req->flags = cpu_to_le16(0);
2007 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
2009 struct l2cap_conf_rsp *rsp = data;
2010 void *ptr = rsp->data;
2011 void *endptr = data + data_size;
2012 void *req = chan->conf_req;
2013 int len = chan->conf_len;
2014 int type, hint, olen;
2016 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2017 u16 mtu = L2CAP_DEFAULT_MTU;
2018 u16 result = L2CAP_CONF_SUCCESS;
2020 BT_DBG("chan %p", chan);
2022 while (len >= L2CAP_CONF_OPT_SIZE) {
2023 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2027 hint = type & L2CAP_CONF_HINT;
2028 type &= L2CAP_CONF_MASK;
2031 case L2CAP_CONF_MTU:
2035 case L2CAP_CONF_FLUSH_TO:
2036 chan->flush_to = val;
2039 case L2CAP_CONF_QOS:
2042 case L2CAP_CONF_RFC:
2043 if (olen == sizeof(rfc))
2044 memcpy(&rfc, (void *) val, olen);
2047 case L2CAP_CONF_FCS:
2048 if (val == L2CAP_FCS_NONE)
2049 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2057 result = L2CAP_CONF_UNKNOWN;
2058 *((u8 *) ptr++) = type;
2063 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2066 switch (chan->mode) {
2067 case L2CAP_MODE_STREAMING:
2068 case L2CAP_MODE_ERTM:
2069 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2070 chan->mode = l2cap_select_mode(rfc.mode,
2071 chan->conn->feat_mask);
2075 if (chan->mode != rfc.mode)
2076 return -ECONNREFUSED;
2082 if (chan->mode != rfc.mode) {
2083 result = L2CAP_CONF_UNACCEPT;
2084 rfc.mode = chan->mode;
2086 if (chan->num_conf_rsp == 1)
2087 return -ECONNREFUSED;
2089 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2090 (unsigned long) &rfc, endptr - ptr);
2094 if (result == L2CAP_CONF_SUCCESS) {
2095 /* Configure output options and let the other side know
2096 * which ones we don't like. */
2098 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2099 result = L2CAP_CONF_UNACCEPT;
2102 set_bit(CONF_MTU_DONE, &chan->conf_state);
2104 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
2107 case L2CAP_MODE_BASIC:
2108 chan->fcs = L2CAP_FCS_NONE;
2109 set_bit(CONF_MODE_DONE, &chan->conf_state);
2112 case L2CAP_MODE_ERTM:
2113 chan->remote_tx_win = rfc.txwin_size;
2114 chan->remote_max_tx = rfc.max_transmit;
2116 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2117 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2119 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2121 rfc.retrans_timeout =
2122 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2123 rfc.monitor_timeout =
2124 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2126 set_bit(CONF_MODE_DONE, &chan->conf_state);
2128 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2129 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
2133 case L2CAP_MODE_STREAMING:
2134 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2135 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2137 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2139 set_bit(CONF_MODE_DONE, &chan->conf_state);
2141 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2142 (unsigned long) &rfc, endptr - ptr);
2147 result = L2CAP_CONF_UNACCEPT;
2149 memset(&rfc, 0, sizeof(rfc));
2150 rfc.mode = chan->mode;
2153 if (result == L2CAP_CONF_SUCCESS)
2154 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2156 rsp->scid = cpu_to_le16(chan->dcid);
2157 rsp->result = cpu_to_le16(result);
2158 rsp->flags = cpu_to_le16(0x0000);
2163 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
2164 void *data, size_t size, u16 *result)
2166 struct l2cap_conf_req *req = data;
2167 void *ptr = req->data;
2168 void *endptr = data + size;
2171 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2173 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2175 while (len >= L2CAP_CONF_OPT_SIZE) {
2176 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2181 case L2CAP_CONF_MTU:
2182 if (val < L2CAP_DEFAULT_MIN_MTU) {
2183 *result = L2CAP_CONF_UNACCEPT;
2184 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2187 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
2190 case L2CAP_CONF_FLUSH_TO:
2191 chan->flush_to = val;
2192 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2193 2, chan->flush_to, endptr - ptr);
2196 case L2CAP_CONF_RFC:
2197 if (olen == sizeof(rfc))
2198 memcpy(&rfc, (void *)val, olen);
2200 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2201 rfc.mode != chan->mode)
2202 return -ECONNREFUSED;
2206 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2207 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
2212 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2213 return -ECONNREFUSED;
2215 chan->mode = rfc.mode;
2217 if (*result == L2CAP_CONF_SUCCESS) {
2219 case L2CAP_MODE_ERTM:
2220 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2221 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2222 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2224 case L2CAP_MODE_STREAMING:
2225 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2229 req->dcid = cpu_to_le16(chan->dcid);
2230 req->flags = cpu_to_le16(0x0000);
2235 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2237 struct l2cap_conf_rsp *rsp = data;
2238 void *ptr = rsp->data;
2240 BT_DBG("chan %p", chan);
2242 rsp->scid = cpu_to_le16(chan->dcid);
2243 rsp->result = cpu_to_le16(result);
2244 rsp->flags = cpu_to_le16(flags);
2249 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2251 struct l2cap_conn_rsp rsp;
2252 struct l2cap_conn *conn = chan->conn;
2255 rsp.scid = cpu_to_le16(chan->dcid);
2256 rsp.dcid = cpu_to_le16(chan->scid);
2257 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2258 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2259 l2cap_send_cmd(conn, chan->ident,
2260 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2262 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2265 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2266 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
2267 chan->num_conf_req++;
2270 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2274 struct l2cap_conf_rfc rfc;
2276 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2278 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2281 while (len >= L2CAP_CONF_OPT_SIZE) {
2282 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2286 if (type != L2CAP_CONF_RFC)
2289 if (olen != sizeof(rfc))
2292 memcpy(&rfc, (void *)val, olen);
2296 /* Use sane default values in case a misbehaving remote device
2297 * did not send an RFC option.
2299 rfc.mode = chan->mode;
2300 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2301 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2302 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2304 BT_ERR("Expected RFC option was not found, using defaults");
2308 case L2CAP_MODE_ERTM:
2309 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2310 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2311 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2313 case L2CAP_MODE_STREAMING:
2314 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2318 static inline int l2cap_command_rej(struct l2cap_conn *conn,
2319 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2322 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2324 if (cmd_len < sizeof(*rej))
2327 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2330 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2331 cmd->ident == conn->info_ident) {
2332 del_timer(&conn->info_timer);
2334 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2335 conn->info_ident = 0;
2337 l2cap_conn_start(conn);
2343 static int l2cap_connect_req(struct l2cap_conn *conn,
2344 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2346 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2347 struct l2cap_conn_rsp rsp;
2348 struct l2cap_chan *chan = NULL, *pchan;
2349 struct sock *parent, *sk = NULL;
2350 int result, status = L2CAP_CS_NO_INFO;
2355 if (cmd_len < sizeof(struct l2cap_conn_req))
2358 scid = __le16_to_cpu(req->scid);
2361 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2363 /* Check if we have socket listening on psm */
2364 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2366 result = L2CAP_CR_BAD_PSM;
2372 bh_lock_sock(parent);
2374 /* Check if the ACL is secure enough (if not SDP) */
2375 if (psm != cpu_to_le16(0x0001) &&
2376 !hci_conn_check_link_mode(conn->hcon)) {
2377 conn->disc_reason = 0x05;
2378 result = L2CAP_CR_SEC_BLOCK;
2382 result = L2CAP_CR_NO_MEM;
2384 /* Check for backlog size */
2385 if (sk_acceptq_is_full(parent)) {
2386 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2390 chan = pchan->ops->new_connection(pchan->data);
2396 write_lock_bh(&conn->chan_lock);
2398 /* Check if we already have channel with that dcid */
2399 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2400 write_unlock_bh(&conn->chan_lock);
2401 sock_set_flag(sk, SOCK_ZAPPED);
2402 chan->ops->close(chan->data);
2406 hci_conn_hold(conn->hcon);
2408 bacpy(&bt_sk(sk)->src, conn->src);
2409 bacpy(&bt_sk(sk)->dst, conn->dst);
2413 bt_accept_enqueue(parent, sk);
2415 __l2cap_chan_add(conn, chan);
2419 __set_chan_timer(chan, sk->sk_sndtimeo);
2421 chan->ident = cmd->ident;
2423 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2424 if (l2cap_check_security(chan)) {
2425 if (bt_sk(sk)->defer_setup) {
2426 l2cap_state_change(chan, BT_CONNECT2);
2427 result = L2CAP_CR_PEND;
2428 status = L2CAP_CS_AUTHOR_PEND;
2429 parent->sk_data_ready(parent, 0);
2431 l2cap_state_change(chan, BT_CONFIG);
2432 result = L2CAP_CR_SUCCESS;
2433 status = L2CAP_CS_NO_INFO;
2436 l2cap_state_change(chan, BT_CONNECT2);
2437 result = L2CAP_CR_PEND;
2438 status = L2CAP_CS_AUTHEN_PEND;
2441 l2cap_state_change(chan, BT_CONNECT2);
2442 result = L2CAP_CR_PEND;
2443 status = L2CAP_CS_NO_INFO;
2446 write_unlock_bh(&conn->chan_lock);
2449 bh_unlock_sock(parent);
2452 rsp.scid = cpu_to_le16(scid);
2453 rsp.dcid = cpu_to_le16(dcid);
2454 rsp.result = cpu_to_le16(result);
2455 rsp.status = cpu_to_le16(status);
2456 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2458 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2459 struct l2cap_info_req info;
2460 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2462 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2463 conn->info_ident = l2cap_get_ident(conn);
2465 mod_timer(&conn->info_timer, jiffies +
2466 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2468 l2cap_send_cmd(conn, conn->info_ident,
2469 L2CAP_INFO_REQ, sizeof(info), &info);
2472 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2473 result == L2CAP_CR_SUCCESS) {
2475 set_bit(CONF_REQ_SENT, &chan->conf_state);
2476 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2477 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
2478 chan->num_conf_req++;
2484 static int l2cap_connect_rsp(struct l2cap_conn *conn,
2485 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2488 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2489 u16 scid, dcid, result, status;
2490 struct l2cap_chan *chan;
2494 if (cmd_len < sizeof(*rsp))
2497 scid = __le16_to_cpu(rsp->scid);
2498 dcid = __le16_to_cpu(rsp->dcid);
2499 result = __le16_to_cpu(rsp->result);
2500 status = __le16_to_cpu(rsp->status);
2502 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2505 chan = l2cap_get_chan_by_scid(conn, scid);
2509 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2517 case L2CAP_CR_SUCCESS:
2518 l2cap_state_change(chan, BT_CONFIG);
2521 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2523 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2526 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2527 l2cap_build_conf_req(chan, req, sizeof(req)), req);
2528 chan->num_conf_req++;
2532 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2536 /* don't delete l2cap channel if sk is owned by user */
2537 if (sock_owned_by_user(sk)) {
2538 l2cap_state_change(chan, BT_DISCONN);
2539 __clear_chan_timer(chan);
2540 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2544 l2cap_chan_del(chan, ECONNREFUSED);
2552 static inline void set_default_fcs(struct l2cap_chan *chan)
2554 /* FCS is enabled only in ERTM or streaming mode, if one or both
2557 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2558 chan->fcs = L2CAP_FCS_NONE;
2559 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2560 chan->fcs = L2CAP_FCS_CRC16;
2563 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2565 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2568 struct l2cap_chan *chan;
2572 if (cmd_len < sizeof(*req))
2575 dcid = __le16_to_cpu(req->dcid);
2576 flags = __le16_to_cpu(req->flags);
2578 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2580 chan = l2cap_get_chan_by_scid(conn, dcid);
2586 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2587 struct l2cap_cmd_rej_cid rej;
2589 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2590 rej.scid = cpu_to_le16(chan->scid);
2591 rej.dcid = cpu_to_le16(chan->dcid);
2593 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2598 /* Reject if config buffer is too small. */
2599 len = cmd_len - sizeof(*req);
2600 if (chan->conf_len + len > sizeof(chan->conf_req)) {
2601 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2602 l2cap_build_conf_rsp(chan, rsp,
2603 L2CAP_CONF_REJECT, flags), rsp);
2608 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2609 chan->conf_len += len;
2611 if (flags & 0x0001) {
2612 /* Incomplete config. Send empty response. */
2613 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2614 l2cap_build_conf_rsp(chan, rsp,
2615 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2619 /* Complete config. */
2620 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
2622 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2626 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2627 chan->num_conf_rsp++;
2629 /* Reset config buffer. */
2632 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2635 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2636 set_default_fcs(chan);
2638 l2cap_state_change(chan, BT_CONNECTED);
2640 chan->next_tx_seq = 0;
2641 chan->expected_tx_seq = 0;
2642 skb_queue_head_init(&chan->tx_q);
2643 if (chan->mode == L2CAP_MODE_ERTM)
2644 l2cap_ertm_init(chan);
2646 l2cap_chan_ready(sk);
2650 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2652 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2653 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
2654 chan->num_conf_req++;
2662 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
2663 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2666 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2667 u16 scid, flags, result;
2668 struct l2cap_chan *chan;
2670 int len = cmd_len - sizeof(*rsp);
2672 if (cmd_len < sizeof(*rsp))
2675 scid = __le16_to_cpu(rsp->scid);
2676 flags = __le16_to_cpu(rsp->flags);
2677 result = __le16_to_cpu(rsp->result);
2679 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2680 scid, flags, result);
2682 chan = l2cap_get_chan_by_scid(conn, scid);
2689 case L2CAP_CONF_SUCCESS:
2690 l2cap_conf_rfc_get(chan, rsp->data, len);
2693 case L2CAP_CONF_UNACCEPT:
2694 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2697 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2698 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2702 /* throw out any old stored conf requests */
2703 result = L2CAP_CONF_SUCCESS;
2704 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2705 req, sizeof(req), &result);
2707 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2711 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2712 L2CAP_CONF_REQ, len, req);
2713 chan->num_conf_req++;
2714 if (result != L2CAP_CONF_SUCCESS)
2720 sk->sk_err = ECONNRESET;
2721 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
2722 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2729 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2731 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2732 set_default_fcs(chan);
2734 l2cap_state_change(chan, BT_CONNECTED);
2735 chan->next_tx_seq = 0;
2736 chan->expected_tx_seq = 0;
2737 skb_queue_head_init(&chan->tx_q);
2738 if (chan->mode == L2CAP_MODE_ERTM)
2739 l2cap_ertm_init(chan);
2741 l2cap_chan_ready(sk);
2749 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
2750 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2753 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2754 struct l2cap_disconn_rsp rsp;
2756 struct l2cap_chan *chan;
2759 if (cmd_len != sizeof(*req))
2762 scid = __le16_to_cpu(req->scid);
2763 dcid = __le16_to_cpu(req->dcid);
2765 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2767 chan = l2cap_get_chan_by_scid(conn, dcid);
2773 rsp.dcid = cpu_to_le16(chan->scid);
2774 rsp.scid = cpu_to_le16(chan->dcid);
2775 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2777 sk->sk_shutdown = SHUTDOWN_MASK;
2779 /* don't delete l2cap channel if sk is owned by user */
2780 if (sock_owned_by_user(sk)) {
2781 l2cap_state_change(chan, BT_DISCONN);
2782 __clear_chan_timer(chan);
2783 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2788 l2cap_chan_del(chan, ECONNRESET);
2791 chan->ops->close(chan->data);
2795 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
2796 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2799 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2801 struct l2cap_chan *chan;
2804 if (cmd_len != sizeof(*rsp))
2807 scid = __le16_to_cpu(rsp->scid);
2808 dcid = __le16_to_cpu(rsp->dcid);
2810 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2812 chan = l2cap_get_chan_by_scid(conn, scid);
2818 /* don't delete l2cap channel if sk is owned by user */
2819 if (sock_owned_by_user(sk)) {
2820 l2cap_state_change(chan,BT_DISCONN);
2821 __clear_chan_timer(chan);
2822 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2827 l2cap_chan_del(chan, 0);
2830 chan->ops->close(chan->data);
2834 static inline int l2cap_information_req(struct l2cap_conn *conn,
2835 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2838 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2841 if (cmd_len != sizeof(*req))
2844 type = __le16_to_cpu(req->type);
2846 BT_DBG("type 0x%4.4x", type);
2848 if (type == L2CAP_IT_FEAT_MASK) {
2850 u32 feat_mask = l2cap_feat_mask;
2851 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2852 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2853 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2855 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2857 put_unaligned_le32(feat_mask, rsp->data);
2858 l2cap_send_cmd(conn, cmd->ident,
2859 L2CAP_INFO_RSP, sizeof(buf), buf);
2860 } else if (type == L2CAP_IT_FIXED_CHAN) {
2862 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2863 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2864 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2865 memcpy(buf + 4, l2cap_fixed_chan, 8);
2866 l2cap_send_cmd(conn, cmd->ident,
2867 L2CAP_INFO_RSP, sizeof(buf), buf);
2869 struct l2cap_info_rsp rsp;
2870 rsp.type = cpu_to_le16(type);
2871 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2872 l2cap_send_cmd(conn, cmd->ident,
2873 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2879 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
2880 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2883 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2886 if (cmd_len < sizeof(*rsp))
2889 type = __le16_to_cpu(rsp->type);
2890 result = __le16_to_cpu(rsp->result);
2892 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2894 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2895 if (cmd->ident != conn->info_ident ||
2896 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2899 del_timer(&conn->info_timer);
2901 if (result != L2CAP_IR_SUCCESS) {
2902 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2903 conn->info_ident = 0;
2905 l2cap_conn_start(conn);
2910 if (type == L2CAP_IT_FEAT_MASK) {
2911 conn->feat_mask = get_unaligned_le32(rsp->data);
2913 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2914 struct l2cap_info_req req;
2915 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2917 conn->info_ident = l2cap_get_ident(conn);
2919 l2cap_send_cmd(conn, conn->info_ident,
2920 L2CAP_INFO_REQ, sizeof(req), &req);
2922 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2923 conn->info_ident = 0;
2925 l2cap_conn_start(conn);
2927 } else if (type == L2CAP_IT_FIXED_CHAN) {
2928 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2929 conn->info_ident = 0;
2931 l2cap_conn_start(conn);
2937 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2942 if (min > max || min < 6 || max > 3200)
2945 if (to_multiplier < 10 || to_multiplier > 3200)
2948 if (max >= to_multiplier * 8)
2951 max_latency = (to_multiplier * 8 / max) - 1;
2952 if (latency > 499 || latency > max_latency)
2958 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2959 struct l2cap_cmd_hdr *cmd, u8 *data)
2961 struct hci_conn *hcon = conn->hcon;
2962 struct l2cap_conn_param_update_req *req;
2963 struct l2cap_conn_param_update_rsp rsp;
2964 u16 min, max, latency, to_multiplier, cmd_len;
2967 if (!(hcon->link_mode & HCI_LM_MASTER))
2970 cmd_len = __le16_to_cpu(cmd->len);
2971 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2974 req = (struct l2cap_conn_param_update_req *) data;
2975 min = __le16_to_cpu(req->min);
2976 max = __le16_to_cpu(req->max);
2977 latency = __le16_to_cpu(req->latency);
2978 to_multiplier = __le16_to_cpu(req->to_multiplier);
2980 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2981 min, max, latency, to_multiplier);
2983 memset(&rsp, 0, sizeof(rsp));
2985 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2987 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2989 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2991 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
2995 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler.
 * Returns 0 or a negative error from the individual handler; unknown
 * command codes are logged (and presumably rejected by the caller).
 */
3000 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3001 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3005 switch (cmd->code) {
3006 case L2CAP_COMMAND_REJ:
3007 l2cap_command_rej(conn, cmd, cmd_len, data);
3010 case L2CAP_CONN_REQ:
3011 err = l2cap_connect_req(conn, cmd, cmd_len, data);
3014 case L2CAP_CONN_RSP:
3015 err = l2cap_connect_rsp(conn, cmd, cmd_len, data);
3018 case L2CAP_CONF_REQ:
3019 err = l2cap_config_req(conn, cmd, cmd_len, data);
3022 case L2CAP_CONF_RSP:
3023 err = l2cap_config_rsp(conn, cmd, cmd_len, data);
3026 case L2CAP_DISCONN_REQ:
3027 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
3030 case L2CAP_DISCONN_RSP:
3031 err = l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
/* Echo request: reply by mirroring the received payload back. */
3034 case L2CAP_ECHO_REQ:
3035 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3038 case L2CAP_ECHO_RSP:
3041 case L2CAP_INFO_REQ:
3042 err = l2cap_information_req(conn, cmd, cmd_len, data);
3045 case L2CAP_INFO_RSP:
3046 err = l2cap_information_rsp(conn, cmd, cmd_len, data);
3050 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command. Only the connection parameter
 * update request is actively handled; other codes are ignored or logged.
 */
3058 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3059 struct l2cap_cmd_hdr *cmd, u8 *data)
3061 switch (cmd->code) {
3062 case L2CAP_COMMAND_REJ:
3065 case L2CAP_CONN_PARAM_UPDATE_REQ:
3066 return l2cap_conn_param_update_req(conn, cmd, data);
3068 case L2CAP_CONN_PARAM_UPDATE_RSP:
3072 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process the signaling channel payload of one L2CAP frame.
 * Walks the buffer command-by-command (each prefixed by an
 * l2cap_cmd_hdr), dispatching to the LE or BR/EDR handler depending on
 * the link type, and sends a Command Reject when a handler fails.
 */
3077 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3078 struct sk_buff *skb)
3080 u8 *data = skb->data;
3082 struct l2cap_cmd_hdr cmd;
/* Mirror raw signaling traffic to any raw sockets first. */
3085 l2cap_raw_recv(conn, skb);
3087 while (len >= L2CAP_CMD_HDR_SIZE) {
3089 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3090 data += L2CAP_CMD_HDR_SIZE;
3091 len -= L2CAP_CMD_HDR_SIZE;
3093 cmd_len = le16_to_cpu(cmd.len);
3095 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command claiming more payload than remains, or with the reserved
 * ident 0, is corrupt -- stop parsing this frame. */
3097 if (cmd_len > len || !cmd.ident) {
3098 BT_DBG("corrupted command");
3102 if (conn->hcon->type == LE_LINK)
3103 err = l2cap_le_sig_cmd(conn, &cmd, data);
3105 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3108 struct l2cap_cmd_rej_unk rej;
3110 BT_ERR("Wrong link type (%d)", err);
3112 /* FIXME: Map err to a valid reason */
3113 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3114 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 Frame Check Sequence of a received ERTM/streaming
 * frame. Trims the 2-byte FCS off the skb tail, then recomputes the CRC
 * over the basic L2CAP header plus control field (hdr_size bytes before
 * skb->data) and the remaining payload. Returns nonzero on mismatch
 * (exact return values are on elided lines); no-op when FCS is disabled.
 */
3124 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3126 u16 our_fcs, rcv_fcs;
3127 int hdr_size = L2CAP_HDR_SIZE + 2;
3129 if (chan->fcs == L2CAP_FCS_CRC16) {
/* After the trim, skb->data + skb->len points exactly at the FCS. */
3130 skb_trim(skb, skb->len - 2);
3131 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3132 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3134 if (our_fcs != rcv_fcs)
/* Acknowledge the peer after a poll: send queued I-frames if possible,
 * an RNR while locally busy, or a plain RR if nothing else was sent.
 */
3140 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3144 chan->frames_sent = 0;
3146 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
/* Locally busy: report Receiver-Not-Ready and remember we did so. */
3148 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3149 control |= L2CAP_SUPER_RCV_NOT_READY;
3150 l2cap_send_sframe(chan, control);
3151 set_bit(CONN_RNR_SENT, &chan->conn_state);
3154 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3155 l2cap_retransmit_frames(chan);
3157 l2cap_ertm_send(chan);
/* If nothing went out at all, fall back to a bare RR ack. */
3159 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3160 chan->frames_sent == 0) {
3161 control |= L2CAP_SUPER_RCV_READY;
3162 l2cap_send_sframe(chan, control);
/* Insert an out-of-sequence I-frame into the SREJ hold queue, keeping
 * the queue ordered by tx_seq distance from buffer_seq (modulo-64
 * sequence space). Duplicate tx_seq values are detected via the
 * first-loop comparison (return path on an elided line).
 */
3166 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
3168 struct sk_buff *next_skb;
3169 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequencing metadata in the skb control block for later replay. */
3171 bt_cb(skb)->tx_seq = tx_seq;
3172 bt_cb(skb)->sar = sar;
/* Empty queue: trivial append. */
3174 next_skb = skb_peek(&chan->srej_q);
3176 __skb_queue_tail(&chan->srej_q, skb);
/* Offsets are taken modulo the 6-bit (64-value) sequence space. */
3180 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3181 if (tx_seq_offset < 0)
3182 tx_seq_offset += 64;
3185 if (bt_cb(next_skb)->tx_seq == tx_seq)
3188 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3189 chan->buffer_seq) % 64;
3190 if (next_tx_seq_offset < 0)
3191 next_tx_seq_offset += 64;
/* Found the first queued frame that sequences after us: insert before. */
3193 if (next_tx_seq_offset > tx_seq_offset) {
3194 __skb_queue_before(&chan->srej_q, next_skb, skb);
3198 if (skb_queue_is_last(&chan->srej_q, next_skb))
3201 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
/* Largest offset seen so far: append at the tail. */
3203 __skb_queue_tail(&chan->srej_q, skb);
/* Chain @new_frag onto @skb's frag_list and keep *last_frag pointing at
 * the queue tail so subsequent appends are O(1). Updates the aggregate
 * len/data_len/truesize accounting of the head skb.
 */
3208 static void append_skb_frag(struct sk_buff *skb,
3209 struct sk_buff *new_frag, struct sk_buff **last_frag)
3211 /* skb->len reflects data in skb as well as all fragments
3212 * skb->data_len reflects only data in fragments
3214 if (!skb_has_frag_list(skb))
3215 skb_shinfo(skb)->frag_list = new_frag;
3217 new_frag->next = NULL;
3219 (*last_frag)->next = new_frag;
3220 *last_frag = new_frag;
3222 skb->len += new_frag->len;
3223 skb->data_len += new_frag->len;
3224 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from SAR-segmented I-frames and deliver it upward
 * via chan->ops->recv(). Unsegmented frames are delivered directly;
 * START frames carry a 2-byte SDU length prefix; CONTINUE/END frames
 * are chained onto chan->sdu. On error paths the partial SDU is freed.
 * NOTE(review): several state checks/returns are on elided lines.
 */
3227 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3231 switch (control & L2CAP_CTRL_SAR) {
3232 case L2CAP_SDU_UNSEGMENTED:
3236 err = chan->ops->recv(chan->data, skb);
3239 case L2CAP_SDU_START:
/* First two payload bytes announce the total SDU length. */
3243 chan->sdu_len = get_unaligned_le16(skb->data);
/* Reject SDUs larger than our receive MTU up front. */
3246 if (chan->sdu_len > chan->imtu) {
3251 if (skb->len >= chan->sdu_len)
3255 chan->sdu_last_frag = skb;
3261 case L2CAP_SDU_CONTINUE:
3265 append_skb_frag(chan->sdu, skb,
3266 &chan->sdu_last_frag);
/* A continuation may not reach (or pass) the announced length yet. */
3269 if (chan->sdu->len >= chan->sdu_len)
3279 append_skb_frag(chan->sdu, skb,
3280 &chan->sdu_last_frag);
/* END frame: total length must match what START announced. */
3283 if (chan->sdu->len != chan->sdu_len)
3286 err = chan->ops->recv(chan->data, chan->sdu);
3289 /* Reassembly complete */
3291 chan->sdu_last_frag = NULL;
/* Failure path: drop the partially assembled SDU. */
3299 kfree_skb(chan->sdu);
3301 chan->sdu_last_frag = NULL;
/* Enter ERTM local-busy state: flag the channel busy, tell the peer via
 * a Receiver-Not-Ready S-frame, and stop the pending ack timer.
 */
3308 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3312 BT_DBG("chan %p, Enter local busy", chan);
3314 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3316 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3317 control |= L2CAP_SUPER_RCV_NOT_READY;
3318 l2cap_send_sframe(chan, control);
3320 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* No point acking while we cannot accept new frames. */
3322 __clear_ack_timer(chan);
/* Leave ERTM local-busy state. If an RNR was sent, poll the peer with
 * an RR+P frame and start the monitor timer while waiting for the
 * F-bit response; then clear the busy bookkeeping bits.
 */
3325 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3329 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3332 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3333 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3334 l2cap_send_sframe(chan, control);
3335 chan->retry_count = 1;
/* Poll sent: switch from retransmission to monitor timing. */
3337 __clear_retrans_timer(chan);
3338 __set_monitor_timer(chan);
3340 set_bit(CONN_WAIT_F, &chan->conn_state);
3343 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3344 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3346 BT_DBG("chan %p, Exit local busy", chan);
/* Public hook for the socket layer to toggle local-busy flow control on
 * an ERTM channel (non-ERTM modes are unaffected).
 */
3349 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3351 if (chan->mode == L2CAP_MODE_ERTM) {
3353 l2cap_ertm_enter_local_busy(chan);
3355 l2cap_ertm_exit_local_busy(chan);
/* Drain the SREJ hold queue: deliver queued frames that are now in
 * sequence (starting at @tx_seq), advancing buffer_seq_srej modulo 64,
 * until a gap is hit or we become locally busy. A reassembly failure
 * tears the channel down.
 */
3359 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3361 struct sk_buff *skb;
3364 while ((skb = skb_peek(&chan->srej_q)) &&
3365 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Stop at the first frame that is still out of sequence. */
3368 if (bt_cb(skb)->tx_seq != tx_seq)
3371 skb = skb_dequeue(&chan->srej_q);
3372 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3373 err = l2cap_reassemble_sdu(chan, skb, control);
3376 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3380 chan->buffer_seq_srej =
3381 (chan->buffer_seq_srej + 1) % 64;
3382 tx_seq = (tx_seq + 1) % 64;
/* Re-send SREJ S-frames for entries up to (and including) @tx_seq in
 * the pending SREJ list, re-queuing each entry at the list tail.
 * The list_for_each_entry_safe + re-add pattern effectively rotates
 * the list; the terminating condition sits on an elided line.
 */
3386 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3388 struct srej_list *l, *tmp;
3391 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3392 if (l->tx_seq == tx_seq) {
3397 control = L2CAP_SUPER_SELECT_REJECT;
3398 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3399 l2cap_send_sframe(chan, control);
3401 list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ S-frame per missing sequence number between
 * expected_tx_seq and @tx_seq, recording each requested seq in the
 * srej_l list so responses can be matched later.
 *
 * FIXME(review): the kzalloc(GFP_ATOMIC) result is dereferenced without
 * a NULL check -- an atomic allocation failure here would oops. Upstream
 * later fixed this by checking the allocation and propagating -ENOMEM;
 * not fixed in this pass because adjacent lines are elided from view.
 */
3405 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3407 struct srej_list *new;
3410 while (tx_seq != chan->expected_tx_seq) {
3411 control = L2CAP_SUPER_SELECT_REJECT;
3412 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3413 l2cap_send_sframe(chan, control);
3415 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3416 new->tx_seq = chan->expected_tx_seq;
3417 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3418 list_add_tail(&new->list, &chan->srej_l);
/* Skip past the frame that triggered the SREJ burst. */
3420 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/* Core ERTM receive path for I-frames: validates tx_seq against the
 * receive window, handles the F-bit, manages SREJ-based recovery for
 * out-of-sequence frames, and delivers in-sequence frames to the SDU
 * reassembler. Sends periodic acks every num_to_ack frames.
 * NOTE(review): many braces/returns/labels are on elided lines.
 */
3423 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3425 u8 tx_seq = __get_txseq(rx_control);
3426 u8 req_seq = __get_reqseq(rx_control);
3427 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3428 int tx_seq_offset, expected_tx_seq_offset;
/* Heuristic ack threshold: roughly a sixth of the tx window. */
3429 int num_to_ack = (chan->tx_win/6) + 1;
3432 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3433 tx_seq, rx_control);
/* F-bit answers our earlier poll: stop the monitor timer and, if
 * frames are still unacked, fall back to the retransmission timer. */
3435 if (L2CAP_CTRL_FINAL & rx_control &&
3436 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3437 __clear_monitor_timer(chan);
3438 if (chan->unacked_frames > 0)
3439 __set_retrans_timer(chan);
3440 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* The piggybacked req_seq acknowledges our transmitted frames. */
3443 chan->expected_ack_seq = req_seq;
3444 l2cap_drop_acked_frames(chan);
3446 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3447 if (tx_seq_offset < 0)
3448 tx_seq_offset += 64;
3450 /* invalid tx_seq */
3451 if (tx_seq_offset >= chan->tx_win) {
3452 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* While locally busy we do not accept new I-frames. */
3456 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3459 if (tx_seq == chan->expected_tx_seq)
/* Out-of-sequence frame while SREJ recovery is already running. */
3462 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3463 struct srej_list *first;
3465 first = list_first_entry(&chan->srej_l,
3466 struct srej_list, list);
3467 if (tx_seq == first->tx_seq) {
3468 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3469 l2cap_check_srej_gap(chan, tx_seq);
3471 list_del(&first->list);
/* All requested retransmissions arrived: leave SREJ_SENT state. */
3474 if (list_empty(&chan->srej_l)) {
3475 chan->buffer_seq = chan->buffer_seq_srej;
3476 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3477 l2cap_send_ack(chan);
3478 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3481 struct srej_list *l;
3483 /* duplicated tx_seq */
3484 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3487 list_for_each_entry(l, &chan->srej_l, list) {
3488 if (l->tx_seq == tx_seq) {
3489 l2cap_resend_srejframe(chan, tx_seq);
3493 l2cap_send_srejframe(chan, tx_seq);
3496 expected_tx_seq_offset =
3497 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3498 if (expected_tx_seq_offset < 0)
3499 expected_tx_seq_offset += 64;
3501 /* duplicated tx_seq */
3502 if (tx_seq_offset < expected_tx_seq_offset)
/* First gap detected: enter SREJ recovery for the missing frames. */
3505 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3507 BT_DBG("chan %p, Enter SREJ", chan);
3509 INIT_LIST_HEAD(&chan->srej_l);
3510 chan->buffer_seq_srej = chan->buffer_seq;
3512 __skb_queue_head_init(&chan->srej_q);
3513 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3515 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3517 l2cap_send_srejframe(chan, tx_seq);
3519 __clear_ack_timer(chan);
/* Expected (in-sequence) frame path. */
3524 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3526 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3527 bt_cb(skb)->tx_seq = tx_seq;
3528 bt_cb(skb)->sar = sar;
3529 __skb_queue_tail(&chan->srej_q, skb);
3533 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3534 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3536 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3540 if (rx_control & L2CAP_CTRL_FINAL) {
3541 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3542 l2cap_retransmit_frames(chan);
3545 __set_ack_timer(chan);
/* Ack proactively once enough frames have accumulated. */
3547 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3548 if (chan->num_acked == num_to_ack - 1)
3549 l2cap_send_ack(chan);
/* Handle a Receiver-Ready S-frame: acknowledge our frames up to the
 * piggybacked req_seq, answer a poll (P-bit) with an F-bit response,
 * and resume transmission when the F-bit clears remote-busy.
 */
3558 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3560 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3563 chan->expected_ack_seq = __get_reqseq(rx_control);
3564 l2cap_drop_acked_frames(chan);
/* Peer polls us: the next frame we send must carry the F-bit. */
3566 if (rx_control & L2CAP_CTRL_POLL) {
3567 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3568 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3569 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3570 (chan->unacked_frames > 0))
3571 __set_retrans_timer(chan);
3573 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3574 l2cap_send_srejtail(chan);
3576 l2cap_send_i_or_rr_or_rnr(chan);
/* F-bit response to our poll: retransmit unless a REJ already did. */
3579 } else if (rx_control & L2CAP_CTRL_FINAL) {
3580 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3582 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3583 l2cap_retransmit_frames(chan);
/* Plain RR: restart retrans timing if frames remain unacked. */
3586 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3587 (chan->unacked_frames > 0))
3588 __set_retrans_timer(chan);
3590 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3591 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3592 l2cap_send_ack(chan);
3594 l2cap_ertm_send(chan);
/* Handle a Reject S-frame: everything before req_seq is acked, and the
 * peer asks for a go-back-N retransmission from req_seq onward. If we
 * are waiting for an F-bit, remember the REJ so the F-bit response does
 * not trigger a second retransmission (CONN_REJ_ACT).
 */
3598 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3600 u8 tx_seq = __get_reqseq(rx_control);
3602 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3604 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3606 chan->expected_ack_seq = tx_seq;
3607 l2cap_drop_acked_frames(chan);
3609 if (rx_control & L2CAP_CTRL_FINAL) {
3610 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3611 l2cap_retransmit_frames(chan);
3613 l2cap_retransmit_frames(chan);
3615 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3616 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a Selective Reject S-frame: retransmit exactly the frame with
 * sequence number req_seq. P- and F-bit variants additionally manage
 * the poll bookkeeping (CONN_SEND_FBIT / CONN_SREJ_ACT), mirroring the
 * REJ handler's protection against double retransmission.
 */
3619 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3621 u8 tx_seq = __get_reqseq(rx_control);
3623 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3625 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* SREJ with P-bit also acks everything before req_seq. */
3627 if (rx_control & L2CAP_CTRL_POLL) {
3628 chan->expected_ack_seq = tx_seq;
3629 l2cap_drop_acked_frames(chan);
3631 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3632 l2cap_retransmit_one_frame(chan, tx_seq);
3634 l2cap_ertm_send(chan);
3636 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3637 chan->srej_save_reqseq = tx_seq;
3638 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* F-bit: only skip retransmission if this SREJ was already acted on. */
3640 } else if (rx_control & L2CAP_CTRL_FINAL) {
3641 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3642 chan->srej_save_reqseq == tx_seq)
3643 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3645 l2cap_retransmit_one_frame(chan, tx_seq);
3647 l2cap_retransmit_one_frame(chan, tx_seq);
3648 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3649 chan->srej_save_reqseq = tx_seq;
3650 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a Receiver-Not-Ready S-frame: mark the peer busy, ack frames
 * up to req_seq, stop retransmitting, and answer a poll appropriately
 * (RR+F when idle, SREJ tail while in SREJ recovery).
 */
3655 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3657 u8 tx_seq = __get_reqseq(rx_control);
3659 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3661 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3662 chan->expected_ack_seq = tx_seq;
3663 l2cap_drop_acked_frames(chan);
3665 if (rx_control & L2CAP_CTRL_POLL)
3666 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* Peer is busy: no point running the retransmission timer. */
3668 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3669 __clear_retrans_timer(chan);
3670 if (rx_control & L2CAP_CTRL_POLL)
3671 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3675 if (rx_control & L2CAP_CTRL_POLL)
3676 l2cap_send_srejtail(chan);
3678 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
/* Dispatch a received supervisory frame (RR/REJ/SREJ/RNR) after the
 * common F-bit handling shared with the I-frame path.
 */
3681 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3683 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
/* F-bit answers our poll: stop the monitor timer, resume retransmission
 * timing if anything is still unacked. */
3685 if (L2CAP_CTRL_FINAL & rx_control &&
3686 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3687 __clear_monitor_timer(chan);
3688 if (chan->unacked_frames > 0)
3689 __set_retrans_timer(chan);
3690 clear_bit(CONN_WAIT_F, &chan->conn_state);
3693 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3694 case L2CAP_SUPER_RCV_READY:
3695 l2cap_data_channel_rrframe(chan, rx_control);
3698 case L2CAP_SUPER_REJECT:
3699 l2cap_data_channel_rejframe(chan, rx_control);
3702 case L2CAP_SUPER_SELECT_REJECT:
3703 l2cap_data_channel_srejframe(chan, rx_control);
3706 case L2CAP_SUPER_RCV_NOT_READY:
3707 l2cap_data_channel_rnrframe(chan, rx_control);
/* Entry point for one received ERTM PDU: verify FCS, strip framing,
 * sanity-check payload length and req_seq window, then route to the
 * I-frame or S-frame handler. Most validation failures tear the channel
 * down with a disconnect request.
 */
3715 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3717 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3720 int len, next_tx_seq_offset, req_seq_offset;
3722 control = get_unaligned_le16(skb->data);
3727 * We can just drop the corrupted I-frame here.
3728 * Receiver will miss it and start proper recovery
3729 * procedures and ask retransmission.
3731 if (l2cap_check_fcs(chan, skb))
/* SAR-start I-frames carry an extra 2-byte SDU-length field. */
3734 if (__is_sar_start(control) && __is_iframe(control))
3737 if (chan->fcs == L2CAP_FCS_CRC16)
/* Payload larger than the negotiated MPS is a protocol violation. */
3740 if (len > chan->mps) {
3741 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* req_seq must lie between expected_ack_seq and next_tx_seq (mod 64). */
3745 req_seq = __get_reqseq(control);
3746 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3747 if (req_seq_offset < 0)
3748 req_seq_offset += 64;
3750 next_tx_seq_offset =
3751 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3752 if (next_tx_seq_offset < 0)
3753 next_tx_seq_offset += 64;
3755 /* check for invalid req-seq */
3756 if (req_seq_offset > next_tx_seq_offset) {
3757 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3761 if (__is_iframe(control)) {
3763 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3767 l2cap_data_channel_iframe(chan, control, skb);
3771 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3775 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver an inbound frame on a connection-oriented channel identified
 * by @cid, switching on the channel mode: basic (direct delivery),
 * ERTM (via l2cap_ertm_data_rcv, with socket-lock backlogging), or
 * streaming (FCS check, sequence tracking, SDU reassembly, frames may
 * be silently discarded on loss).
 */
3785 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3787 struct l2cap_chan *chan;
3788 struct sock *sk = NULL;
3793 chan = l2cap_get_chan_by_scid(conn, cid);
3795 BT_DBG("unknown cid 0x%4.4x", cid);
3801 BT_DBG("chan %p, len %d", chan, skb->len);
/* Frames for channels not yet connected are dropped. */
3803 if (chan->state != BT_CONNECTED)
3806 switch (chan->mode) {
3807 case L2CAP_MODE_BASIC:
3808 /* If socket recv buffers overflows we drop data here
3809 * which is *bad* because L2CAP has to be reliable.
3810 * But we don't have any other choice. L2CAP doesn't
3811 * provide flow control mechanism. */
3813 if (chan->imtu < skb->len)
3816 if (!chan->ops->recv(chan->data, skb))
/* ERTM: process inline unless a user owns the socket, in which case
 * park the skb on the backlog for later processing. */
3820 case L2CAP_MODE_ERTM:
3821 if (!sock_owned_by_user(sk)) {
3822 l2cap_ertm_data_rcv(sk, skb);
3824 if (sk_add_backlog(sk, skb))
3830 case L2CAP_MODE_STREAMING:
3831 control = get_unaligned_le16(skb->data);
3835 if (l2cap_check_fcs(chan, skb))
3838 if (__is_sar_start(control))
3841 if (chan->fcs == L2CAP_FCS_CRC16)
/* Streaming mode carries no S-frames; oversize or S-frame => drop. */
3844 if (len > chan->mps || len < 0 || __is_sframe(control))
3847 tx_seq = __get_txseq(control);
3849 if (chan->expected_tx_seq != tx_seq) {
3850 /* Frame(s) missing - must discard partial SDU */
3851 kfree_skb(chan->sdu);
3853 chan->sdu_last_frag = NULL;
3856 /* TODO: Notify userland of missing data */
3859 chan->expected_tx_seq = (tx_seq + 1) % 64;
3861 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
3862 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3867 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (group) frame to the channel listening on
 * @psm for our local address; drops frames that exceed the channel MTU
 * or arrive in an unexpected channel state.
 */
3881 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3883 struct sock *sk = NULL;
3884 struct l2cap_chan *chan;
3886 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3894 BT_DBG("sk %p, len %d", sk, skb->len);
3896 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3899 if (chan->imtu < skb->len)
3902 if (!chan->ops->recv(chan->data, skb))
/* Deliver an LE ATT fixed-channel frame to the channel bound to @cid
 * for our local address; mirrors l2cap_conless_channel's state and
 * MTU checks.
 */
3914 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3916 struct sock *sk = NULL;
3917 struct l2cap_chan *chan;
3919 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3927 BT_DBG("sk %p, len %d", sk, skb->len);
3929 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3932 if (chan->imtu < skb->len)
3935 if (!chan->ops->recv(chan->data, skb))
/* Demultiplex one complete reassembled L2CAP frame by destination CID:
 * signaling, connectionless, LE ATT, SMP, or a connection-oriented data
 * channel. Strips the basic L2CAP header first and validates the
 * advertised length against the actual payload.
 */
3947 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3949 struct l2cap_hdr *lh = (void *) skb->data;
3953 skb_pull(skb, L2CAP_HDR_SIZE);
3954 cid = __le16_to_cpu(lh->cid);
3955 len = __le16_to_cpu(lh->len);
/* Header length must match the payload we actually received. */
3957 if (len != skb->len) {
3962 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3965 case L2CAP_CID_LE_SIGNALING:
3966 case L2CAP_CID_SIGNALING:
3967 l2cap_sig_channel(conn, skb);
/* Connectionless frames carry the PSM in the first two payload bytes. */
3970 case L2CAP_CID_CONN_LESS:
3971 psm = get_unaligned_le16(skb->data);
3973 l2cap_conless_channel(conn, psm, skb);
3976 case L2CAP_CID_LE_DATA:
3977 l2cap_att_channel(conn, cid, skb);
/* SMP failure is treated as a security violation: kill the link. */
3981 if (smp_sig_channel(conn, skb))
3982 l2cap_conn_del(conn->hcon, EACCES);
3986 l2cap_data_channel(conn, cid, skb);
3991 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection is being indicated. Scan the
 * global channel list for listening channels on this adapter's address
 * (lm1) or the wildcard address (lm2) and return the combined link-mode
 * flags, preferring an exact address match.
 */
3993 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3995 int exact = 0, lm1 = 0, lm2 = 0;
3996 struct l2cap_chan *c;
/* Only BR/EDR ACL links are subject to this accept policy. */
3998 if (type != ACL_LINK)
4001 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4003 /* Find listening sockets and check their link_mode */
4004 read_lock(&chan_list_lock);
4005 list_for_each_entry(c, &chan_list, global_l) {
4006 struct sock *sk = c->sk;
4008 if (c->state != BT_LISTEN)
4011 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4012 lm1 |= HCI_LM_ACCEPT;
4014 lm1 |= HCI_LM_MASTER;
4016 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4017 lm2 |= HCI_LM_ACCEPT;
4019 lm2 |= HCI_LM_MASTER;
4022 read_unlock(&chan_list_lock);
4024 return exact ? lm1 : lm2;
/* HCI callback: link establishment completed. On success attach an
 * L2CAP connection object and kick pending channels; on failure tear
 * down any partially created state.
 */
4027 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4029 struct l2cap_conn *conn;
4031 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4033 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4037 conn = l2cap_conn_add(hcon, status);
4039 l2cap_conn_ready(conn);
4041 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the reason code to use when the link is about to
 * be disconnected (stored earlier in conn->disc_reason).
 */
4046 static int l2cap_disconn_ind(struct hci_conn *hcon)
4048 struct l2cap_conn *conn = hcon->l2cap_data;
4050 BT_DBG("hcon %p", hcon);
4052 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4055 return conn->disc_reason;
/* HCI callback: the link is gone -- tear down the L2CAP connection and
 * all of its channels, translating the HCI reason to an errno.
 */
4058 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4060 BT_DBG("hcon %p reason %d", hcon, reason);
4062 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4065 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption gives a MEDIUM-security channel a grace timer and
 * closes a HIGH-security channel outright; regaining it cancels the
 * grace timer.
 */
4070 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4072 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4075 if (encrypt == 0x00) {
4076 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4077 __clear_chan_timer(chan);
4078 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4079 } else if (chan->sec_level == BT_SECURITY_HIGH)
4080 l2cap_chan_close(chan, ECONNREFUSED);
4082 if (chan->sec_level == BT_SECURITY_MEDIUM)
4083 __clear_chan_timer(chan);
/* HCI callback: an authentication/encryption procedure finished with
 * @status. For LE links, hand off to SMP key distribution. For every
 * channel on the connection: LE data channels become ready on success;
 * channels in BT_CONNECT (re)send their Connection Request; channels in
 * BT_CONNECT2 answer the pending Connection Response (possibly deferred
 * to userspace authorization); established channels re-evaluate their
 * security level via l2cap_check_encryption().
 * NOTE(review): several branch/guard lines are elided in this listing.
 */
4087 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4089 struct l2cap_conn *conn = hcon->l2cap_data;
4090 struct l2cap_chan *chan;
4095 BT_DBG("conn %p", conn);
4097 if (hcon->type == LE_LINK) {
4098 smp_distribute_keys(conn, 0);
4099 del_timer(&conn->security_timer);
4102 read_lock(&conn->chan_lock);
4104 list_for_each_entry(chan, &conn->chan_l, list) {
4105 struct sock *sk = chan->sk;
4109 BT_DBG("chan->scid %d", chan->scid);
/* LE ATT channel: successful encryption promotes it to ready. */
4111 if (chan->scid == L2CAP_CID_LE_DATA) {
4112 if (!status && encrypt) {
4113 chan->sec_level = hcon->sec_level;
4114 l2cap_chan_ready(sk);
4121 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4126 if (!status && (chan->state == BT_CONNECTED ||
4127 chan->state == BT_CONFIG)) {
4128 l2cap_check_encryption(chan, encrypt);
/* Security completed for an outgoing connect: send the CONN_REQ now. */
4133 if (chan->state == BT_CONNECT) {
4135 struct l2cap_conn_req req;
4136 req.scid = cpu_to_le16(chan->scid);
4137 req.psm = chan->psm;
4139 chan->ident = l2cap_get_ident(conn);
4140 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4142 l2cap_send_cmd(conn, chan->ident,
4143 L2CAP_CONN_REQ, sizeof(req), &req);
4145 __clear_chan_timer(chan);
4146 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Incoming connect held for security: answer the pending CONN_RSP. */
4148 } else if (chan->state == BT_CONNECT2) {
4149 struct l2cap_conn_rsp rsp;
/* Deferred setup: keep pending and wake the listening parent. */
4153 if (bt_sk(sk)->defer_setup) {
4154 struct sock *parent = bt_sk(sk)->parent;
4155 res = L2CAP_CR_PEND;
4156 stat = L2CAP_CS_AUTHOR_PEND;
4158 parent->sk_data_ready(parent, 0);
4160 l2cap_state_change(chan, BT_CONFIG);
4161 res = L2CAP_CR_SUCCESS;
4162 stat = L2CAP_CS_NO_INFO;
/* Security failure path: refuse and schedule disconnect. */
4165 l2cap_state_change(chan, BT_DISCONN);
4166 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4167 res = L2CAP_CR_SEC_BLOCK;
4168 stat = L2CAP_CS_NO_INFO;
4171 rsp.scid = cpu_to_le16(chan->dcid);
4172 rsp.dcid = cpu_to_le16(chan->scid);
4173 rsp.result = cpu_to_le16(res);
4174 rsp.status = cpu_to_le16(stat);
4175 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4182 read_unlock(&conn->chan_lock);
/* HCI callback: receive one ACL data packet, reassembling fragmented
 * L2CAP frames. A start fragment (!ACL_CONT) either completes a frame
 * immediately or allocates conn->rx_skb and records how many bytes are
 * still expected in conn->rx_len; continuation fragments are appended
 * until the frame is complete, then handed to l2cap_recv_frame().
 * Any framing inconsistency marks the connection unreliable (ECOMM).
 */
4187 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4189 struct l2cap_conn *conn = hcon->l2cap_data;
/* Lazily create the L2CAP connection on first data. */
4192 conn = l2cap_conn_add(hcon, 0);
4197 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4199 if (!(flags & ACL_CONT)) {
4200 struct l2cap_hdr *hdr;
4201 struct l2cap_chan *chan;
/* A new start fragment while a reassembly is pending is a protocol
 * error: discard the stale partial frame. */
4206 BT_ERR("Unexpected start frame (len %d)", skb->len);
4207 kfree_skb(conn->rx_skb);
4208 conn->rx_skb = NULL;
4210 l2cap_conn_unreliable(conn, ECOMM);
4213 /* Start fragment always begin with Basic L2CAP header */
4214 if (skb->len < L2CAP_HDR_SIZE) {
4215 BT_ERR("Frame is too short (len %d)", skb->len);
4216 l2cap_conn_unreliable(conn, ECOMM);
4220 hdr = (struct l2cap_hdr *) skb->data;
4221 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4222 cid = __le16_to_cpu(hdr->cid);
4224 if (len == skb->len) {
4225 /* Complete frame received */
4226 l2cap_recv_frame(conn, skb);
4230 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4232 if (skb->len > len) {
4233 BT_ERR("Frame is too long (len %d, expected len %d)",
4235 l2cap_conn_unreliable(conn, ECOMM);
/* Early MTU check against the destination channel, if it exists. */
4239 chan = l2cap_get_chan_by_scid(conn, cid);
4241 if (chan && chan->sk) {
4242 struct sock *sk = chan->sk;
4244 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4245 BT_ERR("Frame exceeding recv MTU (len %d, "
4249 l2cap_conn_unreliable(conn, ECOMM);
4255 /* Allocate skb for the complete frame (with header) */
4256 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4260 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4262 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4264 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4266 if (!conn->rx_len) {
4267 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4268 l2cap_conn_unreliable(conn, ECOMM);
4272 if (skb->len > conn->rx_len) {
4273 BT_ERR("Fragment is too long (len %d, expected %d)",
4274 skb->len, conn->rx_len);
4275 kfree_skb(conn->rx_skb);
4276 conn->rx_skb = NULL;
4278 l2cap_conn_unreliable(conn, ECOMM);
4282 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4284 conn->rx_len -= skb->len;
4286 if (!conn->rx_len) {
4287 /* Complete frame received */
4288 l2cap_recv_frame(conn, conn->rx_skb);
4289 conn->rx_skb = NULL;
/* debugfs seq_file callback: dump one line per global L2CAP channel
 * (addresses, state, PSM, CIDs, MTUs, security level, mode).
 */
4298 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4300 struct l2cap_chan *c;
4302 read_lock_bh(&chan_list_lock);
4304 list_for_each_entry(c, &chan_list, global_l) {
4305 struct sock *sk = c->sk;
4307 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4308 batostr(&bt_sk(sk)->src),
4309 batostr(&bt_sk(sk)->dst),
4310 c->state, __le16_to_cpu(c->psm),
4311 c->scid, c->dcid, c->imtu, c->omtu,
4312 c->sec_level, c->mode);
4315 read_unlock_bh(&chan_list_lock);
/* debugfs open: bind the single-record show callback to this file. */
4320 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4322 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for /sys/kernel/debug/bluetooth/l2cap (seq_file). */
4325 static const struct file_operations l2cap_debugfs_fops = {
4326 .open = l2cap_debugfs_open,
4328 .llseek = seq_lseek,
4329 .release = single_release,
/* Dentry handle kept so l2cap_exit() can remove the debugfs file. */
4332 static struct dentry *l2cap_debugfs;
/* HCI protocol descriptor: registers L2CAP's callbacks for connection
 * indication/confirmation, disconnection, security events and inbound
 * ACL data with the HCI core.
 */
4334 static struct hci_proto l2cap_hci_proto = {
4336 .id = HCI_PROTO_L2CAP,
4337 .connect_ind = l2cap_connect_ind,
4338 .connect_cfm = l2cap_connect_cfm,
4339 .disconn_ind = l2cap_disconn_ind,
4340 .disconn_cfm = l2cap_disconn_cfm,
4341 .security_cfm = l2cap_security_cfm,
4342 .recv_acldata = l2cap_recv_acldata
/* Module init: register the socket family, hook into the HCI core and
 * (best-effort) create the debugfs channel dump. Socket registration is
 * rolled back if HCI registration fails.
 */
4345 int __init l2cap_init(void)
4349 err = l2cap_init_sockets();
4353 err = hci_register_proto(&l2cap_hci_proto);
4355 BT_ERR("L2CAP protocol registration failed");
4356 bt_sock_unregister(BTPROTO_L2CAP);
/* debugfs is optional: failure is logged, not fatal. */
4361 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4362 bt_debugfs, NULL, &l2cap_debugfs_fops);
4364 BT_ERR("Failed to create L2CAP debug file");
4370 l2cap_cleanup_sockets();
/* Module exit: undo l2cap_init() in reverse order. */
4374 void l2cap_exit(void)
4376 debugfs_remove(l2cap_debugfs);
4378 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4379 BT_ERR("L2CAP protocol unregistration failed");
4381 l2cap_cleanup_sockets();
/* Module parameter: allow disabling ERTM at load time (bool, 0644). */
4384 module_param(disable_ertm, bool, 0644);
4385 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");