2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
/* File-scope state: locally supported L2CAP feature mask, the fixed-channel
 * bitmap advertised in info responses, and the global list of registered
 * channels guarded by an rwlock. */
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
64 static LIST_HEAD(chan_list);
65 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for signalling helpers referenced before their
 * definitions. NOTE(review): this extraction has dropped lines (e.g. the
 * tail of the l2cap_send_cmd prototype) -- verify against the full file. */
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
69 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77 /* ---- L2CAP channels ---- */
/* Take a reference on a channel; refcnt is an atomic_t. */
79 static inline void chan_hold(struct l2cap_chan *c)
81 atomic_inc(&c->refcnt);
/* Drop a reference. The action taken when the count reaches zero
 * (presumably freeing the channel) is missing from this extraction. */
84 static inline void chan_put(struct l2cap_chan *c)
86 if (atomic_dec_and_test(&c->refcnt))
/* Walk conn->chan_l looking up a channel by destination CID.
 * Caller must hold conn->chan_lock. The match/return statements are
 * missing from this extraction. */
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
94 list_for_each_entry(c, &conn->chan_l, list) {
/* Same walk keyed on the source CID; caller must hold conn->chan_lock. */
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104 struct l2cap_chan *c;
106 list_for_each_entry(c, &conn->chan_l, list) {
113 /* Find channel with given SCID.
114 * Returns locked socket */
/* Locking wrapper around __l2cap_get_chan_by_scid(): takes chan_lock for
 * reading around the lookup. The lock-the-socket step implied by the
 * comment above is not visible in this extraction. */
115 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
117 struct l2cap_chan *c;
119 read_lock(&conn->chan_lock);
120 c = __l2cap_get_chan_by_scid(conn, cid);
123 read_unlock(&conn->chan_lock);
/* Look up a channel by the signalling command identifier it last used.
 * Caller must hold conn->chan_lock. */
127 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
129 struct l2cap_chan *c;
131 list_for_each_entry(c, &conn->chan_l, list) {
132 if (c->ident == ident)
/* Locking wrapper: same lookup under a read-held chan_lock. */
138 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
140 struct l2cap_chan *c;
142 read_lock(&conn->chan_lock);
143 c = __l2cap_get_chan_by_ident(conn, ident);
146 read_unlock(&conn->chan_lock);
/* Find a globally registered channel bound to the given PSM (stored as the
 * source port) and source bdaddr. Caller must hold chan_list_lock. */
150 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
152 struct l2cap_chan *c;
154 list_for_each_entry(c, &chan_list, global_l) {
155 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a channel to a PSM. With an explicit psm the (psm, src) pair must be
 * free; with psm == 0 a dynamic PSM is allocated by scanning the odd values
 * in 0x1001..0x10ff. Error paths (EADDRINUSE/EINVAL) are truncated here. */
164 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
168 write_lock_bh(&chan_list_lock);
170 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Dynamic PSMs are odd per the spec: step by 2 from 0x1001. */
183 for (p = 0x1001; p < 0x1100; p += 2)
184 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
185 chan->psm = cpu_to_le16(p);
186 chan->sport = cpu_to_le16(p);
193 write_unlock_bh(&chan_list_lock);
/* Bind a channel to a fixed source CID; the assignment itself is missing
 * from this extraction, only the locking frame remains. */
197 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
199 write_lock_bh(&chan_list_lock);
203 write_unlock_bh(&chan_list_lock);
/* Allocate the first free dynamic CID on this connection by linear scan.
 * Caller must hold conn->chan_lock; the exhausted-range return value is
 * not visible in this extraction. */
208 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
210 u16 cid = L2CAP_CID_DYN_START;
212 for (; cid < L2CAP_CID_DYN_END; cid++) {
213 if (!__l2cap_get_chan_by_scid(conn, cid))
/* (Re)arm a channel timer for 'timeout' ms; when mod_timer() reports the
 * timer was not already pending, a reference is presumably taken (the
 * chan_hold() call is missing from this extraction). */
220 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
222 BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
224 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
/* Cancel a pending channel timer; on successful deletion the matching
 * reference drop is presumably performed (truncated here). */
228 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
230 BT_DBG("chan %p state %d", chan, chan->state);
232 if (timer_pending(timer) && del_timer(timer))
/* Transition the channel state and notify the socket layer through the
 * channel ops callback. */
236 static void l2cap_state_change(struct l2cap_chan *chan, int state)
239 chan->ops->state_change(chan->data, state);
/* Channel timer expiry (runs in timer/softirq context). If the socket is
 * owned by user context the timeout is simply re-armed; otherwise the
 * channel is closed with a reason derived from its current state. */
242 static void l2cap_chan_timeout(unsigned long arg)
244 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
245 struct sock *sk = chan->sk;
248 BT_DBG("chan %p state %d", chan, chan->state);
252 if (sock_owned_by_user(sk)) {
253 /* sk is owned by user. Try again later */
254 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Pick the close reason: refused for established/configuring channels and
 * for connect attempts above SDP security; the default (presumably
 * ETIMEDOUT) branch is missing from this extraction. */
260 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
261 reason = ECONNREFUSED;
262 else if (chan->state == BT_CONNECT &&
263 chan->sec_level != BT_SECURITY_SDP)
264 reason = ECONNREFUSED;
268 l2cap_chan_close(chan, reason);
272 chan->ops->close(chan->data);
/* Allocate and initialise a new channel bound to 'sk': registers it on the
 * global channel list, arms the chan_timer callback, and starts life in
 * BT_OPEN with a single reference. GFP_ATOMIC because callers may hold
 * BH locks. The NULL-check after kzalloc is truncated in this extraction. */
276 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
278 struct l2cap_chan *chan;
280 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
286 write_lock_bh(&chan_list_lock);
287 list_add(&chan->global_l, &chan_list);
288 write_unlock_bh(&chan_list_lock);
290 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
292 chan->state = BT_OPEN;
294 atomic_set(&chan->refcnt, 1);
/* Unregister a channel from the global list; the final reference drop is
 * presumably performed after the unlock (truncated here). */
299 void l2cap_chan_destroy(struct l2cap_chan *chan)
301 write_lock_bh(&chan_list_lock);
302 list_del(&chan->global_l);
303 write_unlock_bh(&chan_list_lock);
/* Attach a channel to a connection. Assigns CIDs and the outgoing MTU
 * according to channel type and link type, then links the channel onto
 * conn->chan_l. Caller must hold conn->chan_lock. */
308 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
310 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
311 chan->psm, chan->dcid);
/* 0x13: "Remote user terminated connection" -- default disconnect reason. */
313 conn->disc_reason = 0x13;
317 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
318 if (conn->hcon->type == LE_LINK) {
/* LE data goes over the fixed LE data channel with the LE default MTU. */
320 chan->omtu = L2CAP_LE_DEFAULT_MTU;
321 chan->scid = L2CAP_CID_LE_DATA;
322 chan->dcid = L2CAP_CID_LE_DATA;
324 /* Alloc CID for connection-oriented socket */
325 chan->scid = l2cap_alloc_cid(conn);
326 chan->omtu = L2CAP_DEFAULT_MTU;
328 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
329 /* Connectionless socket */
330 chan->scid = L2CAP_CID_CONN_LESS;
331 chan->dcid = L2CAP_CID_CONN_LESS;
332 chan->omtu = L2CAP_DEFAULT_MTU;
334 /* Raw socket can send/recv signalling messages only */
335 chan->scid = L2CAP_CID_SIGNALING;
336 chan->dcid = L2CAP_CID_SIGNALING;
337 chan->omtu = L2CAP_DEFAULT_MTU;
342 list_add(&chan->list, &conn->chan_l);
346 * Must be called on the locked socket. */
/* Detach a channel from its connection: stop its timer, unlink it from
 * conn->chan_l, drop the hci_conn reference, mark the channel/socket
 * closed, notify any accepting parent, and purge ERTM state (tx/srej
 * queues, retrans/monitor/ack timers, srej list). */
347 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
349 struct sock *sk = chan->sk;
350 struct l2cap_conn *conn = chan->conn;
351 struct sock *parent = bt_sk(sk)->parent;
353 __clear_chan_timer(chan);
355 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
358 /* Delete from channel list */
359 write_lock_bh(&conn->chan_lock);
360 list_del(&chan->list);
361 write_unlock_bh(&conn->chan_lock);
365 hci_conn_put(conn->hcon);
368 l2cap_state_change(chan, BT_CLOSED);
369 sock_set_flag(sk, SOCK_ZAPPED);
/* Not-yet-accepted child: unlink from the parent's accept queue and wake
 * the listener; otherwise just signal the state change on sk. */
375 bt_accept_unlink(sk);
376 parent->sk_data_ready(parent, 0);
378 sk->sk_state_change(sk);
/* Skip ERTM teardown when configuration never completed both ways. */
380 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
381 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
384 skb_queue_purge(&chan->tx_q);
386 if (chan->mode == L2CAP_MODE_ERTM) {
387 struct srej_list *l, *tmp;
389 __clear_retrans_timer(chan);
390 __clear_monitor_timer(chan);
391 __clear_ack_timer(chan);
393 skb_queue_purge(&chan->srej_q);
/* Free every pending SREJ entry (list_del/kfree truncated here). */
395 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Tear down every not-yet-accepted child channel of a listening socket. */
402 static void l2cap_chan_cleanup_listen(struct sock *parent)
406 BT_DBG("parent %p", parent);
408 /* Close not yet accepted channels */
409 while ((sk = bt_accept_dequeue(parent, NULL))) {
410 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
411 __clear_chan_timer(chan);
413 l2cap_chan_close(chan, ECONNRESET);
415 chan->ops->close(chan->data);
/* Close a channel according to its current state (the switch/case labels
 * are truncated in this extraction): a listener cleans up its pending
 * children; a connected/configuring ACL channel sends a disconnect request
 * and waits; a CONNECT2 ACL channel answers the pending connect request
 * with a refusal before deletion; other states delete immediately. */
419 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
421 struct l2cap_conn *conn = chan->conn;
422 struct sock *sk = chan->sk;
424 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
426 switch (chan->state) {
428 l2cap_chan_cleanup_listen(sk);
430 l2cap_state_change(chan, BT_CLOSED);
431 sock_set_flag(sk, SOCK_ZAPPED);
436 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
437 conn->hcon->type == ACL_LINK) {
438 __clear_chan_timer(chan);
439 __set_chan_timer(chan, sk->sk_sndtimeo);
440 l2cap_send_disconn_req(conn, chan, reason);
442 l2cap_chan_del(chan, reason);
446 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
447 conn->hcon->type == ACL_LINK) {
448 struct l2cap_conn_rsp rsp;
/* Refuse the half-open incoming connection: security block when the
 * accept was deferred, bad PSM otherwise. */
451 if (bt_sk(sk)->defer_setup)
452 result = L2CAP_CR_SEC_BLOCK;
454 result = L2CAP_CR_BAD_PSM;
455 l2cap_state_change(chan, BT_DISCONN);
457 rsp.scid = cpu_to_le16(chan->dcid);
458 rsp.dcid = cpu_to_le16(chan->scid);
459 rsp.result = cpu_to_le16(result);
460 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
461 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
465 l2cap_chan_del(chan, reason);
470 l2cap_chan_del(chan, reason);
474 sock_set_flag(sk, SOCK_ZAPPED);
/* Map the channel type / security level onto an HCI authentication
 * requirement: raw channels use dedicated bonding, PSM 0x0001 (SDP) is
 * downgraded to no bonding, everything else uses general bonding. */
479 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
481 if (chan->chan_type == L2CAP_CHAN_RAW) {
482 switch (chan->sec_level) {
483 case BT_SECURITY_HIGH:
484 return HCI_AT_DEDICATED_BONDING_MITM;
485 case BT_SECURITY_MEDIUM:
486 return HCI_AT_DEDICATED_BONDING;
488 return HCI_AT_NO_BONDING;
490 } else if (chan->psm == cpu_to_le16(0x0001)) {
/* SDP never needs more than SDP-level security. */
491 if (chan->sec_level == BT_SECURITY_LOW)
492 chan->sec_level = BT_SECURITY_SDP;
494 if (chan->sec_level == BT_SECURITY_HIGH)
495 return HCI_AT_NO_BONDING_MITM;
497 return HCI_AT_NO_BONDING;
499 switch (chan->sec_level) {
500 case BT_SECURITY_HIGH:
501 return HCI_AT_GENERAL_BONDING_MITM;
502 case BT_SECURITY_MEDIUM:
503 return HCI_AT_GENERAL_BONDING;
505 return HCI_AT_NO_BONDING;
510 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level with the
 * derived auth type; returns the hci_conn_security() result. */
511 static inline int l2cap_check_security(struct l2cap_chan *chan)
513 struct l2cap_conn *conn = chan->conn;
516 auth_type = l2cap_get_auth_type(chan);
518 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier for this connection,
 * wrapping within the kernel-reserved 1..128 range (the wrap assignment
 * after the comparison is truncated here). Serialised by conn->lock. */
521 static u8 l2cap_get_ident(struct l2cap_conn *conn)
525 /* Get next available identificator.
526 * 1 - 128 are used by kernel.
527 * 129 - 199 are reserved.
528 * 200 - 254 are used by utilities like l2ping, etc.
531 spin_lock_bh(&conn->lock);
533 if (++conn->tx_ident > 128)
538 spin_unlock_bh(&conn->lock);
/* Build a signalling command PDU and transmit it on the ACL link, using a
 * non-flushable start flag when the controller supports it and forcing the
 * link active. The NULL-skb early return is truncated in this extraction. */
543 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
545 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
548 BT_DBG("code 0x%2.2x", code);
553 if (lmp_no_flush_capable(conn->hcon->hdev))
554 flags = ACL_START_NO_FLUSH;
558 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
560 hci_send_acl(conn->hcon, skb, flags);
/* Build and send an ERTM supervisory frame (S-frame) carrying 'control'.
 * Adds the F-bit / P-bit if pending on the channel, and appends an FCS
 * when CRC16 checking is negotiated (hlen is grown by 2 in a truncated
 * line). Only sent while the channel is BT_CONNECTED. */
563 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
566 struct l2cap_hdr *lh;
567 struct l2cap_conn *conn = chan->conn;
568 int count, hlen = L2CAP_HDR_SIZE + 2;
571 if (chan->state != BT_CONNECTED)
574 if (chan->fcs == L2CAP_FCS_CRC16)
577 BT_DBG("chan %p, control 0x%2.2x", chan, control);
579 count = min_t(unsigned int, conn->mtu, hlen);
580 control |= L2CAP_CTRL_FRAME_TYPE;
582 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
583 control |= L2CAP_CTRL_FINAL;
585 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
586 control |= L2CAP_CTRL_POLL;
588 skb = bt_skb_alloc(count, GFP_ATOMIC);
592 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
593 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
594 lh->cid = cpu_to_le16(chan->dcid);
595 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers header + control, i.e. everything before the FCS field. */
597 if (chan->fcs == L2CAP_FCS_CRC16) {
598 u16 fcs = crc16(0, (u8 *)lh, count - 2);
599 put_unaligned_le16(fcs, skb_put(skb, 2));
602 if (lmp_no_flush_capable(conn->hcon->hdev))
603 flags = ACL_START_NO_FLUSH;
607 bt_cb(skb)->force_active = chan->force_active;
609 hci_send_acl(chan->conn->hcon, skb, flags);
/* Send a Receiver-Ready or Receiver-Not-Ready S-frame depending on the
 * local-busy condition, stamping the current buffer_seq as ReqSeq and
 * remembering that an RNR went out. */
612 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
614 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
615 control |= L2CAP_SUPER_RCV_NOT_READY;
616 set_bit(CONN_RNR_SENT, &chan->conn_state);
618 control |= L2CAP_SUPER_RCV_READY;
620 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
622 l2cap_send_sframe(chan, control);
/* True when no connect request is outstanding for this channel. */
625 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
627 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Kick off channel establishment: once the peer's feature mask is known
 * (info exchange done) and security passes, send a connect request;
 * otherwise start the feature-mask information exchange first. */
630 static void l2cap_do_start(struct l2cap_chan *chan)
632 struct l2cap_conn *conn = chan->conn;
634 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
635 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
638 if (l2cap_check_security(chan) &&
639 __l2cap_no_conn_pending(chan)) {
640 struct l2cap_conn_req req;
641 req.scid = cpu_to_le16(chan->scid);
644 chan->ident = l2cap_get_ident(conn);
645 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
647 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
/* Feature mask not requested yet: send an info request and start the
 * info timer so a silent peer does not stall establishment forever. */
651 struct l2cap_info_req req;
652 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
654 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
655 conn->info_ident = l2cap_get_ident(conn);
657 mod_timer(&conn->info_timer, jiffies +
658 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
660 l2cap_send_cmd(conn, conn->info_ident,
661 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether an ERTM/streaming mode is supported by both the local
 * feature mask (ERTM/streaming presumably gated on a truncated condition,
 * e.g. enable_ertm) and the remote feature mask. */
665 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
667 u32 local_feat_mask = l2cap_feat_mask;
669 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
672 case L2CAP_MODE_ERTM:
673 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
674 case L2CAP_MODE_STREAMING:
675 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP disconnect request for the channel, first stopping all
 * ERTM timers, then move the channel to BT_DISCONN (the socket error
 * assignment implied by 'err' is truncated in this extraction). */
681 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
684 struct l2cap_disconn_req req;
691 if (chan->mode == L2CAP_MODE_ERTM) {
692 __clear_retrans_timer(chan);
693 __clear_monitor_timer(chan);
694 __clear_ack_timer(chan);
697 req.dcid = cpu_to_le16(chan->dcid);
698 req.scid = cpu_to_le16(chan->scid);
699 l2cap_send_cmd(conn, l2cap_get_ident(conn),
700 L2CAP_DISCONN_REQ, sizeof(req), &req);
702 l2cap_state_change(chan, BT_DISCONN);
706 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and advance its establishment:
 * BT_CONNECT channels (re)send a connect request or are torn down when
 * their mode is unsupported; BT_CONNECT2 channels answer the pending
 * incoming connect request (success, authorization-pending when accept is
 * deferred, or authentication-pending) and, on success, start sending the
 * first configuration request. */
707 static void l2cap_conn_start(struct l2cap_conn *conn)
709 struct l2cap_chan *chan, *tmp;
711 BT_DBG("conn %p", conn);
713 read_lock(&conn->chan_lock);
715 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
716 struct sock *sk = chan->sk;
720 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
725 if (chan->state == BT_CONNECT) {
726 struct l2cap_conn_req req;
728 if (!l2cap_check_security(chan) ||
729 !__l2cap_no_conn_pending(chan)) {
734 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
735 && test_bit(CONF_STATE2_DEVICE,
736 &chan->conf_state)) {
737 /* l2cap_chan_close() calls list_del(chan)
738 * so release the lock */
739 read_unlock(&conn->chan_lock);
740 l2cap_chan_close(chan, ECONNRESET);
741 read_lock(&conn->chan_lock);
746 req.scid = cpu_to_le16(chan->scid);
749 chan->ident = l2cap_get_ident(conn);
750 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
752 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
755 } else if (chan->state == BT_CONNECT2) {
756 struct l2cap_conn_rsp rsp;
758 rsp.scid = cpu_to_le16(chan->dcid);
759 rsp.dcid = cpu_to_le16(chan->scid);
761 if (l2cap_check_security(chan)) {
762 if (bt_sk(sk)->defer_setup) {
763 struct sock *parent = bt_sk(sk)->parent;
764 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
765 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
/* Wake the listener so userspace can accept/reject. */
767 parent->sk_data_ready(parent, 0);
770 l2cap_state_change(chan, BT_CONFIG);
771 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
772 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
775 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
776 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
779 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only send the first config request once, and only on success. */
782 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
783 rsp.result != L2CAP_CR_SUCCESS) {
788 set_bit(CONF_REQ_SENT, &chan->conf_state);
789 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
790 l2cap_build_conf_req(chan, buf), buf);
791 chan->num_conf_req++;
797 read_unlock(&conn->chan_lock);
800 /* Find socket with cid and source bdaddr.
801 * Returns closest match, locked.
/* Global lookup by (state, scid, src): an exact source-address match wins
 * immediately; a BDADDR_ANY binding is remembered as the closest match
 * (the c1 assignment and final return are truncated here). */
803 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
805 struct l2cap_chan *c, *c1 = NULL;
807 read_lock(&chan_list_lock);
809 list_for_each_entry(c, &chan_list, global_l) {
810 struct sock *sk = c->sk;
812 if (state && c->state != state)
815 if (c->scid == cid) {
/* Exact match. */
817 if (!bacmp(&bt_sk(sk)->src, src)) {
818 read_unlock(&chan_list_lock);
/* Closest match: wildcard-bound listener. */
823 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
828 read_unlock(&chan_list_lock);
/* Incoming LE link ready: find a listener on the LE data CID, spawn a
 * child channel, copy the link addresses into its socket, enqueue it on
 * the parent's accept queue, attach it to the connection, and mark it
 * connected. Runs under bh_lock_sock(parent) and conn->chan_lock. */
833 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
835 struct sock *parent, *sk;
836 struct l2cap_chan *chan, *pchan;
840 /* Check if we have socket listening on cid */
841 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
848 bh_lock_sock(parent);
850 /* Check for backlog size */
851 if (sk_acceptq_is_full(parent)) {
852 BT_DBG("backlog full %d", parent->sk_ack_backlog);
856 chan = pchan->ops->new_connection(pchan->data);
862 write_lock_bh(&conn->chan_lock);
864 hci_conn_hold(conn->hcon);
865 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
867 bacpy(&bt_sk(sk)->src, conn->src);
868 bacpy(&bt_sk(sk)->dst, conn->dst);
870 bt_accept_enqueue(parent, sk);
872 __l2cap_chan_add(conn, chan);
874 __set_chan_timer(chan, sk->sk_sndtimeo);
876 l2cap_state_change(chan, BT_CONNECTED);
877 parent->sk_data_ready(parent, 0);
879 write_unlock_bh(&conn->chan_lock);
882 bh_unlock_sock(parent);
/* Configuration finished: reset conf state, stop the channel timer, mark
 * the channel connected and wake the socket (and the accepting parent,
 * when one exists -- its NULL check is truncated here). */
885 static void l2cap_chan_ready(struct sock *sk)
887 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
888 struct sock *parent = bt_sk(sk)->parent;
890 BT_DBG("sk %p, parent %p", sk, parent);
892 chan->conf_state = 0;
893 __clear_chan_timer(chan);
895 l2cap_state_change(chan, BT_CONNECTED);
896 sk->sk_state_change(sk);
899 parent->sk_data_ready(parent, 0);
/* HCI link is up: handle LE accept/security, then per channel either mark
 * it ready (LE, once SMP security passes), mark non-connection-oriented
 * channels connected directly, or start establishment for channels
 * waiting in BT_CONNECT. */
902 static void l2cap_conn_ready(struct l2cap_conn *conn)
904 struct l2cap_chan *chan;
906 BT_DBG("conn %p", conn);
908 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
909 l2cap_le_conn_ready(conn);
911 if (conn->hcon->out && conn->hcon->type == LE_LINK)
912 smp_conn_security(conn, conn->hcon->pending_sec_level);
914 read_lock(&conn->chan_lock);
916 list_for_each_entry(chan, &conn->chan_l, list) {
917 struct sock *sk = chan->sk;
921 if (conn->hcon->type == LE_LINK) {
922 if (smp_conn_security(conn, chan->sec_level))
923 l2cap_chan_ready(sk);
925 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
926 __clear_chan_timer(chan);
927 l2cap_state_change(chan, BT_CONNECTED);
928 sk->sk_state_change(sk);
930 } else if (chan->state == BT_CONNECT)
931 l2cap_do_start(chan);
936 read_unlock(&conn->chan_lock);
939 /* Notify sockets that we cannot guaranty reliability anymore */
/* Report 'err' on every channel that requested reliable delivery (the
 * sk_err assignment / error-report call is truncated here). */
940 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
942 struct l2cap_chan *chan;
944 BT_DBG("conn %p", conn);
946 read_lock(&conn->chan_lock);
948 list_for_each_entry(chan, &conn->chan_l, list) {
949 struct sock *sk = chan->sk;
951 if (chan->force_reliable)
955 read_unlock(&conn->chan_lock);
/* Info-request timer expired: treat the feature-mask exchange as done and
 * resume channel establishment with whatever defaults remain. */
958 static void l2cap_info_timeout(unsigned long arg)
960 struct l2cap_conn *conn = (void *) arg;
962 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
963 conn->info_ident = 0;
965 l2cap_conn_start(conn);
/* Destroy the L2CAP connection attached to an HCI link: free any partial
 * reassembly skb, delete and close every channel with 'err', stop the
 * info/security timers, and detach from the hci_conn (the final kfree of
 * conn itself is truncated in this extraction). */
968 static void l2cap_conn_del(struct hci_conn *hcon, int err)
970 struct l2cap_conn *conn = hcon->l2cap_data;
971 struct l2cap_chan *chan, *l;
977 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
979 kfree_skb(conn->rx_skb);
982 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
985 l2cap_chan_del(chan, err);
987 chan->ops->close(chan->data);
990 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
991 del_timer_sync(&conn->info_timer);
993 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
994 del_timer(&conn->security_timer);
995 smp_chan_destroy(conn);
998 hcon->l2cap_data = NULL;
/* SMP security procedure timed out: tear down the whole connection. */
1002 static void security_timeout(unsigned long arg)
1004 struct l2cap_conn *conn = (void *) arg;
1006 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Get-or-create the L2CAP connection object for an HCI link: returns the
 * existing one if present, otherwise allocates and initialises a new one
 * (MTU from the link type, endpoint addresses, locks, channel list, and
 * the LE security timer or BR/EDR info timer). */
1009 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1011 struct l2cap_conn *conn = hcon->l2cap_data;
1016 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1020 hcon->l2cap_data = conn;
1023 BT_DBG("hcon %p conn %p", hcon, conn);
1025 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1026 conn->mtu = hcon->hdev->le_mtu;
1028 conn->mtu = hcon->hdev->acl_mtu;
1030 conn->src = &hcon->hdev->bdaddr;
1031 conn->dst = &hcon->dst;
1033 conn->feat_mask = 0;
1035 spin_lock_init(&conn->lock);
1036 rwlock_init(&conn->chan_lock);
1038 INIT_LIST_HEAD(&conn->chan_l);
1040 if (hcon->type == LE_LINK)
1041 setup_timer(&conn->security_timer, security_timeout,
1042 (unsigned long) conn);
1044 setup_timer(&conn->info_timer, l2cap_info_timeout,
1045 (unsigned long) conn);
/* 0x13: "Remote user terminated connection" -- default disconnect reason. */
1047 conn->disc_reason = 0x13;
/* Locked wrapper around __l2cap_chan_add(). */
1052 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1054 write_lock_bh(&conn->chan_lock);
1055 __l2cap_chan_add(conn, chan);
1056 write_unlock_bh(&conn->chan_lock);
1059 /* ---- Socket interface ---- */
1061 /* Find socket with psm and source bdaddr.
1062 * Returns closest match.
/* Global lookup by (state, psm, src): exact source match wins; BDADDR_ANY
 * bindings are kept as the closest match (the c1 assignment and final
 * return are truncated in this extraction). */
1064 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1066 struct l2cap_chan *c, *c1 = NULL;
1068 read_lock(&chan_list_lock);
1070 list_for_each_entry(c, &chan_list, global_l) {
1071 struct sock *sk = c->sk;
1073 if (state && c->state != state)
1076 if (c->psm == psm) {
/* Exact match. */
1078 if (!bacmp(&bt_sk(sk)->src, src)) {
1079 read_unlock(&chan_list_lock);
/* Closest match: wildcard binding. */
1084 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1089 read_unlock(&chan_list_lock);
/* Initiate an outgoing channel: route to an hdev for (src, dst), create
 * the HCI link (LE when dialling the LE data CID, ACL otherwise), attach
 * the channel to the resulting connection, and either complete or start
 * L2CAP establishment depending on whether the link is already up.
 * Returns 0 or a negative errno. */
1094 int l2cap_chan_connect(struct l2cap_chan *chan)
1096 struct sock *sk = chan->sk;
1097 bdaddr_t *src = &bt_sk(sk)->src;
1098 bdaddr_t *dst = &bt_sk(sk)->dst;
1099 struct l2cap_conn *conn;
1100 struct hci_conn *hcon;
1101 struct hci_dev *hdev;
1105 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1108 hdev = hci_get_route(dst, src);
1110 return -EHOSTUNREACH;
1112 hci_dev_lock_bh(hdev);
1114 auth_type = l2cap_get_auth_type(chan);
1116 if (chan->dcid == L2CAP_CID_LE_DATA)
1117 hcon = hci_connect(hdev, LE_LINK, dst,
1118 chan->sec_level, auth_type);
1120 hcon = hci_connect(hdev, ACL_LINK, dst,
1121 chan->sec_level, auth_type);
1124 err = PTR_ERR(hcon);
1128 conn = l2cap_conn_add(hcon, 0);
1135 /* Update source addr of the socket */
1136 bacpy(src, conn->src);
1138 l2cap_chan_add(conn, chan);
1140 l2cap_state_change(chan, BT_CONNECT);
1141 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Link already up: non-connection-oriented channels finish immediately
 * once security passes; connection-oriented ones start signalling. */
1143 if (hcon->state == BT_CONNECTED) {
1144 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1145 __clear_chan_timer(chan);
1146 if (l2cap_check_security(chan))
1147 l2cap_state_change(chan, BT_CONNECTED);
1149 l2cap_do_start(chan);
1155 hci_dev_unlock_bh(hdev);
/* Sleep (interruptibly) until all outstanding ERTM frames are acked or
 * the connection drops; classic wait-queue loop. Returns 0, a signal
 * errno, or a socket error (timeout initialisation and the in-loop
 * timeout check are truncated in this extraction). */
1160 int __l2cap_wait_ack(struct sock *sk)
1162 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1163 DECLARE_WAITQUEUE(wait, current);
1167 add_wait_queue(sk_sleep(sk), &wait);
1168 set_current_state(TASK_INTERRUPTIBLE);
1169 while (chan->unacked_frames > 0 && chan->conn) {
1173 if (signal_pending(current)) {
1174 err = sock_intr_errno(timeo);
1179 timeo = schedule_timeout(timeo);
1181 set_current_state(TASK_INTERRUPTIBLE);
1183 err = sock_error(sk);
1187 set_current_state(TASK_RUNNING);
1188 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: the peer did not answer our poll. Give up and
 * disconnect once remote_max_tx retries are exhausted, otherwise re-arm
 * and poll again with the P-bit set. */
1192 static void l2cap_monitor_timeout(unsigned long arg)
1194 struct l2cap_chan *chan = (void *) arg;
1195 struct sock *sk = chan->sk;
1197 BT_DBG("chan %p", chan);
1200 if (chan->retry_count >= chan->remote_max_tx) {
1201 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1206 chan->retry_count++;
1207 __set_monitor_timer(chan);
1209 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer: no ack arrived in time. Switch to the
 * monitor procedure: reset the retry count, start the monitor timer, set
 * WAIT_F and poll the peer. */
1213 static void l2cap_retrans_timeout(unsigned long arg)
1215 struct l2cap_chan *chan = (void *) arg;
1216 struct sock *sk = chan->sk;
1218 BT_DBG("chan %p", chan);
1221 chan->retry_count = 1;
1222 __set_monitor_timer(chan);
1224 set_bit(CONN_WAIT_F, &chan->conn_state);
1226 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Free transmitted frames from the head of tx_q up to (but not including)
 * expected_ack_seq, decrementing unacked_frames; the retransmission timer
 * is stopped once nothing is outstanding. The kfree_skb of the dequeued
 * buffer is truncated in this extraction. */
1230 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1232 struct sk_buff *skb;
1234 while ((skb = skb_peek(&chan->tx_q)) &&
1235 chan->unacked_frames) {
1236 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1239 skb = skb_dequeue(&chan->tx_q);
1242 chan->unacked_frames--;
1245 if (!chan->unacked_frames)
1246 __clear_retrans_timer(chan);
/* Hand a finished L2CAP PDU to the HCI layer, choosing the non-flushable
 * start flag for non-flushable channels on capable controllers. */
1249 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1251 struct hci_conn *hcon = chan->conn->hcon;
1254 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1256 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1257 flags = ACL_START_NO_FLUSH;
1261 bt_cb(skb)->force_active = chan->force_active;
1262 hci_send_acl(hcon, skb, flags);
/* Streaming mode: drain tx_q, stamping each I-frame's TxSeq into the
 * control field, recomputing the FCS over everything but the trailing
 * FCS bytes, and sending. TxSeq wraps modulo 64 (6-bit sequence). */
1265 static void l2cap_streaming_send(struct l2cap_chan *chan)
1267 struct sk_buff *skb;
1270 while ((skb = skb_dequeue(&chan->tx_q))) {
1271 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1272 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1273 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1275 if (chan->fcs == L2CAP_FCS_CRC16) {
1276 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1277 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1280 l2cap_do_send(chan, skb);
1282 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* Retransmit the single I-frame whose TxSeq equals 'tx_seq': locate it in
 * tx_q, disconnect if it already hit remote_max_tx retries, otherwise
 * clone it, rebuild the control field (keep SAR bits, add F-bit if
 * pending, current ReqSeq and the original TxSeq), refresh the FCS and
 * send. NOTE(review): skb_clone() shares the data buffer, so the control
 * rewrite through tx_skb also lands in the queued copy -- confirm this is
 * the intended behavior against the original file. */
1286 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1288 struct sk_buff *skb, *tx_skb;
1291 skb = skb_peek(&chan->tx_q);
1296 if (bt_cb(skb)->tx_seq == tx_seq)
1299 if (skb_queue_is_last(&chan->tx_q, skb))
1302 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1304 if (chan->remote_max_tx &&
1305 bt_cb(skb)->retries == chan->remote_max_tx) {
1306 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1310 tx_skb = skb_clone(skb, GFP_ATOMIC);
1311 bt_cb(skb)->retries++;
1312 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1313 control &= L2CAP_CTRL_SAR;
1315 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1316 control |= L2CAP_CTRL_FINAL;
1318 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1319 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1321 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1323 if (chan->fcs == L2CAP_FCS_CRC16) {
1324 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1325 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1328 l2cap_do_send(chan, tx_skb);
/* ERTM transmit pump: send frames from tx_send_head while the transmit
 * window is open. Each frame is cloned, its control field rebuilt (SAR
 * bits preserved, F-bit if pending, current ReqSeq and next_tx_seq), the
 * FCS refreshed, and accounting updated (retries, unacked_frames,
 * frames_sent, retrans timer). Returns the number of frames sent
 * (the counter lines are truncated in this extraction). */
1331 static int l2cap_ertm_send(struct l2cap_chan *chan)
1333 struct sk_buff *skb, *tx_skb;
1337 if (chan->state != BT_CONNECTED)
1340 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1342 if (chan->remote_max_tx &&
1343 bt_cb(skb)->retries == chan->remote_max_tx) {
1344 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1348 tx_skb = skb_clone(skb, GFP_ATOMIC);
1350 bt_cb(skb)->retries++;
1352 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1353 control &= L2CAP_CTRL_SAR;
1355 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1356 control |= L2CAP_CTRL_FINAL;
1358 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1359 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1360 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS is computed/stored via skb->data while sending
 * tx_skb; the clone shares the data buffer so both see the update --
 * confirm against the original file. */
1363 if (chan->fcs == L2CAP_FCS_CRC16) {
1364 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1365 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1368 l2cap_do_send(chan, tx_skb);
1370 __set_retrans_timer(chan);
1372 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1373 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1375 if (bt_cb(skb)->retries == 1)
1376 chan->unacked_frames++;
1378 chan->frames_sent++;
1380 if (skb_queue_is_last(&chan->tx_q, skb))
1381 chan->tx_send_head = NULL;
1383 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the head of tx_q and the sequence counter to
 * the last acked frame, then retransmit everything via l2cap_ertm_send(). */
1391 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1395 if (!skb_queue_empty(&chan->tx_q))
1396 chan->tx_send_head = chan->tx_q.next;
1398 chan->next_tx_seq = chan->expected_ack_seq;
1399 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR while locally busy; otherwise try
 * to piggyback the ack on pending I-frames (l2cap_ertm_send), and fall
 * back to an explicit RR S-frame when nothing was sent. */
1403 static void l2cap_send_ack(struct l2cap_chan *chan)
1407 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1409 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1410 control |= L2CAP_SUPER_RCV_NOT_READY;
1411 set_bit(CONN_RNR_SENT, &chan->conn_state);
1412 l2cap_send_sframe(chan, control);
1416 if (l2cap_ertm_send(chan) > 0)
1419 control |= L2CAP_SUPER_RCV_READY;
1420 l2cap_send_sframe(chan, control);
/* Send a final (F-bit) SREJ for the last entry on the srej list --
 * assumes the list is non-empty (list_entry on ->prev). */
1423 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1425 struct srej_list *tail;
1428 control = L2CAP_SUPER_SELECT_REJECT;
1429 control |= L2CAP_CTRL_FINAL;
1431 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1432 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1434 l2cap_send_sframe(chan, control);
/* Copy user iovec data into 'skb', then allocate and fill MTU-sized
 * continuation fragments chained on skb's frag_list until 'len' is
 * consumed (the loop condition, error returns and per-iteration len/count
 * bookkeeping are truncated in this extraction). */
1437 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1439 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1440 struct sk_buff **frag;
1443 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1449 /* Continuation fragments (no L2CAP header) */
1450 frag = &skb_shinfo(skb)->frag_list;
1452 count = min_t(unsigned int, conn->mtu, len);
1454 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1457 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1463 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM, then
 * the user payload copied from the iovec. Returns the skb or ERR_PTR. */
1469 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1471 struct sock *sk = chan->sk;
1472 struct l2cap_conn *conn = chan->conn;
1473 struct sk_buff *skb;
1474 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1475 struct l2cap_hdr *lh;
1477 BT_DBG("sk %p len %d", sk, (int)len);
/* First fragment is capped at the connection MTU; the rest goes into
 * frag_list via l2cap_skbuff_fromiovec(). */
1479 count = min_t(unsigned int, (conn->mtu - hlen), len);
1480 skb = bt_skb_send_alloc(sk, count + hlen,
1481 msg->msg_flags & MSG_DONTWAIT, &err);
1483 return ERR_PTR(err);
1485 /* Create L2CAP header */
1486 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1487 lh->cid = cpu_to_le16(chan->dcid);
1488 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1489 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1491 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1492 if (unlikely(err < 0)) {
1494 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: bare L2CAP header followed by the
 * user payload from the iovec. Returns the skb or ERR_PTR. */
1499 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1501 struct sock *sk = chan->sk;
1502 struct l2cap_conn *conn = chan->conn;
1503 struct sk_buff *skb;
1504 int err, count, hlen = L2CAP_HDR_SIZE;
1505 struct l2cap_hdr *lh;
1507 BT_DBG("sk %p len %d", sk, (int)len);
1509 count = min_t(unsigned int, (conn->mtu - hlen), len);
1510 skb = bt_skb_send_alloc(sk, count + hlen,
1511 msg->msg_flags & MSG_DONTWAIT, &err);
1513 return ERR_PTR(err);
1515 /* Create L2CAP header */
1516 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1517 lh->cid = cpu_to_le16(chan->dcid);
1518 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1520 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1521 if (unlikely(err < 0)) {
1523 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame: L2CAP header + 16-bit control word,
 * optional 2-byte SDU length (for SDU-start frames; the sdulen guard is
 * truncated here), payload, and a zero FCS placeholder filled in at send
 * time. Requires a live connection. Returns the skb or ERR_PTR. */
1528 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1529 struct msghdr *msg, size_t len,
1530 u16 control, u16 sdulen)
1532 struct sock *sk = chan->sk;
1533 struct l2cap_conn *conn = chan->conn;
1534 struct sk_buff *skb;
1535 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1536 struct l2cap_hdr *lh;
1538 BT_DBG("sk %p len %d", sk, (int)len);
1541 return ERR_PTR(-ENOTCONN);
/* hlen grows for the SDU-length field and the FCS (additions truncated). */
1546 if (chan->fcs == L2CAP_FCS_CRC16)
1549 count = min_t(unsigned int, (conn->mtu - hlen), len);
1550 skb = bt_skb_send_alloc(sk, count + hlen,
1551 msg->msg_flags & MSG_DONTWAIT, &err);
1553 return ERR_PTR(err);
1555 /* Create L2CAP header */
1556 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1557 lh->cid = cpu_to_le16(chan->dcid);
1558 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1559 put_unaligned_le16(control, skb_put(skb, 2));
1561 put_unaligned_le16(sdulen, skb_put(skb, 2));
1563 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1564 if (unlikely(err < 0)) {
1566 return ERR_PTR(err);
/* FCS placeholder; real value computed when the frame is transmitted. */
1569 if (chan->fcs == L2CAP_FCS_CRC16)
1570 put_unaligned_le16(0, skb_put(skb, 2));
1572 bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than the remote MPS into a START frame, zero or
 * more CONTINUE frames, and an END frame, then splice the whole sequence
 * onto the channel's tx queue.  On any allocation failure the partially
 * built queue is purged and the error returned.
 */
1576 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1578 struct sk_buff *skb;
1579 struct sk_buff_head sar_queue;
1583 skb_queue_head_init(&sar_queue);
/* First segment carries the total SDU length (passed as sdulen) */
1584 control = L2CAP_SDU_START;
1585 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1587 return PTR_ERR(skb);
1589 __skb_queue_tail(&sar_queue, skb);
1590 len -= chan->remote_mps;
1591 size += chan->remote_mps;
/* Middle vs. final segment: only the last one is tagged SDU_END */
1596 if (len > chan->remote_mps) {
1597 control = L2CAP_SDU_CONTINUE;
1598 buflen = chan->remote_mps;
1600 control = L2CAP_SDU_END;
1604 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1606 skb_queue_purge(&sar_queue);
1607 return PTR_ERR(skb);
1610 __skb_queue_tail(&sar_queue, skb);
/* Hand the whole segment train to the channel in one operation */
1614 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1615 if (chan->tx_send_head == NULL)
1616 chan->tx_send_head = sar_queue.next;
/*
 * Channel send entry point, dispatching on channel type and mode:
 *  - connectionless channels: one G-frame, sent immediately;
 *  - basic mode: one B-frame (after MTU check), sent immediately;
 *  - ERTM/streaming: queue either a single unsegmented I-frame or an
 *    SAR-segmented train, then kick the appropriate transmit engine.
 */
1621 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1623 struct sk_buff *skb;
1627 /* Connectionless channel */
1628 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1629 skb = l2cap_create_connless_pdu(chan, msg, len);
1631 return PTR_ERR(skb);
1633 l2cap_do_send(chan, skb);
1637 switch (chan->mode) {
1638 case L2CAP_MODE_BASIC:
1639 /* Check outgoing MTU */
1640 if (len > chan->omtu)
1643 /* Create a basic PDU */
1644 skb = l2cap_create_basic_pdu(chan, msg, len);
1646 return PTR_ERR(skb);
1648 l2cap_do_send(chan, skb);
1652 case L2CAP_MODE_ERTM:
1653 case L2CAP_MODE_STREAMING:
1654 /* Entire SDU fits into one PDU */
1655 if (len <= chan->remote_mps) {
1656 control = L2CAP_SDU_UNSEGMENTED;
1657 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1660 return PTR_ERR(skb);
1662 __skb_queue_tail(&chan->tx_q, skb);
1664 if (chan->tx_send_head == NULL)
1665 chan->tx_send_head = skb;
1668 /* Segment SDU into multiples PDUs */
1669 err = l2cap_sar_segment_sdu(chan, msg, len);
/* Streaming mode transmits immediately; ERTM defers while the peer
 * is busy or while waiting for an F-bit acknowledgement */
1674 if (chan->mode == L2CAP_MODE_STREAMING) {
1675 l2cap_streaming_send(chan);
1680 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1681 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1686 err = l2cap_ertm_send(chan);
1693 BT_DBG("bad state %1.1x", chan->mode);
1700 /* Copy frame to all raw sockets on that connection */
1701 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1703 struct sk_buff *nskb;
1704 struct l2cap_chan *chan;
1706 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under the read lock; each RAW
 * channel gets its own clone of the frame */
1708 read_lock(&conn->chan_lock);
1709 list_for_each_entry(chan, &conn->chan_l, list) {
1710 struct sock *sk = chan->sk;
1711 if (chan->chan_type != L2CAP_CHAN_RAW)
1714 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: called from the RX path, sleeping not allowed */
1717 nskb = skb_clone(skb, GFP_ATOMIC);
/* ops->recv takes ownership of nskb on success */
1721 if (chan->ops->recv(chan->data, nskb))
1724 read_unlock(&conn->chan_lock);
1727 /* ---- L2CAP signalling commands ---- */
/*
 * Allocate and fill a signalling PDU: L2CAP header (on the BR/EDR or LE
 * signalling CID, depending on link type), command header, then the
 * command payload -- fragmented into frag_list entries when it exceeds
 * the connection MTU.  Returns NULL on allocation failure.
 */
1728 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1729 u8 code, u8 ident, u16 dlen, void *data)
1731 struct sk_buff *skb, **frag;
1732 struct l2cap_cmd_hdr *cmd;
1733 struct l2cap_hdr *lh;
1736 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1737 conn, code, ident, dlen);
1739 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1740 count = min_t(unsigned int, conn->mtu, len);
1742 skb = bt_skb_alloc(count, GFP_ATOMIC);
1746 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1747 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use the dedicated LE signalling channel */
1749 if (conn->hcon->type == LE_LINK)
1750 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1752 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1754 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1757 cmd->len = cpu_to_le16(dlen);
/* Payload that fits in the first skb */
1760 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1761 memcpy(skb_put(skb, count), data, count);
1767 /* Continuation fragments (no L2CAP header) */
1768 frag = &skb_shinfo(skb)->frag_list;
1770 count = min_t(unsigned int, conn->mtu, len);
1772 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1776 memcpy(skb_put(*frag, count), data, count);
1781 frag = &(*frag)->next;
/*
 * Decode one configuration option at *ptr: returns its total on-wire size
 * (header + value) and fills *type, *olen and *val.  1/2/4-byte values are
 * read inline; any other length is returned as a pointer to the raw bytes
 * cast into *val.
 */
1791 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1793 struct l2cap_conf_opt *opt = *ptr;
1796 len = L2CAP_CONF_OPT_SIZE + opt->len;
1804 *val = *((u8 *) opt->val);
1808 *val = get_unaligned_le16(opt->val);
1812 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer, caller memcpy's it */
1816 *val = (unsigned long) opt->val;
1820 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option (TLV) at *ptr and advance *ptr past it.
 * For len 1/2/4 `val` is the value itself; otherwise `val` is treated as
 * a pointer to `len` bytes to copy.
 */
1824 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1826 struct l2cap_conf_opt *opt = *ptr;
1828 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1835 *((u8 *) opt->val) = val;
1839 put_unaligned_le16(val, opt->val);
1843 put_unaligned_le32(val, opt->val);
1847 memcpy(opt->val, (void *) val, len);
1851 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * ERTM ack timer callback: when the delayed-ack timer fires, send an
 * acknowledgement for frames received so far.  Runs in timer (softirq)
 * context, hence bh_lock_sock.
 */
1854 static void l2cap_ack_timeout(unsigned long arg)
1856 struct l2cap_chan *chan = (void *) arg;
1858 bh_lock_sock(chan->sk);
1859 l2cap_send_ack(chan);
1860 bh_unlock_sock(chan->sk);
/*
 * Reset ERTM per-channel state and arm the three ERTM timers (retransmit,
 * monitor, delayed ack).  Also redirects the socket backlog handler so
 * frames deferred while the socket was owned by user context are still
 * run through the ERTM state machine.
 */
1863 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1865 struct sock *sk = chan->sk;
1867 chan->expected_ack_seq = 0;
1868 chan->unacked_frames = 0;
1869 chan->buffer_seq = 0;
1870 chan->num_acked = 0;
1871 chan->frames_sent = 0;
1873 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1874 (unsigned long) chan);
1875 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1876 (unsigned long) chan);
1877 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
/* Queue and list for selective-reject (SREJ) bookkeeping */
1879 skb_queue_head_init(&chan->srej_q);
1881 INIT_LIST_HEAD(&chan->srej_l);
1884 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/*
 * Pick the channel mode to request: keep ERTM/streaming only if the
 * remote's feature mask advertises support, otherwise fall back to
 * basic mode.
 */
1887 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1890 case L2CAP_MODE_STREAMING:
1891 case L2CAP_MODE_ERTM:
1892 if (l2cap_mode_supported(mode, remote_feat_mask))
1896 return L2CAP_MODE_BASIC;
/*
 * Build our outgoing Configure Request into `data`: MTU option (if not
 * default) plus an RFC option describing the desired mode, and an FCS
 * option when we want to disable the CRC.  Returns the total request
 * length (used directly as the dlen for l2cap_send_cmd).
 */
1900 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1902 struct l2cap_conf_req *req = data;
1903 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1904 void *ptr = req->data;
1906 BT_DBG("chan %p", chan);
/* Only downgrade the mode on the very first configuration round */
1908 if (chan->num_conf_req || chan->num_conf_rsp)
1911 switch (chan->mode) {
1912 case L2CAP_MODE_STREAMING:
1913 case L2CAP_MODE_ERTM:
/* A "state 2" device insists on its configured mode */
1914 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
1919 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
1924 if (chan->imtu != L2CAP_DEFAULT_MTU)
1925 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1927 switch (chan->mode) {
1928 case L2CAP_MODE_BASIC:
/* Peers without ERTM/streaming support don't need an RFC option */
1929 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1930 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1933 rfc.mode = L2CAP_MODE_BASIC;
1935 rfc.max_transmit = 0;
1936 rfc.retrans_timeout = 0;
1937 rfc.monitor_timeout = 0;
1938 rfc.max_pdu_size = 0;
1940 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1941 (unsigned long) &rfc);
1944 case L2CAP_MODE_ERTM:
1945 rfc.mode = L2CAP_MODE_ERTM;
1946 rfc.txwin_size = chan->tx_win;
1947 rfc.max_transmit = chan->max_tx;
1948 rfc.retrans_timeout = 0;
1949 rfc.monitor_timeout = 0;
1950 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Clamp the PDU size so header+control+FCS still fits in the ACL MTU */
1951 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1952 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1954 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1955 (unsigned long) &rfc);
1957 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Request "no FCS" when we prefer it or the peer already asked for it */
1960 if (chan->fcs == L2CAP_FCS_NONE ||
1961 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1962 chan->fcs = L2CAP_FCS_NONE;
1963 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1967 case L2CAP_MODE_STREAMING:
1968 rfc.mode = L2CAP_MODE_STREAMING;
1970 rfc.max_transmit = 0;
1971 rfc.retrans_timeout = 0;
1972 rfc.monitor_timeout = 0;
1973 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1974 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1975 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1977 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1978 (unsigned long) &rfc);
1980 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1983 if (chan->fcs == L2CAP_FCS_NONE ||
1984 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1985 chan->fcs = L2CAP_FCS_NONE;
1986 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1991 req->dcid = cpu_to_le16(chan->dcid);
1992 req->flags = cpu_to_le16(0);
/*
 * Parse the peer's accumulated Configure Request (chan->conf_req /
 * conf_len) and build our Configure Response into `data`.  Unknown
 * non-hint options are echoed back with result CONF_UNKNOWN; mode
 * mismatches either downgrade our mode or refuse the connection.
 * Returns the response length, or -ECONNREFUSED.
 */
1997 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1999 struct l2cap_conf_rsp *rsp = data;
2000 void *ptr = rsp->data;
2001 void *req = chan->conf_req;
2002 int len = chan->conf_len;
2003 int type, hint, olen;
2005 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2006 u16 mtu = L2CAP_DEFAULT_MTU;
2007 u16 result = L2CAP_CONF_SUCCESS;
2009 BT_DBG("chan %p", chan);
2011 while (len >= L2CAP_CONF_OPT_SIZE) {
2012 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently ignored; others must be understood */
2014 hint = type & L2CAP_CONF_HINT;
2015 type &= L2CAP_CONF_MASK;
2018 case L2CAP_CONF_MTU:
2022 case L2CAP_CONF_FLUSH_TO:
2023 chan->flush_to = val;
2026 case L2CAP_CONF_QOS:
2029 case L2CAP_CONF_RFC:
2030 if (olen == sizeof(rfc))
2031 memcpy(&rfc, (void *) val, olen);
2034 case L2CAP_CONF_FCS:
2035 if (val == L2CAP_FCS_NONE)
2036 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
/* Unknown, non-hint option: echo its type back, reject the config */
2044 result = L2CAP_CONF_UNKNOWN;
2045 *((u8 *) ptr++) = type;
2050 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2053 switch (chan->mode) {
2054 case L2CAP_MODE_STREAMING:
2055 case L2CAP_MODE_ERTM:
2056 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2057 chan->mode = l2cap_select_mode(rfc.mode,
2058 chan->conn->feat_mask);
2062 if (chan->mode != rfc.mode)
2063 return -ECONNREFUSED;
2069 if (chan->mode != rfc.mode) {
2070 result = L2CAP_CONF_UNACCEPT;
2071 rfc.mode = chan->mode;
/* Second rejection on mode => give up on this channel */
2073 if (chan->num_conf_rsp == 1)
2074 return -ECONNREFUSED;
2076 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2077 sizeof(rfc), (unsigned long) &rfc);
2081 if (result == L2CAP_CONF_SUCCESS) {
2082 /* Configure output options and let the other side know
2083 * which ones we don't like. */
2085 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2086 result = L2CAP_CONF_UNACCEPT;
2089 set_bit(CONF_MTU_DONE, &chan->conf_state);
2091 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2094 case L2CAP_MODE_BASIC:
2095 chan->fcs = L2CAP_FCS_NONE;
2096 set_bit(CONF_MODE_DONE, &chan->conf_state);
2099 case L2CAP_MODE_ERTM:
2100 chan->remote_tx_win = rfc.txwin_size;
2101 chan->remote_max_tx = rfc.max_transmit;
2103 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2104 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2106 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* NOTE(review): these constants are host-endian values being stored
 * into __le16 rfc fields -- the conversion should be cpu_to_le16(),
 * not le16_to_cpu().  The two are identical on little-endian hosts,
 * so this only misbehaves on big-endian; verify against upstream. */
2108 rfc.retrans_timeout =
2109 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2110 rfc.monitor_timeout =
2111 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2113 set_bit(CONF_MODE_DONE, &chan->conf_state);
2115 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2116 sizeof(rfc), (unsigned long) &rfc);
2120 case L2CAP_MODE_STREAMING:
2121 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2122 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2124 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2126 set_bit(CONF_MODE_DONE, &chan->conf_state);
2128 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2129 sizeof(rfc), (unsigned long) &rfc);
2134 result = L2CAP_CONF_UNACCEPT;
2136 memset(&rfc, 0, sizeof(rfc));
2137 rfc.mode = chan->mode;
2140 if (result == L2CAP_CONF_SUCCESS)
2141 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2143 rsp->scid = cpu_to_le16(chan->dcid);
2144 rsp->result = cpu_to_le16(result);
2145 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse the peer's Configure Response `rsp` and build the adjusted
 * request we will re-send into `data`.  Clamps our MTU if the peer's
 * suggestion is below the minimum, refuses a mode change away from
 * basic, and on success latches the negotiated ERTM/streaming timers
 * and MPS.  Returns the new request length or -ECONNREFUSED.
 */
2150 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2152 struct l2cap_conf_req *req = data;
2153 void *ptr = req->data;
2156 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2158 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2160 while (len >= L2CAP_CONF_OPT_SIZE) {
2161 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2164 case L2CAP_CONF_MTU:
/* Peer suggested an MTU below spec minimum: counter with the minimum */
2165 if (val < L2CAP_DEFAULT_MIN_MTU) {
2166 *result = L2CAP_CONF_UNACCEPT;
2167 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2170 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2173 case L2CAP_CONF_FLUSH_TO:
2174 chan->flush_to = val;
2175 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2179 case L2CAP_CONF_RFC:
2180 if (olen == sizeof(rfc))
2181 memcpy(&rfc, (void *)val, olen);
/* State-2 devices never accept a mode other than their own */
2183 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2184 rfc.mode != chan->mode)
2185 return -ECONNREFUSED;
2189 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2190 sizeof(rfc), (unsigned long) &rfc);
2195 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2196 return -ECONNREFUSED;
2198 chan->mode = rfc.mode;
2200 if (*result == L2CAP_CONF_SUCCESS) {
2202 case L2CAP_MODE_ERTM:
2203 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2204 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2205 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2207 case L2CAP_MODE_STREAMING:
2208 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2212 req->dcid = cpu_to_le16(chan->dcid);
2213 req->flags = cpu_to_le16(0x0000);
/*
 * Fill a minimal Configure Response header (scid/result/flags, no
 * options) into `data` and return its length for l2cap_send_cmd.
 */
2218 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2220 struct l2cap_conf_rsp *rsp = data;
2221 void *ptr = rsp->data;
2223 BT_DBG("chan %p", chan);
/* The peer identifies the channel by what is our dcid */
2225 rsp->scid = cpu_to_le16(chan->dcid);
2226 rsp->result = cpu_to_le16(result);
2227 rsp->flags = cpu_to_le16(flags);
/*
 * Complete a connection that was deferred for authorisation: send the
 * pending Connect Response (success) and, if we haven't already, kick
 * off configuration by sending our first Configure Request.
 */
2232 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2234 struct l2cap_conn_rsp rsp;
2235 struct l2cap_conn *conn = chan->conn;
2238 rsp.scid = cpu_to_le16(chan->dcid);
2239 rsp.dcid = cpu_to_le16(chan->scid);
2240 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2241 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Reply with the ident saved from the original Connect Request */
2242 l2cap_send_cmd(conn, chan->ident,
2243 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2245 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2248 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2249 l2cap_build_conf_req(chan, buf), buf);
2250 chan->num_conf_req++;
/*
 * Extract the RFC option from a successful Configure Response and latch
 * the negotiated ERTM/streaming parameters on the channel.  If the peer
 * omitted the RFC option, substitute sane defaults and log an error.
 * No-op for basic-mode channels.
 */
2253 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2257 struct l2cap_conf_rfc rfc;
2259 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2261 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2264 while (len >= L2CAP_CONF_OPT_SIZE) {
2265 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2268 case L2CAP_CONF_RFC:
2269 if (olen == sizeof(rfc))
2270 memcpy(&rfc, (void *)val, olen);
2275 /* Use sane default values in case a misbehaving remote device
2276 * did not send an RFC option.
2278 rfc.mode = chan->mode;
2279 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2280 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2281 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2283 BT_ERR("Expected RFC option was not found, using defaults");
2287 case L2CAP_MODE_ERTM:
2288 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2289 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2290 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2292 case L2CAP_MODE_STREAMING:
2293 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle an incoming Command Reject.  If it rejects our outstanding
 * Information Request (matched by ident), treat feature discovery as
 * done and proceed with starting queued connections.
 */
2297 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2299 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2301 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2304 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2305 cmd->ident == conn->info_ident) {
2306 del_timer(&conn->info_timer);
2308 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2309 conn->info_ident = 0;
2311 l2cap_conn_start(conn);
/*
 * Handle an incoming Connect Request: look up a listening channel for
 * the PSM, enforce link security (except for SDP), check the accept
 * backlog, create the child channel, and reply with success, pending
 * (authorisation/authentication), or an error.  May also trigger the
 * initial Information Request / Configure Request exchanges.
 */
2317 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2319 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2320 struct l2cap_conn_rsp rsp;
2321 struct l2cap_chan *chan = NULL, *pchan;
2322 struct sock *parent, *sk = NULL;
2323 int result, status = L2CAP_CS_NO_INFO;
2325 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2326 __le16 psm = req->psm;
2328 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2330 /* Check if we have socket listening on psm */
2331 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2333 result = L2CAP_CR_BAD_PSM;
2339 bh_lock_sock(parent);
2341 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check */
2342 if (psm != cpu_to_le16(0x0001) &&
2343 !hci_conn_check_link_mode(conn->hcon)) {
2344 conn->disc_reason = 0x05;
2345 result = L2CAP_CR_SEC_BLOCK;
2349 result = L2CAP_CR_NO_MEM;
2351 /* Check for backlog size */
2352 if (sk_acceptq_is_full(parent)) {
2353 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2357 chan = pchan->ops->new_connection(pchan->data);
2363 write_lock_bh(&conn->chan_lock);
2365 /* Check if we already have channel with that dcid */
2366 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2367 write_unlock_bh(&conn->chan_lock);
2368 sock_set_flag(sk, SOCK_ZAPPED);
2369 chan->ops->close(chan->data);
2373 hci_conn_hold(conn->hcon);
2375 bacpy(&bt_sk(sk)->src, conn->src);
2376 bacpy(&bt_sk(sk)->dst, conn->dst);
2380 bt_accept_enqueue(parent, sk);
2382 __l2cap_chan_add(conn, chan);
2386 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Remember the ident so a deferred response can reuse it */
2388 chan->ident = cmd->ident;
2390 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2391 if (l2cap_check_security(chan)) {
2392 if (bt_sk(sk)->defer_setup) {
2393 l2cap_state_change(chan, BT_CONNECT2);
2394 result = L2CAP_CR_PEND;
2395 status = L2CAP_CS_AUTHOR_PEND;
2396 parent->sk_data_ready(parent, 0);
2398 l2cap_state_change(chan, BT_CONFIG);
2399 result = L2CAP_CR_SUCCESS;
2400 status = L2CAP_CS_NO_INFO;
2403 l2cap_state_change(chan, BT_CONNECT2);
2404 result = L2CAP_CR_PEND;
2405 status = L2CAP_CS_AUTHEN_PEND;
2408 l2cap_state_change(chan, BT_CONNECT2);
2409 result = L2CAP_CR_PEND;
2410 status = L2CAP_CS_NO_INFO;
2413 write_unlock_bh(&conn->chan_lock);
2416 bh_unlock_sock(parent);
2419 rsp.scid = cpu_to_le16(scid);
2420 rsp.dcid = cpu_to_le16(dcid);
2421 rsp.result = cpu_to_le16(result);
2422 rsp.status = cpu_to_le16(status);
2423 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Feature discovery not yet done: start it now before configuring */
2425 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2426 struct l2cap_info_req info;
2427 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2429 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2430 conn->info_ident = l2cap_get_ident(conn);
2432 mod_timer(&conn->info_timer, jiffies +
2433 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2435 l2cap_send_cmd(conn, conn->info_ident,
2436 L2CAP_INFO_REQ, sizeof(info), &info);
2439 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2440 result == L2CAP_CR_SUCCESS) {
2442 set_bit(CONF_REQ_SENT, &chan->conf_state);
2443 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2444 l2cap_build_conf_req(chan, buf), buf);
2445 chan->num_conf_req++;
/*
 * Handle an incoming Connect Response: locate the channel by scid (or,
 * when scid is absent, by our saved ident), then either move on to the
 * configuration phase, stay pending, or tear the channel down on
 * refusal (deferring deletion while userspace owns the socket).
 */
2451 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2453 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2454 u16 scid, dcid, result, status;
2455 struct l2cap_chan *chan;
2459 scid = __le16_to_cpu(rsp->scid);
2460 dcid = __le16_to_cpu(rsp->dcid);
2461 result = __le16_to_cpu(rsp->result);
2462 status = __le16_to_cpu(rsp->status);
2464 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2467 chan = l2cap_get_chan_by_scid(conn, scid);
/* Fall back to ident lookup for responses without a valid scid */
2471 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2479 case L2CAP_CR_SUCCESS:
2480 l2cap_state_change(chan, BT_CONFIG);
2483 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2485 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2488 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2489 l2cap_build_conf_req(chan, req), req);
2490 chan->num_conf_req++;
2494 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2498 /* don't delete l2cap channel if sk is owned by user */
2499 if (sock_owned_by_user(sk)) {
/* Defer teardown to the disconnect timer instead of racing userspace */
2500 l2cap_state_change(chan, BT_DISCONN);
2501 __clear_chan_timer(chan);
2502 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2506 l2cap_chan_del(chan, ECONNREFUSED);
2514 static inline void set_default_fcs(struct l2cap_chan *chan)
2516 /* FCS is enabled only in ERTM or streaming mode, if one or both
/* Default to CRC16 in ERTM/streaming unless the peer asked for none */
2519 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2520 chan->fcs = L2CAP_FCS_NONE;
2521 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2522 chan->fcs = L2CAP_FCS_CRC16;
/*
 * Handle an incoming Configure Request.  Rejects requests for channels
 * not in a configurable state, accumulates fragmented option data in
 * chan->conf_req (bounded by its size), and once the final fragment
 * arrives parses it, replies, and -- when both directions are done --
 * brings the channel up (initialising ERTM if negotiated).
 */
2525 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2527 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2530 struct l2cap_chan *chan;
2534 dcid = __le16_to_cpu(req->dcid);
2535 flags = __le16_to_cpu(req->flags);
2537 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2539 chan = l2cap_get_chan_by_scid(conn, dcid);
2545 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2546 struct l2cap_cmd_rej_cid rej;
2548 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2549 rej.scid = cpu_to_le16(chan->scid);
2550 rej.dcid = cpu_to_le16(chan->dcid);
2552 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2557 /* Reject if config buffer is too small. */
/* Bounds check against untrusted cmd_len before buffering options */
2558 len = cmd_len - sizeof(*req);
2559 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2560 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2561 l2cap_build_conf_rsp(chan, rsp,
2562 L2CAP_CONF_REJECT, flags), rsp);
2567 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2568 chan->conf_len += len;
/* Continuation flag set: more option fragments to come */
2570 if (flags & 0x0001) {
2571 /* Incomplete config. Send empty response. */
2572 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2573 l2cap_build_conf_rsp(chan, rsp,
2574 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2578 /* Complete config. */
2579 len = l2cap_parse_conf_req(chan, rsp);
2581 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2585 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2586 chan->num_conf_rsp++;
2588 /* Reset config buffer. */
2591 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both input and output configuration complete: channel is live */
2594 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2595 set_default_fcs(chan);
2597 l2cap_state_change(chan, BT_CONNECTED);
2599 chan->next_tx_seq = 0;
2600 chan->expected_tx_seq = 0;
2601 skb_queue_head_init(&chan->tx_q);
2602 if (chan->mode == L2CAP_MODE_ERTM)
2603 l2cap_ertm_init(chan);
2605 l2cap_chan_ready(sk);
2609 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2611 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2612 l2cap_build_conf_req(chan, buf), buf);
2613 chan->num_conf_req++;
/*
 * Handle an incoming Configure Response: on success latch the RFC
 * parameters; on "unacceptable parameters" re-negotiate (bounded by
 * L2CAP_CONF_MAX_CONF_RSP attempts); otherwise disconnect.  When both
 * directions finish, bring the channel up (initialising ERTM).
 */
2621 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2623 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2624 u16 scid, flags, result;
2625 struct l2cap_chan *chan;
/* NOTE(review): cmd->len is wire-format (__le16); using it in
 * arithmetic without le16_to_cpu() looks wrong on big-endian hosts
 * -- verify against upstream. */
2627 int len = cmd->len - sizeof(*rsp);
2629 scid = __le16_to_cpu(rsp->scid);
2630 flags = __le16_to_cpu(rsp->flags);
2631 result = __le16_to_cpu(rsp->result);
2633 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2634 scid, flags, result);
2636 chan = l2cap_get_chan_by_scid(conn, scid);
2643 case L2CAP_CONF_SUCCESS:
2644 l2cap_conf_rfc_get(chan, rsp->data, len);
2647 case L2CAP_CONF_UNACCEPT:
2648 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard the stack buffer before re-parsing the peer's options */
2651 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2652 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2656 /* throw out any old stored conf requests */
2657 result = L2CAP_CONF_SUCCESS;
2658 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2661 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2665 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2666 L2CAP_CONF_REQ, len, req);
2667 chan->num_conf_req++;
2668 if (result != L2CAP_CONF_SUCCESS)
/* Any other result: give up on the channel */
2674 sk->sk_err = ECONNRESET;
2675 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
2676 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2683 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2685 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2686 set_default_fcs(chan);
2688 l2cap_state_change(chan, BT_CONNECTED);
2689 chan->next_tx_seq = 0;
2690 chan->expected_tx_seq = 0;
2691 skb_queue_head_init(&chan->tx_q);
2692 if (chan->mode == L2CAP_MODE_ERTM)
2693 l2cap_ertm_init(chan);
2695 l2cap_chan_ready(sk);
/*
 * Handle an incoming Disconnect Request: acknowledge with a Disconnect
 * Response, shut the socket down, and delete the channel -- deferring
 * the deletion via the disconnect timer if userspace owns the socket.
 */
2703 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2705 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2706 struct l2cap_disconn_rsp rsp;
2708 struct l2cap_chan *chan;
2711 scid = __le16_to_cpu(req->scid);
2712 dcid = __le16_to_cpu(req->dcid);
2714 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid, hence the lookup by dcid */
2716 chan = l2cap_get_chan_by_scid(conn, dcid);
2722 rsp.dcid = cpu_to_le16(chan->scid);
2723 rsp.scid = cpu_to_le16(chan->dcid);
2724 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2726 sk->sk_shutdown = SHUTDOWN_MASK;
2728 /* don't delete l2cap channel if sk is owned by user */
2729 if (sock_owned_by_user(sk)) {
2730 l2cap_state_change(chan, BT_DISCONN);
2731 __clear_chan_timer(chan);
2732 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2737 l2cap_chan_del(chan, ECONNRESET);
2740 chan->ops->close(chan->data);
/*
 * Handle an incoming Disconnect Response to our earlier request: delete
 * the channel (error 0 -- this is an orderly shutdown), deferring via
 * the disconnect timer if userspace owns the socket.
 */
2744 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2746 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2748 struct l2cap_chan *chan;
2751 scid = __le16_to_cpu(rsp->scid);
2752 dcid = __le16_to_cpu(rsp->dcid);
2754 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2756 chan = l2cap_get_chan_by_scid(conn, scid);
2762 /* don't delete l2cap channel if sk is owned by user */
2763 if (sock_owned_by_user(sk)) {
2764 l2cap_state_change(chan,BT_DISCONN);
2765 __clear_chan_timer(chan);
2766 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2771 l2cap_chan_del(chan, 0);
2774 chan->ops->close(chan->data);
/*
 * Handle an incoming Information Request: answer feature-mask and
 * fixed-channel queries with our capabilities; anything else gets a
 * NOTSUPP response.
 */
2778 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2780 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2783 type = __le16_to_cpu(req->type);
2785 BT_DBG("type 0x%4.4x", type);
2787 if (type == L2CAP_IT_FEAT_MASK) {
2789 u32 feat_mask = l2cap_feat_mask;
2790 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2791 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2792 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming (and related bits) on top of the base mask */
2794 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2796 put_unaligned_le32(feat_mask, rsp->data);
2797 l2cap_send_cmd(conn, cmd->ident,
2798 L2CAP_INFO_RSP, sizeof(buf), buf);
2799 } else if (type == L2CAP_IT_FIXED_CHAN) {
2801 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2802 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2803 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel bitmap follows the 4-byte rsp header */
2804 memcpy(buf + 4, l2cap_fixed_chan, 8);
2805 l2cap_send_cmd(conn, cmd->ident,
2806 L2CAP_INFO_RSP, sizeof(buf), buf);
2808 struct l2cap_info_rsp rsp;
2809 rsp.type = cpu_to_le16(type);
2810 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2811 l2cap_send_cmd(conn, cmd->ident,
2812 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an incoming Information Response to our own request (matched
 * by ident).  A feature-mask answer may chain into a fixed-channel
 * query; once discovery finishes (or fails), mark it done and start
 * any connections that were waiting on it.
 */
2818 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2820 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2823 type = __le16_to_cpu(rsp->type);
2824 result = __le16_to_cpu(rsp->result);
2826 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2828 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2829 if (cmd->ident != conn->info_ident ||
2830 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2833 del_timer(&conn->info_timer);
2835 if (result != L2CAP_IR_SUCCESS) {
2836 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2837 conn->info_ident = 0;
2839 l2cap_conn_start(conn);
2844 if (type == L2CAP_IT_FEAT_MASK) {
2845 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: follow up with a fixed-chan query */
2847 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2848 struct l2cap_info_req req;
2849 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2851 conn->info_ident = l2cap_get_ident(conn);
2853 l2cap_send_cmd(conn, conn->info_ident,
2854 L2CAP_INFO_REQ, sizeof(req), &req);
2856 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2857 conn->info_ident = 0;
2859 l2cap_conn_start(conn);
2861 } else if (type == L2CAP_IT_FIXED_CHAN) {
2862 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2863 conn->info_ident = 0;
2865 l2cap_conn_start(conn);
/*
 * Validate LE connection-parameter-update values against the ranges
 * allowed by the spec (intervals, supervision timeout multiplier, and
 * slave latency relative to the timeout).  Nonzero return = reject.
 */
2871 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2876 if (min > max || min < 6 || max > 3200)
2879 if (to_multiplier < 10 || to_multiplier > 3200)
/* Supervision timeout must exceed the effective connection interval */
2882 if (max >= to_multiplier * 8)
2885 max_latency = (to_multiplier * 8 / max) - 1;
2886 if (latency > 499 || latency > max_latency)
/*
 * Handle an LE Connection Parameter Update Request (master role only):
 * validate the proposed parameters, reply accepted/rejected, and on
 * acceptance push the new parameters to the controller.
 */
2892 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2893 struct l2cap_cmd_hdr *cmd, u8 *data)
2895 struct hci_conn *hcon = conn->hcon;
2896 struct l2cap_conn_param_update_req *req;
2897 struct l2cap_conn_param_update_rsp rsp;
2898 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may apply a parameter update */
2901 if (!(hcon->link_mode & HCI_LM_MASTER))
2904 cmd_len = __le16_to_cpu(cmd->len);
2905 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2908 req = (struct l2cap_conn_param_update_req *) data;
2909 min = __le16_to_cpu(req->min);
2910 max = __le16_to_cpu(req->max);
2911 latency = __le16_to_cpu(req->latency);
2912 to_multiplier = __le16_to_cpu(req->to_multiplier);
2914 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2915 min, max, latency, to_multiplier);
2917 memset(&rsp, 0, sizeof(rsp));
2919 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2921 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2923 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2925 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: ask the controller to switch to the new parameters */
2929 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/*
 * Dispatch one BR/EDR signalling command to its handler.  Echo requests
 * are answered inline; unknown opcodes are logged and rejected by the
 * caller via the returned error.
 */
2934 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2935 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2939 switch (cmd->code) {
2940 case L2CAP_COMMAND_REJ:
2941 l2cap_command_rej(conn, cmd, data);
2944 case L2CAP_CONN_REQ:
2945 err = l2cap_connect_req(conn, cmd, data);
2948 case L2CAP_CONN_RSP:
2949 err = l2cap_connect_rsp(conn, cmd, data);
2952 case L2CAP_CONF_REQ:
2953 err = l2cap_config_req(conn, cmd, cmd_len, data);
2956 case L2CAP_CONF_RSP:
2957 err = l2cap_config_rsp(conn, cmd, data);
2960 case L2CAP_DISCONN_REQ:
2961 err = l2cap_disconnect_req(conn, cmd, data);
2964 case L2CAP_DISCONN_RSP:
2965 err = l2cap_disconnect_rsp(conn, cmd, data);
2968 case L2CAP_ECHO_REQ:
/* Echo: reflect the payload straight back with the same ident */
2969 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2972 case L2CAP_ECHO_RSP:
2975 case L2CAP_INFO_REQ:
2976 err = l2cap_information_req(conn, cmd, data);
2979 case L2CAP_INFO_RSP:
2980 err = l2cap_information_rsp(conn, cmd, data);
2984 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/*
 * Dispatch one LE signalling command.  Only the connection parameter
 * update request is acted on here; rejects and update responses are
 * ignored, and unknown opcodes are logged and rejected by the caller.
 */
2992 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2993 struct l2cap_cmd_hdr *cmd, u8 *data)
2995 switch (cmd->code) {
2996 case L2CAP_COMMAND_REJ:
2999 case L2CAP_CONN_PARAM_UPDATE_REQ:
3000 return l2cap_conn_param_update_req(conn, cmd, data);
3002 case L2CAP_CONN_PARAM_UPDATE_RSP:
3006 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/*
 * Process an skb received on the signalling channel: first mirror it to
 * raw sockets, then walk the (possibly multiple) commands it contains,
 * validating each header before dispatching to the BR/EDR or LE handler.
 * A handler error produces a Command Reject back to the peer.
 */
3011 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3012 struct sk_buff *skb)
3014 u8 *data = skb->data;
3016 struct l2cap_cmd_hdr cmd;
3019 l2cap_raw_recv(conn, skb);
3021 while (len >= L2CAP_CMD_HDR_SIZE) {
3023 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3024 data += L2CAP_CMD_HDR_SIZE;
3025 len -= L2CAP_CMD_HDR_SIZE;
3027 cmd_len = le16_to_cpu(cmd.len);
3029 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Untrusted length and a zero ident both indicate a corrupt PDU */
3031 if (cmd_len > len || !cmd.ident) {
3032 BT_DBG("corrupted command");
3036 if (conn->hcon->type == LE_LINK)
3037 err = l2cap_le_sig_cmd(conn, &cmd, data);
3039 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3042 struct l2cap_cmd_rej_unk rej;
3044 BT_ERR("Wrong link type (%d)", err);
3046 /* FIXME: Map err to a valid reason */
3047 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3048 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the 2-byte CRC16 FCS trailing an ERTM/streaming frame.
 * The FCS covers the basic L2CAP header plus the 2-byte control field
 * (hdr_size) and the payload; the trailer is trimmed before the skb is
 * passed further up. Returns non-zero on mismatch (elided here). */
3058 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3060 u16 our_fcs, rcv_fcs;
3061 int hdr_size = L2CAP_HDR_SIZE + 2;
3063 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Drop the FCS from the skb, then read it from the now-out-of-bounds
 * tail (data is still in the buffer after skb_trim). */
3064 skb_trim(skb, skb->len - 2);
3065 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3066 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3068 if (our_fcs != rcv_fcs)
/* Answer a peer poll: send an RNR if we are locally busy, otherwise try
 * to (re)transmit pending I-frames, and fall back to an RR if nothing
 * was sent so the peer still gets an acknowledgement. */
3074 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3078 chan->frames_sent = 0;
3080 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3082 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3083 control |= L2CAP_SUPER_RCV_NOT_READY;
3084 l2cap_send_sframe(chan, control);
3085 set_bit(CONN_RNR_SENT, &chan->conn_state);
3088 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3089 l2cap_retransmit_frames(chan);
3091 l2cap_ertm_send(chan);
/* Nothing went out and we are not busy: explicitly RR-ack the peer. */
3093 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3094 chan->frames_sent == 0) {
3095 control |= L2CAP_SUPER_RCV_READY;
3096 l2cap_send_sframe(chan, control);
/* Insert an out-of-order I-frame into the SREJ reorder queue, keeping
 * srej_q sorted by tx_seq in the modulo-64 sequence space relative to
 * buffer_seq. Duplicate tx_seq values are detected (return path elided).
 * Returns 0 on insert; negative on duplicate per the caller's usage. */
3100 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
3102 struct sk_buff *next_skb;
3103 int tx_seq_offset, next_tx_seq_offset;
3105 bt_cb(skb)->tx_seq = tx_seq;
3106 bt_cb(skb)->sar = sar;
3108 next_skb = skb_peek(&chan->srej_q);
/* Empty queue: trivial append. */
3110 __skb_queue_tail(&chan->srej_q, skb);
/* Distance of the new frame from the receive window base. */
3114 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3115 if (tx_seq_offset < 0)
3116 tx_seq_offset += 64;
3119 if (bt_cb(next_skb)->tx_seq == tx_seq)
3122 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3123 chan->buffer_seq) % 64;
3124 if (next_tx_seq_offset < 0)
3125 next_tx_seq_offset += 64;
/* Found the first queued frame logically after us: insert before it. */
3127 if (next_tx_seq_offset > tx_seq_offset) {
3128 __skb_queue_before(&chan->srej_q, next_skb, skb);
3132 if (skb_queue_is_last(&chan->srej_q, next_skb))
3135 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
/* Largest sequence so far: append at the tail. */
3137 __skb_queue_tail(&chan->srej_q, skb);
/* Append new_frag to skb's frag_list (used for SDU reassembly) and keep
 * the *last_frag cursor pointing at the tail so appends stay O(1).
 * Accounting follows skb conventions: len covers head + all fragments,
 * data_len covers fragments only. */
3142 static void append_skb_frag(struct sk_buff *skb,
3143 struct sk_buff *new_frag, struct sk_buff **last_frag)
3145 /* skb->len reflects data in skb as well as all fragments
3146 * skb->data_len reflects only data in fragments
3148 if (!skb_has_frag_list(skb))
3149 skb_shinfo(skb)->frag_list = new_frag;
3151 new_frag->next = NULL;
3153 (*last_frag)->next = new_frag;
3154 *last_frag = new_frag;
3156 skb->len += new_frag->len;
3157 skb->data_len += new_frag->len;
3158 skb->truesize += new_frag->truesize;
/* Reassemble a (possibly segmented) SDU from I-frames according to the
 * SAR bits in the control field and deliver the complete SDU via
 * chan->ops->recv(). Oversized SDUs (> imtu) are rejected; the error
 * paths (-EMSGSIZE etc.) are elided in this view. */
3161 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3165 switch (control & L2CAP_CTRL_SAR) {
3166 case L2CAP_SDU_UNSEGMENTED:
/* Whole SDU in one frame: deliver directly. */
3170 err = chan->ops->recv(chan->data, skb);
3173 case L2CAP_SDU_START:
/* First segment carries the total SDU length in its first two bytes. */
3177 chan->sdu_len = get_unaligned_le16(skb->data);
3180 if (chan->sdu_len > chan->imtu) {
3185 if (skb->len >= chan->sdu_len)
3189 chan->sdu_last_frag = skb;
3195 case L2CAP_SDU_CONTINUE:
3199 append_skb_frag(chan->sdu, skb,
3200 &chan->sdu_last_frag);
3203 if (chan->sdu->len >= chan->sdu_len)
/* SDU_END path (case label elided): append final fragment and check
 * the reassembled length matches the announced sdu_len exactly. */
3213 append_skb_frag(chan->sdu, skb,
3214 &chan->sdu_last_frag);
3217 if (chan->sdu->len != chan->sdu_len)
3220 err = chan->ops->recv(chan->data, chan->sdu);
3223 /* Reassembly complete */
3225 chan->sdu_last_frag = NULL;
/* Error/cleanup path: drop the partial SDU. */
3233 kfree_skb(chan->sdu);
3235 chan->sdu_last_frag = NULL;
/* Enter the ERTM local-busy state: flag it, tell the peer via an RNR
 * S-frame carrying our current buffer_seq, and stop the ack timer since
 * the RNR itself acknowledges received frames. */
3242 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3246 BT_DBG("chan %p, Enter local busy", chan);
3248 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3250 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3251 control |= L2CAP_SUPER_RCV_NOT_READY;
3252 l2cap_send_sframe(chan, control);
3254 set_bit(CONN_RNR_SENT, &chan->conn_state);
3256 __clear_ack_timer(chan);
/* Leave the ERTM local-busy state. If an RNR was previously sent, poll
 * the peer with an RR+P S-frame and arm the monitor timer while waiting
 * for the F-bit response (CONN_WAIT_F). */
3259 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3263 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3266 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3267 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3268 l2cap_send_sframe(chan, control);
3269 chan->retry_count = 1;
3271 __clear_retrans_timer(chan);
3272 __set_monitor_timer(chan);
3274 set_bit(CONN_WAIT_F, &chan->conn_state);
3277 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3278 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3280 BT_DBG("chan %p, Exit local busy", chan);
/* Public hook to toggle local-busy flow control on an ERTM channel;
 * a no-op for other channel modes. */
3283 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3285 if (chan->mode == L2CAP_MODE_ERTM) {
3287 l2cap_ertm_enter_local_busy(chan);
3289 l2cap_ertm_exit_local_busy(chan);
/* After a missing frame arrives, drain the in-order prefix of the SREJ
 * reorder queue: deliver each consecutive frame via reassembly and
 * advance buffer_seq_srej/tx_seq modulo 64. Stops at the first gap or
 * when local-busy is raised; a reassembly error tears the channel down. */
3293 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3295 struct sk_buff *skb;
3298 while ((skb = skb_peek(&chan->srej_q)) &&
3299 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3302 if (bt_cb(skb)->tx_seq != tx_seq)
3305 skb = skb_dequeue(&chan->srej_q);
3306 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3307 err = l2cap_reassemble_sdu(chan, skb, control);
3310 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3314 chan->buffer_seq_srej =
3315 (chan->buffer_seq_srej + 1) % 64;
3316 tx_seq = (tx_seq + 1) % 64;
/* Walk the pending-SREJ list and resend the SREJ S-frame for every
 * entry up to and including tx_seq; re-queue each resent entry at the
 * list tail. NOTE(review): the early-return on match at line 3326 is
 * elided from this view -- behavior inferred, confirm upstream. */
3320 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3322 struct srej_list *l, *tmp;
3325 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3326 if (l->tx_seq == tx_seq) {
3331 control = L2CAP_SUPER_SELECT_REJECT;
3332 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3333 l2cap_send_sframe(chan, control);
3335 list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ S-frame per missing sequence number between
 * expected_tx_seq and the received tx_seq, recording each request on
 * the srej_l list so the retransmissions can be matched later.
 * FIXME(review): the GFP_ATOMIC kzalloc result is dereferenced without
 * a NULL check -- an allocation failure would oops. Upstream later
 * changed this function to fail with -ENOMEM; cannot fix here without
 * the elided caller context. */
3339 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3341 struct srej_list *new;
3344 while (tx_seq != chan->expected_tx_seq) {
3345 control = L2CAP_SUPER_SELECT_REJECT;
3346 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3347 l2cap_send_sframe(chan, control);
3349 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3350 new->tx_seq = chan->expected_tx_seq;
3351 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3352 list_add_tail(&new->list, &chan->srej_l);
/* Skip past the frame that actually arrived. */
3354 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/* Core ERTM I-frame receive path: validates tx_seq against the receive
 * window, handles the F-bit (ends WAIT_F), drops acked frames, manages
 * SREJ-based selective recovery for out-of-order frames, and for
 * in-order frames reassembles the SDU and schedules/sends acks.
 * NOTE(review): many break/goto/return lines are elided in this view;
 * the branch structure below is partially inferred. */
3357 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3359 u8 tx_seq = __get_txseq(rx_control);
3360 u8 req_seq = __get_reqseq(rx_control);
3361 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3362 int tx_seq_offset, expected_tx_seq_offset;
/* Ack after roughly 1/6th of the tx window has been received. */
3363 int num_to_ack = (chan->tx_win/6) + 1;
3366 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3367 tx_seq, rx_control);
/* F-bit answers our earlier poll: stop the monitor timer and leave
 * the WAIT_F state; re-arm retransmission if frames are unacked. */
3369 if (L2CAP_CTRL_FINAL & rx_control &&
3370 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3371 __clear_monitor_timer(chan);
3372 if (chan->unacked_frames > 0)
3373 __set_retrans_timer(chan);
3374 clear_bit(CONN_WAIT_F, &chan->conn_state);
3377 chan->expected_ack_seq = req_seq;
3378 l2cap_drop_acked_frames(chan);
3380 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3381 if (tx_seq_offset < 0)
3382 tx_seq_offset += 64;
3384 /* invalid tx_seq */
3385 if (tx_seq_offset >= chan->tx_win) {
3386 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3390 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3393 if (tx_seq == chan->expected_tx_seq)
/* Out-of-order frame while SREJ recovery is already in progress. */
3396 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3397 struct srej_list *first;
3399 first = list_first_entry(&chan->srej_l,
3400 struct srej_list, list);
3401 if (tx_seq == first->tx_seq) {
3402 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3403 l2cap_check_srej_gap(chan, tx_seq);
3405 list_del(&first->list);
/* All requested retransmissions received: leave SREJ_SENT. */
3408 if (list_empty(&chan->srej_l)) {
3409 chan->buffer_seq = chan->buffer_seq_srej;
3410 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3411 l2cap_send_ack(chan);
3412 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3415 struct srej_list *l;
3417 /* duplicated tx_seq */
3418 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3421 list_for_each_entry(l, &chan->srej_l, list) {
3422 if (l->tx_seq == tx_seq) {
3423 l2cap_resend_srejframe(chan, tx_seq);
3427 l2cap_send_srejframe(chan, tx_seq);
/* First out-of-order frame: decide duplicate vs. new gap. */
3430 expected_tx_seq_offset =
3431 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3432 if (expected_tx_seq_offset < 0)
3433 expected_tx_seq_offset += 64;
3435 /* duplicated tx_seq */
3436 if (tx_seq_offset < expected_tx_seq_offset)
3439 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3441 BT_DBG("chan %p, Enter SREJ", chan);
3443 INIT_LIST_HEAD(&chan->srej_l);
3444 chan->buffer_seq_srej = chan->buffer_seq;
3446 __skb_queue_head_init(&chan->srej_q);
3447 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3449 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3451 l2cap_send_srejframe(chan, tx_seq);
3453 __clear_ack_timer(chan);
/* expected_tx_seq path: in-order delivery. */
3458 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3460 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3461 bt_cb(skb)->tx_seq = tx_seq;
3462 bt_cb(skb)->sar = sar;
3463 __skb_queue_tail(&chan->srej_q, skb);
3467 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3468 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3470 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3474 if (rx_control & L2CAP_CTRL_FINAL) {
3475 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3476 l2cap_retransmit_frames(chan);
3479 __set_ack_timer(chan);
3481 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3482 if (chan->num_acked == num_to_ack - 1)
3483 l2cap_send_ack(chan);
/* Handle a Receiver-Ready (RR) S-frame: ack outstanding frames, then
 * respond to a poll, to an F-bit, or to a plain ack depending on the
 * control bits and the current REMOTE_BUSY/SREJ state. */
3492 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3494 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3497 chan->expected_ack_seq = __get_reqseq(rx_control);
3498 l2cap_drop_acked_frames(chan);
3500 if (rx_control & L2CAP_CTRL_POLL) {
/* Peer polls us: our next frame must carry the F-bit. */
3501 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3502 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3503 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3504 (chan->unacked_frames > 0))
3505 __set_retrans_timer(chan);
3507 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3508 l2cap_send_srejtail(chan);
3510 l2cap_send_i_or_rr_or_rnr(chan);
3513 } else if (rx_control & L2CAP_CTRL_FINAL) {
3514 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3516 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3517 l2cap_retransmit_frames(chan);
/* Plain RR: peer is no longer busy; resume sending. */
3520 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3521 (chan->unacked_frames > 0))
3522 __set_retrans_timer(chan);
3524 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3525 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3526 l2cap_send_ack(chan);
3528 l2cap_ertm_send(chan);
/* Handle a Reject (REJ) S-frame: ack up to req_seq and retransmit the
 * remaining unacked I-frames; with an F-bit the retransmission is
 * skipped if a REJ action was already pending (CONN_REJ_ACT). */
3532 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3534 u8 tx_seq = __get_reqseq(rx_control);
3536 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3538 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3540 chan->expected_ack_seq = tx_seq;
3541 l2cap_drop_acked_frames(chan);
3543 if (rx_control & L2CAP_CTRL_FINAL) {
3544 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3545 l2cap_retransmit_frames(chan);
3547 l2cap_retransmit_frames(chan);
/* Remember a REJ arrived while we wait for the F-bit reply. */
3549 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3550 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a Selective Reject (SREJ) S-frame: retransmit exactly the
 * frame with sequence number req_seq. P-bit additionally acks and
 * requests an F-bit reply; F-bit clears a matching pending SREJ action. */
3553 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3555 u8 tx_seq = __get_reqseq(rx_control);
3557 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3559 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3561 if (rx_control & L2CAP_CTRL_POLL) {
3562 chan->expected_ack_seq = tx_seq;
3563 l2cap_drop_acked_frames(chan);
3565 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3566 l2cap_retransmit_one_frame(chan, tx_seq);
3568 l2cap_ertm_send(chan);
3570 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3571 chan->srej_save_reqseq = tx_seq;
3572 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3574 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* F-bit answering our poll: only clear the saved SREJ action if it
 * matches this req_seq; otherwise retransmit. */
3575 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3576 chan->srej_save_reqseq == tx_seq)
3577 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3579 l2cap_retransmit_one_frame(chan, tx_seq);
3581 l2cap_retransmit_one_frame(chan, tx_seq);
3582 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3583 chan->srej_save_reqseq = tx_seq;
3584 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a Receiver-Not-Ready (RNR) S-frame: mark the peer busy, ack up
 * to req_seq, stop retransmissions, and answer a poll with RR+F or the
 * SREJ tail depending on the SREJ state. */
3589 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3591 u8 tx_seq = __get_reqseq(rx_control);
3593 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3595 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3596 chan->expected_ack_seq = tx_seq;
3597 l2cap_drop_acked_frames(chan);
3599 if (rx_control & L2CAP_CTRL_POLL)
3600 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3602 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3603 __clear_retrans_timer(chan);
3604 if (rx_control & L2CAP_CTRL_POLL)
3605 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3609 if (rx_control & L2CAP_CTRL_POLL)
3610 l2cap_send_srejtail(chan);
3612 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
/* Dispatch a received S-frame (RR/REJ/SREJ/RNR) to its handler after
 * processing a possible F-bit that terminates the WAIT_F poll state.
 * Consumes the skb (free elided in this view). */
3615 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3617 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3619 if (L2CAP_CTRL_FINAL & rx_control &&
3620 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3621 __clear_monitor_timer(chan);
3622 if (chan->unacked_frames > 0)
3623 __set_retrans_timer(chan);
3624 clear_bit(CONN_WAIT_F, &chan->conn_state);
3627 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3628 case L2CAP_SUPER_RCV_READY:
3629 l2cap_data_channel_rrframe(chan, rx_control);
3632 case L2CAP_SUPER_REJECT:
3633 l2cap_data_channel_rejframe(chan, rx_control);
3636 case L2CAP_SUPER_SELECT_REJECT:
3637 l2cap_data_channel_srejframe(chan, rx_control);
3640 case L2CAP_SUPER_RCV_NOT_READY:
3641 l2cap_data_channel_rnrframe(chan, rx_control);
/* Entry point for an ERTM frame on a socket: verify FCS, bound the
 * payload length by MPS, validate req_seq against the unacked window,
 * then route to the I-frame or S-frame handler. Invalid frames cause a
 * disconnect request (ECONNRESET). */
3649 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3651 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3654 int len, next_tx_seq_offset, req_seq_offset;
3656 control = get_unaligned_le16(skb->data);
3661 * We can just drop the corrupted I-frame here.
3662 * Receiver will miss it and start proper recovery
3663 * procedures and ask retransmission.
3665 if (l2cap_check_fcs(chan, skb))
/* len accounting: subtract SAR length field and FCS (lines elided). */
3668 if (__is_sar_start(control) && __is_iframe(control))
3671 if (chan->fcs == L2CAP_FCS_CRC16)
3674 if (len > chan->mps) {
3675 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3679 req_seq = __get_reqseq(control);
3680 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3681 if (req_seq_offset < 0)
3682 req_seq_offset += 64;
3684 next_tx_seq_offset =
3685 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3686 if (next_tx_seq_offset < 0)
3687 next_tx_seq_offset += 64;
3689 /* check for invalid req-seq */
3690 if (req_seq_offset > next_tx_seq_offset) {
3691 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3695 if (__is_iframe(control)) {
/* I-frame with zero payload length is invalid (condition elided). */
3697 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3701 l2cap_data_channel_iframe(chan, control, skb);
/* S-frame must carry no payload (condition elided). */
3705 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3709 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver an inbound data frame to the channel identified by scid,
 * according to the channel mode: basic (direct recv with MTU check),
 * ERTM (socket-lock aware, backlogged if owned by user), or streaming
 * (FCS check, discard on sequence gap, reassemble). */
3719 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3721 struct l2cap_chan *chan;
3722 struct sock *sk = NULL;
3727 chan = l2cap_get_chan_by_scid(conn, cid);
3729 BT_DBG("unknown cid 0x%4.4x", cid);
3735 BT_DBG("chan %p, len %d", chan, skb->len);
3737 if (chan->state != BT_CONNECTED)
3740 switch (chan->mode) {
3741 case L2CAP_MODE_BASIC:
3742 /* If socket recv buffers overflows we drop data here
3743 * which is *bad* because L2CAP has to be reliable.
3744 * But we don't have any other choice. L2CAP doesn't
3745 * provide flow control mechanism. */
3747 if (chan->imtu < skb->len)
3750 if (!chan->ops->recv(chan->data, skb))
3754 case L2CAP_MODE_ERTM:
/* Process inline unless the socket is held by a user context, in
 * which case queue on the backlog for later processing. */
3755 if (!sock_owned_by_user(sk)) {
3756 l2cap_ertm_data_rcv(sk, skb);
3758 if (sk_add_backlog(sk, skb))
3764 case L2CAP_MODE_STREAMING:
3765 control = get_unaligned_le16(skb->data);
3769 if (l2cap_check_fcs(chan, skb))
3772 if (__is_sar_start(control))
3775 if (chan->fcs == L2CAP_FCS_CRC16)
3778 if (len > chan->mps || len < 0 || __is_sframe(control))
3781 tx_seq = __get_txseq(control);
3783 if (chan->expected_tx_seq != tx_seq) {
3784 /* Frame(s) missing - must discard partial SDU */
3785 kfree_skb(chan->sdu);
3787 chan->sdu_last_frag = NULL;
3790 /* TODO: Notify userland of missing data */
3793 chan->expected_tx_seq = (tx_seq + 1) % 64;
3795 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
3796 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3801 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless-channel frame to the global channel listening
 * on the given PSM (source address matched); MTU-checked before recv. */
3815 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3817 struct sock *sk = NULL;
3818 struct l2cap_chan *chan;
3820 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3828 BT_DBG("sk %p, len %d", sk, skb->len);
3830 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3833 if (chan->imtu < skb->len)
3836 if (!chan->ops->recv(chan->data, skb))
/* Deliver an LE ATT fixed-channel frame to the global channel bound to
 * this scid/source address; same MTU/state checks as connectionless. */
3848 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3850 struct sock *sk = NULL;
3851 struct l2cap_chan *chan;
3853 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3861 BT_DBG("sk %p, len %d", sk, skb->len);
3863 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3866 if (chan->imtu < skb->len)
3869 if (!chan->ops->recv(chan->data, skb))
/* Demultiplex a complete L2CAP frame by CID: signaling channels,
 * connectionless, LE ATT, SMP security manager, or a regular data
 * channel. The basic header length must match the skb payload. */
3881 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3883 struct l2cap_hdr *lh = (void *) skb->data;
3887 skb_pull(skb, L2CAP_HDR_SIZE);
3888 cid = __le16_to_cpu(lh->cid);
3889 len = __le16_to_cpu(lh->len);
3891 if (len != skb->len) {
3896 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3899 case L2CAP_CID_LE_SIGNALING:
3900 case L2CAP_CID_SIGNALING:
3901 l2cap_sig_channel(conn, skb);
3904 case L2CAP_CID_CONN_LESS:
3905 psm = get_unaligned_le16(skb->data);
3907 l2cap_conless_channel(conn, psm, skb);
3910 case L2CAP_CID_LE_DATA:
3911 l2cap_att_channel(conn, cid, skb);
/* SMP CID (case label elided): a failed SMP exchange kills the link. */
3915 if (smp_sig_channel(conn, skb))
3916 l2cap_conn_del(conn->hcon, EACCES);
3920 l2cap_data_channel(conn, cid, skb);
3925 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection.
 * Scans listening channels; an exact local-address match (lm1) takes
 * precedence over BDADDR_ANY wildcard listeners (lm2). Returns the
 * HCI_LM_* accept/master mask, 0 to reject. */
3927 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3929 int exact = 0, lm1 = 0, lm2 = 0;
3930 struct l2cap_chan *c;
3932 if (type != ACL_LINK)
3935 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3937 /* Find listening sockets and check their link_mode */
3938 read_lock(&chan_list_lock);
3939 list_for_each_entry(c, &chan_list, global_l) {
3940 struct sock *sk = c->sk;
3942 if (c->state != BT_LISTEN)
3945 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3946 lm1 |= HCI_LM_ACCEPT;
3948 lm1 |= HCI_LM_MASTER;
3950 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3951 lm2 |= HCI_LM_ACCEPT;
3953 lm2 |= HCI_LM_MASTER;
3956 read_unlock(&chan_list_lock);
3958 return exact ? lm1 : lm2;
/* HCI callback: an ACL/LE connection attempt completed. On success set
 * up the l2cap_conn and mark it ready; on failure tear it down with the
 * mapped errno. */
3961 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3963 struct l2cap_conn *conn;
3965 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3967 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3971 conn = l2cap_conn_add(hcon, status);
3973 l2cap_conn_ready(conn);
3975 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the L2CAP-level reason to use for an imminent
 * disconnection of this link. */
3980 static int l2cap_disconn_ind(struct hci_conn *hcon)
3982 struct l2cap_conn *conn = hcon->l2cap_data;
3984 BT_DBG("hcon %p", hcon);
3986 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
3989 return conn->disc_reason;
/* HCI callback: the link went down; destroy the L2CAP connection with
 * the HCI reason translated to an errno. */
3992 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3994 BT_DBG("hcon %p reason %d", hcon, reason);
3996 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3999 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption arms a grace timer for MEDIUM security and closes
 * HIGH-security channels outright; regaining it cancels the timer. */
4004 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4006 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4009 if (encrypt == 0x00) {
4010 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4011 __clear_chan_timer(chan);
4012 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4013 } else if (chan->sec_level == BT_SECURITY_HIGH)
4014 l2cap_chan_close(chan, ECONNREFUSED);
4016 if (chan->sec_level == BT_SECURITY_MEDIUM)
4017 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption completed on the link. For
 * LE, finish SMP key distribution. For each channel on the connection,
 * either finish a pending security-gated connect (BT_CONNECT sends the
 * CONN_REQ, BT_CONNECT2 answers the pending CONN_RSP) or propagate the
 * encryption change. NOTE(review): several branch/continue lines are
 * elided; the per-channel flow below is partially inferred. */
4021 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4023 struct l2cap_conn *conn = hcon->l2cap_data;
4024 struct l2cap_chan *chan;
4029 BT_DBG("conn %p", conn);
4031 if (hcon->type == LE_LINK) {
4032 smp_distribute_keys(conn, 0);
4033 del_timer(&conn->security_timer);
4036 read_lock(&conn->chan_lock);
4038 list_for_each_entry(chan, &conn->chan_l, list) {
4039 struct sock *sk = chan->sk;
4043 BT_DBG("chan->scid %d", chan->scid);
4045 if (chan->scid == L2CAP_CID_LE_DATA) {
4046 if (!status && encrypt) {
4047 chan->sec_level = hcon->sec_level;
4048 l2cap_chan_ready(sk);
4055 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4060 if (!status && (chan->state == BT_CONNECTED ||
4061 chan->state == BT_CONFIG)) {
4062 l2cap_check_encryption(chan, encrypt);
4067 if (chan->state == BT_CONNECT) {
/* Security now satisfied: issue the deferred connection request. */
4069 struct l2cap_conn_req req;
4070 req.scid = cpu_to_le16(chan->scid);
4071 req.psm = chan->psm;
4073 chan->ident = l2cap_get_ident(conn);
4074 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4076 l2cap_send_cmd(conn, chan->ident,
4077 L2CAP_CONN_REQ, sizeof(req), &req);
4079 __clear_chan_timer(chan);
4080 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4082 } else if (chan->state == BT_CONNECT2) {
4083 struct l2cap_conn_rsp rsp;
4087 if (bt_sk(sk)->defer_setup) {
4088 struct sock *parent = bt_sk(sk)->parent;
4089 res = L2CAP_CR_PEND;
4090 stat = L2CAP_CS_AUTHOR_PEND;
4092 parent->sk_data_ready(parent, 0);
4094 l2cap_state_change(chan, BT_CONFIG);
4095 res = L2CAP_CR_SUCCESS;
4096 stat = L2CAP_CS_NO_INFO;
/* Security failure path (condition elided): block the connection. */
4099 l2cap_state_change(chan, BT_DISCONN);
4100 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4101 res = L2CAP_CR_SEC_BLOCK;
4102 stat = L2CAP_CS_NO_INFO;
4105 rsp.scid = cpu_to_le16(chan->dcid);
4106 rsp.dcid = cpu_to_le16(chan->scid);
4107 rsp.result = cpu_to_le16(res);
4108 rsp.status = cpu_to_le16(stat);
4109 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4116 read_unlock(&conn->chan_lock);
/* HCI callback: reassemble ACL fragments into complete L2CAP frames.
 * A start fragment (ACL_CONT clear) carries the basic header giving the
 * total length; continuation fragments are appended into conn->rx_skb
 * until rx_len reaches zero, then the frame is dispatched. Any framing
 * violation marks the connection unreliable (ECOMM). */
4121 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4123 struct l2cap_conn *conn = hcon->l2cap_data;
4126 conn = l2cap_conn_add(hcon, 0);
4131 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4133 if (!(flags & ACL_CONT)) {
4134 struct l2cap_hdr *hdr;
4135 struct l2cap_chan *chan;
/* A start fragment while a partial frame is pending: drop the stale
 * reassembly buffer. */
4140 BT_ERR("Unexpected start frame (len %d)", skb->len);
4141 kfree_skb(conn->rx_skb);
4142 conn->rx_skb = NULL;
4144 l2cap_conn_unreliable(conn, ECOMM);
4147 /* Start fragment always begin with Basic L2CAP header */
4148 if (skb->len < L2CAP_HDR_SIZE) {
4149 BT_ERR("Frame is too short (len %d)", skb->len);
4150 l2cap_conn_unreliable(conn, ECOMM);
4154 hdr = (struct l2cap_hdr *) skb->data;
4155 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4156 cid = __le16_to_cpu(hdr->cid);
4158 if (len == skb->len) {
4159 /* Complete frame received */
4160 l2cap_recv_frame(conn, skb);
4164 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4166 if (skb->len > len) {
4167 BT_ERR("Frame is too long (len %d, expected len %d)",
4169 l2cap_conn_unreliable(conn, ECOMM);
/* Early MTU check against the destination channel so we do not
 * allocate/assemble a frame the channel would reject anyway. */
4173 chan = l2cap_get_chan_by_scid(conn, cid);
4175 if (chan && chan->sk) {
4176 struct sock *sk = chan->sk;
4178 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4179 BT_ERR("Frame exceeding recv MTU (len %d, "
4183 l2cap_conn_unreliable(conn, ECOMM);
4189 /* Allocate skb for the complete frame (with header) */
4190 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4194 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4196 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4198 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4200 if (!conn->rx_len) {
4201 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4202 l2cap_conn_unreliable(conn, ECOMM);
4206 if (skb->len > conn->rx_len) {
4207 BT_ERR("Fragment is too long (len %d, expected %d)",
4208 skb->len, conn->rx_len);
4209 kfree_skb(conn->rx_skb);
4210 conn->rx_skb = NULL;
4212 l2cap_conn_unreliable(conn, ECOMM);
4216 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4218 conn->rx_len -= skb->len;
4220 if (!conn->rx_len) {
4221 /* Complete frame received */
4222 l2cap_recv_frame(conn, conn->rx_skb);
4223 conn->rx_skb = NULL;
/* debugfs seq_file show: dump one line per registered channel with
 * addresses, state, PSM, CIDs, MTUs, security level and mode. */
4232 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4234 struct l2cap_chan *c;
4236 read_lock_bh(&chan_list_lock);
4238 list_for_each_entry(c, &chan_list, global_l) {
4239 struct sock *sk = c->sk;
4241 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4242 batostr(&bt_sk(sk)->src),
4243 batostr(&bt_sk(sk)->dst),
4244 c->state, __le16_to_cpu(c->psm),
4245 c->scid, c->dcid, c->imtu, c->omtu,
4246 c->sec_level, c->mode);
4249 read_unlock_bh(&chan_list_lock);
/* debugfs open: standard single_open wrapper for the show routine. */
4254 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4256 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (seq_file based). */
4259 static const struct file_operations l2cap_debugfs_fops = {
4260 .open = l2cap_debugfs_open,
4262 .llseek = seq_lseek,
4263 .release = single_release,
/* debugfs dentry for the "l2cap" file, created in l2cap_init(). */
4266 static struct dentry *l2cap_debugfs;
/* Registration record hooking L2CAP into the HCI core callbacks. */
4268 static struct hci_proto l2cap_hci_proto = {
4270 .id = HCI_PROTO_L2CAP,
4271 .connect_ind = l2cap_connect_ind,
4272 .connect_cfm = l2cap_connect_cfm,
4273 .disconn_ind = l2cap_disconn_ind,
4274 .disconn_cfm = l2cap_disconn_cfm,
4275 .security_cfm = l2cap_security_cfm,
4276 .recv_acldata = l2cap_recv_acldata
/* Module init: register the socket layer, then the HCI protocol, then
 * create the optional debugfs file. Socket registration is rolled back
 * if protocol registration fails. */
4279 int __init l2cap_init(void)
4283 err = l2cap_init_sockets();
4287 err = hci_register_proto(&l2cap_hci_proto);
4289 BT_ERR("L2CAP protocol registration failed");
4290 bt_sock_unregister(BTPROTO_L2CAP);
4295 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4296 bt_debugfs, NULL, &l2cap_debugfs_fops);
4298 BT_ERR("Failed to create L2CAP debug file");
4304 l2cap_cleanup_sockets();
/* Module exit: tear down in reverse order of l2cap_init(). */
4308 void l2cap_exit(void)
4310 debugfs_remove(l2cap_debugfs);
4312 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4313 BT_ERR("L2CAP protocol unregistration failed");
4315 l2cap_cleanup_sockets();
4318 module_param(disable_ertm, bool, 0644);
4319 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");