2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
/* File-scope state: advertised L2CAP feature mask, fixed-channel bitmap
 * (0x02 = L2CAP signalling channel supported), and the global channel list
 * guarded by chan_list_lock. */
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
64 static LIST_HEAD(chan_list);
65 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for signalling helpers defined later in the file.
 * NOTE(review): some continuation lines are elided in this excerpt. */
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
69 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77 /* ---- L2CAP channels ---- */
/* Take a reference on a channel; paired with chan_put(). */
79 static inline void chan_hold(struct l2cap_chan *c)
81 atomic_inc(&c->refcnt);
/* Drop a channel reference; frees the channel when the count reaches zero
 * (the free call itself is elided in this excerpt — confirm against full source). */
84 static inline void chan_put(struct l2cap_chan *c)
86 if (atomic_dec_and_test(&c->refcnt))
/* Look up a channel on @conn by destination CID.
 * Caller must hold conn->chan_lock. */
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
94 list_for_each_entry(c, &conn->chan_l, list) {
/* Look up a channel on @conn by source CID.
 * Caller must hold conn->chan_lock. */
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104 struct l2cap_chan *c;
106 list_for_each_entry(c, &conn->chan_l, list) {
113 /* Find channel with given SCID.
114 * Returns locked socket */
/* Locked wrapper around __l2cap_get_chan_by_scid(): takes the read side of
 * chan_lock for the lookup; the socket-locking step is elided in this excerpt. */
115 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
117 struct l2cap_chan *c;
119 read_lock(&conn->chan_lock);
120 c = __l2cap_get_chan_by_scid(conn, cid);
123 read_unlock(&conn->chan_lock);
/* Look up a channel on @conn by pending signalling-command identifier.
 * Caller must hold conn->chan_lock. */
127 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
129 struct l2cap_chan *c;
131 list_for_each_entry(c, &conn->chan_l, list) {
132 if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
138 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
140 struct l2cap_chan *c;
142 read_lock(&conn->chan_lock);
143 c = __l2cap_get_chan_by_ident(conn, ident);
146 read_unlock(&conn->chan_lock);
/* Find a channel in the global list bound to @psm on source address @src.
 * Caller must hold chan_list_lock. Note the comparison is against c->sport
 * (the locally bound PSM), not c->psm. */
150 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
152 struct l2cap_chan *c;
154 list_for_each_entry(c, &chan_list, global_l) {
155 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to @psm on @src. If @psm is non-zero it must not already be
 * bound on this address; if zero, auto-allocate an odd dynamic PSM from the
 * 0x1001..0x10ff range (valid PSMs have an odd least-significant octet).
 * Returns 0 on success, negative errno otherwise (error paths elided here). */
164 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
168 write_lock_bh(&chan_list_lock);
170 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
183 for (p = 0x1001; p < 0x1100; p += 2)
184 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
185 chan->psm = cpu_to_le16(p);
186 chan->sport = cpu_to_le16(p);
193 write_unlock_bh(&chan_list_lock);
/* Assign a fixed source CID to @chan under chan_list_lock
 * (the assignment line is elided in this excerpt). */
197 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
199 write_lock_bh(&chan_list_lock);
203 write_unlock_bh(&chan_list_lock);
/* Allocate the first unused dynamic source CID on @conn by linear scan of
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). Exhaustion path elided here. */
208 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
210 u16 cid = L2CAP_CID_DYN_START;
212 for (; cid < L2CAP_CID_DYN_END; cid++) {
213 if (!__l2cap_get_chan_by_scid(conn, cid))
/* (Re)arm @timer for @timeout ms; a fresh arm (not a re-arm) also takes a
 * channel reference (the chan_hold() call is elided in this excerpt).
 * NOTE(review): the debug line prints chan->sk under the label "chan" —
 * looks like it should print chan itself; confirm against upstream. */
220 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
222 BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
224 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
/* Cancel @timer if pending; a successful del_timer() drops the reference
 * taken when it was armed (chan_put() call elided in this excerpt). */
228 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
230 BT_DBG("chan %p state %d", chan, chan->state);
232 if (timer_pending(timer) && del_timer(timer))
/* Move @chan to @state and notify the channel owner via its ops callback
 * (the chan->state assignment is elided in this excerpt). */
236 static void l2cap_state_change(struct l2cap_chan *chan, int state)
239 chan->ops->state_change(chan->data, state);
/* Channel timer callback (runs in timer/softirq context): tears down the
 * channel with an error code chosen from its state. If the socket is owned
 * by user context, just re-arm with the disconnect timeout and retry later. */
242 static void l2cap_chan_timeout(unsigned long arg)
244 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
245 struct sock *sk = chan->sk;
248 BT_DBG("chan %p state %d", chan, chan->state);
252 if (sock_owned_by_user(sk)) {
253 /* sk is owned by user. Try again later */
254 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Established/configuring channels, and connecting channels past the SDP
 * security level, report ECONNREFUSED; other states (elided) use ETIMEDOUT. */
260 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
261 reason = ECONNREFUSED;
262 else if (chan->state == BT_CONNECT &&
263 chan->sec_level != BT_SECURITY_SDP)
264 reason = ECONNREFUSED;
268 l2cap_chan_close(chan, reason);
272 chan->ops->close(chan->data);
/* Allocate and initialise a new channel bound to socket @sk: link it into
 * the global list, arm-able chan_timer, state BT_OPEN, refcount 1.
 * Returns NULL on allocation failure (check elided in this excerpt). */
276 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
278 struct l2cap_chan *chan;
280 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
286 write_lock_bh(&chan_list_lock);
287 list_add(&chan->global_l, &chan_list);
288 write_unlock_bh(&chan_list_lock);
290 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
292 chan->state = BT_OPEN;
294 atomic_set(&chan->refcnt, 1);
/* Unlink @chan from the global list and drop the creation reference
 * (the final chan_put() is elided in this excerpt). */
299 void l2cap_chan_destroy(struct l2cap_chan *chan)
301 write_lock_bh(&chan_list_lock);
302 list_del(&chan->global_l);
303 write_unlock_bh(&chan_list_lock);
/* Attach @chan to @conn and assign CIDs/MTU according to channel type:
 * connection-oriented channels get a dynamic CID (or the fixed LE data CID
 * on LE links), connectionless and raw channels use their fixed CIDs.
 * Caller must hold conn->chan_lock (write side). */
308 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
310 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
311 chan->psm, chan->dcid);
/* 0x13 = HCI "Remote User Terminated Connection" default disconnect reason */
313 conn->disc_reason = 0x13;
317 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
318 if (conn->hcon->type == LE_LINK) {
320 chan->omtu = L2CAP_LE_DEFAULT_MTU;
321 chan->scid = L2CAP_CID_LE_DATA;
322 chan->dcid = L2CAP_CID_LE_DATA;
324 /* Alloc CID for connection-oriented socket */
325 chan->scid = l2cap_alloc_cid(conn);
326 chan->omtu = L2CAP_DEFAULT_MTU;
328 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
329 /* Connectionless socket */
330 chan->scid = L2CAP_CID_CONN_LESS;
331 chan->dcid = L2CAP_CID_CONN_LESS;
332 chan->omtu = L2CAP_DEFAULT_MTU;
334 /* Raw socket can send/recv signalling messages only */
335 chan->scid = L2CAP_CID_SIGNALING;
336 chan->dcid = L2CAP_CID_SIGNALING;
337 chan->omtu = L2CAP_DEFAULT_MTU;
342 list_add(&chan->list, &conn->chan_l);
/* Detach @chan from its connection and mark the socket closed:
 * stop the channel timer, unlink from the connection list, drop the
 * hci_conn reference, wake the parent (for accepted children) or the
 * socket itself, then purge ERTM state if the mode requires it. */
346 * Must be called on the locked socket. */
347 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
349 struct sock *sk = chan->sk;
350 struct l2cap_conn *conn = chan->conn;
351 struct sock *parent = bt_sk(sk)->parent;
353 __clear_chan_timer(chan);
355 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
358 /* Delete from channel list */
359 write_lock_bh(&conn->chan_lock);
360 list_del(&chan->list);
361 write_unlock_bh(&conn->chan_lock);
365 hci_conn_put(conn->hcon);
368 l2cap_state_change(chan, BT_CLOSED);
369 sock_set_flag(sk, SOCK_ZAPPED);
/* Child of a listening socket: unlink from the accept queue and wake the
 * listener; otherwise wake this socket's own waiters. */
375 bt_accept_unlink(sk);
376 parent->sk_data_ready(parent, 0);
378 sk->sk_state_change(sk);
/* Skip ERTM cleanup when configuration never completed in both directions. */
380 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
381 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
384 skb_queue_purge(&chan->tx_q);
386 if (chan->mode == L2CAP_MODE_ERTM) {
387 struct srej_list *l, *tmp;
389 __clear_retrans_timer(chan);
390 __clear_monitor_timer(chan);
391 __clear_ack_timer(chan);
393 skb_queue_purge(&chan->srej_q);
/* Free any outstanding SREJ bookkeeping entries (kfree elided here). */
395 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every not-yet-accepted child channel of listening socket @parent
 * with ECONNRESET. bt_accept_dequeue() returns each pending child in turn. */
402 static void l2cap_chan_cleanup_listen(struct sock *parent)
406 BT_DBG("parent %p", parent);
408 /* Close not yet accepted channels */
409 while ((sk = bt_accept_dequeue(parent, NULL))) {
410 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
411 __clear_chan_timer(chan);
413 l2cap_chan_close(chan, ECONNRESET);
415 chan->ops->close(chan->data);
/* State-machine driven channel shutdown.
 * BT_LISTEN: close pending children and zap the socket.
 * BT_CONNECTED/BT_CONFIG on ACL: send a Disconnect Request and wait
 * (timer re-armed with sk_sndtimeo); otherwise tear down immediately.
 * BT_CONNECT2 on ACL: answer the pending Connect Request with a reject
 * (security-block if setup was deferred, bad-PSM otherwise) then delete.
 * BT_CONNECT/BT_DISCONN: delete directly; any other state just zaps. */
419 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
421 struct l2cap_conn *conn = chan->conn;
422 struct sock *sk = chan->sk;
424 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
426 switch (chan->state) {
428 l2cap_chan_cleanup_listen(sk);
430 l2cap_state_change(chan, BT_CLOSED);
431 sock_set_flag(sk, SOCK_ZAPPED);
436 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
437 conn->hcon->type == ACL_LINK) {
438 __clear_chan_timer(chan);
439 __set_chan_timer(chan, sk->sk_sndtimeo);
440 l2cap_send_disconn_req(conn, chan, reason);
442 l2cap_chan_del(chan, reason);
446 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
447 conn->hcon->type == ACL_LINK) {
448 struct l2cap_conn_rsp rsp;
451 if (bt_sk(sk)->defer_setup)
452 result = L2CAP_CR_SEC_BLOCK;
454 result = L2CAP_CR_BAD_PSM;
455 l2cap_state_change(chan, BT_DISCONN);
/* Response carries our dcid/scid swapped into the peer's view. */
457 rsp.scid = cpu_to_le16(chan->dcid);
458 rsp.dcid = cpu_to_le16(chan->scid);
459 rsp.result = cpu_to_le16(result);
460 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
461 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
465 l2cap_chan_del(chan, reason);
470 l2cap_chan_del(chan, reason);
474 sock_set_flag(sk, SOCK_ZAPPED);
/* Map the channel type and security level to an HCI authentication
 * requirement. Raw channels request dedicated bonding, PSM 0x0001 (SDP)
 * never requires bonding (and LOW is promoted to the SDP level), and all
 * other channels request general bonding as dictated by sec_level. */
479 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
481 if (chan->chan_type == L2CAP_CHAN_RAW) {
482 switch (chan->sec_level) {
483 case BT_SECURITY_HIGH:
484 return HCI_AT_DEDICATED_BONDING_MITM;
485 case BT_SECURITY_MEDIUM:
486 return HCI_AT_DEDICATED_BONDING;
488 return HCI_AT_NO_BONDING;
490 } else if (chan->psm == cpu_to_le16(0x0001)) {
491 if (chan->sec_level == BT_SECURITY_LOW)
492 chan->sec_level = BT_SECURITY_SDP;
494 if (chan->sec_level == BT_SECURITY_HIGH)
495 return HCI_AT_NO_BONDING_MITM;
497 return HCI_AT_NO_BONDING;
499 switch (chan->sec_level) {
500 case BT_SECURITY_HIGH:
501 return HCI_AT_GENERAL_BONDING_MITM;
502 case BT_SECURITY_MEDIUM:
503 return HCI_AT_GENERAL_BONDING;
505 return HCI_AT_NO_BONDING;
510 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level on the
 * underlying link; returns the hci_conn_security() result. */
511 static inline int l2cap_check_security(struct l2cap_chan *chan)
513 struct l2cap_conn *conn = chan->conn;
516 auth_type = l2cap_get_auth_type(chan);
518 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for @conn under
 * conn->lock; values wrap within the kernel range 1..128 (the wrap
 * assignment is elided in this excerpt). */
521 static u8 l2cap_get_ident(struct l2cap_conn *conn)
525 /* Get next available identificator.
526 * 1 - 128 are used by kernel.
527 * 129 - 199 are reserved.
528 * 200 - 254 are used by utilities like l2ping, etc.
531 spin_lock_bh(&conn->lock);
533 if (++conn->tx_ident > 128)
538 spin_unlock_bh(&conn->lock);
/* Build a signalling PDU via l2cap_build_cmd() and queue it on the ACL
 * link, using the no-flush start flag when the controller supports it. */
543 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
545 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
548 BT_DBG("code 0x%2.2x", code);
553 if (lmp_no_flush_capable(conn->hcon->hdev))
554 flags = ACL_START_NO_FLUSH;
/* Signalling always forces the link into active mode. */
558 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
560 hci_send_acl(conn->hcon, skb, flags);
/* Build and transmit an ERTM S-frame carrying @control on a connected
 * channel. Folds in the pending Final/Poll bits, appends an FCS when
 * CRC16 checking is negotiated, and honours the controller's no-flush
 * capability. hlen = L2CAP header + 16-bit control field. */
563 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
566 struct l2cap_hdr *lh;
567 struct l2cap_conn *conn = chan->conn;
568 int count, hlen = L2CAP_HDR_SIZE + 2;
571 if (chan->state != BT_CONNECTED)
/* FCS adds two bytes to hlen (increment elided in this excerpt). */
574 if (chan->fcs == L2CAP_FCS_CRC16)
577 BT_DBG("chan %p, control 0x%2.2x", chan, control);
579 count = min_t(unsigned int, conn->mtu, hlen);
580 control |= L2CAP_CTRL_FRAME_TYPE;
582 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
583 control |= L2CAP_CTRL_FINAL;
585 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
586 control |= L2CAP_CTRL_POLL;
588 skb = bt_skb_alloc(count, GFP_ATOMIC);
592 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
593 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
594 lh->cid = cpu_to_le16(chan->dcid);
595 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything before the FCS field itself. */
597 if (chan->fcs == L2CAP_FCS_CRC16) {
598 u16 fcs = crc16(0, (u8 *)lh, count - 2);
599 put_unaligned_le16(fcs, skb_put(skb, 2));
602 if (lmp_no_flush_capable(conn->hcon->hdev))
603 flags = ACL_START_NO_FLUSH;
607 bt_cb(skb)->force_active = chan->force_active;
609 hci_send_acl(chan->conn->hcon, skb, flags);
/* Send a Receiver-Ready, or Receiver-Not-Ready when we are locally busy
 * (remembering that an RNR was sent), acknowledging buffer_seq. */
612 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
614 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
615 control |= L2CAP_SUPER_RCV_NOT_READY;
616 set_bit(CONN_RNR_SENT, &chan->conn_state);
618 control |= L2CAP_SUPER_RCV_READY;
620 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
622 l2cap_send_sframe(chan, control);
/* True when no Connect Request is outstanding for this channel. */
625 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
627 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Kick off channel establishment. If the peer's feature mask is already
 * known (or being fetched), send a Connect Request once security allows;
 * otherwise first issue an Information Request for the feature mask and
 * arm the info timer. */
630 static void l2cap_do_start(struct l2cap_chan *chan)
632 struct l2cap_conn *conn = chan->conn;
634 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
635 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
638 if (l2cap_check_security(chan) &&
639 __l2cap_no_conn_pending(chan)) {
640 struct l2cap_conn_req req;
641 req.scid = cpu_to_le16(chan->scid);
644 chan->ident = l2cap_get_ident(conn);
645 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
647 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
651 struct l2cap_info_req req;
652 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
654 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
655 conn->info_ident = l2cap_get_ident(conn);
657 mod_timer(&conn->info_timer, jiffies +
658 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
660 l2cap_send_cmd(conn, conn->info_ident,
661 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether @mode is supported both locally and by the peer's
 * advertised @feat_mask. ERTM/streaming bits are folded into the local
 * mask (conditional compilation context elided in this excerpt). */
665 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
667 u32 local_feat_mask = l2cap_feat_mask;
669 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
672 case L2CAP_MODE_ERTM:
673 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
674 case L2CAP_MODE_STREAMING:
675 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect Request for @chan and move it to BT_DISCONN,
 * cancelling all ERTM timers first so no retransmission fires mid-teardown.
 * @err is propagated to the socket (sk_err assignment elided here). */
681 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
684 struct l2cap_disconn_req req;
691 if (chan->mode == L2CAP_MODE_ERTM) {
692 __clear_retrans_timer(chan);
693 __clear_monitor_timer(chan);
694 __clear_ack_timer(chan);
697 req.dcid = cpu_to_le16(chan->dcid);
698 req.scid = cpu_to_le16(chan->scid);
699 l2cap_send_cmd(conn, l2cap_get_ident(conn),
700 L2CAP_DISCONN_REQ, sizeof(req), &req);
702 l2cap_state_change(chan, BT_DISCONN);
706 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel on @conn forward once the
 * feature-mask exchange has completed: BT_CONNECT channels get a Connect
 * Request (or are closed if their mode is unsupported), BT_CONNECT2
 * channels get a Connect Response whose result depends on security state
 * and deferred-setup, followed by the first Configure Request. */
707 static void l2cap_conn_start(struct l2cap_conn *conn)
709 struct l2cap_chan *chan, *tmp;
711 BT_DBG("conn %p", conn);
713 read_lock(&conn->chan_lock);
715 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
716 struct sock *sk = chan->sk;
720 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
725 if (chan->state == BT_CONNECT) {
726 struct l2cap_conn_req req;
728 if (!l2cap_check_security(chan) ||
729 !__l2cap_no_conn_pending(chan)) {
/* Unsupported mode on a state-2 device: abort this channel. */
734 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
735 && test_bit(CONF_STATE2_DEVICE,
736 &chan->conf_state)) {
737 /* l2cap_chan_close() calls list_del(chan)
738 * so release the lock */
739 read_unlock(&conn->chan_lock);
740 l2cap_chan_close(chan, ECONNRESET);
741 read_lock(&conn->chan_lock);
746 req.scid = cpu_to_le16(chan->scid);
749 chan->ident = l2cap_get_ident(conn);
750 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
752 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
755 } else if (chan->state == BT_CONNECT2) {
756 struct l2cap_conn_rsp rsp;
758 rsp.scid = cpu_to_le16(chan->dcid);
759 rsp.dcid = cpu_to_le16(chan->scid);
761 if (l2cap_check_security(chan)) {
762 if (bt_sk(sk)->defer_setup) {
763 struct sock *parent = bt_sk(sk)->parent;
764 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
765 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
767 parent->sk_data_ready(parent, 0);
770 l2cap_state_change(chan, BT_CONFIG);
771 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
772 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
775 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
776 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
779 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only a successful, not-yet-configured channel proceeds to config. */
782 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
783 rsp.result != L2CAP_CR_SUCCESS) {
788 set_bit(CONF_REQ_SENT, &chan->conf_state);
789 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
790 l2cap_build_conf_req(chan, buf), buf);
791 chan->num_conf_req++;
797 read_unlock(&conn->chan_lock);
800 /* Find socket with cid and source bdaddr.
801 * Returns closest match, locked.
/* Global lookup by fixed SCID: an exact source-address match returns
 * immediately; a BDADDR_ANY listener is remembered as the closest match
 * (returned via the c1 fallback, elided in this excerpt). */
803 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
805 struct l2cap_chan *c, *c1 = NULL;
807 read_lock(&chan_list_lock);
809 list_for_each_entry(c, &chan_list, global_l) {
810 struct sock *sk = c->sk;
812 if (state && c->state != state)
815 if (c->scid == cid) {
817 if (!bacmp(&bt_sk(sk)->src, src)) {
818 read_unlock(&chan_list_lock);
823 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
828 read_unlock(&chan_list_lock);
/* Incoming LE link: if a socket is listening on the LE data CID, spawn a
 * child channel, attach it to @conn, enqueue it on the listener's accept
 * queue and mark it connected. Backlog overflow aborts the setup. */
833 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
835 struct sock *parent, *sk;
836 struct l2cap_chan *chan, *pchan;
840 /* Check if we have socket listening on cid */
841 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
848 bh_lock_sock(parent);
850 /* Check for backlog size */
851 if (sk_acceptq_is_full(parent)) {
852 BT_DBG("backlog full %d", parent->sk_ack_backlog);
856 chan = pchan->ops->new_connection(pchan->data);
862 write_lock_bh(&conn->chan_lock);
864 hci_conn_hold(conn->hcon);
865 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
867 bacpy(&bt_sk(sk)->src, conn->src);
868 bacpy(&bt_sk(sk)->dst, conn->dst);
870 bt_accept_enqueue(parent, sk);
872 __l2cap_chan_add(conn, chan);
874 __set_chan_timer(chan, sk->sk_sndtimeo);
876 l2cap_state_change(chan, BT_CONNECTED);
877 parent->sk_data_ready(parent, 0);
879 write_unlock_bh(&conn->chan_lock);
882 bh_unlock_sock(parent);
/* Mark @sk's channel fully connected: reset configuration state, stop the
 * channel timer, and wake either the listening parent or the socket. */
885 static void l2cap_chan_ready(struct sock *sk)
887 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
888 struct sock *parent = bt_sk(sk)->parent;
890 BT_DBG("sk %p, parent %p", sk, parent);
/* Clearing conf_state drops all CONF_* negotiation flags at once. */
892 chan->conf_state = 0;
893 __clear_chan_timer(chan);
895 l2cap_state_change(chan, BT_CONNECTED);
896 sk->sk_state_change(sk);
899 parent->sk_data_ready(parent, 0);
/* The HCI link is up: handle incoming LE links, start SMP security on
 * outgoing LE links, then walk every channel — LE channels become ready
 * once SMP passes, non-connection-oriented channels become connected
 * immediately, and BT_CONNECT channels continue via l2cap_do_start(). */
902 static void l2cap_conn_ready(struct l2cap_conn *conn)
904 struct l2cap_chan *chan;
906 BT_DBG("conn %p", conn);
908 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
909 l2cap_le_conn_ready(conn);
911 if (conn->hcon->out && conn->hcon->type == LE_LINK)
912 smp_conn_security(conn, conn->hcon->pending_sec_level);
914 read_lock(&conn->chan_lock);
916 list_for_each_entry(chan, &conn->chan_l, list) {
917 struct sock *sk = chan->sk;
921 if (conn->hcon->type == LE_LINK) {
922 if (smp_conn_security(conn, chan->sec_level))
923 l2cap_chan_ready(sk);
925 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
926 __clear_chan_timer(chan);
927 l2cap_state_change(chan, BT_CONNECTED);
928 sk->sk_state_change(sk);
930 } else if (chan->state == BT_CONNECT)
931 l2cap_do_start(chan);
936 read_unlock(&conn->chan_lock);
939 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate @err to every channel that asked for reliable delivery
 * (the sk_err assignment is elided in this excerpt). */
940 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
942 struct l2cap_chan *chan;
944 BT_DBG("conn %p", conn);
946 read_lock(&conn->chan_lock);
948 list_for_each_entry(chan, &conn->chan_l, list) {
949 struct sock *sk = chan->sk;
951 if (chan->force_reliable)
955 read_unlock(&conn->chan_lock);
/* Information-Request timer expired: give up on the feature-mask exchange,
 * mark it done and let pending channels proceed anyway. */
958 static void l2cap_info_timeout(unsigned long arg)
960 struct l2cap_conn *conn = (void *) arg;
962 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
963 conn->info_ident = 0;
965 l2cap_conn_start(conn);
/* Tear down the L2CAP connection on HCI link loss: free any partial
 * reassembly skb, delete and close every channel with @err, stop the info
 * timer, clean up pending SMP state, and detach from the hci_conn. */
968 static void l2cap_conn_del(struct hci_conn *hcon, int err)
970 struct l2cap_conn *conn = hcon->l2cap_data;
971 struct l2cap_chan *chan, *l;
977 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
979 kfree_skb(conn->rx_skb);
982 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
985 l2cap_chan_del(chan, err);
987 chan->ops->close(chan->data);
990 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
991 del_timer_sync(&conn->info_timer);
993 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
994 del_timer(&conn->security_timer);
995 smp_chan_destroy(conn);
/* Final kfree of conn is elided in this excerpt. */
998 hcon->l2cap_data = NULL;
/* LE security (SMP) timer expired: drop the whole connection. */
1002 static void security_timeout(unsigned long arg)
1004 struct l2cap_conn *conn = (void *) arg;
1006 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Get-or-create the l2cap_conn attached to @hcon. A new connection takes
 * its MTU from the controller (LE MTU on LE links when set, ACL otherwise),
 * initialises locks and the channel list, and arms either the SMP security
 * timer (LE) or the info timer (BR/EDR). */
1009 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1011 struct l2cap_conn *conn = hcon->l2cap_data;
1016 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1020 hcon->l2cap_data = conn;
1023 BT_DBG("hcon %p conn %p", hcon, conn);
1025 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1026 conn->mtu = hcon->hdev->le_mtu;
1028 conn->mtu = hcon->hdev->acl_mtu;
1030 conn->src = &hcon->hdev->bdaddr;
1031 conn->dst = &hcon->dst;
1033 conn->feat_mask = 0;
1035 spin_lock_init(&conn->lock);
1036 rwlock_init(&conn->chan_lock);
1038 INIT_LIST_HEAD(&conn->chan_l);
1040 if (hcon->type == LE_LINK)
1041 setup_timer(&conn->security_timer, security_timeout,
1042 (unsigned long) conn);
1044 setup_timer(&conn->info_timer, l2cap_info_timeout,
1045 (unsigned long) conn);
/* 0x13 = HCI "Remote User Terminated Connection" default disconnect reason */
1047 conn->disc_reason = 0x13;
/* Locked wrapper around __l2cap_chan_add(). */
1052 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1054 write_lock_bh(&conn->chan_lock);
1055 __l2cap_chan_add(conn, chan);
1056 write_unlock_bh(&conn->chan_lock);
1059 /* ---- Socket interface ---- */
1061 /* Find socket with psm and source bdaddr.
1062 * Returns closest match.
/* Global lookup by PSM: exact source-address match wins; a BDADDR_ANY
 * listener serves as the closest-match fallback (returned via c1,
 * assignment elided in this excerpt). */
1064 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1066 struct l2cap_chan *c, *c1 = NULL;
1068 read_lock(&chan_list_lock);
1070 list_for_each_entry(c, &chan_list, global_l) {
1071 struct sock *sk = c->sk;
1073 if (state && c->state != state)
1076 if (c->psm == psm) {
1078 if (!bacmp(&bt_sk(sk)->src, src)) {
1079 read_unlock(&chan_list_lock);
1084 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1089 read_unlock(&chan_list_lock);
/* Outgoing connect for @chan: route to a local adapter, create the HCI
 * link (LE when the destination CID is the LE data CID, ACL otherwise),
 * attach the channel, and either finish immediately if the link is already
 * up or let the connect-complete path call l2cap_do_start(). Returns 0 or
 * a negative errno (some error paths elided in this excerpt). */
1094 int l2cap_chan_connect(struct l2cap_chan *chan)
1096 struct sock *sk = chan->sk;
1097 bdaddr_t *src = &bt_sk(sk)->src;
1098 bdaddr_t *dst = &bt_sk(sk)->dst;
1099 struct l2cap_conn *conn;
1100 struct hci_conn *hcon;
1101 struct hci_dev *hdev;
1105 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1108 hdev = hci_get_route(dst, src);
1110 return -EHOSTUNREACH;
1112 hci_dev_lock_bh(hdev);
1114 auth_type = l2cap_get_auth_type(chan);
1116 if (chan->dcid == L2CAP_CID_LE_DATA)
1117 hcon = hci_connect(hdev, LE_LINK, dst,
1118 chan->sec_level, auth_type);
1120 hcon = hci_connect(hdev, ACL_LINK, dst,
1121 chan->sec_level, auth_type);
1124 err = PTR_ERR(hcon);
1128 conn = l2cap_conn_add(hcon, 0);
1135 /* Update source addr of the socket */
1136 bacpy(src, conn->src);
1138 l2cap_chan_add(conn, chan);
1140 l2cap_state_change(chan, BT_CONNECT);
1141 __set_chan_timer(chan, sk->sk_sndtimeo);
1143 if (hcon->state == BT_CONNECTED) {
1144 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1145 __clear_chan_timer(chan);
1146 if (l2cap_check_security(chan))
1147 l2cap_state_change(chan, BT_CONNECTED);
1149 l2cap_do_start(chan);
1155 hci_dev_unlock_bh(hdev);
/* Block (interruptibly) until every outstanding ERTM I-frame has been
 * acknowledged or the connection goes away. Returns 0, a signal errno,
 * or a pending socket error. */
1160 int __l2cap_wait_ack(struct sock *sk)
1162 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1163 DECLARE_WAITQUEUE(wait, current);
1167 add_wait_queue(sk_sleep(sk), &wait);
1168 set_current_state(TASK_INTERRUPTIBLE);
1169 while (chan->unacked_frames > 0 && chan->conn) {
1173 if (signal_pending(current)) {
1174 err = sock_intr_errno(timeo);
1179 timeo = schedule_timeout(timeo);
1181 set_current_state(TASK_INTERRUPTIBLE);
1183 err = sock_error(sk);
1187 set_current_state(TASK_RUNNING);
1188 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: if the peer has not responded within remote_max_tx
 * polls, abort the channel; otherwise re-arm and poll again with P=1. */
1192 static void l2cap_monitor_timeout(unsigned long arg)
1194 struct l2cap_chan *chan = (void *) arg;
1195 struct sock *sk = chan->sk;
1197 BT_DBG("chan %p", chan);
1200 if (chan->retry_count >= chan->remote_max_tx) {
1201 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1206 chan->retry_count++;
1207 __set_monitor_timer(chan);
1209 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer: switch to the monitor phase — start counting
 * retries, arm the monitor timer, note we are waiting for an F-bit, and
 * poll the peer (P=1). */
1213 static void l2cap_retrans_timeout(unsigned long arg)
1215 struct l2cap_chan *chan = (void *) arg;
1216 struct sock *sk = chan->sk;
1218 BT_DBG("chan %p", chan);
1221 chan->retry_count = 1;
1222 __set_monitor_timer(chan);
1224 set_bit(CONN_WAIT_F, &chan->conn_state);
1226 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Release transmitted I-frames from tx_q that the peer has acknowledged,
 * i.e. those before expected_ack_seq; stop the retransmission timer once
 * nothing remains unacknowledged. */
1230 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1232 struct sk_buff *skb;
1234 while ((skb = skb_peek(&chan->tx_q)) &&
1235 chan->unacked_frames) {
1236 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1239 skb = skb_dequeue(&chan->tx_q);
1242 chan->unacked_frames--;
1245 if (!chan->unacked_frames)
1246 __clear_retrans_timer(chan);
/* Hand one fully built PDU to the HCI layer, requesting no-flush start
 * only when the channel is non-flushable and the controller supports it. */
1249 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1251 struct hci_conn *hcon = chan->conn->hcon;
1254 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1256 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1257 flags = ACL_START_NO_FLUSH;
1261 bt_cb(skb)->force_active = chan->force_active;
1262 hci_send_acl(hcon, skb, flags);
/* Streaming mode: drain tx_q, stamping each frame's control field with the
 * next TX sequence number (modulo-64) and recomputing the trailing FCS when
 * CRC16 is in use. Frames are never retransmitted in this mode. */
1265 static void l2cap_streaming_send(struct l2cap_chan *chan)
1267 struct sk_buff *skb;
1270 while ((skb = skb_dequeue(&chan->tx_q))) {
1271 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1272 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1273 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
/* FCS covers the whole PDU except its own trailing two bytes. */
1275 if (chan->fcs == L2CAP_FCS_CRC16) {
1276 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1277 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1280 l2cap_do_send(chan, skb);
1282 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* Retransmit the queued I-frame whose tx_seq matches @tx_seq (e.g. in
 * answer to an SREJ). Aborts the channel when the frame already hit the
 * peer's max-transmit limit. The frame is cloned, its control field is
 * rebuilt (SAR bits kept, ReqSeq refreshed, F-bit folded in) and the FCS
 * recomputed before sending. */
1286 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1288 struct sk_buff *skb, *tx_skb;
1291 skb = skb_peek(&chan->tx_q);
/* Walk tx_q until the frame with the requested sequence number is found. */
1296 if (bt_cb(skb)->tx_seq == tx_seq)
1299 if (skb_queue_is_last(&chan->tx_q, skb))
1302 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1304 if (chan->remote_max_tx &&
1305 bt_cb(skb)->retries == chan->remote_max_tx) {
1306 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1310 tx_skb = skb_clone(skb, GFP_ATOMIC);
1311 bt_cb(skb)->retries++;
1312 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1313 control &= L2CAP_CTRL_SAR;
1315 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1316 control |= L2CAP_CTRL_FINAL;
1318 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1319 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1321 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1323 if (chan->fcs == L2CAP_FCS_CRC16) {
1324 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1325 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1328 l2cap_do_send(chan, tx_skb);
/* ERTM transmit loop: send frames from tx_send_head while the TX window
 * is open. Each frame is cloned, its control field rebuilt with the
 * current ReqSeq/TxSeq (and pending F-bit), FCS refreshed, and the
 * retransmission timer armed. Counters (unacked_frames on first send,
 * frames_sent, next_tx_seq mod 64) track window state; returns the number
 * of frames sent (return statement elided in this excerpt). */
1331 static int l2cap_ertm_send(struct l2cap_chan *chan)
1333 struct sk_buff *skb, *tx_skb;
1337 if (chan->state != BT_CONNECTED)
1340 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1342 if (chan->remote_max_tx &&
1343 bt_cb(skb)->retries == chan->remote_max_tx) {
1344 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1348 tx_skb = skb_clone(skb, GFP_ATOMIC);
1350 bt_cb(skb)->retries++;
1352 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1353 control &= L2CAP_CTRL_SAR;
1355 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1356 control |= L2CAP_CTRL_FINAL;
1358 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1359 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1360 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS is computed from/written to skb->data here while
 * l2cap_retransmit_one_frame() uses tx_skb->data; both touch the same
 * buffer because skb_clone() shares the data area — asymmetry is
 * harmless but worth confirming against upstream. */
1363 if (chan->fcs == L2CAP_FCS_CRC16) {
1364 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1365 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1368 l2cap_do_send(chan, tx_skb);
1370 __set_retrans_timer(chan);
1372 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1373 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1375 if (bt_cb(skb)->retries == 1)
1376 chan->unacked_frames++;
1378 chan->frames_sent++;
1380 if (skb_queue_is_last(&chan->tx_q, skb))
1381 chan->tx_send_head = NULL;
1383 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the start of tx_q and the sequence counter to
 * the last acknowledged frame, then resend everything via l2cap_ertm_send(). */
1391 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1395 if (!skb_queue_empty(&chan->tx_q))
1396 chan->tx_send_head = chan->tx_q.next;
1398 chan->next_tx_seq = chan->expected_ack_seq;
1399 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: when locally busy send RNR; otherwise try
 * to piggyback the ack on pending I-frames (l2cap_ertm_send), falling back
 * to an explicit RR S-frame when nothing was sent. */
1403 static void l2cap_send_ack(struct l2cap_chan *chan)
1407 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1409 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1410 control |= L2CAP_SUPER_RCV_NOT_READY;
1411 set_bit(CONN_RNR_SENT, &chan->conn_state);
1412 l2cap_send_sframe(chan, control);
1416 if (l2cap_ertm_send(chan) > 0)
1419 control |= L2CAP_SUPER_RCV_READY;
1420 l2cap_send_sframe(chan, control);
/* Send a final (F=1) SREJ S-frame requesting the sequence number held by
 * the last entry of the SREJ list. */
1423 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1425 struct srej_list *tail;
1428 control = L2CAP_SUPER_SELECT_REJECT;
1429 control |= L2CAP_CTRL_FINAL;
1431 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1432 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1434 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user iovec data into @skb: the first @count bytes go
 * into the linear area, the remainder into a chain of MTU-sized fragment
 * skbs hung off frag_list. Returns the total copied or a negative errno
 * (EFAULT on copy failure; some error/len bookkeeping elided here). */
1437 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1439 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1440 struct sk_buff **frag;
1443 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1449 /* Continuation fragments (no L2CAP header) */
1450 frag = &skb_shinfo(skb)->frag_list;
1452 count = min_t(unsigned int, conn->mtu, len);
1454 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1457 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1463 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 16-bit PSM followed
 * by the user payload copied from @msg. hlen = header + PSM field.
 * Returns the skb or an ERR_PTR. */
1469 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1471 struct sock *sk = chan->sk;
1472 struct l2cap_conn *conn = chan->conn;
1473 struct sk_buff *skb;
1474 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1475 struct l2cap_hdr *lh;
1477 BT_DBG("sk %p len %d", sk, (int)len);
1479 count = min_t(unsigned int, (conn->mtu - hlen), len);
1480 skb = bt_skb_send_alloc(sk, count + hlen,
1481 msg->msg_flags & MSG_DONTWAIT, &err);
1483 return ERR_PTR(err);
1485 /* Create L2CAP header */
1486 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1487 lh->cid = cpu_to_le16(chan->dcid);
1488 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1489 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1491 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1492 if (unlikely(err < 0)) {
1494 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: bare L2CAP header plus payload copied
 * from @msg. Returns the skb or an ERR_PTR. */
1499 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1501 struct sock *sk = chan->sk;
1502 struct l2cap_conn *conn = chan->conn;
1503 struct sk_buff *skb;
1504 int err, count, hlen = L2CAP_HDR_SIZE;
1505 struct l2cap_hdr *lh;
1507 BT_DBG("sk %p len %d", sk, (int)len);
1509 count = min_t(unsigned int, (conn->mtu - hlen), len);
1510 skb = bt_skb_send_alloc(sk, count + hlen,
1511 msg->msg_flags & MSG_DONTWAIT, &err);
1513 return ERR_PTR(err);
1515 /* Create L2CAP header */
1516 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1517 lh->cid = cpu_to_le16(chan->dcid);
1518 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1520 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1521 if (unlikely(err < 0)) {
1523 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header + control field,
 * optional 16-bit SDU length (for SAR start frames, when @sdulen is set —
 * the conditional lines are elided here), payload from @msg, and a
 * placeholder FCS that the send path fills in. hlen starts at header +
 * control and grows for SDU-length/FCS. Returns the skb or an ERR_PTR. */
1528 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1529 struct msghdr *msg, size_t len,
1530 u16 control, u16 sdulen)
1532 struct sock *sk = chan->sk;
1533 struct l2cap_conn *conn = chan->conn;
1534 struct sk_buff *skb;
1535 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1536 struct l2cap_hdr *lh;
1538 BT_DBG("sk %p len %d", sk, (int)len);
1541 return ERR_PTR(-ENOTCONN);
1546 if (chan->fcs == L2CAP_FCS_CRC16)
1549 count = min_t(unsigned int, (conn->mtu - hlen), len);
1550 skb = bt_skb_send_alloc(sk, count + hlen,
1551 msg->msg_flags & MSG_DONTWAIT, &err);
1553 return ERR_PTR(err);
1555 /* Create L2CAP header */
1556 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1557 lh->cid = cpu_to_le16(chan->dcid);
1558 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1559 put_unaligned_le16(control, skb_put(skb, 2));
1561 put_unaligned_le16(sdulen, skb_put(skb, 2));
1563 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1564 if (unlikely(err < 0)) {
1566 return ERR_PTR(err);
/* Reserve space for FCS; actual value is computed at transmit time. */
1569 if (chan->fcs == L2CAP_FCS_CRC16)
1570 put_unaligned_le16(0, skb_put(skb, 2));
1572 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than remote_mps into a START / CONTINUE* / END
 * sequence of I-frames, building them on a local queue and splicing the
 * whole SDU atomically onto chan->tx_q on success. The START frame
 * carries the total SDU length. On any per-frame failure the partial
 * queue is purged and the error returned.
 * NOTE(review): lossy listing — the loop construct, IS_ERR checks and
 * the len/size bookkeeping for CONTINUE/END frames are partly missing. */
1576 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1578 struct sk_buff *skb;
1579 struct sk_buff_head sar_queue;
1583 skb_queue_head_init(&sar_queue);
1584 control = L2CAP_SDU_START;
/* START frame: payload capped at remote_mps, sdulen = total SDU length */
1585 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1587 return PTR_ERR(skb);
1589 __skb_queue_tail(&sar_queue, skb);
1590 len -= chan->remote_mps;
1591 size += chan->remote_mps;
1596 if (len > chan->remote_mps) {
1597 control = L2CAP_SDU_CONTINUE;
1598 buflen = chan->remote_mps;
1600 control = L2CAP_SDU_END;
/* CONTINUE/END frames carry no sdulen (0) */
1604 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1606 skb_queue_purge(&sar_queue);
1607 return PTR_ERR(skb);
1610 __skb_queue_tail(&sar_queue, skb);
/* Publish the whole SDU at once; start transmission here if idle */
1614 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1615 if (chan->tx_send_head == NULL)
1616 chan->tx_send_head = sar_queue.next;
/* Top-level channel send entry point. Dispatches on channel type/mode:
 * connectionless channels and basic mode build-and-send a single PDU;
 * ERTM/streaming either queue one unsegmented I-frame or SAR-segment the
 * SDU, then kick the mode-specific transmit engine. Returns bytes queued
 * (len) or a negative errno.
 * NOTE(review): lossy listing — IS_ERR checks, "err = len" assignments,
 * break statements and the EMSGSIZE path for oversized basic-mode SDUs
 * are among the missing lines. */
1621 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1623 struct sk_buff *skb;
1627 /* Connectionless channel */
1628 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1629 skb = l2cap_create_connless_pdu(chan, msg, len);
1631 return PTR_ERR(skb);
1633 l2cap_do_send(chan, skb);
1637 switch (chan->mode) {
1638 case L2CAP_MODE_BASIC:
1639 /* Check outgoing MTU */
1640 if (len > chan->omtu)
1643 /* Create a basic PDU */
1644 skb = l2cap_create_basic_pdu(chan, msg, len);
1646 return PTR_ERR(skb);
1648 l2cap_do_send(chan, skb);
1652 case L2CAP_MODE_ERTM:
1653 case L2CAP_MODE_STREAMING:
1654 /* Entire SDU fits into one PDU */
1655 if (len <= chan->remote_mps) {
1656 control = L2CAP_SDU_UNSEGMENTED;
1657 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1660 return PTR_ERR(skb);
1662 __skb_queue_tail(&chan->tx_q, skb);
1664 if (chan->tx_send_head == NULL)
1665 chan->tx_send_head = skb;
1668 /* Segment SDU into multiples PDUs */
1669 err = l2cap_sar_segment_sdu(chan, msg, len);
1674 if (chan->mode == L2CAP_MODE_STREAMING) {
1675 l2cap_streaming_send(chan);
/* ERTM: hold off transmission while remote is busy and we await an F-bit */
1680 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1681 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1686 err = l2cap_ertm_send(chan);
1693 BT_DBG("bad state %1.1x", chan->mode);
1700 /* Copy frame to all raw sockets on that connection */
/* Walk conn->chan_l under the read lock and hand a clone of skb to every
 * RAW-type channel via its ops->recv callback.
 * NOTE(review): lossy listing — the "skip originating socket" test and
 * the clone-failure/recv-failure handling lines are missing. */
1701 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1703 struct sk_buff *nskb;
1704 struct l2cap_chan *chan;
1706 BT_DBG("conn %p", conn);
1708 read_lock(&conn->chan_lock);
1709 list_for_each_entry(chan, &conn->chan_l, list) {
1710 struct sock *sk = chan->sk;
/* Only raw channels receive copies */
1711 if (chan->chan_type != L2CAP_CHAN_RAW)
1714 /* Don't send frame to the socket it came from */
1717 nskb = skb_clone(skb, GFP_ATOMIC);
1721 if (chan->ops->recv(chan->data, nskb))
1724 read_unlock(&conn->chan_lock);
1727 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header (CID is the
 * LE or BR/EDR signalling channel depending on link type), command
 * header (code/ident/len), then dlen bytes of payload, fragmented into
 * a frag_list when it exceeds the connection MTU.
 * NOTE(review): lossy listing — alloc-failure checks, the "if (dlen)"
 * guard, data/len advancement between fragments and the final return
 * are missing lines. */
1728 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1729 u8 code, u8 ident, u16 dlen, void *data)
1731 struct sk_buff *skb, **frag;
1732 struct l2cap_cmd_hdr *cmd;
1733 struct l2cap_hdr *lh;
1736 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1737 conn, code, ident, dlen);
1739 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1740 count = min_t(unsigned int, conn->mtu, len);
1742 skb = bt_skb_alloc(count, GFP_ATOMIC);
1746 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1747 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling CID differs between LE and BR/EDR links */
1749 if (conn->hcon->type == LE_LINK)
1750 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1752 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1754 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1757 cmd->len = cpu_to_le16(dlen);
/* First fragment: whatever payload fits after the two headers */
1760 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1761 memcpy(skb_put(skb, count), data, count);
1767 /* Continuation fragments (no L2CAP header) */
1768 frag = &skb_shinfo(skb)->frag_list;
1770 count = min_t(unsigned int, conn->mtu, len);
1772 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1776 memcpy(skb_put(*frag, count), data, count);
1781 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: return its type/olen and its
 * value widened into *val (1/2/4-byte options by value, anything else as
 * a pointer to the raw bytes). Returns the total option length consumed
 * so the caller can advance through the option list.
 * NOTE(review): lossy listing — the switch on opt->len, *ptr advancement
 * and the return statement are among the missing lines. */
1791 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1793 struct l2cap_conf_opt *opt = *ptr;
1796 len = L2CAP_CONF_OPT_SIZE + opt->len;
1804 *val = *((u8 *) opt->val);
1808 *val = get_unaligned_le16(opt->val);
1812 *val = get_unaligned_le32(opt->val);
/* Larger options are handed back by reference, not by value */
1816 *val = (unsigned long) opt->val;
1820 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type, len, val) at *ptr and advance
 * *ptr past it. 1/2/4-byte values are stored inline; longer values are
 * memcpy'd from the pointer passed in val.
 * NOTE(review): lossy listing — the opt->type/opt->len assignments and
 * the switch framing are missing lines. */
1824 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1826 struct l2cap_conf_opt *opt = *ptr;
1828 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1835 *((u8 *) opt->val) = val;
1839 put_unaligned_le16(val, opt->val);
1843 put_unaligned_le32(val, opt->val);
/* val doubles as a pointer for variable-length options (e.g. RFC) */
1847 memcpy(opt->val, (void *) val, len);
1851 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* ERTM ack timer callback: when the delayed-ack timer fires, send an
 * acknowledgement for received frames. Runs in timer (softirq) context,
 * hence the bh socket lock. */
1854 static void l2cap_ack_timeout(unsigned long arg)
1856 struct l2cap_chan *chan = (void *) arg;
1858 bh_lock_sock(chan->sk);
1859 l2cap_send_ack(chan);
1860 bh_unlock_sock(chan->sk);
/* Initialise per-channel ERTM state once the channel enters ERTM mode:
 * reset sequence counters, arm the retransmission/monitor/ack timers,
 * set up the SREJ queue and list, and route backlog packets through the
 * ERTM receive path. */
1863 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1865 struct sock *sk = chan->sk;
1867 chan->expected_ack_seq = 0;
1868 chan->unacked_frames = 0;
1869 chan->buffer_seq = 0;
1870 chan->num_acked = 0;
1871 chan->frames_sent = 0;
1873 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1874 (unsigned long) chan);
1875 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1876 (unsigned long) chan);
1877 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1879 skb_queue_head_init(&chan->srej_q);
1881 INIT_LIST_HEAD(&chan->srej_l);
/* Deferred (backlogged) frames must still go through ERTM processing */
1884 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/* Pick the channel mode to actually use: keep ERTM/streaming only if the
 * remote's feature mask supports it, otherwise fall back to basic mode.
 * NOTE(review): lossy listing — the "switch (mode)" line and the
 * "return mode" on the supported path are missing. */
1887 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1890 case L2CAP_MODE_STREAMING:
1891 case L2CAP_MODE_ERTM:
1892 if (l2cap_mode_supported(mode, remote_feat_mask))
1896 return L2CAP_MODE_BASIC;
/* Build an outgoing Configure Request into 'data': optional MTU option
 * (when imtu differs from the default), then a mode-specific RFC option
 * (basic / ERTM / streaming), and an FCS "none" option when the peer
 * supports the FCS feature and we want CRC disabled. Returns the total
 * request length (ptr - data, in the missing tail).
 * NOTE(review): lossy listing — "done:" label, break statements and the
 * final length computation are among the missing lines. */
1900 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1902 struct l2cap_conf_req *req = data;
1903 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1904 void *ptr = req->data;
1906 BT_DBG("chan %p", chan);
/* Only (re)negotiate the mode on the very first config exchange */
1908 if (chan->num_conf_req || chan->num_conf_rsp)
1911 switch (chan->mode) {
1912 case L2CAP_MODE_STREAMING:
1913 case L2CAP_MODE_ERTM:
1914 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
1919 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
1924 if (chan->imtu != L2CAP_DEFAULT_MTU)
1925 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1927 switch (chan->mode) {
1928 case L2CAP_MODE_BASIC:
/* Only send an explicit basic-mode RFC if the peer knows ERTM/streaming */
1929 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1930 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1933 rfc.mode = L2CAP_MODE_BASIC;
1935 rfc.max_transmit = 0;
1936 rfc.retrans_timeout = 0;
1937 rfc.monitor_timeout = 0;
1938 rfc.max_pdu_size = 0;
1940 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1941 (unsigned long) &rfc);
1944 case L2CAP_MODE_ERTM:
1945 rfc.mode = L2CAP_MODE_ERTM;
1946 rfc.txwin_size = chan->tx_win;
1947 rfc.max_transmit = chan->max_tx;
1948 rfc.retrans_timeout = 0;
1949 rfc.monitor_timeout = 0;
1950 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Clamp MPS so a full PDU (incl. ~10 bytes of overhead) fits the link MTU */
1951 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1952 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1954 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1955 (unsigned long) &rfc);
1957 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1960 if (chan->fcs == L2CAP_FCS_NONE ||
1961 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1962 chan->fcs = L2CAP_FCS_NONE;
1963 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1967 case L2CAP_MODE_STREAMING:
1968 rfc.mode = L2CAP_MODE_STREAMING;
1970 rfc.max_transmit = 0;
1971 rfc.retrans_timeout = 0;
1972 rfc.monitor_timeout = 0;
1973 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1974 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1975 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1977 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1978 (unsigned long) &rfc);
1980 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1983 if (chan->fcs == L2CAP_FCS_NONE ||
1984 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1985 chan->fcs = L2CAP_FCS_NONE;
1986 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1991 req->dcid = cpu_to_le16(chan->dcid);
1992 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated Configure Request (chan->conf_req) and
 * build our Configure Response into 'data'. Walks the option list
 * (MTU, flush timeout, QoS, RFC, FCS), negotiates the channel mode,
 * then on success echoes back accepted output options (MTU, RFC with
 * filled-in timeouts). Unknown non-hint options yield CONF_UNKNOWN.
 * Returns the response length (ptr - data, in the missing tail) or
 * -ECONNREFUSED when mode negotiation is exhausted.
 * NOTE(review): lossy listing — switch framing, break statements, the
 * "done:" label and the omtu assignment from mtu are missing lines. */
1997 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1999 struct l2cap_conf_rsp *rsp = data;
2000 void *ptr = rsp->data;
2001 void *req = chan->conf_req;
2002 int len = chan->conf_len;
2003 int type, hint, olen;
2005 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2006 u16 mtu = L2CAP_DEFAULT_MTU;
2007 u16 result = L2CAP_CONF_SUCCESS;
2009 BT_DBG("chan %p", chan);
2011 while (len >= L2CAP_CONF_OPT_SIZE) {
2012 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; mandatory unknowns force CONF_UNKNOWN */
2014 hint = type & L2CAP_CONF_HINT;
2015 type &= L2CAP_CONF_MASK;
2018 case L2CAP_CONF_MTU:
2022 case L2CAP_CONF_FLUSH_TO:
2023 chan->flush_to = val;
2026 case L2CAP_CONF_QOS:
2029 case L2CAP_CONF_RFC:
2030 if (olen == sizeof(rfc))
2031 memcpy(&rfc, (void *) val, olen);
2034 case L2CAP_CONF_FCS:
2035 if (val == L2CAP_FCS_NONE)
2036 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2044 result = L2CAP_CONF_UNKNOWN;
2045 *((u8 *) ptr++) = type;
/* Mode may only be re-negotiated on the first req/rsp round */
2050 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2053 switch (chan->mode) {
2054 case L2CAP_MODE_STREAMING:
2055 case L2CAP_MODE_ERTM:
2056 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2057 chan->mode = l2cap_select_mode(rfc.mode,
2058 chan->conn->feat_mask);
2062 if (chan->mode != rfc.mode)
2063 return -ECONNREFUSED;
2069 if (chan->mode != rfc.mode) {
2070 result = L2CAP_CONF_UNACCEPT;
2071 rfc.mode = chan->mode;
/* Second unaccepted round means the peer won't converge: refuse */
2073 if (chan->num_conf_rsp == 1)
2074 return -ECONNREFUSED;
2076 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2077 sizeof(rfc), (unsigned long) &rfc);
2081 if (result == L2CAP_CONF_SUCCESS) {
2082 /* Configure output options and let the other side know
2083 * which ones we don't like. */
2085 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2086 result = L2CAP_CONF_UNACCEPT;
2089 set_bit(CONF_MTU_DONE, &chan->conf_state);
2091 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2094 case L2CAP_MODE_BASIC:
2095 chan->fcs = L2CAP_FCS_NONE;
2096 set_bit(CONF_MODE_DONE, &chan->conf_state);
2099 case L2CAP_MODE_ERTM:
2100 chan->remote_tx_win = rfc.txwin_size;
2101 chan->remote_max_tx = rfc.max_transmit;
2103 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2104 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2106 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* We (the receiver of I-frames) dictate the retrans/monitor timeouts */
2108 rfc.retrans_timeout =
2109 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2110 rfc.monitor_timeout =
2111 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2113 set_bit(CONF_MODE_DONE, &chan->conf_state);
2115 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2116 sizeof(rfc), (unsigned long) &rfc);
2120 case L2CAP_MODE_STREAMING:
2121 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2122 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2124 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2126 set_bit(CONF_MODE_DONE, &chan->conf_state);
2128 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2129 sizeof(rfc), (unsigned long) &rfc);
2134 result = L2CAP_CONF_UNACCEPT;
2136 memset(&rfc, 0, sizeof(rfc));
2137 rfc.mode = chan->mode;
2140 if (result == L2CAP_CONF_SUCCESS)
2141 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2143 rsp->scid = cpu_to_le16(chan->dcid);
2144 rsp->result = cpu_to_le16(result);
2145 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configure Response and build a follow-up Configure
 * Request into 'data' echoing the (possibly adjusted) options. Applies
 * accepted values to the channel (imtu, flush_to, mode, ERTM timeouts,
 * mps). Returns the new request length or -ECONNREFUSED when the peer
 * insists on an incompatible mode.
 * NOTE(review): lossy listing — switch framing, break statements and the
 * final length computation are missing lines. */
2150 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2152 struct l2cap_conf_req *req = data;
2153 void *ptr = req->data;
2156 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2158 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2160 while (len >= L2CAP_CONF_OPT_SIZE) {
2161 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2164 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: reject, counter with min */
2165 if (val < L2CAP_DEFAULT_MIN_MTU) {
2166 *result = L2CAP_CONF_UNACCEPT;
2167 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2170 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2173 case L2CAP_CONF_FLUSH_TO:
2174 chan->flush_to = val;
2175 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2179 case L2CAP_CONF_RFC:
2180 if (olen == sizeof(rfc))
2181 memcpy(&rfc, (void *)val, olen);
/* State-2 devices cannot change mode mid-negotiation */
2183 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2184 rfc.mode != chan->mode)
2185 return -ECONNREFUSED;
2189 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2190 sizeof(rfc), (unsigned long) &rfc);
2195 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2196 return -ECONNREFUSED;
2198 chan->mode = rfc.mode;
2200 if (*result == L2CAP_CONF_SUCCESS) {
2202 case L2CAP_MODE_ERTM:
2203 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2204 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2205 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2207 case L2CAP_MODE_STREAMING:
2208 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2212 req->dcid = cpu_to_le16(chan->dcid);
2213 req->flags = cpu_to_le16(0x0000);
/* Fill a minimal Configure Response header (scid/result/flags, no
 * options) into 'data'. The returned length line is not visible in this
 * listing — presumably sizeof(*rsp); TODO confirm. */
2218 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2220 struct l2cap_conf_rsp *rsp = data;
2221 void *ptr = rsp->data;
2223 BT_DBG("chan %p", chan);
2225 rsp->scid = cpu_to_le16(chan->dcid);
2226 rsp->result = cpu_to_le16(result);
2227 rsp->flags = cpu_to_le16(flags);
/* Complete a previously deferred connection: send the success
 * Connection Response using the stored ident, then (unless one was
 * already sent) kick off configuration with a Configure Request. */
2232 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2234 struct l2cap_conn_rsp rsp;
2235 struct l2cap_conn *conn = chan->conn;
2238 rsp.scid = cpu_to_le16(chan->dcid);
2239 rsp.dcid = cpu_to_le16(chan->scid);
2240 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2241 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2242 l2cap_send_cmd(conn, chan->ident,
2243 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only the first caller sends the config request */
2245 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2248 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2249 l2cap_build_conf_req(chan, buf), buf);
2250 chan->num_conf_req++;
/* Extract the RFC option from a successful Configure Response and apply
 * its timeouts/MPS to the channel. If the (misbehaving) peer sent no RFC
 * option, substitute sane defaults so ERTM/streaming still work.
 * NOTE(review): lossy listing — the "goto done" on finding the option,
 * the "done:" label placement and break statements are missing. */
2253 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2257 struct l2cap_conf_rfc rfc;
2259 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM/streaming carry RFC parameters worth extracting */
2261 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2264 while (len >= L2CAP_CONF_OPT_SIZE) {
2265 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2267 if (type != L2CAP_CONF_RFC)
2270 if (olen != sizeof(rfc))
2273 memcpy(&rfc, (void *)val, olen);
2277 /* Use sane default values in case a misbehaving remote device
2278 * did not send an RFC option.
2280 rfc.mode = chan->mode;
2281 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2282 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2283 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2285 BT_ERR("Expected RFC option was not found, using defaults");
2289 case L2CAP_MODE_ERTM:
2290 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2291 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2292 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2294 case L2CAP_MODE_STREAMING:
2295 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject. If it rejects our outstanding
 * Information Request (matching ident), treat feature discovery as done
 * and proceed with starting the queued connections. */
2299 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2301 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2303 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2306 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2307 cmd->ident == conn->info_ident) {
2308 del_timer(&conn->info_timer);
2310 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2311 conn->info_ident = 0;
2313 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listening channel for
 * the PSM, enforce link security (except for SDP, PSM 0x0001), check the
 * accept backlog, create the child channel, reject duplicate source
 * CIDs, and answer with success/pending/refusal. May also trigger the
 * initial Information Request (feature mask) and, on immediate success,
 * the first Configure Request.
 * NOTE(review): lossy listing — goto labels (response/sendresp), the
 * dcid assignment from the new channel's scid, and several unlock paths
 * are among the missing lines. */
2319 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2321 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2322 struct l2cap_conn_rsp rsp;
2323 struct l2cap_chan *chan = NULL, *pchan;
2324 struct sock *parent, *sk = NULL;
2325 int result, status = L2CAP_CS_NO_INFO;
2327 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2328 __le16 psm = req->psm;
2330 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2332 /* Check if we have socket listening on psm */
2333 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2335 result = L2CAP_CR_BAD_PSM;
2341 bh_lock_sock(parent);
2343 /* Check if the ACL is secure enough (if not SDP) */
2344 if (psm != cpu_to_le16(0x0001) &&
2345 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05: authentication failure disconnect reason */
2346 conn->disc_reason = 0x05;
2347 result = L2CAP_CR_SEC_BLOCK;
2351 result = L2CAP_CR_NO_MEM;
2353 /* Check for backlog size */
2354 if (sk_acceptq_is_full(parent)) {
2355 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2359 chan = pchan->ops->new_connection(pchan->data);
2365 write_lock_bh(&conn->chan_lock);
2367 /* Check if we already have channel with that dcid */
2368 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2369 write_unlock_bh(&conn->chan_lock);
2370 sock_set_flag(sk, SOCK_ZAPPED);
2371 chan->ops->close(chan->data);
2375 hci_conn_hold(conn->hcon);
2377 bacpy(&bt_sk(sk)->src, conn->src);
2378 bacpy(&bt_sk(sk)->dst, conn->dst);
2382 bt_accept_enqueue(parent, sk);
2384 __l2cap_chan_add(conn, chan);
2388 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Remember the request ident for a possibly deferred response */
2390 chan->ident = cmd->ident;
2392 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2393 if (l2cap_check_security(chan)) {
2394 if (bt_sk(sk)->defer_setup) {
2395 l2cap_state_change(chan, BT_CONNECT2);
2396 result = L2CAP_CR_PEND;
2397 status = L2CAP_CS_AUTHOR_PEND;
2398 parent->sk_data_ready(parent, 0);
2400 l2cap_state_change(chan, BT_CONFIG);
2401 result = L2CAP_CR_SUCCESS;
2402 status = L2CAP_CS_NO_INFO;
2405 l2cap_state_change(chan, BT_CONNECT2);
2406 result = L2CAP_CR_PEND;
2407 status = L2CAP_CS_AUTHEN_PEND;
2410 l2cap_state_change(chan, BT_CONNECT2);
2411 result = L2CAP_CR_PEND;
2412 status = L2CAP_CS_NO_INFO;
2415 write_unlock_bh(&conn->chan_lock);
2418 bh_unlock_sock(parent);
2421 rsp.scid = cpu_to_le16(scid);
2422 rsp.dcid = cpu_to_le16(dcid);
2423 rsp.result = cpu_to_le16(result);
2424 rsp.status = cpu_to_le16(status);
2425 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending + no-info means we still need the peer's feature mask */
2427 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2428 struct l2cap_info_req info;
2429 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2431 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2432 conn->info_ident = l2cap_get_ident(conn);
2434 mod_timer(&conn->info_timer, jiffies +
2435 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2437 l2cap_send_cmd(conn, conn->info_ident,
2438 L2CAP_INFO_REQ, sizeof(info), &info);
2441 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2442 result == L2CAP_CR_SUCCESS) {
2444 set_bit(CONF_REQ_SENT, &chan->conf_state);
2445 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2446 l2cap_build_conf_req(chan, buf), buf);
2447 chan->num_conf_req++;
/* Handle an incoming Connection Response: locate the channel by scid
 * (or by ident while pending), then on success move to BT_CONFIG and
 * send the first Configure Request; on pending, mark CONNECT_PEND; on
 * refusal, tear the channel down (deferred if the socket is user-held).
 * NOTE(review): lossy listing — lock/unlock around channel lookup, the
 * L2CAP_CR_PEND case label, dcid assignment and default/failure paths
 * are missing lines. */
2453 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2455 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2456 u16 scid, dcid, result, status;
2457 struct l2cap_chan *chan;
2461 scid = __le16_to_cpu(rsp->scid);
2462 dcid = __le16_to_cpu(rsp->dcid);
2463 result = __le16_to_cpu(rsp->result);
2464 status = __le16_to_cpu(rsp->status);
2466 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2469 chan = l2cap_get_chan_by_scid(conn, scid);
/* scid == 0 (pending rsp): fall back to matching by command ident */
2473 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2481 case L2CAP_CR_SUCCESS:
2482 l2cap_state_change(chan, BT_CONFIG);
2485 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2487 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2490 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2491 l2cap_build_conf_req(chan, req), req);
2492 chan->num_conf_req++;
2496 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2500 /* don't delete l2cap channel if sk is owned by user */
2501 if (sock_owned_by_user(sk)) {
/* Defer teardown: arm the disconnect timer and let release handle it */
2502 l2cap_state_change(chan, BT_DISCONN);
2503 __clear_chan_timer(chan);
2504 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2508 l2cap_chan_del(chan, ECONNREFUSED);
2516 static inline void set_default_fcs(struct l2cap_chan *chan)
2518 /* FCS is enabled only in ERTM or streaming mode, if one or both
/* (continuation of the comment above is missing in this listing) */
2521 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2522 chan->fcs = L2CAP_FCS_NONE;
/* Peer never asked to disable FCS, so default to CRC16 */
2523 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2524 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request: validate channel state, reject
 * oversized option payloads, accumulate options across continuation
 * packets (flags & 0x0001), and once complete parse them, send our
 * response, and — when both directions are configured — finish ERTM
 * setup and mark the channel connected. May also send our own first
 * Configure Request if not yet sent.
 * NOTE(review): lossy listing — channel-lookup failure path, unlock
 * labels, "if (len < 0)" on the parse result and the conf_len reset are
 * missing lines. */
2527 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2529 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2532 struct l2cap_chan *chan;
2536 dcid = __le16_to_cpu(req->dcid);
2537 flags = __le16_to_cpu(req->flags);
2539 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2541 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config only valid while connecting/configuring: otherwise reject CID */
2547 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2548 struct l2cap_cmd_rej_cid rej;
2550 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2551 rej.scid = cpu_to_le16(chan->scid);
2552 rej.dcid = cpu_to_le16(chan->dcid);
2554 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2559 /* Reject if config buffer is too small. */
2560 len = cmd_len - sizeof(*req);
2561 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2562 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2563 l2cap_build_conf_rsp(chan, rsp,
2564 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate option bytes; more may arrive in continuation packets */
2569 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2570 chan->conf_len += len;
2572 if (flags & 0x0001) {
2573 /* Incomplete config. Send empty response. */
2574 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2575 l2cap_build_conf_rsp(chan, rsp,
2576 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2580 /* Complete config. */
2581 len = l2cap_parse_conf_req(chan, rsp);
2583 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2587 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2588 chan->num_conf_rsp++;
2590 /* Reset config buffer. */
2593 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: channel becomes operational */
2596 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2597 set_default_fcs(chan);
2599 l2cap_state_change(chan, BT_CONNECTED);
2601 chan->next_tx_seq = 0;
2602 chan->expected_tx_seq = 0;
2603 skb_queue_head_init(&chan->tx_q);
2604 if (chan->mode == L2CAP_MODE_ERTM)
2605 l2cap_ertm_init(chan);
2607 l2cap_chan_ready(sk);
2611 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2613 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2614 l2cap_build_conf_req(chan, buf), buf);
2615 chan->num_conf_req++;
/* Handle an incoming Configure Response. SUCCESS: record the peer's RFC
 * parameters. UNACCEPT: re-negotiate (bounded by MAX_CONF_RSP retries)
 * by parsing the rejected options into a fresh request. Other results:
 * disconnect. When output config is already done, finish channel setup
 * (default FCS, ERTM init, BT_CONNECTED).
 * NOTE(review): lossy listing — channel-lookup failure path, the
 * continuation-flag check, unlock label and default case are missing. */
2623 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2625 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2626 u16 scid, flags, result;
2627 struct l2cap_chan *chan;
2629 int len = cmd->len - sizeof(*rsp);
2631 scid = __le16_to_cpu(rsp->scid);
2632 flags = __le16_to_cpu(rsp->flags);
2633 result = __le16_to_cpu(rsp->result);
2635 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2636 scid, flags, result);
2638 chan = l2cap_get_chan_by_scid(conn, scid);
2645 case L2CAP_CONF_SUCCESS:
2646 l2cap_conf_rfc_get(chan, rsp->data, len);
2649 case L2CAP_CONF_UNACCEPT:
2650 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Options too big to re-send in one request buffer: give up */
2653 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2654 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2658 /* throw out any old stored conf requests */
2659 result = L2CAP_CONF_SUCCESS;
2660 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2663 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2667 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2668 L2CAP_CONF_REQ, len, req);
2669 chan->num_conf_req++;
2670 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable result (e.g. REJECT): error the socket and disconnect */
2676 sk->sk_err = ECONNRESET;
2677 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
2678 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2685 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2687 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2688 set_default_fcs(chan);
2690 l2cap_state_change(chan, BT_CONNECTED);
2691 chan->next_tx_seq = 0;
2692 chan->expected_tx_seq = 0;
2693 skb_queue_head_init(&chan->tx_q);
2694 if (chan->mode == L2CAP_MODE_ERTM)
2695 l2cap_ertm_init(chan);
2697 l2cap_chan_ready(sk);
/* Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, shut the socket down, then delete the channel
 * (deferred via the disconnect timer if the socket is user-held).
 * NOTE(review): lossy listing — lookup-failure path and sock lock
 * handling around the teardown are missing lines. */
2705 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2707 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2708 struct l2cap_disconn_rsp rsp;
2710 struct l2cap_chan *chan;
2713 scid = __le16_to_cpu(req->scid);
2714 dcid = __le16_to_cpu(req->dcid);
2716 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid: look the channel up by it */
2718 chan = l2cap_get_chan_by_scid(conn, dcid);
2724 rsp.dcid = cpu_to_le16(chan->scid);
2725 rsp.scid = cpu_to_le16(chan->dcid);
2726 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2728 sk->sk_shutdown = SHUTDOWN_MASK;
2730 /* don't delete l2cap channel if sk is owned by user */
2731 if (sock_owned_by_user(sk)) {
2732 l2cap_state_change(chan, BT_DISCONN);
2733 __clear_chan_timer(chan);
2734 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2739 l2cap_chan_del(chan, ECONNRESET);
2742 chan->ops->close(chan->data);
/* Handle an incoming Disconnection Response to our earlier request:
 * find the channel by scid and delete it (deferred via the disconnect
 * timer when the socket is user-held), then close the socket side.
 * NOTE(review): lossy listing — lookup-failure path and sock locking
 * are missing lines. */
2746 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2748 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2750 struct l2cap_chan *chan;
2753 scid = __le16_to_cpu(rsp->scid);
2754 dcid = __le16_to_cpu(rsp->dcid);
2756 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2758 chan = l2cap_get_chan_by_scid(conn, scid);
2764 /* don't delete l2cap channel if sk is owned by user */
2765 if (sock_owned_by_user(sk)) {
2766 l2cap_state_change(chan,BT_DISCONN);
2767 __clear_chan_timer(chan);
2768 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* err = 0: this is a clean, locally-initiated disconnect completing */
2773 l2cap_chan_del(chan, 0);
2776 chan->ops->close(chan->data);
/* Handle an incoming Information Request. FEAT_MASK: reply with our
 * feature mask (ERTM/streaming added unless disabled in the missing
 * conditional context). FIXED_CHAN: reply with the fixed-channel map.
 * Anything else: NOTSUPP.
 * NOTE(review): lossy listing — buf declarations and the condition
 * guarding the ERTM/streaming feature bits are missing lines. */
2780 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2782 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2785 type = __le16_to_cpu(req->type);
2787 BT_DBG("type 0x%4.4x", type);
2789 if (type == L2CAP_IT_FEAT_MASK) {
2791 u32 feat_mask = l2cap_feat_mask;
2792 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2793 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2794 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2796 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2798 put_unaligned_le32(feat_mask, rsp->data);
2799 l2cap_send_cmd(conn, cmd->ident,
2800 L2CAP_INFO_RSP, sizeof(buf), buf);
2801 } else if (type == L2CAP_IT_FIXED_CHAN) {
2803 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2804 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2805 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* buf + 4 skips the type/result header; 8-byte fixed-channel bitmap */
2806 memcpy(buf + 4, l2cap_fixed_chan, 8);
2807 l2cap_send_cmd(conn, cmd->ident,
2808 L2CAP_INFO_RSP, sizeof(buf), buf);
2810 struct l2cap_info_rsp rsp;
2811 rsp.type = cpu_to_le16(type);
2812 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2813 l2cap_send_cmd(conn, cmd->ident,
2814 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Information Response (to our request): verify it
 * matches our outstanding ident, stop the info timer, store the peer's
 * feature mask, optionally chase it with a fixed-channel request, and
 * finally mark discovery done and start the pending connections. */
2820 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2822 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2825 type = __le16_to_cpu(rsp->type);
2826 result = __le16_to_cpu(rsp->result);
2828 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2830 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2831 if (cmd->ident != conn->info_ident ||
2832 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2835 del_timer(&conn->info_timer);
2837 if (result != L2CAP_IR_SUCCESS) {
2838 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2839 conn->info_ident = 0;
2841 l2cap_conn_start(conn);
2846 if (type == L2CAP_IT_FEAT_MASK) {
2847 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: query the fixed-channel map next */
2849 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2850 struct l2cap_info_req req;
2851 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2853 conn->info_ident = l2cap_get_ident(conn);
2855 l2cap_send_cmd(conn, conn->info_ident,
2856 L2CAP_INFO_REQ, sizeof(req), &req);
2858 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2859 conn->info_ident = 0;
2861 l2cap_conn_start(conn);
2863 } else if (type == L2CAP_IT_FIXED_CHAN) {
2864 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2865 conn->info_ident = 0;
2867 l2cap_conn_start(conn);
/* Validate LE connection-parameter-update values against the spec
 * ranges: interval 6..3200 with min <= max, supervision timeout
 * 10..3200, timeout strictly greater than 8*max interval, latency
 * <= 499 and small enough that the link outlives missed events.
 * NOTE(review): lossy listing — the -EINVAL returns and the final
 * "return 0" are missing lines. */
2873 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2878 if (min > max || min < 6 || max > 3200)
2881 if (to_multiplier < 10 || to_multiplier > 3200)
2884 if (max >= to_multiplier * 8)
2887 max_latency = (to_multiplier * 8 / max) - 1;
2888 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request: only valid when we
 * are master; validate the length and the proposed parameters, answer
 * accepted/rejected, and on acceptance ask the controller to apply the
 * new parameters. */
2894 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2895 struct l2cap_cmd_hdr *cmd, u8 *data)
2897 struct hci_conn *hcon = conn->hcon;
2898 struct l2cap_conn_param_update_req *req;
2899 struct l2cap_conn_param_update_rsp rsp;
2900 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may grant a parameter update */
2903 if (!(hcon->link_mode & HCI_LM_MASTER))
2906 cmd_len = __le16_to_cpu(cmd->len);
2907 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2910 req = (struct l2cap_conn_param_update_req *) data;
2911 min = __le16_to_cpu(req->min);
2912 max = __le16_to_cpu(req->max);
2913 latency = __le16_to_cpu(req->latency);
2914 to_multiplier = __le16_to_cpu(req->to_multiplier);
2916 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2917 min, max, latency, to_multiplier);
2919 memset(&rsp, 0, sizeof(rsp));
2921 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2923 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2925 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2927 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Parameters were valid: push them down to the controller */
2931 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signalling command to its handler by opcode.
 * Echo requests are answered inline; unknown opcodes log an error
 * (and, in the missing lines, presumably set err). Returns 0 or a
 * negative errno from the handler. */
2936 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2937 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2941 switch (cmd->code) {
2942 case L2CAP_COMMAND_REJ:
2943 l2cap_command_rej(conn, cmd, data);
2946 case L2CAP_CONN_REQ:
2947 err = l2cap_connect_req(conn, cmd, data);
2950 case L2CAP_CONN_RSP:
2951 err = l2cap_connect_rsp(conn, cmd, data);
2954 case L2CAP_CONF_REQ:
2955 err = l2cap_config_req(conn, cmd, cmd_len, data);
2958 case L2CAP_CONF_RSP:
2959 err = l2cap_config_rsp(conn, cmd, data);
2962 case L2CAP_DISCONN_REQ:
2963 err = l2cap_disconnect_req(conn, cmd, data);
2966 case L2CAP_DISCONN_RSP:
2967 err = l2cap_disconnect_rsp(conn, cmd, data);
2970 case L2CAP_ECHO_REQ:
/* Echo: bounce the payload straight back */
2971 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2974 case L2CAP_ECHO_RSP:
2977 case L2CAP_INFO_REQ:
2978 err = l2cap_information_req(conn, cmd, data);
2981 case L2CAP_INFO_RSP:
2982 err = l2cap_information_rsp(conn, cmd, data);
2986 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signalling command: only the connection-parameter
 * update request is handled; command reject and the update response are
 * silently accepted; anything else logs an error (and presumably
 * returns -EINVAL in the missing lines). */
2994 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2995 struct l2cap_cmd_hdr *cmd, u8 *data)
2997 switch (cmd->code) {
2998 case L2CAP_COMMAND_REJ:
3001 case L2CAP_CONN_PARAM_UPDATE_REQ:
3002 return l2cap_conn_param_update_req(conn, cmd, data);
3004 case L2CAP_CONN_PARAM_UPDATE_RSP:
3008 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an incoming signalling-channel skb: first mirror it to raw
 * sockets, then iterate the contained commands (each a cmd header plus
 * cmd_len payload), dispatching to the LE or BR/EDR handler by link
 * type. A handler error triggers a Command Reject back to the peer.
 * Commands with a bad length or zero ident abort the loop.
 * NOTE(review): lossy listing — len initialisation, the per-command
 * data/len advancement and the final kfree_skb are missing lines. */
3013 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3014 struct sk_buff *skb)
3016 u8 *data = skb->data;
3018 struct l2cap_cmd_hdr cmd;
3021 l2cap_raw_recv(conn, skb);
3023 while (len >= L2CAP_CMD_HDR_SIZE) {
3025 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3026 data += L2CAP_CMD_HDR_SIZE;
3027 len -= L2CAP_CMD_HDR_SIZE;
3029 cmd_len = le16_to_cpu(cmd.len);
3031 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* ident 0 is reserved; a length overrun means a malformed packet */
3033 if (cmd_len > len || !cmd.ident) {
3034 BT_DBG("corrupted command");
3038 if (conn->hcon->type == LE_LINK)
3039 err = l2cap_le_sig_cmd(conn, &cmd, data);
3041 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3044 struct l2cap_cmd_rej_unk rej;
3046 BT_ERR("Wrong link type (%d)", err);
3048 /* FIXME: Map err to a valid reason */
3049 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3050 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the 16-bit FCS trailing an ERTM/streaming frame.
 * The FCS covers the L2CAP header (which has already been pulled, hence
 * skb->data - hdr_size) plus the payload. The skb is trimmed first, so
 * skb->data + skb->len then points exactly at the received FCS bytes.
 */
3060 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3062 u16 our_fcs, rcv_fcs;
3063 int hdr_size = L2CAP_HDR_SIZE + 2;
3065 if (chan->fcs == L2CAP_FCS_CRC16) {
3066 skb_trim(skb, skb->len - 2);
3067 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3068 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3070 if (our_fcs != rcv_fcs)
/* Answer a poll: send RNR if we are locally busy, otherwise flush pending
 * I-frames (retransmitting first if the peer was busy), and fall back to a
 * plain RR if nothing at all was sent.
 */
3076 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3080 chan->frames_sent = 0;
3082 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3084 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3085 control |= L2CAP_SUPER_RCV_NOT_READY;
3086 l2cap_send_sframe(chan, control);
3087 set_bit(CONN_RNR_SENT, &chan->conn_state);
3090 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3091 l2cap_retransmit_frames(chan);
3093 l2cap_ertm_send(chan);
/* Nothing was transmitted and we are not busy: acknowledge with RR. */
3095 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3096 chan->frames_sent == 0) {
3097 control |= L2CAP_SUPER_RCV_READY;
3098 l2cap_send_sframe(chan, control);
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * ordered by tx_seq relative to buffer_seq (modulo-64 sequence space).
 * Duplicates (same tx_seq) are detected; in the full source they are
 * rejected so the caller can drop the skb.
 */
3102 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
3104 struct sk_buff *next_skb;
3105 int tx_seq_offset, next_tx_seq_offset;
3107 bt_cb(skb)->tx_seq = tx_seq;
3108 bt_cb(skb)->sar = sar;
3110 next_skb = skb_peek(&chan->srej_q);
/* Empty queue: just append. */
3112 __skb_queue_tail(&chan->srej_q, skb);
/* Offsets are distances from buffer_seq in the 0..63 sequence space. */
3116 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3117 if (tx_seq_offset < 0)
3118 tx_seq_offset += 64;
3121 if (bt_cb(next_skb)->tx_seq == tx_seq)
3124 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3125 chan->buffer_seq) % 64;
3126 if (next_tx_seq_offset < 0)
3127 next_tx_seq_offset += 64;
/* Found the first queued frame logically after us: insert before it. */
3129 if (next_tx_seq_offset > tx_seq_offset) {
3130 __skb_queue_before(&chan->srej_q, next_skb, skb);
3134 if (skb_queue_is_last(&chan->srej_q, next_skb))
3137 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
3139 __skb_queue_tail(&chan->srej_q, skb);
/* Chain new_frag onto skb's frag_list (O(1) via the cached *last_frag
 * tail pointer) and account its length into the head skb's totals.
 */
3144 static void append_skb_frag(struct sk_buff *skb,
3145 struct sk_buff *new_frag, struct sk_buff **last_frag)
3147 /* skb->len reflects data in skb as well as all fragments
3148 * skb->data_len reflects only data in fragments
3150 if (!skb_has_frag_list(skb))
3151 skb_shinfo(skb)->frag_list = new_frag;
3153 new_frag->next = NULL;
3155 (*last_frag)->next = new_frag;
3156 *last_frag = new_frag;
3158 skb->len += new_frag->len;
3159 skb->data_len += new_frag->len;
3160 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from SAR-tagged I-frames. Unsegmented frames are
 * delivered directly; START stashes the announced sdu_len, CONTINUE/END
 * append fragments via frag_list, and a complete SDU is handed to the
 * channel's recv op. Oversized or mismatched SDUs are dropped (the full
 * source returns an error the caller turns into a disconnect).
 */
3163 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3167 switch (control & L2CAP_CTRL_SAR) {
3168 case L2CAP_SDU_UNSEGMENTED:
3172 err = chan->ops->recv(chan->data, skb);
3175 case L2CAP_SDU_START:
/* First two payload bytes carry the total SDU length. */
3179 chan->sdu_len = get_unaligned_le16(skb->data);
3182 if (chan->sdu_len > chan->imtu) {
3187 if (skb->len >= chan->sdu_len)
3191 chan->sdu_last_frag = skb;
3197 case L2CAP_SDU_CONTINUE:
3201 append_skb_frag(chan->sdu, skb,
3202 &chan->sdu_last_frag);
3205 if (chan->sdu->len >= chan->sdu_len)
3215 append_skb_frag(chan->sdu, skb,
3216 &chan->sdu_last_frag);
/* END fragment: total length must match what START announced. */
3219 if (chan->sdu->len != chan->sdu_len)
3222 err = chan->ops->recv(chan->data, chan->sdu);
3225 /* Reassembly complete */
3227 chan->sdu_last_frag = NULL;
/* Error path: discard the partially assembled SDU. */
3235 kfree_skb(chan->sdu);
3237 chan->sdu_last_frag = NULL;
/* Enter the ERTM local-busy state: flag it, tell the peer with an RNR
 * S-frame, and stop the ack timer (no acks while busy).
 */
3244 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3248 BT_DBG("chan %p, Enter local busy", chan);
3250 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3252 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3253 control |= L2CAP_SUPER_RCV_NOT_READY;
3254 l2cap_send_sframe(chan, control);
3256 set_bit(CONN_RNR_SENT, &chan->conn_state);
3258 __clear_ack_timer(chan);
/* Leave local-busy: if an RNR was sent, poll the peer with RR+P and wait
 * for the F-bit response (monitor timer), then clear the busy flags.
 */
3261 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3265 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3268 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3269 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3270 l2cap_send_sframe(chan, control);
3271 chan->retry_count = 1;
3273 __clear_retrans_timer(chan);
3274 __set_monitor_timer(chan);
3276 set_bit(CONN_WAIT_F, &chan->conn_state);
3279 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3280 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3282 BT_DBG("chan %p, Exit local busy", chan);
/* Public hook: toggle the local-busy condition for an ERTM channel
 * (other channel modes have no flow control and are ignored).
 */
3285 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3287 if (chan->mode == L2CAP_MODE_ERTM) {
3289 l2cap_ertm_enter_local_busy(chan);
3291 l2cap_ertm_exit_local_busy(chan);
/* After a missing frame arrives, drain the SREJ queue of now-in-order
 * frames starting at tx_seq, reassembling each; stop at the first gap or
 * if we go locally busy. A reassembly error disconnects the channel.
 */
3295 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3297 struct sk_buff *skb;
3300 while ((skb = skb_peek(&chan->srej_q)) &&
3301 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3304 if (bt_cb(skb)->tx_seq != tx_seq)
3307 skb = skb_dequeue(&chan->srej_q);
3308 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3309 err = l2cap_reassemble_sdu(chan, skb, control);
3312 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3316 chan->buffer_seq_srej =
3317 (chan->buffer_seq_srej + 1) % 64;
3318 tx_seq = (tx_seq + 1) % 64;
/* Walk the pending-SREJ list: entries up to and including tx_seq are
 * satisfied (removed in the full source); later entries get their SREJ
 * S-frame re-sent and are re-queued at the tail.
 */
3322 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3324 struct srej_list *l, *tmp;
3327 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3328 if (l->tx_seq == tx_seq) {
3333 control = L2CAP_SUPER_SELECT_REJECT;
3334 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3335 l2cap_send_sframe(chan, control);
/* Move to tail so list order keeps matching resend order. */
3337 list_add_tail(&l->list, &chan->srej_l);
3341 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3343 struct srej_list *new;
3346 while (tx_seq != chan->expected_tx_seq) {
3347 control = L2CAP_SUPER_SELECT_REJECT;
3348 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3349 l2cap_send_sframe(chan, control);
3351 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3352 new->tx_seq = chan->expected_tx_seq;
3353 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3354 list_add_tail(&new->list, &chan->srej_l);
3356 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/* Core ERTM receive path for I-frames: validate tx_seq against the
 * receive window, handle the SREJ (selective-reject) recovery states,
 * queue or reassemble in-order frames, and schedule acknowledgements.
 * NOTE(review): many intermediate lines (labels, gotos, breaks) are
 * elided in this extract; comments below describe only what is visible.
 */
3359 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3361 u8 tx_seq = __get_txseq(rx_control);
3362 u8 req_seq = __get_reqseq(rx_control);
3363 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
/* Ack roughly every tx_win/6 frames. */
3365 int num_to_ack = (chan->tx_win/6) + 1;
3368 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3369 tx_seq, rx_control);
/* F-bit answers our poll: stop the monitor timer. */
3371 if (L2CAP_CTRL_FINAL & rx_control &&
3372 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3373 __clear_monitor_timer(chan);
3374 if (chan->unacked_frames > 0)
3375 __set_retrans_timer(chan);
3376 clear_bit(CONN_WAIT_F, &chan->conn_state);
3379 chan->expected_ack_seq = req_seq;
3380 l2cap_drop_acked_frames(chan);
3382 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3383 if (tx_seq_offset < 0)
3384 tx_seq_offset += 64;
3386 /* invalid tx_seq */
3387 if (tx_seq_offset >= chan->tx_win) {
3388 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3392 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3395 if (tx_seq == chan->expected_tx_seq)
3398 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3399 struct srej_list *first;
3401 first = list_first_entry(&chan->srej_l,
3402 struct srej_list, list);
/* This frame fills the oldest requested gap. */
3403 if (tx_seq == first->tx_seq) {
3404 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3405 l2cap_check_srej_gap(chan, tx_seq);
3407 list_del(&first->list);
/* All gaps filled: leave SREJ_SENT state. */
3410 if (list_empty(&chan->srej_l)) {
3411 chan->buffer_seq = chan->buffer_seq_srej;
3412 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3413 l2cap_send_ack(chan);
3414 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3417 struct srej_list *l;
3419 /* duplicated tx_seq */
3420 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3423 list_for_each_entry(l, &chan->srej_l, list) {
3424 if (l->tx_seq == tx_seq) {
3425 l2cap_resend_srejframe(chan, tx_seq);
3429 l2cap_send_srejframe(chan, tx_seq);
3432 expected_tx_seq_offset =
3433 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3434 if (expected_tx_seq_offset < 0)
3435 expected_tx_seq_offset += 64;
3437 /* duplicated tx_seq */
3438 if (tx_seq_offset < expected_tx_seq_offset)
/* First out-of-order frame: enter SREJ recovery. */
3441 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3443 BT_DBG("chan %p, Enter SREJ", chan);
3445 INIT_LIST_HEAD(&chan->srej_l);
3446 chan->buffer_seq_srej = chan->buffer_seq;
3448 __skb_queue_head_init(&chan->srej_q);
3449 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3451 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3453 l2cap_send_srejframe(chan, tx_seq);
3455 __clear_ack_timer(chan);
/* In-order frame: advance the window. */
3460 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3462 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3463 bt_cb(skb)->tx_seq = tx_seq;
3464 bt_cb(skb)->sar = sar;
3465 __skb_queue_tail(&chan->srej_q, skb);
3469 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3470 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3472 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3476 if (rx_control & L2CAP_CTRL_FINAL) {
3477 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3478 l2cap_retransmit_frames(chan);
3481 __set_ack_timer(chan);
3483 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3484 if (chan->num_acked == num_to_ack - 1)
3485 l2cap_send_ack(chan);
/* Handle a Receiver-Ready S-frame: drop acked frames, then respond to the
 * P-bit (peer polling us) or F-bit (peer answering our poll), and resume
 * transmission now that the peer is not busy.
 */
3494 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3496 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3499 chan->expected_ack_seq = __get_reqseq(rx_control);
3500 l2cap_drop_acked_frames(chan);
3502 if (rx_control & L2CAP_CTRL_POLL) {
3503 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3504 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3505 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3506 (chan->unacked_frames > 0))
3507 __set_retrans_timer(chan);
3509 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3510 l2cap_send_srejtail(chan);
3512 l2cap_send_i_or_rr_or_rnr(chan);
3515 } else if (rx_control & L2CAP_CTRL_FINAL) {
3516 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* F-bit without a pending REJ means retransmit now. */
3518 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3519 l2cap_retransmit_frames(chan);
3522 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3523 (chan->unacked_frames > 0))
3524 __set_retrans_timer(chan);
3526 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3527 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3528 l2cap_send_ack(chan);
3530 l2cap_ertm_send(chan);
/* Handle a Reject S-frame: everything before req_seq is acked; frames
 * from req_seq on must be retransmitted (deferred via CONN_REJ_ACT if we
 * are still waiting for an F-bit).
 */
3534 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3536 u8 tx_seq = __get_reqseq(rx_control);
3538 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3540 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3542 chan->expected_ack_seq = tx_seq;
3543 l2cap_drop_acked_frames(chan);
3545 if (rx_control & L2CAP_CTRL_FINAL) {
3546 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3547 l2cap_retransmit_frames(chan);
3549 l2cap_retransmit_frames(chan);
3551 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3552 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a Selective-Reject S-frame: retransmit exactly the requested
 * frame, with P/F-bit bookkeeping (srej_save_reqseq remembers which seq
 * the F-bit answer will correspond to while CONN_WAIT_F is set).
 */
3555 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3557 u8 tx_seq = __get_reqseq(rx_control);
3559 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3561 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3563 if (rx_control & L2CAP_CTRL_POLL) {
3564 chan->expected_ack_seq = tx_seq;
3565 l2cap_drop_acked_frames(chan);
3567 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3568 l2cap_retransmit_one_frame(chan, tx_seq);
3570 l2cap_ertm_send(chan);
3572 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3573 chan->srej_save_reqseq = tx_seq;
3574 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3576 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* F-bit for the seq we already retransmitted: nothing more to send. */
3577 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3578 chan->srej_save_reqseq == tx_seq)
3579 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3581 l2cap_retransmit_one_frame(chan, tx_seq);
3583 l2cap_retransmit_one_frame(chan, tx_seq);
3584 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3585 chan->srej_save_reqseq = tx_seq;
3586 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a Receiver-Not-Ready S-frame: mark the peer busy, stop
 * retransmissions, and answer a P-bit with RR+F (or the SREJ tail while
 * in SREJ recovery).
 */
3591 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3593 u8 tx_seq = __get_reqseq(rx_control);
3595 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3597 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3598 chan->expected_ack_seq = tx_seq;
3599 l2cap_drop_acked_frames(chan);
3601 if (rx_control & L2CAP_CTRL_POLL)
3602 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3604 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3605 __clear_retrans_timer(chan);
3606 if (rx_control & L2CAP_CTRL_POLL)
3607 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3611 if (rx_control & L2CAP_CTRL_POLL)
3612 l2cap_send_srejtail(chan);
3614 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
/* Dispatch a supervisory frame (RR/REJ/SREJ/RNR) to its handler, first
 * clearing the wait-for-F state if this frame carries the F-bit.
 */
3617 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3619 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3621 if (L2CAP_CTRL_FINAL & rx_control &&
3622 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3623 __clear_monitor_timer(chan);
3624 if (chan->unacked_frames > 0)
3625 __set_retrans_timer(chan);
3626 clear_bit(CONN_WAIT_F, &chan->conn_state);
3629 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3630 case L2CAP_SUPER_RCV_READY:
3631 l2cap_data_channel_rrframe(chan, rx_control);
3634 case L2CAP_SUPER_REJECT:
3635 l2cap_data_channel_rejframe(chan, rx_control);
3638 case L2CAP_SUPER_SELECT_REJECT:
3639 l2cap_data_channel_srejframe(chan, rx_control);
3642 case L2CAP_SUPER_RCV_NOT_READY:
3643 l2cap_data_channel_rnrframe(chan, rx_control);
/* ERTM per-skb entry point: check FCS, validate length and req_seq
 * against the transmit window, then route to the I-frame or S-frame
 * handler. Invalid frames disconnect the channel.
 */
3651 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3653 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3656 int len, next_tx_seq_offset, req_seq_offset;
3658 control = get_unaligned_le16(skb->data);
3663 * We can just drop the corrupted I-frame here.
3664 * Receiver will miss it and start proper recovery
3665 * procedures and ask retransmission.
3667 if (l2cap_check_fcs(chan, skb))
/* SAR-start I-frames carry a 2-byte SDU length to subtract from len. */
3670 if (__is_sar_start(control) && __is_iframe(control))
3673 if (chan->fcs == L2CAP_FCS_CRC16)
3676 if (len > chan->mps) {
3677 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3681 req_seq = __get_reqseq(control);
3682 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3683 if (req_seq_offset < 0)
3684 req_seq_offset += 64;
3686 next_tx_seq_offset =
3687 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3688 if (next_tx_seq_offset < 0)
3689 next_tx_seq_offset += 64;
3691 /* check for invalid req-seq */
3692 if (req_seq_offset > next_tx_seq_offset) {
3693 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3697 if (__is_iframe(control)) {
3699 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3703 l2cap_data_channel_iframe(chan, control, skb);
3707 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3711 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver a data frame to the channel identified by cid, per channel
 * mode: basic (direct recv), ERTM (state machine, possibly via socket
 * backlog), or streaming (no retransmission, gaps discard partial SDUs).
 */
3721 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3723 struct l2cap_chan *chan;
3724 struct sock *sk = NULL;
3729 chan = l2cap_get_chan_by_scid(conn, cid);
3731 BT_DBG("unknown cid 0x%4.4x", cid);
3737 BT_DBG("chan %p, len %d", chan, skb->len);
3739 if (chan->state != BT_CONNECTED)
3742 switch (chan->mode) {
3743 case L2CAP_MODE_BASIC:
3744 /* If socket recv buffers overflows we drop data here
3745 * which is *bad* because L2CAP has to be reliable.
3746 * But we don't have any other choice. L2CAP doesn't
3747 * provide flow control mechanism. */
3749 if (chan->imtu < skb->len)
3752 if (!chan->ops->recv(chan->data, skb))
3756 case L2CAP_MODE_ERTM:
/* Process directly unless the socket is owned by a user-space
 * caller, in which case defer via the backlog. */
3757 if (!sock_owned_by_user(sk)) {
3758 l2cap_ertm_data_rcv(sk, skb);
3760 if (sk_add_backlog(sk, skb))
3766 case L2CAP_MODE_STREAMING:
3767 control = get_unaligned_le16(skb->data);
3771 if (l2cap_check_fcs(chan, skb))
3774 if (__is_sar_start(control))
3777 if (chan->fcs == L2CAP_FCS_CRC16)
3780 if (len > chan->mps || len < 0 || __is_sframe(control))
3783 tx_seq = __get_txseq(control);
3785 if (chan->expected_tx_seq != tx_seq) {
3786 /* Frame(s) missing - must discard partial SDU */
3787 kfree_skb(chan->sdu);
3789 chan->sdu_last_frag = NULL;
3792 /* TODO: Notify userland of missing data */
3795 chan->expected_tx_seq = (tx_seq + 1) % 64;
3797 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
3798 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3803 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (PSM-addressed) frame to a matching global
 * channel, subject to state and MTU checks.
 */
3817 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3819 struct sock *sk = NULL;
3820 struct l2cap_chan *chan;
3822 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3830 BT_DBG("sk %p, len %d", sk, skb->len);
3832 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3835 if (chan->imtu < skb->len)
3838 if (!chan->ops->recv(chan->data, skb))
/* Deliver an LE ATT (fixed CID) frame to a matching global channel,
 * subject to the same state and MTU checks as connectionless data.
 */
3850 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3852 struct sock *sk = NULL;
3853 struct l2cap_chan *chan;
3855 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3863 BT_DBG("sk %p, len %d", sk, skb->len);
3865 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3868 if (chan->imtu < skb->len)
3871 if (!chan->ops->recv(chan->data, skb))
/* Top-level demux for a complete L2CAP frame: strip the basic header,
 * sanity-check the length, then route by CID (signaling, connectionless,
 * ATT, SMP, or a connection-oriented data channel).
 */
3883 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3885 struct l2cap_hdr *lh = (void *) skb->data;
3889 skb_pull(skb, L2CAP_HDR_SIZE);
3890 cid = __le16_to_cpu(lh->cid);
3891 len = __le16_to_cpu(lh->len);
3893 if (len != skb->len) {
3898 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3901 case L2CAP_CID_LE_SIGNALING:
3902 case L2CAP_CID_SIGNALING:
3903 l2cap_sig_channel(conn, skb);
3906 case L2CAP_CID_CONN_LESS:
3907 psm = get_unaligned_le16(skb->data);
3909 l2cap_conless_channel(conn, psm, skb);
3912 case L2CAP_CID_LE_DATA:
3913 l2cap_att_channel(conn, cid, skb);
/* SMP channel: a failure here tears down the whole connection. */
3917 if (smp_sig_channel(conn, skb))
3918 l2cap_conn_del(conn->hcon, EACCES);
3922 l2cap_data_channel(conn, cid, skb);
3927 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from bdaddr?
 * Scan listening channels; an exact local-address match takes precedence
 * (lm1) over wildcard BDADDR_ANY listeners (lm2).
 */
3929 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3931 int exact = 0, lm1 = 0, lm2 = 0;
3932 struct l2cap_chan *c;
3934 if (type != ACL_LINK)
3937 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3939 /* Find listening sockets and check their link_mode */
3940 read_lock(&chan_list_lock);
3941 list_for_each_entry(c, &chan_list, global_l) {
3942 struct sock *sk = c->sk;
3944 if (c->state != BT_LISTEN)
3947 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3948 lm1 |= HCI_LM_ACCEPT;
3950 lm1 |= HCI_LM_MASTER;
3952 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3953 lm2 |= HCI_LM_ACCEPT;
3955 lm2 |= HCI_LM_MASTER;
3958 read_unlock(&chan_list_lock);
3960 return exact ? lm1 : lm2;
/* HCI callback: link establishment completed. On success, create/ready
 * the L2CAP connection; on failure, tear it down with the mapped errno.
 */
3963 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3965 struct l2cap_conn *conn;
3967 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3969 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3973 conn = l2cap_conn_add(hcon, status);
3975 l2cap_conn_ready(conn);
3977 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the disconnect reason stored on the connection
 * (used by HCI when it initiates the disconnect).
 */
3982 static int l2cap_disconn_ind(struct hci_conn *hcon)
3984 struct l2cap_conn *conn = hcon->l2cap_data;
3986 BT_DBG("hcon %p", hcon);
3988 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
3991 return conn->disc_reason;
/* HCI callback: the link went down - destroy the L2CAP connection. */
3994 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3996 BT_DBG("hcon %p reason %d", hcon, reason);
3998 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4001 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * loss of encryption starts a grace timer for MEDIUM security and closes
 * HIGH-security channels outright; regained encryption cancels the timer.
 */
4006 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4008 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4011 if (encrypt == 0x00) {
4012 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4013 __clear_chan_timer(chan);
4014 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4015 } else if (chan->sec_level == BT_SECURITY_HIGH)
4016 l2cap_chan_close(chan, ECONNREFUSED);
4018 if (chan->sec_level == BT_SECURITY_MEDIUM)
4019 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption procedure finished.
 * Walk every channel on the connection and advance its state machine:
 * LE channels become ready, BT_CONNECT channels send their deferred
 * connect request, and BT_CONNECT2 channels answer the pending incoming
 * connection (pend/success/security-block).
 */
4023 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4025 struct l2cap_conn *conn = hcon->l2cap_data;
4026 struct l2cap_chan *chan;
4031 BT_DBG("conn %p", conn);
4033 if (hcon->type == LE_LINK) {
4034 smp_distribute_keys(conn, 0);
4035 del_timer(&conn->security_timer);
4038 read_lock(&conn->chan_lock);
4040 list_for_each_entry(chan, &conn->chan_l, list) {
4041 struct sock *sk = chan->sk;
4045 BT_DBG("chan->scid %d", chan->scid);
4047 if (chan->scid == L2CAP_CID_LE_DATA) {
4048 if (!status && encrypt) {
4049 chan->sec_level = hcon->sec_level;
4050 l2cap_chan_ready(sk);
4057 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4062 if (!status && (chan->state == BT_CONNECTED ||
4063 chan->state == BT_CONFIG)) {
4064 l2cap_check_encryption(chan, encrypt);
4069 if (chan->state == BT_CONNECT) {
/* Security now satisfied: issue the deferred connect request. */
4071 struct l2cap_conn_req req;
4072 req.scid = cpu_to_le16(chan->scid);
4073 req.psm = chan->psm;
4075 chan->ident = l2cap_get_ident(conn);
4076 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4078 l2cap_send_cmd(conn, chan->ident,
4079 L2CAP_CONN_REQ, sizeof(req), &req);
4081 __clear_chan_timer(chan);
4082 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4084 } else if (chan->state == BT_CONNECT2) {
4085 struct l2cap_conn_rsp rsp;
4089 if (bt_sk(sk)->defer_setup) {
4090 struct sock *parent = bt_sk(sk)->parent;
4091 res = L2CAP_CR_PEND;
4092 stat = L2CAP_CS_AUTHOR_PEND;
4094 parent->sk_data_ready(parent, 0);
4096 l2cap_state_change(chan, BT_CONFIG);
4097 res = L2CAP_CR_SUCCESS;
4098 stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse the incoming connection. */
4101 l2cap_state_change(chan, BT_DISCONN);
4102 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4103 res = L2CAP_CR_SEC_BLOCK;
4104 stat = L2CAP_CS_NO_INFO;
4107 rsp.scid = cpu_to_le16(chan->dcid);
4108 rsp.dcid = cpu_to_le16(chan->scid);
4109 rsp.result = cpu_to_le16(res);
4110 rsp.status = cpu_to_le16(stat);
4111 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4118 read_unlock(&conn->chan_lock);
/* HCI callback: reassemble ACL fragments into complete L2CAP frames.
 * A start fragment (no ACL_CONT) carries the basic header announcing the
 * total length; continuation fragments are appended into conn->rx_skb
 * until rx_len reaches zero, then the frame is dispatched. Any length
 * inconsistency marks the connection unreliable.
 */
4123 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4125 struct l2cap_conn *conn = hcon->l2cap_data;
4128 conn = l2cap_conn_add(hcon, 0);
4133 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4135 if (!(flags & ACL_CONT)) {
4136 struct l2cap_hdr *hdr;
4137 struct l2cap_chan *chan;
/* A start while a previous frame is pending means we lost data. */
4142 BT_ERR("Unexpected start frame (len %d)", skb->len);
4143 kfree_skb(conn->rx_skb);
4144 conn->rx_skb = NULL;
4146 l2cap_conn_unreliable(conn, ECOMM);
4149 /* Start fragment always begin with Basic L2CAP header */
4150 if (skb->len < L2CAP_HDR_SIZE) {
4151 BT_ERR("Frame is too short (len %d)", skb->len);
4152 l2cap_conn_unreliable(conn, ECOMM);
4156 hdr = (struct l2cap_hdr *) skb->data;
4157 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4158 cid = __le16_to_cpu(hdr->cid);
4160 if (len == skb->len) {
4161 /* Complete frame received */
4162 l2cap_recv_frame(conn, skb);
4166 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4168 if (skb->len > len) {
4169 BT_ERR("Frame is too long (len %d, expected len %d)",
4171 l2cap_conn_unreliable(conn, ECOMM);
/* Early MTU check against the target channel, before buffering. */
4175 chan = l2cap_get_chan_by_scid(conn, cid);
4177 if (chan && chan->sk) {
4178 struct sock *sk = chan->sk;
4180 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4181 BT_ERR("Frame exceeding recv MTU (len %d, "
4185 l2cap_conn_unreliable(conn, ECOMM);
4191 /* Allocate skb for the complete frame (with header) */
4192 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4196 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4198 conn->rx_len = len - skb->len;
4200 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4202 if (!conn->rx_len) {
4203 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4204 l2cap_conn_unreliable(conn, ECOMM);
4208 if (skb->len > conn->rx_len) {
4209 BT_ERR("Fragment is too long (len %d, expected %d)",
4210 skb->len, conn->rx_len);
4211 kfree_skb(conn->rx_skb);
4212 conn->rx_skb = NULL;
4214 l2cap_conn_unreliable(conn, ECOMM);
4218 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4220 conn->rx_len -= skb->len;
4222 if (!conn->rx_len) {
4223 /* Complete frame received */
4224 l2cap_recv_frame(conn, conn->rx_skb);
4225 conn->rx_skb = NULL;
/* debugfs: dump one line per global L2CAP channel (addresses, state,
 * PSM, CIDs, MTUs, security level, mode).
 */
4234 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4236 struct l2cap_chan *c;
4238 read_lock_bh(&chan_list_lock);
4240 list_for_each_entry(c, &chan_list, global_l) {
4241 struct sock *sk = c->sk;
4243 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4244 batostr(&bt_sk(sk)->src),
4245 batostr(&bt_sk(sk)->dst),
4246 c->state, __le16_to_cpu(c->psm),
4247 c->scid, c->dcid, c->imtu, c->omtu,
4248 c->sec_level, c->mode);
4251 read_unlock_bh(&chan_list_lock);
/* debugfs open: standard single_open wrapper around the show routine. */
4256 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4258 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (seq_file based). */
4261 static const struct file_operations l2cap_debugfs_fops = {
4262 .open = l2cap_debugfs_open,
4264 .llseek = seq_lseek,
4265 .release = single_release,
4270 static struct hci_proto l2cap_hci_proto = {
4272 .id = HCI_PROTO_L2CAP,
4273 .connect_ind = l2cap_connect_ind,
4274 .connect_cfm = l2cap_connect_cfm,
4275 .disconn_ind = l2cap_disconn_ind,
4276 .disconn_cfm = l2cap_disconn_cfm,
4277 .security_cfm = l2cap_security_cfm,
4278 .recv_acldata = l2cap_recv_acldata
/* Module init: register the socket family, hook into HCI, and create the
 * debugfs entry (debugfs failure is non-fatal, only logged).
 */
4281 int __init l2cap_init(void)
4285 err = l2cap_init_sockets();
4289 err = hci_register_proto(&l2cap_hci_proto);
4291 BT_ERR("L2CAP protocol registration failed");
/* Roll back the socket registration on HCI failure. */
4292 bt_sock_unregister(BTPROTO_L2CAP);
4297 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4298 bt_debugfs, NULL, &l2cap_debugfs_fops);
4300 BT_ERR("Failed to create L2CAP debug file");
4306 l2cap_cleanup_sockets();
/* Module exit: undo l2cap_init in reverse order. */
4310 void l2cap_exit(void)
4312 debugfs_remove(l2cap_debugfs);
4314 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4315 BT_ERR("L2CAP protocol unregistration failed");
4317 l2cap_cleanup_sockets();
4320 module_param(disable_ertm, bool, 0644);
4321 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");