2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
/* NOTE(review): this listing is a line-numbered excerpt; some original lines
 * (braces, returns, blanks) are elided between the numbered lines. */
/* Locally supported L2CAP feature mask advertised in info responses. */
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Bitmap of supported fixed channels (bit 1 = signalling channel). */
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
/* Global list of all L2CAP channels, guarded by chan_list_lock. */
64 static LIST_HEAD(chan_list);
65 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for the signalling-command helpers used below. */
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
69 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
72 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77 /* ---- L2CAP channels ---- */
/* Take a reference on a channel. */
79 static inline void chan_hold(struct l2cap_chan *c)
81 atomic_inc(&c->refcnt);
/* Drop a reference; the free path runs when the count reaches zero
 * (the actual free statement is elided in this excerpt). */
84 static inline void chan_put(struct l2cap_chan *c)
86 if (atomic_dec_and_test(&c->refcnt))
/* Walk conn->chan_l looking for a channel by destination CID.
 * Caller must hold conn->chan_lock. */
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
94 list_for_each_entry(c, &conn->chan_l, list) {
/* Same walk, keyed on the source CID. Caller holds conn->chan_lock. */
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104 struct l2cap_chan *c;
106 list_for_each_entry(c, &conn->chan_l, list) {
113 /* Find channel with given SCID.
114 * Returns locked socket */
115 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
117 struct l2cap_chan *c;
119 read_lock(&conn->chan_lock);
120 c = __l2cap_get_chan_by_scid(conn, cid);
/* The lock-the-socket step sits in the elided lines between lock and unlock. */
123 read_unlock(&conn->chan_lock);
/* Lookup by pending signalling-command identifier; lock held by caller. */
127 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
129 struct l2cap_chan *c;
131 list_for_each_entry(c, &conn->chan_l, list) {
132 if (c->ident == ident)
/* Locked wrapper around the ident lookup above. */
138 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
140 struct l2cap_chan *c;
142 read_lock(&conn->chan_lock);
143 c = __l2cap_get_chan_by_ident(conn, ident);
146 read_unlock(&conn->chan_lock);
/* Find a channel in the global list bound to this PSM and source address.
 * Caller must hold chan_list_lock. Note the comparison uses c->sport. */
150 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
152 struct l2cap_chan *c;
154 list_for_each_entry(c, &chan_list, global_l) {
155 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a channel to a PSM; if psm is 0, auto-allocate an odd dynamic PSM
 * in 0x1001..0x10ff. Fails if the (psm, src) pair is already taken. */
164 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
168 write_lock_bh(&chan_list_lock);
170 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Dynamic PSMs are odd values only, hence the += 2 stride. */
183 for (p = 0x1001; p < 0x1100; p += 2)
184 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
185 chan->psm = cpu_to_le16(p);
186 chan->sport = cpu_to_le16(p);
193 write_unlock_bh(&chan_list_lock);
/* Record a fixed source CID under the global list lock. */
197 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
199 write_lock_bh(&chan_list_lock);
203 write_unlock_bh(&chan_list_lock);
/* Allocate the first free dynamic CID on this connection.
 * Caller holds conn->chan_lock. */
208 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
210 u16 cid = L2CAP_CID_DYN_START;
212 for (; cid < L2CAP_CID_DYN_END; cid++) {
213 if (!__l2cap_get_chan_by_scid(conn, cid))
/* (Re)arm a channel timer for `timeout` ms; mod_timer() returning 0 means
 * the timer was not previously pending (the action taken in that branch is
 * elided here — presumably a chan_hold(); confirm against full source). */
220 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
222 BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
224 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
/* Cancel a pending channel timer; the matching reference drop on successful
 * deletion is in the elided body. */
228 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
230 BT_DBG("chan %p state %d", chan, chan->state);
232 if (timer_pending(timer) && del_timer(timer))
/* Update channel state and notify the socket layer via the ops callback. */
236 static void l2cap_state_change(struct l2cap_chan *chan, int state)
239 chan->ops->state_change(chan->data, state);
/* Channel timer callback (runs in timer/softirq context).
 * If the socket is owned by user space, re-arm and retry later; otherwise
 * pick an error reason from the current state and close the channel. */
242 static void l2cap_chan_timeout(unsigned long arg)
244 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
245 struct sock *sk = chan->sk;
248 BT_DBG("chan %p state %d", chan, chan->state);
252 if (sock_owned_by_user(sk)) {
253 /* sk is owned by user. Try again later */
254 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Established/configuring channels report ECONNREFUSED; an outgoing
 * connect above SDP security level does too (default elided). */
260 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
261 reason = ECONNREFUSED;
262 else if (chan->state == BT_CONNECT &&
263 chan->sec_level != BT_SECURITY_SDP)
264 reason = ECONNREFUSED;
268 l2cap_chan_close(chan, reason);
272 chan->ops->close(chan->data);
/* Allocate a channel bound to sk, link it into the global channel list,
 * arm its timeout timer, and hand back a reference (refcnt = 1).
 * GFP_ATOMIC because callers may hold bh locks. */
276 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
278 struct l2cap_chan *chan;
280 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
286 write_lock_bh(&chan_list_lock);
287 list_add(&chan->global_l, &chan_list);
288 write_unlock_bh(&chan_list_lock);
290 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
292 chan->state = BT_OPEN;
294 atomic_set(&chan->refcnt, 1);
/* Unlink from the global list; the final free happens via chan_put. */
299 void l2cap_chan_destroy(struct l2cap_chan *chan)
301 write_lock_bh(&chan_list_lock);
302 list_del(&chan->global_l);
303 write_unlock_bh(&chan_list_lock);
/* Attach a channel to a connection and assign CIDs/MTU according to the
 * channel type. Caller must hold conn->chan_lock for writing. */
308 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
310 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
311 chan->psm, chan->dcid);
/* 0x13 = "remote user terminated connection" default disconnect reason. */
313 conn->disc_reason = 0x13;
317 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
318 if (conn->hcon->type == LE_LINK) {
/* LE data channel uses the fixed LE CID on both ends. */
320 chan->omtu = L2CAP_LE_DEFAULT_MTU;
321 chan->scid = L2CAP_CID_LE_DATA;
322 chan->dcid = L2CAP_CID_LE_DATA;
324 /* Alloc CID for connection-oriented socket */
325 chan->scid = l2cap_alloc_cid(conn);
326 chan->omtu = L2CAP_DEFAULT_MTU;
328 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
329 /* Connectionless socket */
330 chan->scid = L2CAP_CID_CONN_LESS;
331 chan->dcid = L2CAP_CID_CONN_LESS;
332 chan->omtu = L2CAP_DEFAULT_MTU;
334 /* Raw socket can send/recv signalling messages only */
335 chan->scid = L2CAP_CID_SIGNALING;
336 chan->dcid = L2CAP_CID_SIGNALING;
337 chan->omtu = L2CAP_DEFAULT_MTU;
342 list_add(&chan->list, &conn->chan_l);
/* Detach a channel from its connection and tear down ERTM state.
346 * Must be called on the locked socket. */
347 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
349 struct sock *sk = chan->sk;
350 struct l2cap_conn *conn = chan->conn;
351 struct sock *parent = bt_sk(sk)->parent;
353 __clear_chan_timer(chan);
355 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
358 /* Delete from channel list */
359 write_lock_bh(&conn->chan_lock);
360 list_del(&chan->list);
361 write_unlock_bh(&conn->chan_lock);
/* Each attached channel holds a reference on the ACL link. */
365 hci_conn_put(conn->hcon);
368 l2cap_state_change(chan, BT_CLOSED);
369 sock_set_flag(sk, SOCK_ZAPPED);
/* If still on a listening socket's accept queue, unlink and wake parent. */
375 bt_accept_unlink(sk);
376 parent->sk_data_ready(parent, 0);
378 sk->sk_state_change(sk);
/* Skip queue teardown when configuration never completed in both
 * directions (the early-return statement itself is elided here). */
380 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
381 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
384 skb_queue_purge(&chan->tx_q);
386 if (chan->mode == L2CAP_MODE_ERTM) {
387 struct srej_list *l, *tmp;
389 __clear_retrans_timer(chan);
390 __clear_monitor_timer(chan);
391 __clear_ack_timer(chan);
393 skb_queue_purge(&chan->srej_q);
/* Free every outstanding SREJ bookkeeping entry. */
395 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every not-yet-accepted child channel of a listening socket. */
402 static void l2cap_chan_cleanup_listen(struct sock *parent)
406 BT_DBG("parent %p", parent);
408 /* Close not yet accepted channels */
409 while ((sk = bt_accept_dequeue(parent, NULL))) {
410 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
411 __clear_chan_timer(chan);
413 l2cap_chan_close(chan, ECONNRESET);
415 chan->ops->close(chan->data);
/* Close a channel according to its current state: listening sockets sweep
 * their accept queue; connected ACL channels send a disconnect request;
 * CONNECT2 channels answer the pending connect request with a rejection. */
419 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
421 struct l2cap_conn *conn = chan->conn;
422 struct sock *sk = chan->sk;
424 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
426 switch (chan->state) {
428 l2cap_chan_cleanup_listen(sk);
430 l2cap_state_change(chan, BT_CLOSED);
431 sock_set_flag(sk, SOCK_ZAPPED);
/* BT_CONNECTED/BT_CONFIG on ACL: negotiate an orderly disconnect. */
436 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
437 conn->hcon->type == ACL_LINK) {
438 __clear_chan_timer(chan);
439 __set_chan_timer(chan, sk->sk_sndtimeo);
440 l2cap_send_disconn_req(conn, chan, reason);
442 l2cap_chan_del(chan, reason);
/* BT_CONNECT2: a remote connect request is outstanding — reject it. */
446 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
447 conn->hcon->type == ACL_LINK) {
448 struct l2cap_conn_rsp rsp;
/* defer_setup means authorization was pending -> SEC_BLOCK,
 * otherwise report bad PSM. */
451 if (bt_sk(sk)->defer_setup)
452 result = L2CAP_CR_SEC_BLOCK;
454 result = L2CAP_CR_BAD_PSM;
455 l2cap_state_change(chan, BT_DISCONN);
/* In responses scid/dcid are from the remote's point of view. */
457 rsp.scid = cpu_to_le16(chan->dcid);
458 rsp.dcid = cpu_to_le16(chan->scid);
459 rsp.result = cpu_to_le16(result);
460 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
461 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
465 l2cap_chan_del(chan, reason);
/* BT_CONNECT / BT_DISCONN: just detach. */
470 l2cap_chan_del(chan, reason);
/* Default: mark the socket dead. */
474 sock_set_flag(sk, SOCK_ZAPPED);
/* Map the channel type / PSM / security level to an HCI authentication
 * requirement. Raw channels use dedicated bonding; PSM 0x0001 (SDP) never
 * bonds; everything else uses general bonding. */
479 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
481 if (chan->chan_type == L2CAP_CHAN_RAW) {
482 switch (chan->sec_level) {
483 case BT_SECURITY_HIGH:
484 return HCI_AT_DEDICATED_BONDING_MITM;
485 case BT_SECURITY_MEDIUM:
486 return HCI_AT_DEDICATED_BONDING;
488 return HCI_AT_NO_BONDING;
490 } else if (chan->psm == cpu_to_le16(0x0001)) {
/* SDP never requires security; downgrade LOW to the SDP level. */
491 if (chan->sec_level == BT_SECURITY_LOW)
492 chan->sec_level = BT_SECURITY_SDP;
494 if (chan->sec_level == BT_SECURITY_HIGH)
495 return HCI_AT_NO_BONDING_MITM;
497 return HCI_AT_NO_BONDING;
499 switch (chan->sec_level) {
500 case BT_SECURITY_HIGH:
501 return HCI_AT_GENERAL_BONDING_MITM;
502 case BT_SECURITY_MEDIUM:
503 return HCI_AT_GENERAL_BONDING;
505 return HCI_AT_NO_BONDING;
510 /* Service level security */
511 static inline int l2cap_check_security(struct l2cap_chan *chan)
513 struct l2cap_conn *conn = chan->conn;
516 auth_type = l2cap_get_auth_type(chan);
518 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for this connection. */
521 static u8 l2cap_get_ident(struct l2cap_conn *conn)
525 /* Get next available identificator.
526 * 1 - 128 are used by kernel.
527 * 129 - 199 are reserved.
528 * 200 - 254 are used by utilities like l2ping, etc.
/* Wrap-around handling after 128 is in the elided lines. */
531 spin_lock_bh(&conn->lock);
533 if (++conn->tx_ident > 128)
538 spin_unlock_bh(&conn->lock);
/* Build and transmit one signalling command on the ACL link. */
543 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
545 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
548 BT_DBG("code 0x%2.2x", code);
/* Prefer non-flushable packets when the controller supports them. */
553 if (lmp_no_flush_capable(conn->hcon->hdev))
554 flags = ACL_START_NO_FLUSH;
/* Signalling always forces the link into active mode. */
558 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
560 hci_send_acl(conn->hcon, skb, flags);
/* Build and send an ERTM S-frame carrying `control`. Adds the F-bit and
 * P-bit if they are pending on the channel, and an FCS when CRC16 is on. */
563 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
566 struct l2cap_hdr *lh;
567 struct l2cap_conn *conn = chan->conn;
568 int count, hlen = L2CAP_HDR_SIZE + 2;
571 if (chan->state != BT_CONNECTED)
/* hlen grows by 2 for the FCS (increment itself elided). */
574 if (chan->fcs == L2CAP_FCS_CRC16)
577 BT_DBG("chan %p, control 0x%2.2x", chan, control);
579 count = min_t(unsigned int, conn->mtu, hlen);
580 control |= L2CAP_CTRL_FRAME_TYPE;
582 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
583 control |= L2CAP_CTRL_FINAL;
585 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
586 control |= L2CAP_CTRL_POLL;
588 skb = bt_skb_alloc(count, GFP_ATOMIC);
592 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
593 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
594 lh->cid = cpu_to_le16(chan->dcid);
595 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers header + control, i.e. everything but the 2 FCS bytes. */
597 if (chan->fcs == L2CAP_FCS_CRC16) {
598 u16 fcs = crc16(0, (u8 *)lh, count - 2);
599 put_unaligned_le16(fcs, skb_put(skb, 2));
602 if (lmp_no_flush_capable(conn->hcon->hdev))
603 flags = ACL_START_NO_FLUSH;
607 bt_cb(skb)->force_active = chan->force_active;
609 hci_send_acl(chan->conn->hcon, skb, flags);
/* Send RNR if we are locally busy (and remember that we did), else RR,
 * always acknowledging up to buffer_seq. */
612 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
614 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
615 control |= L2CAP_SUPER_RCV_NOT_READY;
616 set_bit(CONN_RNR_SENT, &chan->conn_state);
618 control |= L2CAP_SUPER_RCV_READY;
620 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
622 l2cap_send_sframe(chan, control);
/* True when no connect request from us is still awaiting a response. */
625 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
627 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Kick off channel establishment: if the remote's feature mask is already
 * known, send a connect request (once security allows); otherwise issue an
 * information request first and start the info timer. */
630 static void l2cap_do_start(struct l2cap_chan *chan)
632 struct l2cap_conn *conn = chan->conn;
634 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight — wait for it to finish. */
635 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
638 if (l2cap_check_security(chan) &&
639 __l2cap_no_conn_pending(chan)) {
640 struct l2cap_conn_req req;
641 req.scid = cpu_to_le16(chan->scid);
644 chan->ident = l2cap_get_ident(conn);
645 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
647 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
/* First channel on this link: query the remote feature mask. */
651 struct l2cap_info_req req;
652 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
654 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
655 conn->info_ident = l2cap_get_ident(conn);
657 mod_timer(&conn->info_timer, jiffies +
658 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
660 l2cap_send_cmd(conn, conn->info_ident,
661 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check a channel mode against both the local and remote feature masks. */
665 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
667 u32 local_feat_mask = l2cap_feat_mask;
/* We always support ERTM and streaming locally (condition elided). */
669 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
672 case L2CAP_MODE_ERTM:
673 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
674 case L2CAP_MODE_STREAMING:
675 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP disconnect request, stopping any ERTM timers first,
 * and move the channel to BT_DISCONN. */
681 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
684 struct l2cap_disconn_req req;
691 if (chan->mode == L2CAP_MODE_ERTM) {
692 __clear_retrans_timer(chan);
693 __clear_monitor_timer(chan);
694 __clear_ack_timer(chan);
697 req.dcid = cpu_to_le16(chan->dcid);
698 req.scid = cpu_to_le16(chan->scid);
699 l2cap_send_cmd(conn, l2cap_get_ident(conn),
700 L2CAP_DISCONN_REQ, sizeof(req), &req);
702 l2cap_state_change(chan, BT_DISCONN);
706 /* ---- L2CAP connections ---- */
/* Drive all channels on a connection forward after the feature exchange:
 * BT_CONNECT channels get a connect request (or are torn down if their
 * mode is unsupported); BT_CONNECT2 channels get their pending connect
 * response, followed by the first configuration request on success. */
707 static void l2cap_conn_start(struct l2cap_conn *conn)
709 struct l2cap_chan *chan, *tmp;
711 BT_DBG("conn %p", conn);
713 read_lock(&conn->chan_lock);
715 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
716 struct sock *sk = chan->sk;
720 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
725 if (chan->state == BT_CONNECT) {
726 struct l2cap_conn_req req;
728 if (!l2cap_check_security(chan) ||
729 !__l2cap_no_conn_pending(chan)) {
/* Unsupported mode on a state-2 device cannot fall back: close. */
734 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
735 && test_bit(CONF_STATE2_DEVICE,
736 &chan->conf_state)) {
737 /* l2cap_chan_close() calls list_del(chan)
738 * so release the lock */
739 read_unlock(&conn->chan_lock);
740 l2cap_chan_close(chan, ECONNRESET);
741 read_lock(&conn->chan_lock);
746 req.scid = cpu_to_le16(chan->scid);
749 chan->ident = l2cap_get_ident(conn);
750 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
752 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
755 } else if (chan->state == BT_CONNECT2) {
756 struct l2cap_conn_rsp rsp;
/* Response CIDs are swapped relative to our local view. */
758 rsp.scid = cpu_to_le16(chan->dcid);
759 rsp.dcid = cpu_to_le16(chan->scid);
761 if (l2cap_check_security(chan)) {
762 if (bt_sk(sk)->defer_setup) {
763 struct sock *parent = bt_sk(sk)->parent;
764 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
765 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
767 parent->sk_data_ready(parent, 0);
770 l2cap_state_change(chan, BT_CONFIG);
771 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
772 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not yet satisfied: report authentication pending. */
775 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
776 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
779 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only send the first config request once, and only on success. */
782 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
783 rsp.result != L2CAP_CR_SUCCESS) {
788 set_bit(CONF_REQ_SENT, &chan->conf_state);
789 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
790 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
791 chan->num_conf_req++;
797 read_unlock(&conn->chan_lock);
800 /* Find socket with cid and source bdaddr.
801 * Returns closest match, locked.
/* Exact source-address match wins immediately; a BDADDR_ANY binding is
 * remembered as the fallback (c1) and returned after the scan. */
803 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
805 struct l2cap_chan *c, *c1 = NULL;
807 read_lock(&chan_list_lock);
809 list_for_each_entry(c, &chan_list, global_l) {
810 struct sock *sk = c->sk;
812 if (state && c->state != state)
815 if (c->scid == cid) {
817 if (!bacmp(&bt_sk(sk)->src, src)) {
818 read_unlock(&chan_list_lock);
823 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
828 read_unlock(&chan_list_lock);
/* An LE link came up: if someone is listening on the LE data CID, clone a
 * child channel off the listener, attach it to the connection, queue it on
 * the parent's accept queue, and mark it connected. */
833 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
835 struct sock *parent, *sk;
836 struct l2cap_chan *chan, *pchan;
840 /* Check if we have socket listening on cid */
841 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
848 bh_lock_sock(parent);
850 /* Check for backlog size */
851 if (sk_acceptq_is_full(parent)) {
852 BT_DBG("backlog full %d", parent->sk_ack_backlog);
856 chan = pchan->ops->new_connection(pchan->data);
862 write_lock_bh(&conn->chan_lock);
/* The channel keeps the ACL link alive; stretch its disconnect timeout. */
864 hci_conn_hold(conn->hcon);
865 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
867 bacpy(&bt_sk(sk)->src, conn->src);
868 bacpy(&bt_sk(sk)->dst, conn->dst);
870 bt_accept_enqueue(parent, sk);
872 __l2cap_chan_add(conn, chan);
874 __set_chan_timer(chan, sk->sk_sndtimeo);
876 l2cap_state_change(chan, BT_CONNECTED);
877 parent->sk_data_ready(parent, 0);
879 write_unlock_bh(&conn->chan_lock);
882 bh_unlock_sock(parent);
/* Finish channel setup: reset config state, stop the timer, mark the
 * channel connected, and wake the owner (and listener, if any). */
885 static void l2cap_chan_ready(struct sock *sk)
887 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
888 struct sock *parent = bt_sk(sk)->parent;
890 BT_DBG("sk %p, parent %p", sk, parent);
892 chan->conf_state = 0;
893 __clear_chan_timer(chan);
895 l2cap_state_change(chan, BT_CONNECTED);
896 sk->sk_state_change(sk);
899 parent->sk_data_ready(parent, 0);
/* HCI link fully up: accept incoming LE data channels, start SMP on
 * outgoing LE links, then advance every attached channel — LE channels
 * become ready once SMP passes, non-connection-oriented channels connect
 * immediately, and pending BT_CONNECT channels start signalling. */
902 static void l2cap_conn_ready(struct l2cap_conn *conn)
904 struct l2cap_chan *chan;
905 struct hci_conn *hcon = conn->hcon;
907 BT_DBG("conn %p", conn);
909 if (!hcon->out && hcon->type == LE_LINK)
910 l2cap_le_conn_ready(conn);
912 if (hcon->out && hcon->type == LE_LINK)
913 smp_conn_security(hcon, hcon->pending_sec_level);
915 read_lock(&conn->chan_lock);
917 list_for_each_entry(chan, &conn->chan_l, list) {
918 struct sock *sk = chan->sk;
922 if (hcon->type == LE_LINK) {
923 if (smp_conn_security(hcon, chan->sec_level))
924 l2cap_chan_ready(sk);
926 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
927 __clear_chan_timer(chan);
928 l2cap_state_change(chan, BT_CONNECTED);
929 sk->sk_state_change(sk);
931 } else if (chan->state == BT_CONNECT)
932 l2cap_do_start(chan);
937 read_unlock(&conn->chan_lock);
940 /* Notify sockets that we cannot guaranty reliability anymore */
/* Walks every channel; the error is delivered only to channels that asked
 * for reliable delivery (the delivery statement is elided here). */
941 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
943 struct l2cap_chan *chan;
945 BT_DBG("conn %p", conn);
947 read_lock(&conn->chan_lock);
949 list_for_each_entry(chan, &conn->chan_l, list) {
950 struct sock *sk = chan->sk;
952 if (chan->force_reliable)
956 read_unlock(&conn->chan_lock);
/* Info-request timer expired: give up on the feature exchange and let the
 * pending channels proceed with defaults. */
959 static void l2cap_info_timeout(unsigned long arg)
961 struct l2cap_conn *conn = (void *) arg;
963 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
964 conn->info_ident = 0;
966 l2cap_conn_start(conn);
/* Tear down the whole L2CAP connection: close every channel, stop the
 * info/security timers, destroy SMP state, and free the conn object
 * (the kfree itself is elided in this excerpt). */
969 static void l2cap_conn_del(struct hci_conn *hcon, int err)
971 struct l2cap_conn *conn = hcon->l2cap_data;
972 struct l2cap_chan *chan, *l;
978 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
980 kfree_skb(conn->rx_skb);
983 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
986 l2cap_chan_del(chan, err);
988 chan->ops->close(chan->data);
991 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
992 del_timer_sync(&conn->info_timer);
994 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
995 del_timer(&conn->security_timer);
996 smp_chan_destroy(conn);
999 hcon->l2cap_data = NULL;
/* SMP security timer expired: drop the whole connection. */
1003 static void security_timeout(unsigned long arg)
1005 struct l2cap_conn *conn = (void *) arg;
1007 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Get-or-create the l2cap_conn attached to an HCI link; initializes MTU
 * from the controller (LE vs ACL), locks, the channel list, and the
 * per-link-type timer (SMP security timer for LE, info timer for ACL). */
1010 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1012 struct l2cap_conn *conn = hcon->l2cap_data;
1017 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1021 hcon->l2cap_data = conn;
1024 BT_DBG("hcon %p conn %p", hcon, conn);
1026 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1027 conn->mtu = hcon->hdev->le_mtu;
1029 conn->mtu = hcon->hdev->acl_mtu;
1031 conn->src = &hcon->hdev->bdaddr;
1032 conn->dst = &hcon->dst;
1034 conn->feat_mask = 0;
1036 spin_lock_init(&conn->lock);
1037 rwlock_init(&conn->chan_lock);
1039 INIT_LIST_HEAD(&conn->chan_l);
1041 if (hcon->type == LE_LINK)
1042 setup_timer(&conn->security_timer, security_timeout,
1043 (unsigned long) conn);
1045 setup_timer(&conn->info_timer, l2cap_info_timeout,
1046 (unsigned long) conn);
/* 0x13 = "remote user terminated connection" default disconnect reason. */
1048 conn->disc_reason = 0x13;
/* Locked wrapper around __l2cap_chan_add(). */
1053 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1055 write_lock_bh(&conn->chan_lock);
1056 __l2cap_chan_add(conn, chan);
1057 write_unlock_bh(&conn->chan_lock);
1060 /* ---- Socket interface ---- */
1062 /* Find socket with psm and source bdaddr.
1063 * Returns closest match.
/* Same two-pass preference as l2cap_global_chan_by_scid: exact source
 * address beats a BDADDR_ANY wildcard binding (kept in c1). */
1065 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1067 struct l2cap_chan *c, *c1 = NULL;
1069 read_lock(&chan_list_lock);
1071 list_for_each_entry(c, &chan_list, global_l) {
1072 struct sock *sk = c->sk;
1074 if (state && c->state != state)
1077 if (c->psm == psm) {
1079 if (!bacmp(&bt_sk(sk)->src, src)) {
1080 read_unlock(&chan_list_lock);
1085 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1090 read_unlock(&chan_list_lock);
/* Initiate an outgoing channel: route to a local adapter, create the HCI
 * link (LE when targeting the LE data CID, ACL otherwise), attach the
 * channel, and begin signalling if the link is already up. */
1095 int l2cap_chan_connect(struct l2cap_chan *chan)
1097 struct sock *sk = chan->sk;
1098 bdaddr_t *src = &bt_sk(sk)->src;
1099 bdaddr_t *dst = &bt_sk(sk)->dst;
1100 struct l2cap_conn *conn;
1101 struct hci_conn *hcon;
1102 struct hci_dev *hdev;
1106 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1109 hdev = hci_get_route(dst, src);
1111 return -EHOSTUNREACH;
1113 hci_dev_lock_bh(hdev);
1115 auth_type = l2cap_get_auth_type(chan);
1117 if (chan->dcid == L2CAP_CID_LE_DATA)
1118 hcon = hci_connect(hdev, LE_LINK, dst,
1119 chan->sec_level, auth_type);
1121 hcon = hci_connect(hdev, ACL_LINK, dst,
1122 chan->sec_level, auth_type);
1125 err = PTR_ERR(hcon);
1129 conn = l2cap_conn_add(hcon, 0);
1136 /* Update source addr of the socket */
1137 bacpy(src, conn->src);
1139 l2cap_chan_add(conn, chan);
1141 l2cap_state_change(chan, BT_CONNECT);
1142 __set_chan_timer(chan, sk->sk_sndtimeo);
1144 if (hcon->state == BT_CONNECTED) {
/* Raw/connectionless channels are "connected" as soon as the link is. */
1145 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1146 __clear_chan_timer(chan);
1147 if (l2cap_check_security(chan))
1148 l2cap_state_change(chan, BT_CONNECTED);
1150 l2cap_do_start(chan);
1156 hci_dev_unlock_bh(hdev);
/* Sleep (interruptibly) until every outstanding ERTM frame has been
 * acknowledged, the connection drops, a signal arrives, or the socket
 * reports an error. */
1161 int __l2cap_wait_ack(struct sock *sk)
1163 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1164 DECLARE_WAITQUEUE(wait, current);
1168 add_wait_queue(sk_sleep(sk), &wait);
1169 set_current_state(TASK_INTERRUPTIBLE);
1170 while (chan->unacked_frames > 0 && chan->conn) {
1174 if (signal_pending(current)) {
1175 err = sock_intr_errno(timeo);
1180 timeo = schedule_timeout(timeo);
/* Re-arm the state before the next loop-condition check. */
1182 set_current_state(TASK_INTERRUPTIBLE);
1184 err = sock_error(sk);
1188 set_current_state(TASK_RUNNING);
1189 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: poll the peer with an RR/RNR (P-bit set) until
 * remote_max_tx retries are exhausted, then abort the channel. */
1193 static void l2cap_monitor_timeout(unsigned long arg)
1195 struct l2cap_chan *chan = (void *) arg;
1196 struct sock *sk = chan->sk;
1198 BT_DBG("chan %p", chan);
1201 if (chan->retry_count >= chan->remote_max_tx) {
1202 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1207 chan->retry_count++;
1208 __set_monitor_timer(chan);
1210 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer: switch to the monitor timer, mark that we
 * are waiting for an F-bit, and poll the peer. */
1214 static void l2cap_retrans_timeout(unsigned long arg)
1216 struct l2cap_chan *chan = (void *) arg;
1217 struct sock *sk = chan->sk;
1219 BT_DBG("chan %p", chan);
1222 chan->retry_count = 1;
1223 __set_monitor_timer(chan);
1225 set_bit(CONN_WAIT_F, &chan->conn_state);
1227 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Free transmitted frames from the head of tx_q up to (not including)
 * the frame with expected_ack_seq; stop the retransmission timer once
 * nothing is left unacknowledged. */
1231 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1233 struct sk_buff *skb;
1235 while ((skb = skb_peek(&chan->tx_q)) &&
1236 chan->unacked_frames) {
1237 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1240 skb = skb_dequeue(&chan->tx_q);
1243 chan->unacked_frames--;
1246 if (!chan->unacked_frames)
1247 __clear_retrans_timer(chan);
/* Push one data skb to the controller with the channel's flush/active
 * policy applied. */
1250 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1252 struct hci_conn *hcon = chan->conn->hcon;
1255 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1257 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1258 flags = ACL_START_NO_FLUSH;
1262 bt_cb(skb)->force_active = chan->force_active;
1263 hci_send_acl(hcon, skb, flags);
/* Streaming mode: drain tx_q, stamping each frame's TxSeq (mod 64) into
 * the control field and recomputing the trailing FCS when enabled.
 * No retransmission state is kept in this mode. */
1266 static void l2cap_streaming_send(struct l2cap_chan *chan)
1268 struct sk_buff *skb;
1271 while ((skb = skb_dequeue(&chan->tx_q))) {
1272 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1273 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1274 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1276 if (chan->fcs == L2CAP_FCS_CRC16) {
/* FCS covers the whole frame except its own 2 trailing bytes. */
1277 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1278 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1281 l2cap_do_send(chan, skb);
1283 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* Retransmit the frame with the given TxSeq (in response to an SREJ).
 * Aborts the channel when remote_max_tx retries are exhausted. */
1287 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1289 struct sk_buff *skb, *tx_skb;
1292 skb = skb_peek(&chan->tx_q);
/* Linear scan of tx_q for the requested sequence number. */
1297 if (bt_cb(skb)->tx_seq == tx_seq)
1300 if (skb_queue_is_last(&chan->tx_q, skb))
1303 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1305 if (chan->remote_max_tx &&
1306 bt_cb(skb)->retries == chan->remote_max_tx) {
1307 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Clone so the original stays queued for possible further retransmits;
 * clone shares the data buffer with the original. */
1311 tx_skb = skb_clone(skb, GFP_ATOMIC);
1312 bt_cb(skb)->retries++;
1313 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
/* Keep only the SAR bits; rebuild F-bit, ReqSeq and TxSeq fresh. */
1314 control &= L2CAP_CTRL_SAR;
1316 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1317 control |= L2CAP_CTRL_FINAL;
1319 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1320 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1322 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1324 if (chan->fcs == L2CAP_FCS_CRC16) {
1325 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1326 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1329 l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send frames from tx_send_head while the TX window
 * is open; clones each skb, stamps control/FCS, arms the retransmission
 * timer, and advances the send-state variables. Returns the number of
 * frames sent (return statement elided in this excerpt). */
1332 static int l2cap_ertm_send(struct l2cap_chan *chan)
1334 struct sk_buff *skb, *tx_skb;
1338 if (chan->state != BT_CONNECTED)
1341 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1343 if (chan->remote_max_tx &&
1344 bt_cb(skb)->retries == chan->remote_max_tx) {
1345 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1349 tx_skb = skb_clone(skb, GFP_ATOMIC);
1351 bt_cb(skb)->retries++;
1353 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1354 control &= L2CAP_CTRL_SAR;
1356 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1357 control |= L2CAP_CTRL_FINAL;
1359 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1360 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1361 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS is computed/written via skb->data rather than
 * tx_skb->data; harmless only because skb_clone() shares the data
 * buffer, but asymmetric with l2cap_retransmit_one_frame() above. */
1364 if (chan->fcs == L2CAP_FCS_CRC16) {
1365 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1366 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1369 l2cap_do_send(chan, tx_skb);
1371 __set_retrans_timer(chan);
1373 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1374 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* First transmission of this frame adds it to the unacked count. */
1376 if (bt_cb(skb)->retries == 1)
1377 chan->unacked_frames++;
1379 chan->frames_sent++;
1381 if (skb_queue_is_last(&chan->tx_q, skb))
1382 chan->tx_send_head = NULL;
1384 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the oldest unacked frame and resend the
 * whole window from expected_ack_seq. */
1392 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1396 if (!skb_queue_empty(&chan->tx_q))
1397 chan->tx_send_head = chan->tx_q.next;
1399 chan->next_tx_seq = chan->expected_ack_seq;
1400 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: RNR when locally busy, otherwise try to
 * piggyback the ack on pending I-frames, falling back to an RR. */
1404 static void l2cap_send_ack(struct l2cap_chan *chan)
1408 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1410 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1411 control |= L2CAP_SUPER_RCV_NOT_READY;
1412 set_bit(CONN_RNR_SENT, &chan->conn_state);
1413 l2cap_send_sframe(chan, control);
/* If l2cap_ertm_send() pushed I-frames, they carry the ack already. */
1417 if (l2cap_ertm_send(chan) > 0)
1420 control |= L2CAP_SUPER_RCV_READY;
1421 l2cap_send_sframe(chan, control);
/* Send an SREJ S-frame (F-bit set) for the last entry on the SREJ list. */
1424 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1426 struct srej_list *tail;
1429 control = L2CAP_SUPER_SELECT_REJECT;
1430 control |= L2CAP_CTRL_FINAL;
1432 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1433 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1435 l2cap_send_sframe(chan, control);
/* Copy `len` bytes of user iovec data into skb: `count` bytes into the
 * main buffer, the remainder as MTU-sized fragments chained on frag_list. */
1438 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1440 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1441 struct sk_buff **frag;
1444 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1450 /* Continuation fragments (no L2CAP header) */
1451 frag = &skb_shinfo(skb)->frag_list;
/* Loop header for the remaining length is elided in this excerpt. */
1453 count = min_t(unsigned int, conn->mtu, len);
1455 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1458 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1464 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus a 2-byte
 * PSM field, then the user payload via l2cap_skbuff_fromiovec(). */
1470 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1472 struct sock *sk = chan->sk;
1473 struct l2cap_conn *conn = chan->conn;
1474 struct sk_buff *skb;
1475 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1476 struct l2cap_hdr *lh;
1478 BT_DBG("sk %p len %d", sk, (int)len);
/* First skb carries at most one HCI MTU worth of payload. */
1480 count = min_t(unsigned int, (conn->mtu - hlen), len);
1481 skb = bt_skb_send_alloc(sk, count + hlen,
1482 msg->msg_flags & MSG_DONTWAIT, &err);
1484 return ERR_PTR(err);
1486 /* Create L2CAP header */
1487 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1488 lh->cid = cpu_to_le16(chan->dcid);
1489 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1490 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1492 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1493 if (unlikely(err < 0)) {
1495 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header, no PSM/control. */
1500 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1502 struct sock *sk = chan->sk;
1503 struct l2cap_conn *conn = chan->conn;
1504 struct sk_buff *skb;
1505 int err, count, hlen = L2CAP_HDR_SIZE;
1506 struct l2cap_hdr *lh;
1508 BT_DBG("sk %p len %d", sk, (int)len);
1510 count = min_t(unsigned int, (conn->mtu - hlen), len);
1511 skb = bt_skb_send_alloc(sk, count + hlen,
1512 msg->msg_flags & MSG_DONTWAIT, &err);
1514 return ERR_PTR(err);
1516 /* Create L2CAP header */
1517 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1518 lh->cid = cpu_to_le16(chan->dcid);
1519 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1521 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1522 if (unlikely(err < 0)) {
1524 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: header + 16-bit control field,
 * optional 2-byte SDU length (for SAR start frames, sdulen != 0), payload,
 * and a zero FCS placeholder filled in at transmit time. */
1529 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1530 struct msghdr *msg, size_t len,
1531 u16 control, u16 sdulen)
1533 struct sock *sk = chan->sk;
1534 struct l2cap_conn *conn = chan->conn;
1535 struct sk_buff *skb;
1536 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1537 struct l2cap_hdr *lh;
1539 BT_DBG("sk %p len %d", sk, (int)len);
1542 return ERR_PTR(-ENOTCONN);
/* hlen grows for the SDU-length field (elided) and for the FCS below. */
1547 if (chan->fcs == L2CAP_FCS_CRC16)
1550 count = min_t(unsigned int, (conn->mtu - hlen), len);
1551 skb = bt_skb_send_alloc(sk, count + hlen,
1552 msg->msg_flags & MSG_DONTWAIT, &err);
1554 return ERR_PTR(err);
1556 /* Create L2CAP header */
1557 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1558 lh->cid = cpu_to_le16(chan->dcid);
1559 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1560 put_unaligned_le16(control, skb_put(skb, 2));
1562 put_unaligned_le16(sdulen, skb_put(skb, 2));
1564 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1565 if (unlikely(err < 0)) {
1567 return ERR_PTR(err);
/* Reserve the FCS slot; real value is stamped by the send paths. */
1570 if (chan->fcs == L2CAP_FCS_CRC16)
1571 put_unaligned_le16(0, skb_put(skb, 2));
1573 bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than remote_mps into a START frame, zero or
 * more CONTINUE frames and a final END frame, building them on a local
 * queue first so a mid-stream allocation failure can purge cleanly,
 * then splice the whole SAR sequence onto chan->tx_q.
 * NOTE(review): loop construct and some declarations elided by
 * extraction.
 */
1577 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1579 struct sk_buff *skb;
1580 struct sk_buff_head sar_queue;
1584 skb_queue_head_init(&sar_queue);
/* first PDU carries SAR=START and the total SDU length */
1585 control = L2CAP_SDU_START;
1586 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1588 return PTR_ERR(skb);
1590 __skb_queue_tail(&sar_queue, skb);
1591 len -= chan->remote_mps;
1592 size += chan->remote_mps;
/* (loop over the rest of the SDU — loop header elided) */
1597 if (len > chan->remote_mps) {
1598 control = L2CAP_SDU_CONTINUE;
1599 buflen = chan->remote_mps;
1601 control = L2CAP_SDU_END;
1605 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* on failure drop everything built so far */
1607 skb_queue_purge(&sar_queue);
1608 return PTR_ERR(skb);
1611 __skb_queue_tail(&sar_queue, skb);
1615 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
/* point tx_send_head at the first new frame if tx was idle */
1616 if (chan->tx_send_head == NULL)
1617 chan->tx_send_head = sar_queue.next;
/*
 * Top-level channel transmit entry point.  Dispatches on channel type
 * and mode: connectionless channels get a G-frame sent immediately;
 * basic mode gets a single B-frame (after an outgoing-MTU check);
 * ERTM/streaming either queues one unsegmented I-frame or SAR-segments
 * the SDU, then kicks the appropriate transmit engine.
 * NOTE(review): several guards, the `err = len` success path and
 * closing braces were elided by extraction.
 */
1622 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1624 struct sk_buff *skb;
1628 /* Connectionless channel */
1629 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1630 skb = l2cap_create_connless_pdu(chan, msg, len);
1632 return PTR_ERR(skb);
1634 l2cap_do_send(chan, skb);
1638 switch (chan->mode) {
1639 case L2CAP_MODE_BASIC:
1640 /* Check outgoing MTU */
1641 if (len > chan->omtu)
1644 /* Create a basic PDU */
1645 skb = l2cap_create_basic_pdu(chan, msg, len);
1647 return PTR_ERR(skb);
1649 l2cap_do_send(chan, skb);
1653 case L2CAP_MODE_ERTM:
1654 case L2CAP_MODE_STREAMING:
1655 /* Entire SDU fits into one PDU */
1656 if (len <= chan->remote_mps) {
1657 control = L2CAP_SDU_UNSEGMENTED;
1658 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1661 return PTR_ERR(skb);
1663 __skb_queue_tail(&chan->tx_q, skb);
1665 if (chan->tx_send_head == NULL)
1666 chan->tx_send_head = skb;
1669 /* Segment SDU into multiples PDUs */
1670 err = l2cap_sar_segment_sdu(chan, msg, len);
1675 if (chan->mode == L2CAP_MODE_STREAMING) {
1676 l2cap_streaming_send(chan);
/* ERTM: hold off while remote busy or waiting for an F-bit ack */
1681 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1682 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1687 err = l2cap_ertm_send(chan);
1694 BT_DBG("bad state %1.1x", chan->mode);
1701 /* Copy frame to all raw sockets on that connection */
/*
 * Clone an incoming frame to every RAW-type channel on the connection
 * (used for signaling-channel sniffing).  Runs under chan_lock read
 * lock; clones are GFP_ATOMIC.
 * NOTE(review): the "skip the originating socket" check and the
 * continue/kfree paths were elided by extraction.
 */
1702 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1704 struct sk_buff *nskb;
1705 struct l2cap_chan *chan;
1707 BT_DBG("conn %p", conn);
1709 read_lock(&conn->chan_lock);
1710 list_for_each_entry(chan, &conn->chan_l, list) {
1711 struct sock *sk = chan->sk;
1712 if (chan->chan_type != L2CAP_CHAN_RAW)
1715 /* Don't send frame to the socket it came from */
1718 nskb = skb_clone(skb, GFP_ATOMIC);
/* recv callback consumes nskb on success; on error it is dropped */
1722 if (chan->ops->recv(chan->data, nskb))
1725 read_unlock(&conn->chan_lock);
1728 /* ---- L2CAP signalling commands ---- */
/*
 * Allocate and fill a signaling-command skb: L2CAP header on the
 * signaling CID (LE or BR/EDR), command header (code/ident/len), then
 * the payload — fragmented across frag_list skbs when it exceeds the
 * ACL MTU.  Returns the skb, or NULL on bad MTU/allocation failure.
 * NOTE(review): allocation-failure branches, `data += count` advances
 * and the fail/kfree_skb tail were elided by extraction.
 */
1729 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1730 u8 code, u8 ident, u16 dlen, void *data)
1732 struct sk_buff *skb, **frag;
1733 struct l2cap_cmd_hdr *cmd;
1734 struct l2cap_hdr *lh;
1737 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1738 conn, code, ident, dlen);
/* MTU must at least hold both headers */
1740 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
1743 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1744 count = min_t(unsigned int, conn->mtu, len);
1746 skb = bt_skb_alloc(count, GFP_ATOMIC);
1750 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1751 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* signaling goes on a fixed CID, selected by link type */
1753 if (conn->hcon->type == LE_LINK)
1754 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1756 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1758 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1761 cmd->len = cpu_to_le16(dlen);
1764 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1765 memcpy(skb_put(skb, count), data, count);
1771 /* Continuation fragments (no L2CAP header) */
1772 frag = &skb_shinfo(skb)->frag_list;
1774 count = min_t(unsigned int, conn->mtu, len);
1776 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1780 memcpy(skb_put(*frag, count), data, count);
1785 frag = &(*frag)->next;
/*
 * Decode one configuration option at *ptr, returning its total encoded
 * length and writing type/olen/val out.  1/2/4-byte values are read by
 * width; anything else is returned as a pointer into the option buffer.
 * NOTE(review): the switch statement, *ptr advance and return were
 * elided by extraction.
 */
1795 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1797 struct l2cap_conf_opt *opt = *ptr;
1800 len = L2CAP_CONF_OPT_SIZE + opt->len;
1808 *val = *((u8 *) opt->val);
1812 *val = get_unaligned_le16(opt->val);
1816 *val = get_unaligned_le32(opt->val);
/* variable-length option: hand back a pointer, not a value */
1820 *val = (unsigned long) opt->val;
1824 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option at *ptr, bounds-checked against the
 * remaining buffer `size` so a response can never overrun its buffer;
 * advances *ptr past the encoded option.
 * NOTE(review): the switch over `len` and the type/len field stores
 * were elided by extraction.
 */
1828 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
1830 struct l2cap_conf_opt *opt = *ptr;
1832 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
/* silently skip the option if it would overflow the caller's buffer */
1834 if (size < L2CAP_CONF_OPT_SIZE + len)
1842 *((u8 *) opt->val) = val;
1846 put_unaligned_le16(val, opt->val);
1850 put_unaligned_le32(val, opt->val);
/* variable-length payload: val is a pointer to the source bytes */
1854 memcpy(opt->val, (void *) val, len);
1858 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * ERTM ack timer callback: sends a pending acknowledgement under the
 * socket bottom-half lock.  `arg` is the l2cap_chan cast through
 * unsigned long (pre-timer_setup kernel timer API).
 */
1861 static void l2cap_ack_timeout(unsigned long arg)
1863 struct l2cap_chan *chan = (void *) arg;
1865 bh_lock_sock(chan->sk);
1866 l2cap_send_ack(chan);
1867 bh_unlock_sock(chan->sk);
/*
 * Initialise per-channel ERTM state: zero the sequence/ack counters,
 * arm the three ERTM timers (retransmission, monitor, ack), set up the
 * selective-reject queue/list and route backlog packets through the
 * ERTM receive path.
 */
1870 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1872 struct sock *sk = chan->sk;
1874 chan->expected_ack_seq = 0;
1875 chan->unacked_frames = 0;
1876 chan->buffer_seq = 0;
1877 chan->num_acked = 0;
1878 chan->frames_sent = 0;
1880 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1881 (unsigned long) chan);
1882 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1883 (unsigned long) chan);
1884 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
/* out-of-order frames held here until the SREJ gap is filled */
1886 skb_queue_head_init(&chan->srej_q);
1888 INIT_LIST_HEAD(&chan->srej_l);
1891 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/*
 * Pick the channel mode to request: keep ERTM/streaming only if the
 * remote's feature mask advertises support, otherwise fall back to
 * basic mode.
 * NOTE(review): the switch header and `return mode` line were elided
 * by extraction.
 */
1894 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1897 case L2CAP_MODE_STREAMING:
1898 case L2CAP_MODE_ERTM:
1899 if (l2cap_mode_supported(mode, remote_feat_mask))
1903 return L2CAP_MODE_BASIC;
/*
 * Build an outgoing Configuration Request into `data` (capacity
 * `data_size`): MTU option when non-default, then a mode-specific RFC
 * option (basic / ERTM / streaming) and, when the peer supports it,
 * an FCS=none option.  Returns the request length (ptr - data).
 * All option appends are bounds-checked via the `endptr - ptr` slack
 * passed to l2cap_add_conf_opt().
 * NOTE(review): `goto done` jumps, `done:` label and the final return
 * were elided by extraction.
 */
1907 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
1909 struct l2cap_conf_req *req = data;
1910 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1911 void *ptr = req->data;
1912 void *endptr = data + data_size;
1914 BT_DBG("chan %p", chan);
/* only (re)select the mode on the very first request */
1916 if (chan->num_conf_req || chan->num_conf_rsp)
1919 switch (chan->mode) {
1920 case L2CAP_MODE_STREAMING:
1921 case L2CAP_MODE_ERTM:
1922 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
1927 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
1932 if (chan->imtu != L2CAP_DEFAULT_MTU)
1933 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
1935 switch (chan->mode) {
1936 case L2CAP_MODE_BASIC:
/* basic mode: only send an RFC option if the peer knows ERTM/streaming */
1937 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1938 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1941 rfc.mode = L2CAP_MODE_BASIC;
1943 rfc.max_transmit = 0;
1944 rfc.retrans_timeout = 0;
1945 rfc.monitor_timeout = 0;
1946 rfc.max_pdu_size = 0;
1948 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1949 (unsigned long) &rfc, endptr - ptr);
1952 case L2CAP_MODE_ERTM:
1953 rfc.mode = L2CAP_MODE_ERTM;
1954 rfc.txwin_size = chan->tx_win;
1955 rfc.max_transmit = chan->max_tx;
1956 rfc.retrans_timeout = 0;
1957 rfc.monitor_timeout = 0;
1958 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* clamp PDU size so header+control+FCS still fit the ACL MTU */
1959 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1960 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1962 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1963 (unsigned long) &rfc, endptr - ptr);
1965 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1968 if (chan->fcs == L2CAP_FCS_NONE ||
1969 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1970 chan->fcs = L2CAP_FCS_NONE;
1971 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs,
1976 case L2CAP_MODE_STREAMING:
1977 rfc.mode = L2CAP_MODE_STREAMING;
1979 rfc.max_transmit = 0;
1980 rfc.retrans_timeout = 0;
1981 rfc.monitor_timeout = 0;
1982 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1983 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1984 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1986 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1987 (unsigned long) &rfc, endptr - ptr);
1989 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1992 if (chan->fcs == L2CAP_FCS_NONE ||
1993 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1994 chan->fcs = L2CAP_FCS_NONE;
1995 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs,
2001 req->dcid = cpu_to_le16(chan->dcid);
2002 req->flags = cpu_to_le16(0);
/*
 * Parse the peer's accumulated Configuration Request (chan->conf_req /
 * conf_len) and build our Configuration Response into `data`
 * (capacity `data_size`).  Walks options (MTU, flush-to, QoS, RFC,
 * FCS), resolves the channel mode against the peer's RFC, then — on
 * success — echoes accepted output options (MTU, RFC) back.  Returns
 * the response length, or -ECONNREFUSED when modes cannot converge.
 * NOTE(review): many guards, `break`s, a `goto done`/`done:` label and
 * the final return were elided by extraction.
 */
2007 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
2009 struct l2cap_conf_rsp *rsp = data;
2010 void *ptr = rsp->data;
2011 void *endptr = data + data_size;
2012 void *req = chan->conf_req;
2013 int len = chan->conf_len;
2014 int type, hint, olen;
2016 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2017 u16 mtu = L2CAP_DEFAULT_MTU;
2018 u16 result = L2CAP_CONF_SUCCESS;
2020 BT_DBG("chan %p", chan);
2022 while (len >= L2CAP_CONF_OPT_SIZE) {
2023 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* hint options may be ignored; non-hints must be understood */
2025 hint = type & L2CAP_CONF_HINT;
2026 type &= L2CAP_CONF_MASK;
2029 case L2CAP_CONF_MTU:
2033 case L2CAP_CONF_FLUSH_TO:
2034 chan->flush_to = val;
2037 case L2CAP_CONF_QOS:
2040 case L2CAP_CONF_RFC:
2041 if (olen == sizeof(rfc))
2042 memcpy(&rfc, (void *) val, olen);
2045 case L2CAP_CONF_FCS:
2046 if (val == L2CAP_FCS_NONE)
2047 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
/* unknown non-hint option: reject and list the option type */
2055 result = L2CAP_CONF_UNKNOWN;
2056 *((u8 *) ptr++) = type;
2061 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2064 switch (chan->mode) {
2065 case L2CAP_MODE_STREAMING:
2066 case L2CAP_MODE_ERTM:
2067 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2068 chan->mode = l2cap_select_mode(rfc.mode,
2069 chan->conn->feat_mask);
2073 if (chan->mode != rfc.mode)
2074 return -ECONNREFUSED;
/* mode mismatch on a later pass: propose ours, refuse after 1 rsp */
2080 if (chan->mode != rfc.mode) {
2081 result = L2CAP_CONF_UNACCEPT;
2082 rfc.mode = chan->mode;
2084 if (chan->num_conf_rsp == 1)
2085 return -ECONNREFUSED;
2087 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2088 (unsigned long) &rfc, endptr - ptr);
2092 if (result == L2CAP_CONF_SUCCESS) {
2093 /* Configure output options and let the other side know
2094 * which ones we don't like. */
2096 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2097 result = L2CAP_CONF_UNACCEPT;
2100 set_bit(CONF_MTU_DONE, &chan->conf_state);
2102 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
2105 case L2CAP_MODE_BASIC:
2106 chan->fcs = L2CAP_FCS_NONE;
2107 set_bit(CONF_MODE_DONE, &chan->conf_state);
2110 case L2CAP_MODE_ERTM:
2111 chan->remote_tx_win = rfc.txwin_size;
2112 chan->remote_max_tx = rfc.max_transmit;
/* clamp the peer's PDU size to what the ACL MTU can carry */
2114 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2115 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2117 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2119 rfc.retrans_timeout =
2120 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2121 rfc.monitor_timeout =
2122 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2124 set_bit(CONF_MODE_DONE, &chan->conf_state);
2126 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2127 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
2131 case L2CAP_MODE_STREAMING:
2132 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2133 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2135 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2137 set_bit(CONF_MODE_DONE, &chan->conf_state);
2139 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2140 (unsigned long) &rfc, endptr - ptr);
2145 result = L2CAP_CONF_UNACCEPT;
2147 memset(&rfc, 0, sizeof(rfc));
2148 rfc.mode = chan->mode;
2151 if (result == L2CAP_CONF_SUCCESS)
2152 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2154 rsp->scid = cpu_to_le16(chan->dcid);
2155 rsp->result = cpu_to_le16(result);
2156 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse the peer's Configuration Response (`rsp`/`len`) and build our
 * follow-up Configuration Request into `data` (capacity `size`),
 * re-proposing adjusted MTU / flush-timeout / RFC options.  Updates
 * *result, adopts the negotiated mode and its timeouts/MPS when
 * successful, and returns the new request length.
 * NOTE(review): switch headers, `break`s and the final return were
 * elided by extraction.
 */
2161 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
2162 void *data, size_t size, u16 *result)
2164 struct l2cap_conf_req *req = data;
2165 void *ptr = req->data;
2166 void *endptr = data + size;
2169 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2171 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2173 while (len >= L2CAP_CONF_OPT_SIZE) {
2174 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2177 case L2CAP_CONF_MTU:
/* peer proposed an MTU below the legal minimum: unaccept, use minimum */
2178 if (val < L2CAP_DEFAULT_MIN_MTU) {
2179 *result = L2CAP_CONF_UNACCEPT;
2180 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2183 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
2186 case L2CAP_CONF_FLUSH_TO:
2187 chan->flush_to = val;
2188 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2189 2, chan->flush_to, endptr - ptr);
2192 case L2CAP_CONF_RFC:
2193 if (olen == sizeof(rfc))
2194 memcpy(&rfc, (void *)val, olen);
/* state-2 device cannot change its mode mid-negotiation */
2196 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2197 rfc.mode != chan->mode)
2198 return -ECONNREFUSED;
2202 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2203 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
2208 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2209 return -ECONNREFUSED;
2211 chan->mode = rfc.mode;
2213 if (*result == L2CAP_CONF_SUCCESS) {
2215 case L2CAP_MODE_ERTM:
2216 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2217 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2218 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2220 case L2CAP_MODE_STREAMING:
2221 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2225 req->dcid = cpu_to_le16(chan->dcid);
2226 req->flags = cpu_to_le16(0x0000);
/*
 * Fill a minimal (option-less) Configuration Response header with the
 * given result and flags; returns its length (return line elided by
 * extraction).
 */
2231 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2233 struct l2cap_conf_rsp *rsp = data;
2234 void *ptr = rsp->data;
2236 BT_DBG("chan %p", chan);
/* respond with the peer's source CID, i.e. our dcid */
2238 rsp->scid = cpu_to_le16(chan->dcid);
2239 rsp->result = cpu_to_le16(result);
2240 rsp->flags = cpu_to_le16(flags);
/*
 * Complete a deferred connection accept: send the pending CONN_RSP
 * (success/no-info) using the stored request ident, then — if we have
 * not already done so — fire off our first Configuration Request.
 * NOTE(review): the local `buf` declaration was elided by extraction.
 */
2245 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2247 struct l2cap_conn_rsp rsp;
2248 struct l2cap_conn *conn = chan->conn;
2251 rsp.scid = cpu_to_le16(chan->dcid);
2252 rsp.dcid = cpu_to_le16(chan->scid);
2253 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2254 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2255 l2cap_send_cmd(conn, chan->ident,
2256 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* only send the config request once per channel */
2258 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2261 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2262 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
2263 chan->num_conf_req++;
/*
 * Extract the RFC option from a successful Configuration Response and
 * apply its timeouts/MPS to the channel; if the (misbehaving) remote
 * omitted the RFC option entirely, fall back to sane defaults.
 * Only meaningful for ERTM/streaming channels.
 * NOTE(review): `goto done`, the `done:` label and switch header were
 * elided by extraction.
 */
2266 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2270 struct l2cap_conf_rfc rfc;
2272 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2274 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2277 while (len >= L2CAP_CONF_OPT_SIZE) {
2278 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2280 if (type != L2CAP_CONF_RFC)
/* wrong-sized RFC option is ignored rather than trusted */
2283 if (olen != sizeof(rfc))
2286 memcpy(&rfc, (void *)val, olen);
2290 /* Use sane default values in case a misbehaving remote device
2291 * did not send an RFC option.
2293 rfc.mode = chan->mode;
2294 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2295 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2296 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2298 BT_ERR("Expected RFC option was not found, using defaults");
2302 case L2CAP_MODE_ERTM:
2303 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2304 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2305 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2307 case L2CAP_MODE_STREAMING:
2308 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle an incoming Command Reject.  If it rejects our outstanding
 * Information Request (matching ident), treat feature discovery as
 * done and kick off connection setup for pending channels.
 * NOTE(review): return statements elided by extraction.
 */
2312 static inline int l2cap_command_rej(struct l2cap_conn *conn,
2313 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2316 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* reject malformed (short) packets */
2318 if (cmd_len < sizeof(*rej))
2321 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2324 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2325 cmd->ident == conn->info_ident) {
2326 del_timer(&conn->info_timer);
2328 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2329 conn->info_ident = 0;
2331 l2cap_conn_start(conn);
/*
 * Handle an incoming Connection Request: find a listening channel for
 * the PSM, enforce link security (except SDP, PSM 0x0001), check the
 * accept backlog, create the child channel, verify the source CID is
 * not already in use, then reply with success / pending / error and —
 * when features are not yet known — start an Information Request
 * exchange.  On immediate success the first Configuration Request is
 * also sent.
 * NOTE(review): several declarations (psm/scid/dcid/buf), goto labels
 * (response/sendresp) and error branches were elided by extraction.
 */
2337 static int l2cap_connect_req(struct l2cap_conn *conn,
2338 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2340 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2341 struct l2cap_conn_rsp rsp;
2342 struct l2cap_chan *chan = NULL, *pchan;
2343 struct sock *parent, *sk = NULL;
2344 int result, status = L2CAP_CS_NO_INFO;
2349 if (cmd_len < sizeof(struct l2cap_conn_req))
2352 scid = __le16_to_cpu(req->scid);
2355 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2357 /* Check if we have socket listening on psm */
2358 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2360 result = L2CAP_CR_BAD_PSM;
2366 bh_lock_sock(parent);
2368 /* Check if the ACL is secure enough (if not SDP) */
2369 if (psm != cpu_to_le16(0x0001) &&
2370 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05 = authentication failure disconnect reason */
2371 conn->disc_reason = 0x05;
2372 result = L2CAP_CR_SEC_BLOCK;
2376 result = L2CAP_CR_NO_MEM;
2378 /* Check for backlog size */
2379 if (sk_acceptq_is_full(parent)) {
2380 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2384 chan = pchan->ops->new_connection(pchan->data);
2390 write_lock_bh(&conn->chan_lock);
2392 /* Check if we already have channel with that dcid */
2393 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2394 write_unlock_bh(&conn->chan_lock);
2395 sock_set_flag(sk, SOCK_ZAPPED);
2396 chan->ops->close(chan->data);
2400 hci_conn_hold(conn->hcon);
2402 bacpy(&bt_sk(sk)->src, conn->src);
2403 bacpy(&bt_sk(sk)->dst, conn->dst);
2407 bt_accept_enqueue(parent, sk);
2409 __l2cap_chan_add(conn, chan);
2413 __set_chan_timer(chan, sk->sk_sndtimeo);
/* remember the ident so a deferred accept can answer later */
2415 chan->ident = cmd->ident;
2417 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2418 if (l2cap_check_security(chan)) {
2419 if (bt_sk(sk)->defer_setup) {
2420 l2cap_state_change(chan, BT_CONNECT2);
2421 result = L2CAP_CR_PEND;
2422 status = L2CAP_CS_AUTHOR_PEND;
2423 parent->sk_data_ready(parent, 0);
2425 l2cap_state_change(chan, BT_CONFIG);
2426 result = L2CAP_CR_SUCCESS;
2427 status = L2CAP_CS_NO_INFO;
2430 l2cap_state_change(chan, BT_CONNECT2);
2431 result = L2CAP_CR_PEND;
2432 status = L2CAP_CS_AUTHEN_PEND;
2435 l2cap_state_change(chan, BT_CONNECT2);
2436 result = L2CAP_CR_PEND;
2437 status = L2CAP_CS_NO_INFO;
2440 write_unlock_bh(&conn->chan_lock);
2443 bh_unlock_sock(parent);
2446 rsp.scid = cpu_to_le16(scid);
2447 rsp.dcid = cpu_to_le16(dcid);
2448 rsp.result = cpu_to_le16(result);
2449 rsp.status = cpu_to_le16(status);
2450 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* features unknown yet: query them before configuring */
2452 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2453 struct l2cap_info_req info;
2454 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2456 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2457 conn->info_ident = l2cap_get_ident(conn);
2459 mod_timer(&conn->info_timer, jiffies +
2460 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2462 l2cap_send_cmd(conn, conn->info_ident,
2463 L2CAP_INFO_REQ, sizeof(info), &info);
2466 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2467 result == L2CAP_CR_SUCCESS) {
2469 set_bit(CONF_REQ_SENT, &chan->conf_state);
2470 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2471 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
2472 chan->num_conf_req++;
/*
 * Handle a Connection Response.  Looks up the channel by source CID
 * (non-zero result) or by request ident, then on SUCCESS stores the
 * peer CID, moves to BT_CONFIG and sends the first Configuration
 * Request; on PEND marks connect-pending; otherwise tears the channel
 * down (deferred via a timer when the socket is user-locked).
 * NOTE(review): locking, dcid assignment, switch header, `req` buffer
 * declaration and bails were elided by extraction.
 */
2478 static int l2cap_connect_rsp(struct l2cap_conn *conn,
2479 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2482 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2483 u16 scid, dcid, result, status;
2484 struct l2cap_chan *chan;
2488 if (cmd_len < sizeof(*rsp))
2491 scid = __le16_to_cpu(rsp->scid);
2492 dcid = __le16_to_cpu(rsp->dcid);
2493 result = __le16_to_cpu(rsp->result);
2494 status = __le16_to_cpu(rsp->status);
2496 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2499 chan = l2cap_get_chan_by_scid(conn, scid);
/* fallback lookup when the peer gave us no scid (error responses) */
2503 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2511 case L2CAP_CR_SUCCESS:
2512 l2cap_state_change(chan, BT_CONFIG);
2515 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2517 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2520 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2521 l2cap_build_conf_req(chan, req, sizeof(req)), req);
2522 chan->num_conf_req++;
2526 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2530 /* don't delete l2cap channel if sk is owned by user */
2531 if (sock_owned_by_user(sk)) {
2532 l2cap_state_change(chan, BT_DISCONN);
2533 __clear_chan_timer(chan);
2534 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2538 l2cap_chan_del(chan, ECONNREFUSED);
/*
 * Choose the channel's FCS setting once configuration completes:
 * none outside ERTM/streaming, CRC16 otherwise unless the peer
 * explicitly requested no FCS (CONF_NO_FCS_RECV).
 */
2546 static inline void set_default_fcs(struct l2cap_chan *chan)
2548 /* FCS is enabled only in ERTM or streaming mode, if one or both
2551 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2552 chan->fcs = L2CAP_FCS_NONE;
2553 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2554 chan->fcs = L2CAP_FCS_CRC16;
/*
 * Handle a Configuration Request.  Validates channel state, rejects
 * oversized accumulated config data, buffers partial requests (the
 * continuation flag, 0x0001), and on the final fragment parses the
 * whole request, sends our response and — when both directions are
 * done — brings the channel up (initialising ERTM if negotiated).
 * Also sends our own first Configuration Request if not yet sent.
 * NOTE(review): variable declarations (rsp/buf/len/sk), unlock paths
 * and goto labels were elided by extraction.
 */
2557 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2559 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2562 struct l2cap_chan *chan;
2566 if (cmd_len < sizeof(*req))
2569 dcid = __le16_to_cpu(req->dcid);
2570 flags = __le16_to_cpu(req->flags);
2572 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2574 chan = l2cap_get_chan_by_scid(conn, dcid);
/* config only legal in BT_CONFIG/BT_CONNECT2; otherwise reject CID */
2580 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2581 struct l2cap_cmd_rej_cid rej;
2583 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2584 rej.scid = cpu_to_le16(chan->scid);
2585 rej.dcid = cpu_to_le16(chan->dcid);
2587 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2592 /* Reject if config buffer is too small. */
2593 len = cmd_len - sizeof(*req);
2594 if (chan->conf_len + len > sizeof(chan->conf_req)) {
2595 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2596 l2cap_build_conf_rsp(chan, rsp,
2597 L2CAP_CONF_REJECT, flags), rsp);
/* accumulate fragments until the continuation flag clears */
2602 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2603 chan->conf_len += len;
2605 if (flags & 0x0001) {
2606 /* Incomplete config. Send empty response. */
2607 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2608 l2cap_build_conf_rsp(chan, rsp,
2609 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2613 /* Complete config. */
2614 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
2616 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2620 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2621 chan->num_conf_rsp++;
2623 /* Reset config buffer. */
2626 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* both sides configured: the channel is fully connected */
2629 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2630 set_default_fcs(chan);
2632 l2cap_state_change(chan, BT_CONNECTED);
2634 chan->next_tx_seq = 0;
2635 chan->expected_tx_seq = 0;
2636 skb_queue_head_init(&chan->tx_q);
2637 if (chan->mode == L2CAP_MODE_ERTM)
2638 l2cap_ertm_init(chan);
2640 l2cap_chan_ready(sk);
2644 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2646 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2647 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
2648 chan->num_conf_req++;
/*
 * Handle a Configuration Response.  SUCCESS: extract negotiated RFC
 * parameters.  UNACCEPT: while under the retry cap, re-parse the
 * response and send an adjusted request; too-large responses or parse
 * failures trigger a disconnect.  Other results set ECONNRESET and
 * disconnect.  When both directions finish, the channel goes
 * BT_CONNECTED (with ERTM init if negotiated).
 * NOTE(review): switch header, `req` buffer declaration, continuation
 * flag handling and unlock/return paths were elided by extraction.
 */
2656 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
2657 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2660 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2661 u16 scid, flags, result;
2662 struct l2cap_chan *chan;
2664 int len = cmd_len - sizeof(*rsp);
2666 if (cmd_len < sizeof(*rsp))
2669 scid = __le16_to_cpu(rsp->scid);
2670 flags = __le16_to_cpu(rsp->flags);
2671 result = __le16_to_cpu(rsp->result);
2673 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2674 scid, flags, result);
2676 chan = l2cap_get_chan_by_scid(conn, scid);
2683 case L2CAP_CONF_SUCCESS:
2684 l2cap_conf_rfc_get(chan, rsp->data, len);
2687 case L2CAP_CONF_UNACCEPT:
/* bounded renegotiation: give up after L2CAP_CONF_MAX_CONF_RSP tries */
2688 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2691 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2692 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2696 /* throw out any old stored conf requests */
2697 result = L2CAP_CONF_SUCCESS;
2698 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2699 req, sizeof(req), &result);
2701 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2705 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2706 L2CAP_CONF_REQ, len, req);
2707 chan->num_conf_req++;
2708 if (result != L2CAP_CONF_SUCCESS)
2714 sk->sk_err = ECONNRESET;
2715 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
2716 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2723 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2725 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2726 set_default_fcs(chan);
2728 l2cap_state_change(chan, BT_CONNECTED);
2729 chan->next_tx_seq = 0;
2730 chan->expected_tx_seq = 0;
2731 skb_queue_head_init(&chan->tx_q);
2732 if (chan->mode == L2CAP_MODE_ERTM)
2733 l2cap_ertm_init(chan);
2735 l2cap_chan_ready(sk);
/*
 * Handle a Disconnection Request: find the channel by our dcid, echo a
 * Disconnection Response, shut the socket down, then delete the
 * channel — deferred via a timer if the socket is locked by userspace.
 * NOTE(review): scid/dcid declarations, sk assignment, lock/unlock and
 * return paths were elided by extraction.
 */
2743 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
2744 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2747 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2748 struct l2cap_disconn_rsp rsp;
2750 struct l2cap_chan *chan;
2753 if (cmd_len != sizeof(*req))
2756 scid = __le16_to_cpu(req->scid);
2757 dcid = __le16_to_cpu(req->dcid);
2759 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2761 chan = l2cap_get_chan_by_scid(conn, dcid);
/* respond with CIDs swapped into the peer's perspective */
2767 rsp.dcid = cpu_to_le16(chan->scid);
2768 rsp.scid = cpu_to_le16(chan->dcid);
2769 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2771 sk->sk_shutdown = SHUTDOWN_MASK;
2773 /* don't delete l2cap channel if sk is owned by user */
2774 if (sock_owned_by_user(sk)) {
2775 l2cap_state_change(chan, BT_DISCONN);
2776 __clear_chan_timer(chan);
2777 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2782 l2cap_chan_del(chan, ECONNRESET);
2785 chan->ops->close(chan->data);
/*
 * Handle a Disconnection Response to our own request: locate the
 * channel by scid and delete it (error 0 — clean close), deferring via
 * a timer if the socket is user-locked.
 * NOTE(review): declarations, sk assignment, lock/unlock and return
 * paths were elided by extraction.
 */
2789 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
2790 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2793 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2795 struct l2cap_chan *chan;
2798 if (cmd_len != sizeof(*rsp))
2801 scid = __le16_to_cpu(rsp->scid);
2802 dcid = __le16_to_cpu(rsp->dcid);
2804 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2806 chan = l2cap_get_chan_by_scid(conn, scid);
2812 /* don't delete l2cap channel if sk is owned by user */
2813 if (sock_owned_by_user(sk)) {
2814 l2cap_state_change(chan,BT_DISCONN);
2815 __clear_chan_timer(chan);
2816 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* err 0: this is the expected, clean teardown path */
2821 l2cap_chan_del(chan, 0);
2824 chan->ops->close(chan->data);
/*
 * Handle an Information Request: answer FEAT_MASK with our feature
 * mask (ERTM/streaming bits OR'd in), FIXED_CHAN with the fixed-channel
 * bitmap, and anything else with NOTSUPP.
 * NOTE(review): `type` declaration, buf declarations/sizes and returns
 * were elided by extraction.
 */
2828 static inline int l2cap_information_req(struct l2cap_conn *conn,
2829 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2832 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2835 if (cmd_len != sizeof(*req))
2838 type = __le16_to_cpu(req->type);
2840 BT_DBG("type 0x%4.4x", type);
2842 if (type == L2CAP_IT_FEAT_MASK) {
2844 u32 feat_mask = l2cap_feat_mask;
2845 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2846 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2847 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2849 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2851 put_unaligned_le32(feat_mask, rsp->data);
2852 l2cap_send_cmd(conn, cmd->ident,
2853 L2CAP_INFO_RSP, sizeof(buf), buf);
2854 } else if (type == L2CAP_IT_FIXED_CHAN) {
2856 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2857 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2858 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel map copied after the 4-byte rsp header */
2859 memcpy(buf + 4, l2cap_fixed_chan, 8);
2860 l2cap_send_cmd(conn, cmd->ident,
2861 L2CAP_INFO_RSP, sizeof(buf), buf);
2863 struct l2cap_info_rsp rsp;
2864 rsp.type = cpu_to_le16(type);
2865 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2866 l2cap_send_cmd(conn, cmd->ident,
2867 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an Information Response.  Validates the ident against our
 * outstanding request, stops the info timer, and either finishes
 * feature discovery (on failure or after FIXED_CHAN) or chains a
 * FIXED_CHAN query when the peer advertises fixed-channel support —
 * finally starting any channels waiting on the feature exchange.
 * NOTE(review): type/result declarations, `return 0` lines and the
 * info-timer arm for the chained request were elided by extraction.
 */
2873 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
2874 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2877 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2880 if (cmd_len < sizeof(*rsp))
2883 type = __le16_to_cpu(rsp->type);
2884 result = __le16_to_cpu(rsp->result);
2886 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2888 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2889 if (cmd->ident != conn->info_ident ||
2890 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2893 del_timer(&conn->info_timer);
2895 if (result != L2CAP_IR_SUCCESS) {
2896 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2897 conn->info_ident = 0;
2899 l2cap_conn_start(conn);
2904 if (type == L2CAP_IT_FEAT_MASK) {
2905 conn->feat_mask = get_unaligned_le32(rsp->data);
/* chain a fixed-channel query before declaring discovery done */
2907 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2908 struct l2cap_info_req req;
2909 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2911 conn->info_ident = l2cap_get_ident(conn);
2913 l2cap_send_cmd(conn, conn->info_ident,
2914 L2CAP_INFO_REQ, sizeof(req), &req);
2916 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2917 conn->info_ident = 0;
2919 l2cap_conn_start(conn);
2921 } else if (type == L2CAP_IT_FIXED_CHAN) {
2922 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2923 conn->info_ident = 0;
2925 l2cap_conn_start(conn);
/*
 * Validate LE connection-parameter-update values against the ranges in
 * the spec: interval 6..3200 with min <= max, supervision timeout
 * 10..3200, timeout strictly greater than 8*max interval, and slave
 * latency <= 499 and below the derived maximum.
 * NOTE(review): the `-ERANGE`/`0` return lines were elided by
 * extraction.
 */
2931 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2936 if (min > max || min < 6 || max > 3200)
2939 if (to_multiplier < 10 || to_multiplier > 3200)
/* supervision timeout must exceed 8 connection intervals */
2942 if (max >= to_multiplier * 8)
2945 max_latency = (to_multiplier * 8 / max) - 1;
2946 if (latency > 499 || latency > max_latency)
/*
 * Handle an LE Connection Parameter Update Request (master role only):
 * validate the proposed min/max interval, latency and supervision
 * timeout, reply accepted/rejected, and when accepted push the new
 * parameters to the controller via hci_le_conn_update().
 * NOTE(review): the master-role reject return, rsp-size argument and
 * the `if (!err)` guard were elided by extraction.
 */
2952 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2953 struct l2cap_cmd_hdr *cmd, u8 *data)
2955 struct hci_conn *hcon = conn->hcon;
2956 struct l2cap_conn_param_update_req *req;
2957 struct l2cap_conn_param_update_rsp rsp;
2958 u16 min, max, latency, to_multiplier, cmd_len;
/* only the master may act on a parameter-update request */
2961 if (!(hcon->link_mode & HCI_LM_MASTER))
2964 cmd_len = __le16_to_cpu(cmd->len);
2965 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2968 req = (struct l2cap_conn_param_update_req *) data;
2969 min = __le16_to_cpu(req->min);
2970 max = __le16_to_cpu(req->max);
2971 latency = __le16_to_cpu(req->latency);
2972 to_multiplier = __le16_to_cpu(req->to_multiplier);
2974 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2975 min, max, latency, to_multiplier);
2977 memset(&rsp, 0, sizeof(rsp));
2979 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2981 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2983 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2985 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* parameters accepted: apply them on the HCI connection */
2989 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/*
 * Dispatch one BR/EDR signaling command to its handler.  ECHO_REQ is
 * answered inline by echoing the payload; unknown opcodes log an error
 * (and, per the elided tail, return -EINVAL).
 * NOTE(review): `int err = 0;`, `break`s and the final return were
 * elided by extraction.
 */
2994 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2995 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2999 switch (cmd->code) {
3000 case L2CAP_COMMAND_REJ:
3001 l2cap_command_rej(conn, cmd, cmd_len, data);
3004 case L2CAP_CONN_REQ:
3005 err = l2cap_connect_req(conn, cmd, cmd_len, data);
3008 case L2CAP_CONN_RSP:
3009 err = l2cap_connect_rsp(conn, cmd, cmd_len, data);
3012 case L2CAP_CONF_REQ:
3013 err = l2cap_config_req(conn, cmd, cmd_len, data);
3016 case L2CAP_CONF_RSP:
3017 err = l2cap_config_rsp(conn, cmd, cmd_len, data);
3020 case L2CAP_DISCONN_REQ:
3021 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
3024 case L2CAP_DISCONN_RSP:
3025 err = l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
3028 case L2CAP_ECHO_REQ:
/* echo: bounce the payload straight back with the same ident */
3029 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3032 case L2CAP_ECHO_RSP:
3035 case L2CAP_INFO_REQ:
3036 err = l2cap_information_req(conn, cmd, cmd_len, data);
3039 case L2CAP_INFO_RSP:
3040 err = l2cap_information_rsp(conn, cmd, cmd_len, data);
3044 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command. Only the connection parameter
 * update exchange is handled here; unknown opcodes are logged.
 */
3052 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3053 struct l2cap_cmd_hdr *cmd, u8 *data)
3055 switch (cmd->code) {
3056 case L2CAP_COMMAND_REJ:
3059 case L2CAP_CONN_PARAM_UPDATE_REQ:
3060 return l2cap_conn_param_update_req(conn, cmd, data);
3062 case L2CAP_CONN_PARAM_UPDATE_RSP:
3066 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process a signaling-channel PDU: iterate over the embedded command
 * headers, dispatch each to the LE or BR/EDR handler depending on link
 * type, and answer failures with a Command Reject.
 */
3071 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3072 struct sk_buff *skb)
3074 u8 *data = skb->data;
3076 struct l2cap_cmd_hdr cmd;
3079 l2cap_raw_recv(conn, skb);
/* A single PDU may carry several commands back to back. */
3081 while (len >= L2CAP_CMD_HDR_SIZE) {
3083 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3084 data += L2CAP_CMD_HDR_SIZE;
3085 len -= L2CAP_CMD_HDR_SIZE;
3087 cmd_len = le16_to_cpu(cmd.len);
3089 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Payload must fit in what is left, and ident 0 is reserved. */
3091 if (cmd_len > len || !cmd.ident) {
3092 BT_DBG("corrupted command");
3096 if (conn->hcon->type == LE_LINK)
3097 err = l2cap_le_sig_cmd(conn, &cmd, data);
3099 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3102 struct l2cap_cmd_rej_unk rej;
3104 BT_ERR("Wrong link type (%d)", err);
3106 /* FIXME: Map err to a valid reason */
3107 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3108 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 FCS trailing an ERTM/streaming frame. The two FCS
 * bytes are trimmed off, then the received value (now just past the
 * payload end) is compared against a CRC computed over the L2CAP basic
 * header plus the payload.
 */
3118 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3120 u16 our_fcs, rcv_fcs;
/* Basic header plus the 16-bit control field. */
3121 int hdr_size = L2CAP_HDR_SIZE + 2;
3123 if (chan->fcs == L2CAP_FCS_CRC16) {
3124 skb_trim(skb, skb->len - 2);
/* After the trim, skb->data + skb->len points at the FCS bytes. */
3125 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3126 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3128 if (our_fcs != rcv_fcs)
/* Answer a poll by sending pending I-frames, or an RNR while locally
 * busy, or an RR if nothing else got sent.
 */
3134 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3138 chan->frames_sent = 0;
3140 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
/* Locally busy: tell the peer to stop with Receiver Not Ready. */
3142 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3143 control |= L2CAP_SUPER_RCV_NOT_READY;
3144 l2cap_send_sframe(chan, control);
3145 set_bit(CONN_RNR_SENT, &chan->conn_state);
3148 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3149 l2cap_retransmit_frames(chan);
3151 l2cap_ertm_send(chan);
/* Nothing went out and we are not busy: ack with a plain RR. */
3153 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3154 chan->frames_sent == 0) {
3155 control |= L2CAP_SUPER_RCV_READY;
3156 l2cap_send_sframe(chan, control);
/* Insert an out-of-sequence I-frame into the SREJ hold queue, keeping
 * the queue ordered by tx_seq distance from buffer_seq (modulo-64
 * sequence space). Duplicate tx_seq values are detected while walking
 * the queue.
 */
3160 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
3162 struct sk_buff *next_skb;
3163 int tx_seq_offset, next_tx_seq_offset;
3165 bt_cb(skb)->tx_seq = tx_seq;
3166 bt_cb(skb)->sar = sar;
3168 next_skb = skb_peek(&chan->srej_q);
/* Empty queue: just append. */
3170 __skb_queue_tail(&chan->srej_q, skb);
/* Offsets are computed relative to buffer_seq in the mod-64 space. */
3174 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3175 if (tx_seq_offset < 0)
3176 tx_seq_offset += 64;
/* Same sequence number already queued: duplicate frame. */
3179 if (bt_cb(next_skb)->tx_seq == tx_seq)
3182 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3183 chan->buffer_seq) % 64;
3184 if (next_tx_seq_offset < 0)
3185 next_tx_seq_offset += 64;
/* Found the first queued frame that is "later" than us: insert here. */
3187 if (next_tx_seq_offset > tx_seq_offset) {
3188 __skb_queue_before(&chan->srej_q, next_skb, skb);
3192 if (skb_queue_is_last(&chan->srej_q, next_skb))
3195 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
/* Larger than everything queued: append at the tail. */
3197 __skb_queue_tail(&chan->srej_q, skb);
/* Append a fragment to a partially reassembled SDU via the skb frag
 * list, tracking the tail in *last_frag so appends stay O(1), and keep
 * the head skb's accounting (len/data_len/truesize) consistent.
 */
3202 static void append_skb_frag(struct sk_buff *skb,
3203 struct sk_buff *new_frag, struct sk_buff **last_frag)
3205 /* skb->len reflects data in skb as well as all fragments
3206 * skb->data_len reflects only data in fragments
/* First fragment starts the frag list. */
3208 if (!skb_has_frag_list(skb))
3209 skb_shinfo(skb)->frag_list = new_frag;
3211 new_frag->next = NULL;
3213 (*last_frag)->next = new_frag;
3214 *last_frag = new_frag;
3216 skb->len += new_frag->len;
3217 skb->data_len += new_frag->len;
3218 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from I-frames according to the SAR bits in the
 * control field: unsegmented frames are delivered directly; START
 * begins a new SDU (its first two payload bytes carry the total SDU
 * length), CONTINUE/END append fragments, and END delivers the
 * completed SDU to the channel's recv() op. On error the partial SDU
 * is discarded.
 */
3221 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3225 switch (control & L2CAP_CTRL_SAR) {
3226 case L2CAP_SDU_UNSEGMENTED:
3230 err = chan->ops->recv(chan->data, skb);
3233 case L2CAP_SDU_START:
/* SDU length prefix precedes the first fragment's data. */
3237 chan->sdu_len = get_unaligned_le16(skb->data);
/* Refuse SDUs larger than our negotiated incoming MTU. */
3240 if (chan->sdu_len > chan->imtu) {
3245 if (skb->len >= chan->sdu_len)
3249 chan->sdu_last_frag = skb;
3255 case L2CAP_SDU_CONTINUE:
3259 append_skb_frag(chan->sdu, skb,
3260 &chan->sdu_last_frag);
/* More data than announced means a malformed segmentation. */
3263 if (chan->sdu->len >= chan->sdu_len)
3273 append_skb_frag(chan->sdu, skb,
3274 &chan->sdu_last_frag);
/* END frame must complete the SDU to exactly the announced length. */
3277 if (chan->sdu->len != chan->sdu_len)
3280 err = chan->ops->recv(chan->data, chan->sdu);
3283 /* Reassembly complete */
3285 chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU and reset reassembly state. */
3293 kfree_skb(chan->sdu);
3295 chan->sdu_last_frag = NULL;
/* Enter the ERTM local-busy condition: tell the peer to stop sending
 * (RNR S-frame) and stop acknowledging on the ack timer.
 */
3302 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3306 BT_DBG("chan %p, Enter local busy", chan);
3308 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3310 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3311 control |= L2CAP_SUPER_RCV_NOT_READY;
3312 l2cap_send_sframe(chan, control);
3314 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* While busy we must not send acknowledgements. */
3316 __clear_ack_timer(chan);
/* Leave the ERTM local-busy condition: send RR with the poll bit set,
 * arm the monitor timer and wait for the peer's final response.
 */
3319 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
/* Nothing to undo if we never told the peer we were busy. */
3323 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3326 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3327 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3328 l2cap_send_sframe(chan, control);
3329 chan->retry_count = 1;
3331 __clear_retrans_timer(chan);
3332 __set_monitor_timer(chan);
/* The poll bit obliges the peer to answer with F=1. */
3334 set_bit(CONN_WAIT_F, &chan->conn_state);
3337 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3338 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3340 BT_DBG("chan %p, Exit local busy", chan);
/* Public hook: toggle the local-busy condition of an ERTM channel.
 * No-op for other channel modes.
 */
3343 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3345 if (chan->mode == L2CAP_MODE_ERTM) {
3347 l2cap_ertm_enter_local_busy(chan);
3349 l2cap_ertm_exit_local_busy(chan);
/* Drain the SREJ hold queue: deliver queued frames that are now in
 * sequence (starting at tx_seq), stopping at the first gap or when the
 * channel becomes locally busy.
 */
3353 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3355 struct sk_buff *skb;
3358 while ((skb = skb_peek(&chan->srej_q)) &&
3359 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Head of queue is not the next expected frame: still a gap. */
3362 if (bt_cb(skb)->tx_seq != tx_seq)
3365 skb = skb_dequeue(&chan->srej_q);
3366 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3367 err = l2cap_reassemble_sdu(chan, skb, control);
/* Reassembly failure is fatal for the channel. */
3370 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Advance both counters in the mod-64 sequence space. */
3374 chan->buffer_seq_srej =
3375 (chan->buffer_seq_srej + 1) % 64;
3376 tx_seq = (tx_seq + 1) % 64;
/* Re-send SREJ S-frames for entries up to and including tx_seq,
 * recycling each srej_list entry to the tail of the list.
 */
3380 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3382 struct srej_list *l, *tmp;
3385 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Stop once the requested sequence number has been reached. */
3386 if (l->tx_seq == tx_seq) {
3391 control = L2CAP_SUPER_SELECT_REJECT;
3392 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3393 l2cap_send_sframe(chan, control);
3395 list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ S-frame for every missing sequence number between
 * expected_tx_seq and tx_seq, remembering each requested number on the
 * srej_l list.
 */
3399 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3401 struct srej_list *new;
3404 while (tx_seq != chan->expected_tx_seq) {
3405 control = L2CAP_SUPER_SELECT_REJECT;
3406 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3407 l2cap_send_sframe(chan, control);
/* FIXME: kzalloc(GFP_ATOMIC) may fail; the result is dereferenced
 * without a NULL check, which would oops under memory pressure.
 */
3409 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3410 new->tx_seq = chan->expected_tx_seq;
3411 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3412 list_add_tail(&new->list, &chan->srej_l);
/* Skip past the frame that actually arrived (tx_seq itself). */
3414 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/* Core ERTM receive path for I-frames: ack bookkeeping, sequence
 * validation against the tx window, SREJ-based recovery for
 * out-of-order frames, SDU reassembly for in-order frames, and
 * periodic acknowledgement.
 */
3417 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3419 u8 tx_seq = __get_txseq(rx_control);
3420 u8 req_seq = __get_reqseq(rx_control);
3421 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3422 int tx_seq_offset, expected_tx_seq_offset;
/* Send an ack after roughly a sixth of the tx window has arrived. */
3423 int num_to_ack = (chan->tx_win/6) + 1;
3426 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3427 tx_seq, rx_control);
/* F-bit answers an earlier poll: leave the WAIT_F state. */
3429 if (L2CAP_CTRL_FINAL & rx_control &&
3430 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3431 __clear_monitor_timer(chan);
3432 if (chan->unacked_frames > 0)
3433 __set_retrans_timer(chan);
3434 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* The piggybacked ReqSeq acknowledges our outstanding frames. */
3437 chan->expected_ack_seq = req_seq;
3438 l2cap_drop_acked_frames(chan);
3440 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3441 if (tx_seq_offset < 0)
3442 tx_seq_offset += 64;
3444 /* invalid tx_seq */
3445 if (tx_seq_offset >= chan->tx_win) {
3446 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3450 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3453 if (tx_seq == chan->expected_tx_seq)
/* Out-of-order frame while a SREJ recovery is already in progress. */
3456 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3457 struct srej_list *first;
3459 first = list_first_entry(&chan->srej_l,
3460 struct srej_list, list);
3461 if (tx_seq == first->tx_seq) {
3462 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3463 l2cap_check_srej_gap(chan, tx_seq);
3465 list_del(&first->list);
/* All requested frames arrived: leave the SREJ_SENT state. */
3468 if (list_empty(&chan->srej_l)) {
3469 chan->buffer_seq = chan->buffer_seq_srej;
3470 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3471 l2cap_send_ack(chan);
3472 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3475 struct srej_list *l;
3477 /* duplicated tx_seq */
3478 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3481 list_for_each_entry(l, &chan->srej_l, list) {
3482 if (l->tx_seq == tx_seq) {
3483 l2cap_resend_srejframe(chan, tx_seq);
3487 l2cap_send_srejframe(chan, tx_seq);
/* Out-of-order frame, no recovery active yet. */
3490 expected_tx_seq_offset =
3491 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3492 if (expected_tx_seq_offset < 0)
3493 expected_tx_seq_offset += 64;
3495 /* duplicated tx_seq */
3496 if (tx_seq_offset < expected_tx_seq_offset)
3499 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3501 BT_DBG("chan %p, Enter SREJ", chan);
3503 INIT_LIST_HEAD(&chan->srej_l);
3504 chan->buffer_seq_srej = chan->buffer_seq;
3506 __skb_queue_head_init(&chan->srej_q);
3507 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3509 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3511 l2cap_send_srejframe(chan, tx_seq);
3513 __clear_ack_timer(chan);
/* In-sequence frame: the expected path. */
3518 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/* During SREJ recovery even in-order frames are queued, not passed up. */
3520 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3521 bt_cb(skb)->tx_seq = tx_seq;
3522 bt_cb(skb)->sar = sar;
3523 __skb_queue_tail(&chan->srej_q, skb);
3527 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3528 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3530 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3534 if (rx_control & L2CAP_CTRL_FINAL) {
3535 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3536 l2cap_retransmit_frames(chan);
3539 __set_ack_timer(chan);
/* Ack proactively once enough frames have accumulated. */
3541 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3542 if (chan->num_acked == num_to_ack - 1)
3543 l2cap_send_ack(chan);
/* Handle a Receiver Ready S-frame: process the acknowledgement, then
 * react to the poll/final bits (answer polls, retransmit on F after
 * REJ, resume sending otherwise).
 */
3552 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3554 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3557 chan->expected_ack_seq = __get_reqseq(rx_control);
3558 l2cap_drop_acked_frames(chan);
/* Poll: the peer demands a response with the F-bit set. */
3560 if (rx_control & L2CAP_CTRL_POLL) {
3561 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3562 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3563 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3564 (chan->unacked_frames > 0))
3565 __set_retrans_timer(chan);
3567 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3568 l2cap_send_srejtail(chan);
3570 l2cap_send_i_or_rr_or_rnr(chan);
/* Final: answers our earlier poll; retransmit unless a REJ already did. */
3573 } else if (rx_control & L2CAP_CTRL_FINAL) {
3574 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3576 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3577 l2cap_retransmit_frames(chan);
/* Plain RR: peer is ready again, resume normal transmission. */
3580 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3581 (chan->unacked_frames > 0))
3582 __set_retrans_timer(chan);
3584 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3585 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3586 l2cap_send_ack(chan);
3588 l2cap_ertm_send(chan);
/* Handle a Reject S-frame: ack up to ReqSeq and retransmit from there,
 * tracking REJ_ACT so a later F-bit does not trigger a second
 * retransmission.
 */
3592 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3594 u8 tx_seq = __get_reqseq(rx_control);
3596 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3598 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3600 chan->expected_ack_seq = tx_seq;
3601 l2cap_drop_acked_frames(chan);
3603 if (rx_control & L2CAP_CTRL_FINAL) {
/* Skip retransmission if this REJ was already acted upon. */
3604 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3605 l2cap_retransmit_frames(chan);
3607 l2cap_retransmit_frames(chan);
/* While waiting for F, remember we already retransmitted for this REJ. */
3609 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3610 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a Selective Reject S-frame: retransmit the single requested
 * frame, with poll/final-bit bookkeeping mirroring the REJ handler
 * (SREJ_ACT guards against double retransmission).
 */
3613 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3615 u8 tx_seq = __get_reqseq(rx_control);
3617 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3619 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* SREJ with P-bit: also acts as an acknowledgement and demands F=1. */
3621 if (rx_control & L2CAP_CTRL_POLL) {
3622 chan->expected_ack_seq = tx_seq;
3623 l2cap_drop_acked_frames(chan);
3625 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3626 l2cap_retransmit_one_frame(chan, tx_seq);
3628 l2cap_ertm_send(chan);
3630 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3631 chan->srej_save_reqseq = tx_seq;
3632 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3634 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* F-bit: only retransmit if not already done for this sequence. */
3635 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3636 chan->srej_save_reqseq == tx_seq)
3637 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3639 l2cap_retransmit_one_frame(chan, tx_seq);
3641 l2cap_retransmit_one_frame(chan, tx_seq);
3642 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3643 chan->srej_save_reqseq = tx_seq;
3644 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a Receiver Not Ready S-frame: mark the peer busy, process the
 * acknowledgement, and answer a poll appropriately.
 */
3649 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3651 u8 tx_seq = __get_reqseq(rx_control);
3653 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3655 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3656 chan->expected_ack_seq = tx_seq;
3657 l2cap_drop_acked_frames(chan);
3659 if (rx_control & L2CAP_CTRL_POLL)
3660 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* Not in SREJ recovery: stop retransmissions while the peer is busy. */
3662 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3663 __clear_retrans_timer(chan);
3664 if (rx_control & L2CAP_CTRL_POLL)
3665 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
/* In SREJ recovery: answer a poll with the tail SREJ, else a plain RR. */
3669 if (rx_control & L2CAP_CTRL_POLL)
3670 l2cap_send_srejtail(chan);
3672 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
/* Dispatch an S-frame (RR/REJ/SREJ/RNR) to its handler after common
 * F-bit processing for the WAIT_F state.
 */
3675 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3677 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
/* F-bit answers our poll: stop the monitor timer, leave WAIT_F. */
3679 if (L2CAP_CTRL_FINAL & rx_control &&
3680 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3681 __clear_monitor_timer(chan);
3682 if (chan->unacked_frames > 0)
3683 __set_retrans_timer(chan);
3684 clear_bit(CONN_WAIT_F, &chan->conn_state);
3687 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3688 case L2CAP_SUPER_RCV_READY:
3689 l2cap_data_channel_rrframe(chan, rx_control);
3692 case L2CAP_SUPER_REJECT:
3693 l2cap_data_channel_rejframe(chan, rx_control);
3696 case L2CAP_SUPER_SELECT_REJECT:
3697 l2cap_data_channel_srejframe(chan, rx_control);
3700 case L2CAP_SUPER_RCV_NOT_READY:
3701 l2cap_data_channel_rnrframe(chan, rx_control);
/* Entry point for a received ERTM frame: validate FCS, payload length
 * and ReqSeq, then route the frame to the I-frame or S-frame handler.
 */
3709 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3711 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3714 int len, next_tx_seq_offset, req_seq_offset;
3716 control = get_unaligned_le16(skb->data);
3721 * We can just drop the corrupted I-frame here.
3722 * Receiver will miss it and start proper recovery
3723 * procedures and ask retransmission.
3725 if (l2cap_check_fcs(chan, skb))
/* Adjust the payload length for the SAR length field and the FCS. */
3728 if (__is_sar_start(control) && __is_iframe(control))
3731 if (chan->fcs == L2CAP_FCS_CRC16)
3734 if (len > chan->mps) {
3735 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* ReqSeq must acknowledge something we actually sent (mod-64 window). */
3739 req_seq = __get_reqseq(control);
3740 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3741 if (req_seq_offset < 0)
3742 req_seq_offset += 64;
3744 next_tx_seq_offset =
3745 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3746 if (next_tx_seq_offset < 0)
3747 next_tx_seq_offset += 64;
3749 /* check for invalid req-seq */
3750 if (req_seq_offset > next_tx_seq_offset) {
3751 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3755 if (__is_iframe(control)) {
3757 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3761 l2cap_data_channel_iframe(chan, control, skb);
/* S-frame: must carry no payload beyond control and FCS. */
3765 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3769 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver a data frame to the channel identified by its source CID,
 * handling it per channel mode: basic (direct delivery), ERTM (full
 * state machine, via backlog when the socket is owned), or streaming
 * (sequence check, no retransmission).
 */
3779 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3781 struct l2cap_chan *chan;
3782 struct sock *sk = NULL;
3787 chan = l2cap_get_chan_by_scid(conn, cid);
3789 BT_DBG("unknown cid 0x%4.4x", cid);
3795 BT_DBG("chan %p, len %d", chan, skb->len);
3797 if (chan->state != BT_CONNECTED)
3800 switch (chan->mode) {
3801 case L2CAP_MODE_BASIC:
3802 /* If socket recv buffers overflows we drop data here
3803 * which is *bad* because L2CAP has to be reliable.
3804 * But we don't have any other choice. L2CAP doesn't
3805 * provide flow control mechanism. */
3807 if (chan->imtu < skb->len)
3810 if (!chan->ops->recv(chan->data, skb))
3814 case L2CAP_MODE_ERTM:
/* Process now if possible; otherwise queue on the socket backlog. */
3815 if (!sock_owned_by_user(sk)) {
3816 l2cap_ertm_data_rcv(sk, skb);
3818 if (sk_add_backlog(sk, skb))
3824 case L2CAP_MODE_STREAMING:
3825 control = get_unaligned_le16(skb->data);
3829 if (l2cap_check_fcs(chan, skb))
3832 if (__is_sar_start(control))
3835 if (chan->fcs == L2CAP_FCS_CRC16)
3838 if (len > chan->mps || len < 0 || __is_sframe(control))
3841 tx_seq = __get_txseq(control);
/* Streaming mode never retransmits: a gap discards the partial SDU. */
3843 if (chan->expected_tx_seq != tx_seq) {
3844 /* Frame(s) missing - must discard partial SDU */
3845 kfree_skb(chan->sdu);
3847 chan->sdu_last_frag = NULL;
3850 /* TODO: Notify userland of missing data */
3853 chan->expected_tx_seq = (tx_seq + 1) % 64;
3855 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
3856 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3861 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless-channel frame to the listening channel
 * registered for the given PSM, enforcing the incoming MTU.
 */
3875 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3877 struct sock *sk = NULL;
3878 struct l2cap_chan *chan;
3880 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3888 BT_DBG("sk %p, len %d", sk, skb->len);
3890 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3893 if (chan->imtu < skb->len)
3896 if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT (LE fixed-channel) frame to the channel registered
 * for the given CID, enforcing the incoming MTU.
 */
3908 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3910 struct sock *sk = NULL;
3911 struct l2cap_chan *chan;
3913 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3921 BT_DBG("sk %p, len %d", sk, skb->len);
3923 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3926 if (chan->imtu < skb->len)
3929 if (!chan->ops->recv(chan->data, skb))
/* Route a complete, reassembled L2CAP frame by its destination CID:
 * signaling, connectionless, LE ATT, SMP, or a regular data channel.
 */
3941 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3943 struct l2cap_hdr *lh = (void *) skb->data;
3947 skb_pull(skb, L2CAP_HDR_SIZE);
3948 cid = __le16_to_cpu(lh->cid);
3949 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length. */
3951 if (len != skb->len) {
3956 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3959 case L2CAP_CID_LE_SIGNALING:
3960 case L2CAP_CID_SIGNALING:
3961 l2cap_sig_channel(conn, skb);
3964 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM as their first two bytes. */
3965 psm = get_unaligned_le16(skb->data);
3967 l2cap_conless_channel(conn, psm, skb);
3970 case L2CAP_CID_LE_DATA:
3971 l2cap_att_channel(conn, cid, skb);
/* SMP failure tears down the whole connection. */
3975 if (smp_sig_channel(conn, skb))
3976 l2cap_conn_del(conn->hcon, EACCES);
3980 l2cap_data_channel(conn, cid, skb);
3985 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection by
 * scanning listening channels. A channel bound to this adapter's
 * address takes precedence (lm1/exact) over wildcard listeners (lm2).
 */
3987 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3989 int exact = 0, lm1 = 0, lm2 = 0;
3990 struct l2cap_chan *c;
3992 if (type != ACL_LINK)
3995 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3997 /* Find listening sockets and check their link_mode */
3998 read_lock(&chan_list_lock);
3999 list_for_each_entry(c, &chan_list, global_l) {
4000 struct sock *sk = c->sk;
4002 if (c->state != BT_LISTEN)
/* Exact match on the local adapter address. */
4005 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4006 lm1 |= HCI_LM_ACCEPT;
4008 lm1 |= HCI_LM_MASTER;
/* Wildcard listener (BDADDR_ANY). */
4010 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4011 lm2 |= HCI_LM_ACCEPT;
4013 lm2 |= HCI_LM_MASTER;
4016 read_unlock(&chan_list_lock);
4018 return exact ? lm1 : lm2;
/* HCI callback: a baseband connection completed. On success create the
 * L2CAP connection object and mark it ready; on failure tear down.
 */
4021 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4023 struct l2cap_conn *conn;
4025 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4027 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4031 conn = l2cap_conn_add(hcon, status);
4033 l2cap_conn_ready(conn);
4035 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the disconnect reason recorded on the L2CAP
 * connection (used by HCI when the link is being torn down).
 */
4040 static int l2cap_disconn_ind(struct hci_conn *hcon)
4042 struct l2cap_conn *conn = hcon->l2cap_data;
4044 BT_DBG("hcon %p", hcon);
4046 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4049 return conn->disc_reason;
/* HCI callback: the baseband link went down — tear down the L2CAP
 * connection and all of its channels.
 */
4052 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4054 BT_DBG("hcon %p reason %d", hcon, reason);
4056 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4059 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption starts a grace timer (medium security) or closes
 * the channel outright (high security); gaining it cancels the timer.
 */
4064 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4066 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4069 if (encrypt == 0x00) {
4070 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4071 __clear_chan_timer(chan);
4072 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4073 } else if (chan->sec_level == BT_SECURITY_HIGH)
4074 l2cap_chan_close(chan, ECONNREFUSED);
4076 if (chan->sec_level == BT_SECURITY_MEDIUM)
4077 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption state changed. Walk every
 * channel on the connection and advance its state machine: complete
 * pending outgoing connects, answer deferred incoming connects, or
 * apply the encryption-change policy to established channels.
 */
4081 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4083 struct l2cap_conn *conn = hcon->l2cap_data;
4084 struct l2cap_chan *chan;
4089 BT_DBG("conn %p", conn);
/* LE links: hand off to SMP key distribution instead. */
4091 if (hcon->type == LE_LINK) {
4092 smp_distribute_keys(conn, 0);
4093 del_timer(&conn->security_timer);
4096 read_lock(&conn->chan_lock);
4098 list_for_each_entry(chan, &conn->chan_l, list) {
4099 struct sock *sk = chan->sk;
4103 BT_DBG("chan->scid %d", chan->scid);
4105 if (chan->scid == L2CAP_CID_LE_DATA) {
4106 if (!status && encrypt) {
4107 chan->sec_level = hcon->sec_level;
4108 l2cap_chan_ready(sk);
4115 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
/* Established channels: apply the encryption-change policy. */
4120 if (!status && (chan->state == BT_CONNECTED ||
4121 chan->state == BT_CONFIG)) {
4122 l2cap_check_encryption(chan, encrypt);
/* Outgoing connect was waiting for security: send the request now. */
4127 if (chan->state == BT_CONNECT) {
4129 struct l2cap_conn_req req;
4130 req.scid = cpu_to_le16(chan->scid);
4131 req.psm = chan->psm;
4133 chan->ident = l2cap_get_ident(conn);
4134 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4136 l2cap_send_cmd(conn, chan->ident,
4137 L2CAP_CONN_REQ, sizeof(req), &req);
4139 __clear_chan_timer(chan);
4140 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Incoming connect was deferred pending security: answer it. */
4142 } else if (chan->state == BT_CONNECT2) {
4143 struct l2cap_conn_rsp rsp;
4147 if (bt_sk(sk)->defer_setup) {
4148 struct sock *parent = bt_sk(sk)->parent;
4149 res = L2CAP_CR_PEND;
4150 stat = L2CAP_CS_AUTHOR_PEND;
4152 parent->sk_data_ready(parent, 0);
4154 l2cap_state_change(chan, BT_CONFIG);
4155 res = L2CAP_CR_SUCCESS;
4156 stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse and schedule disconnect. */
4159 l2cap_state_change(chan, BT_DISCONN);
4160 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4161 res = L2CAP_CR_SEC_BLOCK;
4162 stat = L2CAP_CS_NO_INFO;
4165 rsp.scid = cpu_to_le16(chan->dcid);
4166 rsp.dcid = cpu_to_le16(chan->scid);
4167 rsp.result = cpu_to_le16(res);
4168 rsp.status = cpu_to_le16(stat);
4169 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4176 read_unlock(&conn->chan_lock);
/* HCI callback: receive ACL data. Reassembles an L2CAP frame from ACL
 * start/continuation fragments into conn->rx_skb, validating lengths
 * against the basic header and the destination channel's MTU, then
 * hands the complete frame to l2cap_recv_frame().
 */
4181 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4183 struct l2cap_conn *conn = hcon->l2cap_data;
4186 conn = l2cap_conn_add(hcon, 0);
4191 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
/* Start fragment of a new L2CAP frame. */
4193 if (!(flags & ACL_CONT)) {
4194 struct l2cap_hdr *hdr;
4195 struct l2cap_chan *chan;
/* A start while a reassembly is pending means we lost fragments. */
4200 BT_ERR("Unexpected start frame (len %d)", skb->len);
4201 kfree_skb(conn->rx_skb);
4202 conn->rx_skb = NULL;
4204 l2cap_conn_unreliable(conn, ECOMM);
4207 /* Start fragment always begin with Basic L2CAP header */
4208 if (skb->len < L2CAP_HDR_SIZE) {
4209 BT_ERR("Frame is too short (len %d)", skb->len);
4210 l2cap_conn_unreliable(conn, ECOMM);
4214 hdr = (struct l2cap_hdr *) skb->data;
4215 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4216 cid = __le16_to_cpu(hdr->cid);
4218 if (len == skb->len) {
4219 /* Complete frame received */
4220 l2cap_recv_frame(conn, skb);
4224 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4226 if (skb->len > len) {
4227 BT_ERR("Frame is too long (len %d, expected len %d)",
4229 l2cap_conn_unreliable(conn, ECOMM);
/* Pre-check the destination channel's MTU before buffering. */
4233 chan = l2cap_get_chan_by_scid(conn, cid);
4235 if (chan && chan->sk) {
4236 struct sock *sk = chan->sk;
4238 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4239 BT_ERR("Frame exceeding recv MTU (len %d, "
4243 l2cap_conn_unreliable(conn, ECOMM);
4249 /* Allocate skb for the complete frame (with header) */
4250 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4254 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4256 conn->rx_len = len - skb->len;
/* Continuation fragment of an in-progress frame. */
4258 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4260 if (!conn->rx_len) {
4261 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4262 l2cap_conn_unreliable(conn, ECOMM);
4266 if (skb->len > conn->rx_len) {
4267 BT_ERR("Fragment is too long (len %d, expected %d)",
4268 skb->len, conn->rx_len);
4269 kfree_skb(conn->rx_skb);
4270 conn->rx_skb = NULL;
4272 l2cap_conn_unreliable(conn, ECOMM);
4276 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4278 conn->rx_len -= skb->len;
4280 if (!conn->rx_len) {
4281 /* Complete frame received */
4282 l2cap_recv_frame(conn, conn->rx_skb);
4283 conn->rx_skb = NULL;
/* debugfs seq_file show: dump one line per registered channel with its
 * addresses, state, PSM, CIDs, MTUs, security level and mode.
 */
4292 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4294 struct l2cap_chan *c;
4296 read_lock_bh(&chan_list_lock);
4298 list_for_each_entry(c, &chan_list, global_l) {
4299 struct sock *sk = c->sk;
4301 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4302 batostr(&bt_sk(sk)->src),
4303 batostr(&bt_sk(sk)->dst),
4304 c->state, __le16_to_cpu(c->psm),
4305 c->scid, c->dcid, c->imtu, c->omtu,
4306 c->sec_level, c->mode);
4309 read_unlock_bh(&chan_list_lock);
/* debugfs open: bind the seq_file single-shot show routine. */
4314 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4316 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the /sys/kernel/debug/bluetooth/l2cap entry. */
4319 static const struct file_operations l2cap_debugfs_fops = {
4320 .open = l2cap_debugfs_open,
4322 .llseek = seq_lseek,
4323 .release = single_release,
/* Dentry of the debugfs entry, kept for removal at module exit. */
4326 static struct dentry *l2cap_debugfs;
/* Registration record hooking L2CAP into the HCI layer's protocol
 * callbacks (connect/disconnect indications, security, ACL data).
 */
4328 static struct hci_proto l2cap_hci_proto = {
4330 .id = HCI_PROTO_L2CAP,
4331 .connect_ind = l2cap_connect_ind,
4332 .connect_cfm = l2cap_connect_cfm,
4333 .disconn_ind = l2cap_disconn_ind,
4334 .disconn_cfm = l2cap_disconn_cfm,
4335 .security_cfm = l2cap_security_cfm,
4336 .recv_acldata = l2cap_recv_acldata
/* Module init: register the socket layer and the HCI protocol hooks,
 * then create the (optional) debugfs entry. Socket registration is
 * rolled back if protocol registration fails.
 */
4339 int __init l2cap_init(void)
4343 err = l2cap_init_sockets();
4347 err = hci_register_proto(&l2cap_hci_proto);
4349 BT_ERR("L2CAP protocol registration failed");
/* Roll back the socket registration done above. */
4350 bt_sock_unregister(BTPROTO_L2CAP);
4355 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4356 bt_debugfs, NULL, &l2cap_debugfs_fops);
/* debugfs failure is non-fatal; only log it. */
4358 BT_ERR("Failed to create L2CAP debug file");
4364 l2cap_cleanup_sockets();
/* Module exit: undo l2cap_init() in reverse order. */
4368 void l2cap_exit(void)
4370 debugfs_remove(l2cap_debugfs);
4372 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4373 BT_ERR("L2CAP protocol unregistration failed");
4375 l2cap_cleanup_sockets();
/* Runtime-tunable switch to turn off ERTM support (0644: root-writable). */
4378 module_param(disable_ertm, bool, 0644);
4379 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");