2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
64 static LIST_HEAD(chan_list);
65 static DEFINE_RWLOCK(chan_list_lock);
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
69 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77 /* ---- L2CAP channels ---- */
/* Take a reference on an L2CAP channel; paired with chan_put(). */
79 static inline void chan_hold(struct l2cap_chan *c)
81 	atomic_inc(&c->refcnt);
/* Drop a channel reference; when the count hits zero the channel is
 * freed (the free call is on a line dropped by the extraction). */
84 static inline void chan_put(struct l2cap_chan *c)
86 	if (atomic_dec_and_test(&c->refcnt))
/* Look up a channel on this connection by destination CID.
 * Caller must hold conn->chan_lock (no locking here). */
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
94 	list_for_each_entry(c, &conn->chan_l, list) {
/* Look up a channel on this connection by source CID.
 * Caller must hold conn->chan_lock. */
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104 	struct l2cap_chan *c;
106 	list_for_each_entry(c, &conn->chan_l, list) {
113 /* Find channel with given SCID.
114  * Returns locked socket */
/* Locked wrapper around __l2cap_get_chan_by_scid(); on a hit the
 * channel's socket is bh-locked before chan_lock is released
 * (that locking happens on lines dropped by the extraction). */
115 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
117 	struct l2cap_chan *c;
119 	read_lock(&conn->chan_lock);
120 	c = __l2cap_get_chan_by_scid(conn, cid);
123 	read_unlock(&conn->chan_lock);
/* Look up a channel by pending signalling-command identifier.
 * Caller must hold conn->chan_lock. */
127 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
129 	struct l2cap_chan *c;
131 	list_for_each_entry(c, &conn->chan_l, list) {
132 		if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
138 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
140 	struct l2cap_chan *c;
142 	read_lock(&conn->chan_lock);
143 	c = __l2cap_get_chan_by_ident(conn, ident);
146 	read_unlock(&conn->chan_lock);
/* Search the global channel list for a channel bound to this
 * (PSM, source address) pair.  Caller must hold chan_list_lock. */
150 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
152 	struct l2cap_chan *c;
154 	list_for_each_entry(c, &chan_list, global_l) {
155 		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a channel to a PSM.  If @psm is given, fail when that
 * (PSM, src) pair is already taken; if @psm is zero, auto-allocate
 * an odd dynamic PSM in 0x1001..0x10ff (step 2 keeps the low octet
 * odd, as required for valid PSM values). */
164 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
168 	write_lock_bh(&chan_list_lock);
170 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
183 		for (p = 0x1001; p < 0x1100; p += 2)
184 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
185 				chan->psm   = cpu_to_le16(p);
186 				chan->sport = cpu_to_le16(p);
193 	write_unlock_bh(&chan_list_lock);
/* Bind a channel to a fixed source CID (the assignment itself is on
 * a line dropped by the extraction); serialized by chan_list_lock. */
197 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
199 	write_lock_bh(&chan_list_lock);
203 	write_unlock_bh(&chan_list_lock);
/* Allocate a free dynamic source CID on this connection by linear
 * scan of [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). */
208 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
210 	u16 cid = L2CAP_CID_DYN_START;
212 	for (; cid < L2CAP_CID_DYN_END; cid++) {
213 		if (!__l2cap_get_chan_by_scid(conn, cid))
/* (Re)arm a channel timer @timeout ms in the future.  When mod_timer()
 * returns 0 the timer was not already pending, so a new channel
 * reference is taken (on a dropped line — presumably chan_hold()).
 * NOTE(review): the debug format says "chan %p" but passes chan->sk —
 * looks like an upstream quirk, confirm against the original file. */
220 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
222 	BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
224 	if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
/* Cancel a channel timer; if it was pending and successfully deleted,
 * drop the reference the timer held (on a dropped line). */
228 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
230 	BT_DBG("chan %p state %d", chan, chan->state);
232 	if (timer_pending(timer) && del_timer(timer))
/* Move the channel to @state and notify the owner via its
 * state_change callback. */
236 static void l2cap_state_change(struct l2cap_chan *chan, int state)
239 	chan->ops->state_change(chan->data, state);
/* Channel timer callback (setup_timer target).  If the socket is
 * currently owned by userspace the close is deferred by re-arming the
 * timer; otherwise an errno is chosen from the channel state
 * (ETIMEDOUT in the dropped else-branch) and the channel is closed. */
242 static void l2cap_chan_timeout(unsigned long arg)
244 	struct l2cap_chan *chan = (struct l2cap_chan *) arg;
245 	struct sock *sk = chan->sk;
248 	BT_DBG("chan %p state %d", chan, chan->state);
252 	if (sock_owned_by_user(sk)) {
253 		/* sk is owned by user. Try again later */
254 		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
260 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
261 		reason = ECONNREFUSED;
262 	else if (chan->state == BT_CONNECT &&
263 			chan->sec_level != BT_SECURITY_SDP)
264 		reason = ECONNREFUSED;
268 	l2cap_chan_close(chan, reason);
272 	chan->ops->close(chan->data);
/* Allocate a new channel bound to @sk, link it on the global channel
 * list, arm its timeout timer and initialise state/refcount.
 * Returns the channel (NULL-check of the allocation is on a dropped
 * line).  GFP_ATOMIC because this can run in softirq context. */
276 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
278 	struct l2cap_chan *chan;
280 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
286 	write_lock_bh(&chan_list_lock);
287 	list_add(&chan->global_l, &chan_list);
288 	write_unlock_bh(&chan_list_lock);
290 	setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
292 	chan->state = BT_OPEN;
294 	atomic_set(&chan->refcnt, 1);
/* Unlink the channel from the global list and drop the creation
 * reference (chan_put on a dropped line). */
299 void l2cap_chan_destroy(struct l2cap_chan *chan)
301 	write_lock_bh(&chan_list_lock);
302 	list_del(&chan->global_l);
303 	write_unlock_bh(&chan_list_lock);
/* Attach @chan to @conn and assign CIDs/MTU by channel type:
 * connection-oriented channels get a dynamic SCID (or the fixed LE
 * data CID on LE links), connectionless ones the CONN_LESS CID, and
 * raw channels the signalling CID.  Caller holds conn->chan_lock.
 * 0x13 = "remote user terminated connection" default disc reason. */
308 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
310 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
311 			chan->psm, chan->dcid);
313 	conn->disc_reason = 0x13;
317 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
318 		if (conn->hcon->type == LE_LINK) {
320 			chan->omtu = L2CAP_LE_DEFAULT_MTU;
321 			chan->scid = L2CAP_CID_LE_DATA;
322 			chan->dcid = L2CAP_CID_LE_DATA;
324 			/* Alloc CID for connection-oriented socket */
325 			chan->scid = l2cap_alloc_cid(conn);
326 			chan->omtu = L2CAP_DEFAULT_MTU;
328 	} else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
329 		/* Connectionless socket */
330 		chan->scid = L2CAP_CID_CONN_LESS;
331 		chan->dcid = L2CAP_CID_CONN_LESS;
332 		chan->omtu = L2CAP_DEFAULT_MTU;
334 		/* Raw socket can send/recv signalling messages only */
335 		chan->scid = L2CAP_CID_SIGNALING;
336 		chan->dcid = L2CAP_CID_SIGNALING;
337 		chan->omtu = L2CAP_DEFAULT_MTU;
342 	list_add(&chan->list, &conn->chan_l);
346  * Must be called on the locked socket. */
/* Detach the channel from its connection and tear down ERTM state:
 * stop all timers, unlink from conn->chan_l, drop the hci_conn ref,
 * mark the socket zapped, wake any accepting parent, purge tx/srej
 * queues and free pending SREJ list entries. */
347 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
349 	struct sock *sk = chan->sk;
350 	struct l2cap_conn *conn = chan->conn;
351 	struct sock *parent = bt_sk(sk)->parent;
353 	__clear_chan_timer(chan);
355 	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
358 		/* Delete from channel list */
359 		write_lock_bh(&conn->chan_lock);
360 		list_del(&chan->list);
361 		write_unlock_bh(&conn->chan_lock);
365 		hci_conn_put(conn->hcon);
368 	l2cap_state_change(chan, BT_CLOSED);
369 	sock_set_flag(sk, SOCK_ZAPPED);
	/* If a parent (listening) socket exists, unlink from its accept
	 * queue and wake it; otherwise just signal the state change. */
375 		bt_accept_unlink(sk);
376 		parent->sk_data_ready(parent, 0);
378 		sk->sk_state_change(sk);
	/* Skip queue teardown unless configuration completed both ways. */
380 	if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
381 			test_bit(CONF_INPUT_DONE, &chan->conf_state)))
384 	skb_queue_purge(&chan->tx_q);
386 	if (chan->mode == L2CAP_MODE_ERTM) {
387 		struct srej_list *l, *tmp;
389 		__clear_retrans_timer(chan);
390 		__clear_monitor_timer(chan);
391 		__clear_ack_timer(chan);
393 		skb_queue_purge(&chan->srej_q);
395 		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every not-yet-accepted child channel of a listening socket. */
402 static void l2cap_chan_cleanup_listen(struct sock *parent)
406 	BT_DBG("parent %p", parent);
408 	/* Close not yet accepted channels */
409 	while ((sk = bt_accept_dequeue(parent, NULL))) {
410 		struct l2cap_chan *chan = l2cap_pi(sk)->chan;
411 		__clear_chan_timer(chan);
413 		l2cap_chan_close(chan, ECONNRESET);
415 		chan->ops->close(chan->data);
/* Close a channel according to its current state:
 * - BT_LISTEN: tear down pending children, mark closed/zapped;
 * - BT_CONNECTED/BT_CONFIG on ACL: send a Disconnect Request and wait
 *   (timer re-armed with sk_sndtimeo); otherwise delete immediately;
 * - BT_CONNECT2 on ACL: answer the pending Connect Request with
 *   SEC_BLOCK (defer_setup) or BAD_PSM, then delete;
 * - BT_CONNECT/BT_DISCONN: delete;
 * - default: just zap the socket. */
419 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
421 	struct l2cap_conn *conn = chan->conn;
422 	struct sock *sk = chan->sk;
424 	BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
426 	switch (chan->state) {
428 		l2cap_chan_cleanup_listen(sk);
430 		l2cap_state_change(chan, BT_CLOSED);
431 		sock_set_flag(sk, SOCK_ZAPPED);
436 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
437 					conn->hcon->type == ACL_LINK) {
438 			__clear_chan_timer(chan);
439 			__set_chan_timer(chan, sk->sk_sndtimeo);
440 			l2cap_send_disconn_req(conn, chan, reason);
442 			l2cap_chan_del(chan, reason);
446 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
447 					conn->hcon->type == ACL_LINK) {
448 			struct l2cap_conn_rsp rsp;
451 			if (bt_sk(sk)->defer_setup)
452 				result = L2CAP_CR_SEC_BLOCK;
454 				result = L2CAP_CR_BAD_PSM;
455 			l2cap_state_change(chan, BT_DISCONN);
	/* Response swaps our dcid/scid into the peer's scid/dcid view. */
457 			rsp.scid = cpu_to_le16(chan->dcid);
458 			rsp.dcid = cpu_to_le16(chan->scid);
459 			rsp.result = cpu_to_le16(result);
460 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
461 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
465 		l2cap_chan_del(chan, reason);
470 		l2cap_chan_del(chan, reason);
474 		sock_set_flag(sk, SOCK_ZAPPED);
/* Map channel type / security level to an HCI authentication
 * requirement.  Raw channels request dedicated bonding, SDP
 * (PSM 0x0001) never bonds (and is demoted from LOW to SDP level),
 * everything else requests general bonding; MITM variants are used
 * at BT_SECURITY_HIGH. */
479 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
481 	if (chan->chan_type == L2CAP_CHAN_RAW) {
482 		switch (chan->sec_level) {
483 		case BT_SECURITY_HIGH:
484 			return HCI_AT_DEDICATED_BONDING_MITM;
485 		case BT_SECURITY_MEDIUM:
486 			return HCI_AT_DEDICATED_BONDING;
488 			return HCI_AT_NO_BONDING;
490 	} else if (chan->psm == cpu_to_le16(0x0001)) {
491 		if (chan->sec_level == BT_SECURITY_LOW)
492 			chan->sec_level = BT_SECURITY_SDP;
494 		if (chan->sec_level == BT_SECURITY_HIGH)
495 			return HCI_AT_NO_BONDING_MITM;
497 			return HCI_AT_NO_BONDING;
499 		switch (chan->sec_level) {
500 		case BT_SECURITY_HIGH:
501 			return HCI_AT_GENERAL_BONDING_MITM;
502 		case BT_SECURITY_MEDIUM:
503 			return HCI_AT_GENERAL_BONDING;
505 			return HCI_AT_NO_BONDING;
510 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level on the
 * underlying link; non-zero means security is already satisfied. */
511 static inline int l2cap_check_security(struct l2cap_chan *chan)
513 	struct l2cap_conn *conn = chan->conn;
516 	auth_type = l2cap_get_auth_type(chan);
518 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for this connection
 * (wraps back to 1 after 128 on a dropped line; ident 0 is invalid). */
521 static u8 l2cap_get_ident(struct l2cap_conn *conn)
525 	/* Get next available identificator.
526 	 *    1 - 128 are used by kernel.
527 	 *  129 - 199 are reserved.
528 	 *  200 - 254 are used by utilities like l2ping, etc.
531 	spin_lock_bh(&conn->lock);
533 	if (++conn->tx_ident > 128)
538 	spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command and push it down to HCI.
 * Uses the no-flush ACL start flag when the controller supports
 * non-flushable packets; command frames always force active mode. */
543 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
545 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
548 	BT_DBG("code 0x%2.2x", code);
553 	if (lmp_no_flush_capable(conn->hcon->hdev))
554 		flags = ACL_START_NO_FLUSH;
558 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
560 	hci_send_acl(conn->hcon, skb, flags);
/* Build and transmit an ERTM S-frame carrying @control.  Only valid
 * in BT_CONNECTED; hlen grows by 2 when CRC16 FCS is enabled (on a
 * dropped line).  Pending F/P bits are folded into the control field
 * and their flags cleared atomically. */
563 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
566 	struct l2cap_hdr *lh;
567 	struct l2cap_conn *conn = chan->conn;
568 	int count, hlen = L2CAP_HDR_SIZE + 2;
571 	if (chan->state != BT_CONNECTED)
574 	if (chan->fcs == L2CAP_FCS_CRC16)
577 	BT_DBG("chan %p, control 0x%2.2x", chan, control);
579 	count = min_t(unsigned int, conn->mtu, hlen);
580 	control |= L2CAP_CTRL_FRAME_TYPE;
582 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
583 		control |= L2CAP_CTRL_FINAL;
585 	if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
586 		control |= L2CAP_CTRL_POLL;
588 	skb = bt_skb_alloc(count, GFP_ATOMIC);
592 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
593 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
594 	lh->cid = cpu_to_le16(chan->dcid);
595 	put_unaligned_le16(control, skb_put(skb, 2));
	/* FCS covers everything before the trailing 2-byte checksum. */
597 	if (chan->fcs == L2CAP_FCS_CRC16) {
598 		u16 fcs = crc16(0, (u8 *)lh, count - 2);
599 		put_unaligned_le16(fcs, skb_put(skb, 2));
602 	if (lmp_no_flush_capable(conn->hcon->hdev))
603 		flags = ACL_START_NO_FLUSH;
607 	bt_cb(skb)->force_active = chan->force_active;
609 	hci_send_acl(chan->conn->hcon, skb, flags);
/* Send Receiver-Ready, or Receiver-Not-Ready when the local side is
 * busy (also remembering that an RNR went out), acknowledging up to
 * buffer_seq. */
612 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
614 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
615 		control |= L2CAP_SUPER_RCV_NOT_READY;
616 		set_bit(CONN_RNR_SENT, &chan->conn_state);
618 		control |= L2CAP_SUPER_RCV_READY;
620 	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
622 	l2cap_send_sframe(chan, control);
/* True when no Connect Request is outstanding for this channel. */
625 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
627 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Kick off channel establishment.  If the feature-mask exchange has
 * completed, send a Connect Request (once security passes and no
 * request is pending); otherwise first send an Information Request
 * for the feature mask and arm the info timer. */
630 static void l2cap_do_start(struct l2cap_chan *chan)
632 	struct l2cap_conn *conn = chan->conn;
634 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
635 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
638 		if (l2cap_check_security(chan) &&
639 				__l2cap_no_conn_pending(chan)) {
640 			struct l2cap_conn_req req;
641 			req.scid = cpu_to_le16(chan->scid);
644 			chan->ident = l2cap_get_ident(conn);
645 			set_bit(CONF_CONNECT_PEND, &chan->conf_state);
647 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
651 		struct l2cap_info_req req;
652 		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
654 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
655 		conn->info_ident = l2cap_get_ident(conn);
657 		mod_timer(&conn->info_timer, jiffies +
658 					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
660 		l2cap_send_cmd(conn, conn->info_ident,
661 					L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether @mode (ERTM/streaming) is supported by both the local
 * feature mask and the remote @feat_mask; other modes fall through to
 * a dropped default case. */
665 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
667 	u32 local_feat_mask = l2cap_feat_mask;
669 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
672 	case L2CAP_MODE_ERTM:
673 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
674 	case L2CAP_MODE_STREAMING:
675 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP Disconnect Request for @chan, first quiescing all
 * ERTM timers, then move the channel to BT_DISCONN (the socket error
 * assignment from @err is on dropped lines). */
681 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
684 	struct l2cap_disconn_req req;
691 	if (chan->mode == L2CAP_MODE_ERTM) {
692 		__clear_retrans_timer(chan);
693 		__clear_monitor_timer(chan);
694 		__clear_ack_timer(chan);
697 	req.dcid = cpu_to_le16(chan->dcid);
698 	req.scid = cpu_to_le16(chan->scid);
699 	l2cap_send_cmd(conn, l2cap_get_ident(conn),
700 			L2CAP_DISCONN_REQ, sizeof(req), &req);
702 	l2cap_state_change(chan, BT_DISCONN);
706 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and advance its setup:
 * BT_CONNECT channels (re)send a Connect Request — or are closed when
 * their mode is unsupported and CONF_STATE2_DEVICE is set; BT_CONNECT2
 * channels answer the peer's pending Connect Request (PEND when
 * deferred or security is still in progress, SUCCESS otherwise) and,
 * on success, start configuration.  Runs with chan_lock read-held,
 * temporarily dropped around l2cap_chan_close(). */
707 static void l2cap_conn_start(struct l2cap_conn *conn)
709 	struct l2cap_chan *chan, *tmp;
711 	BT_DBG("conn %p", conn);
713 	read_lock(&conn->chan_lock);
715 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
716 		struct sock *sk = chan->sk;
720 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
725 		if (chan->state == BT_CONNECT) {
726 			struct l2cap_conn_req req;
728 			if (!l2cap_check_security(chan) ||
729 					!__l2cap_no_conn_pending(chan)) {
734 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
735 					&& test_bit(CONF_STATE2_DEVICE,
736 					&chan->conf_state)) {
737 				/* l2cap_chan_close() calls list_del(chan)
738 				 * so release the lock */
739 				read_unlock(&conn->chan_lock);
740 				l2cap_chan_close(chan, ECONNRESET);
741 				read_lock(&conn->chan_lock);
746 			req.scid = cpu_to_le16(chan->scid);
749 			chan->ident = l2cap_get_ident(conn);
750 			set_bit(CONF_CONNECT_PEND, &chan->conf_state);
752 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
755 		} else if (chan->state == BT_CONNECT2) {
756 			struct l2cap_conn_rsp rsp;
758 			rsp.scid = cpu_to_le16(chan->dcid);
759 			rsp.dcid = cpu_to_le16(chan->scid);
761 			if (l2cap_check_security(chan)) {
762 				if (bt_sk(sk)->defer_setup) {
763 					struct sock *parent = bt_sk(sk)->parent;
764 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
765 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
767 						parent->sk_data_ready(parent, 0);
770 					l2cap_state_change(chan, BT_CONFIG);
771 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
772 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
775 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
776 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
779 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
	/* Only send the first Config Request once, and only on success. */
782 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
783 					rsp.result != L2CAP_CR_SUCCESS) {
788 			set_bit(CONF_REQ_SENT, &chan->conf_state);
789 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
790 						l2cap_build_conf_req(chan, buf), buf);
791 			chan->num_conf_req++;
797 	read_unlock(&conn->chan_lock);
800 /* Find socket with cid and source bdaddr.
801  * Returns closest match, locked.
/* Exact source-address match wins immediately; a BDADDR_ANY binding
 * is remembered (into c1, on a dropped line) as the closest fallback. */
803 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
805 	struct l2cap_chan *c, *c1 = NULL;
807 	read_lock(&chan_list_lock);
809 	list_for_each_entry(c, &chan_list, global_l) {
810 		struct sock *sk = c->sk;
812 		if (state && c->state != state)
815 		if (c->scid == cid) {
817 			if (!bacmp(&bt_sk(sk)->src, src)) {
818 				read_unlock(&chan_list_lock);
823 			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
828 	read_unlock(&chan_list_lock);
/* Handle an incoming LE connection: find a listener on the LE data
 * CID, spawn a child channel via the listener's new_connection op,
 * copy addresses, queue it on the parent's accept queue, attach it to
 * the connection and mark it connected. */
833 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
835 	struct sock *parent, *sk;
836 	struct l2cap_chan *chan, *pchan;
840 	/* Check if we have socket listening on cid */
841 	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
848 	bh_lock_sock(parent);
850 	/* Check for backlog size */
851 	if (sk_acceptq_is_full(parent)) {
852 		BT_DBG("backlog full %d", parent->sk_ack_backlog);
856 	chan = pchan->ops->new_connection(pchan->data);
862 	write_lock_bh(&conn->chan_lock);
864 	hci_conn_hold(conn->hcon);
865 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
867 	bacpy(&bt_sk(sk)->src, conn->src);
868 	bacpy(&bt_sk(sk)->dst, conn->dst);
870 	bt_accept_enqueue(parent, sk);
872 	__l2cap_chan_add(conn, chan);
874 	__set_chan_timer(chan, sk->sk_sndtimeo);
876 	l2cap_state_change(chan, BT_CONNECTED);
877 	parent->sk_data_ready(parent, 0);
879 	write_unlock_bh(&conn->chan_lock);
882 	bh_unlock_sock(parent);
/* Finalise channel setup: clear configuration state and the channel
 * timer, mark connected, and wake the owner (and accepting parent,
 * if any). */
885 static void l2cap_chan_ready(struct sock *sk)
887 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
888 	struct sock *parent = bt_sk(sk)->parent;
890 	BT_DBG("sk %p, parent %p", sk, parent);
892 	chan->conf_state = 0;
893 	__clear_chan_timer(chan);
895 	l2cap_state_change(chan, BT_CONNECTED);
896 	sk->sk_state_change(sk);
899 		parent->sk_data_ready(parent, 0);
/* The underlying HCI link is up: accept incoming LE connections,
 * trigger SMP security on outgoing LE links, then per channel either
 * complete security (LE), mark non-connection-oriented channels
 * connected directly, or start the L2CAP connect sequence. */
902 static void l2cap_conn_ready(struct l2cap_conn *conn)
904 	struct l2cap_chan *chan;
905 	struct hci_conn *hcon = conn->hcon;
907 	BT_DBG("conn %p", conn);
909 	if (!hcon->out && hcon->type == LE_LINK)
910 		l2cap_le_conn_ready(conn);
912 	if (hcon->out && hcon->type == LE_LINK)
913 		smp_conn_security(hcon, hcon->pending_sec_level);
915 	read_lock(&conn->chan_lock);
917 	list_for_each_entry(chan, &conn->chan_l, list) {
918 		struct sock *sk = chan->sk;
922 		if (hcon->type == LE_LINK) {
923 			if (smp_conn_security(hcon, chan->sec_level))
924 				l2cap_chan_ready(sk);
926 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
927 			__clear_chan_timer(chan);
928 			l2cap_state_change(chan, BT_CONNECTED);
929 			sk->sk_state_change(sk);
931 		} else if (chan->state == BT_CONNECT)
932 			l2cap_do_start(chan);
937 	read_unlock(&conn->chan_lock);
940 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate @err to every channel marked force_reliable (the
 * sk_err assignment is on a dropped line). */
941 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
943 	struct l2cap_chan *chan;
945 	BT_DBG("conn %p", conn);
947 	read_lock(&conn->chan_lock);
949 	list_for_each_entry(chan, &conn->chan_l, list) {
950 		struct sock *sk = chan->sk;
952 		if (chan->force_reliable)
956 	read_unlock(&conn->chan_lock);
/* Info-request timer expired: give up on the feature-mask exchange,
 * mark it done and proceed with connection setup anyway. */
959 static void l2cap_info_timeout(unsigned long arg)
961 	struct l2cap_conn *conn = (void *) arg;
963 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
964 	conn->info_ident = 0;
966 	l2cap_conn_start(conn);
/* Tear down an L2CAP connection: free any partial reassembly skb,
 * delete and close every channel, stop the info timer if it was
 * armed, destroy pending SMP state, and detach from the hci_conn
 * (the final kfree(conn) is on a dropped line). */
969 static void l2cap_conn_del(struct hci_conn *hcon, int err)
971 	struct l2cap_conn *conn = hcon->l2cap_data;
972 	struct l2cap_chan *chan, *l;
978 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
980 	kfree_skb(conn->rx_skb);
983 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
986 		l2cap_chan_del(chan, err);
988 		chan->ops->close(chan->data);
991 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
992 		del_timer_sync(&conn->info_timer);
994 	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
995 		del_timer(&conn->security_timer);
996 		smp_chan_destroy(conn);
999 	hcon->l2cap_data = NULL;
/* LE security (SMP) timer expired: drop the whole connection. */
1003 static void security_timeout(unsigned long arg)
1005 	struct l2cap_conn *conn = (void *) arg;
1007 	l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Get-or-create the l2cap_conn for @hcon: allocate and initialise it
 * (MTU from the controller — LE MTU on LE links when set —, address
 * pointers, locks, channel list) and arm the security timer (LE) or
 * info timer (BR/EDR).  Returns the existing conn early if one is
 * already attached (check on a dropped line).  0x13 = default
 * "remote user terminated" disconnect reason. */
1010 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1012 	struct l2cap_conn *conn = hcon->l2cap_data;
1017 	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1021 	hcon->l2cap_data = conn;
1024 	BT_DBG("hcon %p conn %p", hcon, conn);
1026 	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1027 		conn->mtu = hcon->hdev->le_mtu;
1029 		conn->mtu = hcon->hdev->acl_mtu;
1031 	conn->src = &hcon->hdev->bdaddr;
1032 	conn->dst = &hcon->dst;
1034 	conn->feat_mask = 0;
1036 	spin_lock_init(&conn->lock);
1037 	rwlock_init(&conn->chan_lock);
1039 	INIT_LIST_HEAD(&conn->chan_l);
1041 	if (hcon->type == LE_LINK)
1042 		setup_timer(&conn->security_timer, security_timeout,
1043 						(unsigned long) conn);
1045 		setup_timer(&conn->info_timer, l2cap_info_timeout,
1046 						(unsigned long) conn);
1048 	conn->disc_reason = 0x13;
/* Locked wrapper around __l2cap_chan_add(). */
1053 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1055 	write_lock_bh(&conn->chan_lock);
1056 	__l2cap_chan_add(conn, chan);
1057 	write_unlock_bh(&conn->chan_lock);
1060 /* ---- Socket interface ---- */
1062 /* Find socket with psm and source bdaddr.
1063  * Returns closest match.
/* Exact source-address match wins immediately; a BDADDR_ANY binding
 * is remembered (into c1, on a dropped line) as the closest fallback. */
1065 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1067 	struct l2cap_chan *c, *c1 = NULL;
1069 	read_lock(&chan_list_lock);
1071 	list_for_each_entry(c, &chan_list, global_l) {
1072 		struct sock *sk = c->sk;
1074 		if (state && c->state != state)
1077 		if (c->psm == psm) {
1079 			if (!bacmp(&bt_sk(sk)->src, src)) {
1080 				read_unlock(&chan_list_lock);
1085 			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1090 	read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP connection for @chan: route to a local
 * adapter, create the HCI link (LE when the target is the LE data
 * CID, ACL otherwise), attach the channel, and either finish setup
 * immediately if the link is already up or wait for conn_ready.
 * Returns 0 or a negative errno (via dropped error paths). */
1095 int l2cap_chan_connect(struct l2cap_chan *chan)
1097 	struct sock *sk = chan->sk;
1098 	bdaddr_t *src = &bt_sk(sk)->src;
1099 	bdaddr_t *dst = &bt_sk(sk)->dst;
1100 	struct l2cap_conn *conn;
1101 	struct hci_conn *hcon;
1102 	struct hci_dev *hdev;
1106 	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1109 	hdev = hci_get_route(dst, src);
1111 		return -EHOSTUNREACH;
1113 	hci_dev_lock_bh(hdev);
1115 	auth_type = l2cap_get_auth_type(chan);
1117 	if (chan->dcid == L2CAP_CID_LE_DATA)
1118 		hcon = hci_connect(hdev, LE_LINK, dst,
1119 					chan->sec_level, auth_type);
1121 		hcon = hci_connect(hdev, ACL_LINK, dst,
1122 					chan->sec_level, auth_type);
1125 		err = PTR_ERR(hcon);
1129 	conn = l2cap_conn_add(hcon, 0);
1136 	/* Update source addr of the socket */
1137 	bacpy(src, conn->src);
1139 	l2cap_chan_add(conn, chan);
1141 	l2cap_state_change(chan, BT_CONNECT);
1142 	__set_chan_timer(chan, sk->sk_sndtimeo);
1144 	if (hcon->state == BT_CONNECTED) {
1145 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1146 			__clear_chan_timer(chan);
1147 			if (l2cap_check_security(chan))
1148 				l2cap_state_change(chan, BT_CONNECTED);
1150 			l2cap_do_start(chan);
1156 	hci_dev_unlock_bh(hdev);
/* Sleep (interruptibly) until every ERTM frame has been acked or the
 * connection goes away; returns 0, a signal errno, or the socket
 * error.  Caller holds the socket lock; it is released around the
 * schedule on dropped lines. */
1161 int __l2cap_wait_ack(struct sock *sk)
1163 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1164 	DECLARE_WAITQUEUE(wait, current);
1168 	add_wait_queue(sk_sleep(sk), &wait);
1169 	set_current_state(TASK_INTERRUPTIBLE);
1170 	while (chan->unacked_frames > 0 && chan->conn) {
1174 		if (signal_pending(current)) {
1175 			err = sock_intr_errno(timeo);
1180 		timeo = schedule_timeout(timeo);
1182 		set_current_state(TASK_INTERRUPTIBLE);
1184 		err = sock_error(sk);
1188 	set_current_state(TASK_RUNNING);
1189 	remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: if the peer has ignored remote_max_tx polls,
 * abort the channel; otherwise re-poll with an RR/RNR carrying the
 * P bit and re-arm the monitor timer. */
1193 static void l2cap_monitor_timeout(unsigned long arg)
1195 	struct l2cap_chan *chan = (void *) arg;
1196 	struct sock *sk = chan->sk;
1198 	BT_DBG("chan %p", chan);
1201 	if (chan->retry_count >= chan->remote_max_tx) {
1202 		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1207 	chan->retry_count++;
1208 	__set_monitor_timer(chan);
1210 	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer: start the poll/monitor sequence —
 * retry_count reset to 1, monitor timer armed, WAIT_F set, and an
 * RR/RNR with the P bit sent. */
1214 static void l2cap_retrans_timeout(unsigned long arg)
1216 	struct l2cap_chan *chan = (void *) arg;
1217 	struct sock *sk = chan->sk;
1219 	BT_DBG("chan %p", chan);
1222 	chan->retry_count = 1;
1223 	__set_monitor_timer(chan);
1225 	set_bit(CONN_WAIT_F, &chan->conn_state);
1227 	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Pop acknowledged frames off the head of tx_q: stop at the frame
 * whose tx_seq equals expected_ack_seq, free the rest (kfree_skb on
 * a dropped line), and clear the retransmission timer when nothing
 * is left unacked. */
1231 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1233 	struct sk_buff *skb;
1235 	while ((skb = skb_peek(&chan->tx_q)) &&
1236 			chan->unacked_frames) {
1237 		if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1240 		skb = skb_dequeue(&chan->tx_q);
1243 		chan->unacked_frames--;
1246 	if (!chan->unacked_frames)
1247 		__clear_retrans_timer(chan);
/* Hand a fully-built L2CAP data frame to HCI, choosing the no-flush
 * start flag for non-flushable channels on capable controllers. */
1250 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1252 	struct hci_conn *hcon = chan->conn->hcon;
1255 	BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1257 	if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1258 		flags = ACL_START_NO_FLUSH;
1262 	bt_cb(skb)->force_active = chan->force_active;
1263 	hci_send_acl(hcon, skb, flags);
/* Streaming mode transmit: drain tx_q, stamping each frame's control
 * field with the next TxSeq (modulo-64 sequence space) and appending
 * the CRC16 FCS over all bytes but the 2-byte FCS field itself. */
1266 static void l2cap_streaming_send(struct l2cap_chan *chan)
1268 	struct sk_buff *skb;
1271 	while ((skb = skb_dequeue(&chan->tx_q))) {
1272 		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1273 		control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1274 		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1276 		if (chan->fcs == L2CAP_FCS_CRC16) {
1277 			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1278 			put_unaligned_le16(fcs, skb->data + skb->len - 2);
1281 		l2cap_do_send(chan, skb);
1283 		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* Retransmit the single queued I-frame with TxSeq @tx_seq (SREJ
 * recovery).  Walks tx_q for the matching frame, aborts the channel
 * if it has already hit remote_max_tx retries, then clones it,
 * rebuilds the control field (keeping only the SAR bits, adding the
 * current ReqSeq, TxSeq, and F bit if pending) and recomputes FCS. */
1287 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1289 	struct sk_buff *skb, *tx_skb;
1292 	skb = skb_peek(&chan->tx_q);
1297 		if (bt_cb(skb)->tx_seq == tx_seq)
1300 		if (skb_queue_is_last(&chan->tx_q, skb))
1303 	} while ((skb = skb_queue_next(&chan->tx_q, skb)));
1305 	if (chan->remote_max_tx &&
1306 			bt_cb(skb)->retries == chan->remote_max_tx) {
1307 		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1311 	tx_skb = skb_clone(skb, GFP_ATOMIC);
1312 	bt_cb(skb)->retries++;
1313 	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1314 	control &= L2CAP_CTRL_SAR;
1316 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1317 		control |= L2CAP_CTRL_FINAL;
1319 	control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1320 			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1322 	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1324 	if (chan->fcs == L2CAP_FCS_CRC16) {
1325 		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1326 		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1329 	l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send frames from tx_send_head while the
 * transmit window has room.  Each frame is cloned (the original stays
 * queued for retransmission), its control field rebuilt with the
 * current ReqSeq/TxSeq and pending F bit, FCS recomputed, and the
 * retransmission timer armed.  Aborts the channel when a frame has
 * already been retried remote_max_tx times.  Returns the number of
 * frames sent (via dropped lines).
 * NOTE(review): the FCS here is written through skb->data while
 * l2cap_retransmit_one_frame() uses tx_skb->data — equivalent in
 * practice because skb_clone() shares the data buffer, but confirm
 * against the original file before "fixing" the inconsistency. */
1332 static int l2cap_ertm_send(struct l2cap_chan *chan)
1334 	struct sk_buff *skb, *tx_skb;
1338 	if (chan->state != BT_CONNECTED)
1341 	while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1343 		if (chan->remote_max_tx &&
1344 				bt_cb(skb)->retries == chan->remote_max_tx) {
1345 			l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1349 		tx_skb = skb_clone(skb, GFP_ATOMIC);
1351 		bt_cb(skb)->retries++;
1353 		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1354 		control &= L2CAP_CTRL_SAR;
1356 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1357 			control |= L2CAP_CTRL_FINAL;
1359 		control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1360 				| (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1361 		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1364 		if (chan->fcs == L2CAP_FCS_CRC16) {
1365 			fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1366 			put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1369 		l2cap_do_send(chan, tx_skb);
1371 		__set_retrans_timer(chan);
1373 		bt_cb(skb)->tx_seq = chan->next_tx_seq;
1374 		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
	/* Count each SDU fragment as unacked only on its first send. */
1376 		if (bt_cb(skb)->retries == 1)
1377 			chan->unacked_frames++;
1379 		chan->frames_sent++;
1381 		if (skb_queue_is_last(&chan->tx_q, skb))
1382 			chan->tx_send_head = NULL;
1384 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the start of tx_q and the sequence
 * number to the last acked frame, then retransmit via
 * l2cap_ertm_send() (REJ recovery). */
1392 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1396 	if (!skb_queue_empty(&chan->tx_q))
1397 		chan->tx_send_head = chan->tx_q.next;
1399 	chan->next_tx_seq = chan->expected_ack_seq;
1400 	ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR when locally busy; otherwise
 * try to piggyback the ack on pending I-frames via l2cap_ertm_send(),
 * and only fall back to an explicit RR S-frame when nothing was sent. */
1404 static void l2cap_send_ack(struct l2cap_chan *chan)
1408 	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1410 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1411 		control |= L2CAP_SUPER_RCV_NOT_READY;
1412 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1413 		l2cap_send_sframe(chan, control);
1417 	if (l2cap_ertm_send(chan) > 0)
1420 	control |= L2CAP_SUPER_RCV_READY;
1421 	l2cap_send_sframe(chan, control);
/* Send a Select-Reject S-frame with the F bit set, requesting the
 * sequence number of the last (tail) entry on the SREJ list. */
1424 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1426 	struct srej_list *tail;
1429 	control = L2CAP_SUPER_SELECT_REJECT;
1430 	control |= L2CAP_CTRL_FINAL;
1432 	tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1433 	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1435 	l2cap_send_sframe(chan, control);
/* Copy @len bytes of user iovec data into @skb: the first @count
 * bytes into the head buffer, the remainder into a chain of
 * continuation fragments (each at most conn->mtu), linked through
 * frag_list.  Returns 0 or a negative errno (error returns are on
 * dropped lines). */
1438 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1440 	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1441 	struct sk_buff **frag;
1444 	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1450 	/* Continuation fragments (no L2CAP header) */
1451 	frag = &skb_shinfo(skb)->frag_list;
1453 		count = min_t(unsigned int, conn->mtu, len);
1455 		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1458 		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1464 		frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU from user data: L2CAP header
 * plus 2-byte PSM, then the payload copied via
 * l2cap_skbuff_fromiovec().  Returns the skb or ERR_PTR. */
1470 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1472 	struct sock *sk = chan->sk;
1473 	struct l2cap_conn *conn = chan->conn;
1474 	struct sk_buff *skb;
1475 	int err, count, hlen = L2CAP_HDR_SIZE + 2;
1476 	struct l2cap_hdr *lh;
1478 	BT_DBG("sk %p len %d", sk, (int)len);
1480 	count = min_t(unsigned int, (conn->mtu - hlen), len);
1481 	skb = bt_skb_send_alloc(sk, count + hlen,
1482 			msg->msg_flags & MSG_DONTWAIT, &err);
1484 		return ERR_PTR(err);
1486 	/* Create L2CAP header */
1487 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1488 	lh->cid = cpu_to_le16(chan->dcid);
1489 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1490 	put_unaligned_le16(chan->psm, skb_put(skb, 2));
1492 	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1493 	if (unlikely(err < 0)) {
1495 		return ERR_PTR(err);
/* Build a basic-mode B-frame PDU: bare L2CAP header followed by the
 * user payload.  Returns the skb or ERR_PTR. */
1500 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1502 	struct sock *sk = chan->sk;
1503 	struct l2cap_conn *conn = chan->conn;
1504 	struct sk_buff *skb;
1505 	int err, count, hlen = L2CAP_HDR_SIZE;
1506 	struct l2cap_hdr *lh;
1508 	BT_DBG("sk %p len %d", sk, (int)len);
1510 	count = min_t(unsigned int, (conn->mtu - hlen), len);
1511 	skb = bt_skb_send_alloc(sk, count + hlen,
1512 			msg->msg_flags & MSG_DONTWAIT, &err);
1514 		return ERR_PTR(err);
1516 	/* Create L2CAP header */
1517 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1518 	lh->cid = cpu_to_le16(chan->dcid);
1519 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1521 	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1522 	if (unlikely(err < 0)) {
1524 		return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 2-byte control
 * field, optional 2-byte SDU length (when @sdulen — first segment of
 * a segmented SDU), the payload, and a 2-byte FCS placeholder when
 * CRC16 is enabled (filled in at transmit time).  Requires an
 * attached connection; hlen adjustments for sdulen/FCS are on
 * dropped lines.  Returns the skb or ERR_PTR. */
1529 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1530 						struct msghdr *msg, size_t len,
1531 						u16 control, u16 sdulen)
1533 	struct sock *sk = chan->sk;
1534 	struct l2cap_conn *conn = chan->conn;
1535 	struct sk_buff *skb;
1536 	int err, count, hlen = L2CAP_HDR_SIZE + 2;
1537 	struct l2cap_hdr *lh;
1539 	BT_DBG("sk %p len %d", sk, (int)len);
1542 		return ERR_PTR(-ENOTCONN);
1547 	if (chan->fcs == L2CAP_FCS_CRC16)
1550 	count = min_t(unsigned int, (conn->mtu - hlen), len);
1551 	skb = bt_skb_send_alloc(sk, count + hlen,
1552 			msg->msg_flags & MSG_DONTWAIT, &err);
1554 		return ERR_PTR(err);
1556 	/* Create L2CAP header */
1557 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1558 	lh->cid = cpu_to_le16(chan->dcid);
1559 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1560 	put_unaligned_le16(control, skb_put(skb, 2));
1562 		put_unaligned_le16(sdulen, skb_put(skb, 2));
1564 	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1565 	if (unlikely(err < 0)) {
1567 		return ERR_PTR(err);
1570 	if (chan->fcs == L2CAP_FCS_CRC16)
1571 		put_unaligned_le16(0, skb_put(skb, 2));
1573 	bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than the remote MPS into a START / CONTINUE* / END
 * sequence of I-frames. Frames are staged on a local queue so a mid-stream
 * allocation failure can discard the whole partial SDU; on success the
 * queue is spliced onto chan->tx_q.
 * NOTE(review): excerpted listing — the loop construct around the
 * CONTINUE/END frames and several declarations (control, size, buflen)
 * are among the original lines not shown here.
 */
1577 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1579 struct sk_buff *skb;
1580 struct sk_buff_head sar_queue;
1584 skb_queue_head_init(&sar_queue);
/* START frame carries the total SDU length in its sdulen field */
1585 control = L2CAP_SDU_START;
1586 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1588 return PTR_ERR(skb);
1590 __skb_queue_tail(&sar_queue, skb);
1591 len -= chan->remote_mps;
1592 size += chan->remote_mps;
/* More than one MPS left -> CONTINUE, otherwise this is the END frame */
1597 if (len > chan->remote_mps) {
1598 control = L2CAP_SDU_CONTINUE;
1599 buflen = chan->remote_mps;
1601 control = L2CAP_SDU_END;
/* Non-start frames carry no SDU length (sdulen == 0) */
1605 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* Drop every frame queued so far; never transmit a partial SDU */
1607 skb_queue_purge(&sar_queue);
1608 return PTR_ERR(skb);
1611 __skb_queue_tail(&sar_queue, skb);
/* Commit the whole SDU to the channel transmit queue atomically */
1615 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1616 if (chan->tx_send_head == NULL)
1617 chan->tx_send_head = sar_queue.next;
/*
 * Entry point for sending user data on a channel. Dispatches on channel
 * type/mode: connectionless and basic-mode data is built and sent
 * immediately; ERTM/streaming data is queued (segmented if it exceeds the
 * remote MPS) and then pushed by the mode-specific sender.
 * NOTE(review): excerpted listing — returns, break statements, and the
 * EMSGSIZE path for oversized basic-mode SDUs are among the original
 * lines not shown here.
 */
1622 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1624 struct sk_buff *skb;
1628 /* Connectionless channel */
1629 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1630 skb = l2cap_create_connless_pdu(chan, msg, len);
1632 return PTR_ERR(skb);
1634 l2cap_do_send(chan, skb);
1638 switch (chan->mode) {
1639 case L2CAP_MODE_BASIC:
1640 /* Check outgoing MTU */
1641 if (len > chan->omtu)
1644 /* Create a basic PDU */
1645 skb = l2cap_create_basic_pdu(chan, msg, len);
1647 return PTR_ERR(skb);
1649 l2cap_do_send(chan, skb);
1653 case L2CAP_MODE_ERTM:
1654 case L2CAP_MODE_STREAMING:
1655 /* Entire SDU fits into one PDU */
1656 if (len <= chan->remote_mps) {
1657 control = L2CAP_SDU_UNSEGMENTED;
1658 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1661 return PTR_ERR(skb);
1663 __skb_queue_tail(&chan->tx_q, skb);
/* Track the first unsent frame for the (re)transmit machinery */
1665 if (chan->tx_send_head == NULL)
1666 chan->tx_send_head = skb;
1669 /* Segment SDU into multiples PDUs */
1670 err = l2cap_sar_segment_sdu(chan, msg, len);
/* Streaming mode has no acks: flush the queue right away */
1675 if (chan->mode == L2CAP_MODE_STREAMING) {
1676 l2cap_streaming_send(chan);
/* While remote is busy or we wait for an F-bit, defer transmission */
1681 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1682 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1687 err = l2cap_ertm_send(chan);
1694 BT_DBG("bad state %1.1x", chan->mode);
1701 /* Copy frame to all raw sockets on that connection */
/*
 * Clone the incoming skb to every raw-type channel on this connection
 * (except the originator). Runs under the connection channel-list read
 * lock; clones use GFP_ATOMIC accordingly.
 * NOTE(review): excerpted listing — the continue statements and the
 * kfree_skb for a rejected clone are among the original lines not shown.
 */
1702 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1704 struct sk_buff *nskb;
1705 struct l2cap_chan *chan;
1707 BT_DBG("conn %p", conn);
1709 read_lock(&conn->chan_lock);
1710 list_for_each_entry(chan, &conn->chan_l, list) {
1711 struct sock *sk = chan->sk;
/* Only raw channels receive copies of signalling traffic */
1712 if (chan->chan_type != L2CAP_CHAN_RAW)
1715 /* Don't send frame to the socket it came from */
1718 nskb = skb_clone(skb, GFP_ATOMIC);
/* Hand the clone to the channel; it is dropped if recv() refuses it */
1722 if (chan->ops->recv(chan->data, nskb))
1725 read_unlock(&conn->chan_lock);
1728 /* ---- L2CAP signalling commands ---- */
/*
 * Build a signalling-channel PDU: L2CAP header (CID chosen by link type),
 * command header, then @dlen bytes of @data, fragmented over the HCI MTU
 * into frag_list continuation skbs when needed. Returns NULL-ish on
 * failure in the original (failure paths not visible in this excerpt).
 * NOTE(review): excerpted listing — the fail/unwind labels, the len/data
 * bookkeeping between fragments, and the final return are not shown.
 */
1729 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1730 u8 code, u8 ident, u16 dlen, void *data)
1732 struct sk_buff *skb, **frag;
1733 struct l2cap_cmd_hdr *cmd;
1734 struct l2cap_hdr *lh;
1737 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1738 conn, code, ident, dlen);
/* MTU must at least fit the two fixed headers */
1740 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
1743 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1744 count = min_t(unsigned int, conn->mtu, len);
1746 skb = bt_skb_alloc(count, GFP_ATOMIC);
1750 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1751 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use the LE signalling CID, BR/EDR the classic one */
1753 if (conn->hcon->type == LE_LINK)
1754 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1756 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1758 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1761 cmd->len = cpu_to_le16(dlen);
/* First fragment: whatever payload fits after the two headers */
1764 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1765 memcpy(skb_put(skb, count), data, count);
1771 /* Continuation fragments (no L2CAP header) */
1772 frag = &skb_shinfo(skb)->frag_list;
1774 count = min_t(unsigned int, conn->mtu, len);
1776 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1780 memcpy(skb_put(*frag, count), data, count);
1785 frag = &(*frag)->next;
/*
 * Decode one configuration option at *ptr. Stores the option type/length
 * and its value widened into *val (1/2/4-byte options by value, anything
 * else as a pointer into the buffer). Returns the total bytes consumed
 * (header + payload) so callers can walk the option list.
 * NOTE(review): excerpted listing — the *type/*olen assignments, the
 * switch construct, and the ptr advance are not shown here.
 */
1795 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1797 struct l2cap_conf_opt *opt = *ptr;
1800 len = L2CAP_CONF_OPT_SIZE + opt->len;
1808 *val = *((u8 *) opt->val);
1812 *val = get_unaligned_le16(opt->val);
1816 *val = get_unaligned_le32(opt->val);
/* Larger options are returned by reference, not copied */
1820 *val = (unsigned long) opt->val;
1824 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option at *ptr and advance *ptr past it.
 * 1/2/4-byte values are stored inline (little-endian, unaligned-safe);
 * larger values are memcpy'd from the buffer @val points at.
 * NOTE(review): excerpted listing — the opt->type/opt->len assignments
 * and the switch construct are not shown here.
 */
1828 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1830 struct l2cap_conf_opt *opt = *ptr;
1832 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val)"" /* see original */;
1839 *((u8 *) opt->val) = val;
1843 put_unaligned_le16(val, opt->val);
1847 put_unaligned_le32(val, opt->val);
/* For len > 4, val is actually a pointer to the option payload */
1851 memcpy(opt->val, (void *) val, len);
1855 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * ERTM ack timer callback: when the timer set by l2cap_ertm_init()
 * expires, send any pending acknowledgement for received I-frames.
 * The channel pointer is smuggled through the timer's unsigned long arg;
 * the socket lock (bh variant, timer runs in softirq context) guards
 * the channel state while the ack is sent.
 */
1858 static void l2cap_ack_timeout(unsigned long arg)
1860 struct l2cap_chan *chan = (void *) arg;
1862 bh_lock_sock(chan->sk);
1863 l2cap_send_ack(chan);
1864 bh_unlock_sock(chan->sk);
/*
 * Reset a channel's ERTM sequencing state and install the three ERTM
 * timers (retransmission, monitor, ack) plus the SREJ queue/list used
 * for selective-reject recovery. Also points the socket's backlog
 * receive hook at the ERTM data path.
 */
1867 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1869 struct sock *sk = chan->sk;
/* Start with a clean sequence-number window */
1871 chan->expected_ack_seq = 0;
1872 chan->unacked_frames = 0;
1873 chan->buffer_seq = 0;
1874 chan->num_acked = 0;
1875 chan->frames_sent = 0;
/* Each timer gets the channel back through its unsigned long argument */
1877 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1878 (unsigned long) chan);
1879 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1880 (unsigned long) chan);
1881 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
/* Out-of-order frames held here until the SREJ'd frame arrives */
1883 skb_queue_head_init(&chan->srej_q);
1885 INIT_LIST_HEAD(&chan->srej_l);
1888 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/*
 * Pick the channel mode to actually use: keep ERTM/streaming only when
 * the remote's feature mask advertises support for it, otherwise fall
 * back to basic mode.
 * NOTE(review): excerpted listing — the switch statement and the
 * "return mode" on the supported path are not shown here.
 */
1891 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1894 case L2CAP_MODE_STREAMING:
1895 case L2CAP_MODE_ERTM:
1896 if (l2cap_mode_supported(mode, remote_feat_mask))
1900 return L2CAP_MODE_BASIC;
/*
 * Build an outgoing Configure Request into @data: optionally an MTU
 * option (when imtu differs from the default) and, per mode, an RFC
 * option and possibly an FCS option. For the first request, the mode may
 * be downgraded via l2cap_select_mode() unless the device pinned it
 * (CONF_STATE2_DEVICE). Returns the request length (ptr - data in the
 * original; tail not visible in this excerpt).
 * NOTE(review): excerpted listing — break statements, the "done:" style
 * flow between the mode checks, and the final return are not shown.
 */
1904 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1906 struct l2cap_conf_req *req = data;
1907 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1908 void *ptr = req->data;
1910 BT_DBG("chan %p", chan);
/* Only the very first request may renegotiate the mode */
1912 if (chan->num_conf_req || chan->num_conf_rsp)
1915 switch (chan->mode) {
1916 case L2CAP_MODE_STREAMING:
1917 case L2CAP_MODE_ERTM:
/* Mode was fixed by the device/socket option: don't downgrade it */
1918 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
1923 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Omit the MTU option when we are happy with the spec default */
1928 if (chan->imtu != L2CAP_DEFAULT_MTU)
1929 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1931 switch (chan->mode) {
1932 case L2CAP_MODE_BASIC:
/* If the peer knows neither ERTM nor streaming, no RFC option needed */
1933 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1934 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1937 rfc.mode = L2CAP_MODE_BASIC;
1939 rfc.max_transmit = 0;
1940 rfc.retrans_timeout = 0;
1941 rfc.monitor_timeout = 0;
1942 rfc.max_pdu_size = 0;
1944 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1945 (unsigned long) &rfc);
1948 case L2CAP_MODE_ERTM:
1949 rfc.mode = L2CAP_MODE_ERTM;
1950 rfc.txwin_size = chan->tx_win;
1951 rfc.max_transmit = chan->max_tx;
1952 rfc.retrans_timeout = 0;
1953 rfc.monitor_timeout = 0;
1954 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Clamp MPS so a full PDU (incl. ~10 bytes of overhead) fits the HCI MTU */
1955 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1956 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1958 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1959 (unsigned long) &rfc);
1961 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Request "no FCS" only if we want it off and the peer allowed it */
1964 if (chan->fcs == L2CAP_FCS_NONE ||
1965 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1966 chan->fcs = L2CAP_FCS_NONE;
1967 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1971 case L2CAP_MODE_STREAMING:
1972 rfc.mode = L2CAP_MODE_STREAMING;
1974 rfc.max_transmit = 0;
1975 rfc.retrans_timeout = 0;
1976 rfc.monitor_timeout = 0;
1977 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1978 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1979 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1981 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1982 (unsigned long) &rfc);
1984 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1987 if (chan->fcs == L2CAP_FCS_NONE ||
1988 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1989 chan->fcs = L2CAP_FCS_NONE;
1990 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1995 req->dcid = cpu_to_le16(chan->dcid);
1996 req->flags = cpu_to_le16(0);
/*
 * Parse the peer's accumulated Configure Request (chan->conf_req) and
 * build our Configure Response into @data. Walks the option list,
 * records MTU/flush-timeout/RFC/FCS, rejects unknown non-hint options,
 * resolves the mode (possibly refusing the connection), then fills in
 * the response options and result. Returns the response length
 * (ptr - data in the original; tail not visible here).
 * NOTE(review): excerpted listing — break statements, the hint check
 * before the UNKNOWN path, the "done:" label, and the omtu assignment
 * near the MTU handling are among the lines not shown.
 */
2001 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2003 struct l2cap_conf_rsp *rsp = data;
2004 void *ptr = rsp->data;
2005 void *req = chan->conf_req;
2006 int len = chan->conf_len;
2007 int type, hint, olen;
2009 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2010 u16 mtu = L2CAP_DEFAULT_MTU;
2011 u16 result = L2CAP_CONF_SUCCESS;
2013 BT_DBG("chan %p", chan);
2015 while (len >= L2CAP_CONF_OPT_SIZE) {
2016 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options (high bit set) may be ignored; others must be understood */
2018 hint = type & L2CAP_CONF_HINT;
2019 type &= L2CAP_CONF_MASK;
2022 case L2CAP_CONF_MTU:
2026 case L2CAP_CONF_FLUSH_TO:
2027 chan->flush_to = val;
2030 case L2CAP_CONF_QOS:
2033 case L2CAP_CONF_RFC:
/* Only copy a well-formed RFC option; keep defaults otherwise */
2034 if (olen == sizeof(rfc))
2035 memcpy(&rfc, (void *) val, olen);
2038 case L2CAP_CONF_FCS:
2039 if (val == L2CAP_FCS_NONE)
2040 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
/* Unknown non-hint option: echo its type back with CONF_UNKNOWN */
2048 result = L2CAP_CONF_UNKNOWN;
2049 *((u8 *) ptr++) = type;
/* Mode may only be renegotiated on the first request/response exchange */
2054 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2057 switch (chan->mode) {
2058 case L2CAP_MODE_STREAMING:
2059 case L2CAP_MODE_ERTM:
2060 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2061 chan->mode = l2cap_select_mode(rfc.mode,
2062 chan->conn->feat_mask);
/* Device-pinned mode must match what the peer proposed */
2066 if (chan->mode != rfc.mode)
2067 return -ECONNREFUSED;
2073 if (chan->mode != rfc.mode) {
2074 result = L2CAP_CONF_UNACCEPT;
2075 rfc.mode = chan->mode;
/* Second disagreement on mode: give up on the connection */
2077 if (chan->num_conf_rsp == 1)
2078 return -ECONNREFUSED;
2080 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2081 sizeof(rfc), (unsigned long) &rfc);
2085 if (result == L2CAP_CONF_SUCCESS) {
2086 /* Configure output options and let the other side know
2087 * which ones we don't like. */
2089 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2090 result = L2CAP_CONF_UNACCEPT;
2093 set_bit(CONF_MTU_DONE, &chan->conf_state);
2095 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2098 case L2CAP_MODE_BASIC:
/* Basic mode never uses an FCS */
2099 chan->fcs = L2CAP_FCS_NONE;
2100 set_bit(CONF_MODE_DONE, &chan->conf_state);
2103 case L2CAP_MODE_ERTM:
2104 chan->remote_tx_win = rfc.txwin_size;
2105 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's MPS to what fits our HCI MTU (minus overhead) */
2107 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2108 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2110 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* We dictate our own timeouts in the response, not the peer's */
2112 rfc.retrans_timeout =
2113 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2114 rfc.monitor_timeout =
2115 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2117 set_bit(CONF_MODE_DONE, &chan->conf_state);
2119 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2120 sizeof(rfc), (unsigned long) &rfc);
2124 case L2CAP_MODE_STREAMING:
2125 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2126 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2128 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2130 set_bit(CONF_MODE_DONE, &chan->conf_state);
2132 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2133 sizeof(rfc), (unsigned long) &rfc);
2138 result = L2CAP_CONF_UNACCEPT;
2140 memset(&rfc, 0, sizeof(rfc));
2141 rfc.mode = chan->mode;
2144 if (result == L2CAP_CONF_SUCCESS)
2145 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
/* Response is addressed by our dcid (the peer's source CID) */
2147 rsp->scid = cpu_to_le16(chan->dcid);
2148 rsp->result = cpu_to_le16(result);
2149 rsp->flags = cpu_to_le16(0x0000);
/*
 * Process the peer's Configure Response and build the follow-up
 * Configure Request into @data, adjusting our options (MTU, flush
 * timeout, RFC) to what the peer will accept. On success in ERTM or
 * streaming mode, latch the negotiated timeouts/MPS into the channel.
 * Returns the new request length (ptr - data in the original).
 * NOTE(review): excerpted listing — break statements, the inner
 * switch headers, and the final return are not shown here.
 */
2154 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2156 struct l2cap_conf_req *req = data;
2157 void *ptr = req->data;
2160 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2162 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2164 while (len >= L2CAP_CONF_OPT_SIZE) {
2165 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2168 case L2CAP_CONF_MTU:
/* Peer offered less than the legal minimum: flag and use the minimum */
2169 if (val < L2CAP_DEFAULT_MIN_MTU) {
2170 *result = L2CAP_CONF_UNACCEPT;
2171 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2174 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2177 case L2CAP_CONF_FLUSH_TO:
2178 chan->flush_to = val;
2179 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2183 case L2CAP_CONF_RFC:
2184 if (olen == sizeof(rfc))
2185 memcpy(&rfc, (void *)val, olen);
/* Device-pinned mode: refuse rather than accept a different one */
2187 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2188 rfc.mode != chan->mode)
2189 return -ECONNREFUSED;
2193 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2194 sizeof(rfc), (unsigned long) &rfc);
/* Once basic mode is settled, the peer cannot switch us away from it */
2199 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2200 return -ECONNREFUSED;
2202 chan->mode = rfc.mode;
2204 if (*result == L2CAP_CONF_SUCCESS) {
2206 case L2CAP_MODE_ERTM:
/* Adopt the timeouts/MPS the peer confirmed in its RFC option */
2207 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2208 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2209 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2211 case L2CAP_MODE_STREAMING:
2212 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2216 req->dcid = cpu_to_le16(chan->dcid);
2217 req->flags = cpu_to_le16(0x0000);
/*
 * Fill a minimal Configure Response (no options) with the given result
 * and continuation flags. Returns the response length (ptr - data in
 * the original; return line not visible in this excerpt).
 */
2222 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2224 struct l2cap_conf_rsp *rsp = data;
2225 void *ptr = rsp->data;
2227 BT_DBG("chan %p", chan);
/* scid field of the response carries our dcid (the peer's source CID) */
2229 rsp->scid = cpu_to_le16(chan->dcid);
2230 rsp->result = cpu_to_le16(result);
2231 rsp->flags = cpu_to_le16(flags);
/*
 * Complete a deferred connect: send the success Connect Response that
 * was held back (chan->ident remembers the request ident), then kick off
 * configuration by sending our first Configure Request — unless one was
 * already sent (CONF_REQ_SENT test-and-set guards against duplicates).
 */
2236 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2238 struct l2cap_conn_rsp rsp;
2239 struct l2cap_conn *conn = chan->conn;
2242 rsp.scid = cpu_to_le16(chan->dcid);
2243 rsp.dcid = cpu_to_le16(chan->scid);
2244 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2245 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Reply with the ident of the original Connect Request */
2246 l2cap_send_cmd(conn, chan->ident,
2247 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2249 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2252 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2253 l2cap_build_conf_req(chan, buf), buf);
2254 chan->num_conf_req++;
/*
 * Extract the RFC option from a successful Configure Response and latch
 * the negotiated ERTM/streaming parameters into the channel. If the
 * remote misbehaved and sent no RFC option at all, fall back to sane
 * defaults so the channel still works.
 * NOTE(review): excerpted listing — the early return for non-ERTM/
 * streaming modes, the "goto done"/"done:" flow, and the switch header
 * before the mode cases are not shown here.
 */
2257 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2261 struct l2cap_conf_rfc rfc;
2263 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming modes carry RFC parameters worth reading */
2265 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2268 while (len >= L2CAP_CONF_OPT_SIZE) {
2269 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2271 if (type != L2CAP_CONF_RFC)
/* Malformed RFC option: ignore it rather than copy garbage */
2274 if (olen != sizeof(rfc))
2277 memcpy(&rfc, (void *)val, olen);
2281 /* Use sane default values in case a misbehaving remote device
2282 * did not send an RFC option.
2284 rfc.mode = chan->mode;
2285 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2286 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2287 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2289 BT_ERR("Expected RFC option was not found, using defaults");
2293 case L2CAP_MODE_ERTM:
2294 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2295 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2296 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2298 case L2CAP_MODE_STREAMING:
2299 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle an incoming Command Reject. If it answers our outstanding
 * Information Request (matched by ident), treat the feature-mask
 * exchange as finished and resume any channels waiting on it.
 * NOTE(review): excerpted listing — the return values on the early
 * exits are not shown here.
 */
2303 static inline int l2cap_command_rej(struct l2cap_conn *conn,
2304 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2307 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Guard against truncated reject payloads from the peer */
2309 if (cmd_len < sizeof(*rej))
2312 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
/* Only react if this rejects the info request we are waiting on */
2315 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2316 cmd->ident == conn->info_ident) {
2317 del_timer(&conn->info_timer);
2319 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2320 conn->info_ident = 0;
2322 l2cap_conn_start(conn);
/*
 * Handle an incoming Connect Request: find a listening channel for the
 * PSM, run security/backlog/duplicate-CID checks, create the child
 * channel, and answer with success / pending / an error result. If the
 * peer's feature mask is still unknown, answer "pending" and fire an
 * Information Request first; otherwise possibly start configuration
 * immediately.
 * NOTE(review): excerpted listing — goto labels (response/sendresp),
 * psm/dcid extraction, the pchan NULL check, and several unlock paths
 * are among the original lines not shown here.
 */
2328 static int l2cap_connect_req(struct l2cap_conn *conn,
2329 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2331 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2332 struct l2cap_conn_rsp rsp;
2333 struct l2cap_chan *chan = NULL, *pchan;
2334 struct sock *parent, *sk = NULL;
2335 int result, status = L2CAP_CS_NO_INFO;
/* Reject truncated requests outright */
2340 if (cmd_len < sizeof(struct l2cap_conn_req))
2343 scid = __le16_to_cpu(req->scid);
2346 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2348 /* Check if we have socket listening on psm */
2349 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2351 result = L2CAP_CR_BAD_PSM;
2357 bh_lock_sock(parent);
2359 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check */
2360 if (psm != cpu_to_le16(0x0001) &&
2361 !hci_conn_check_link_mode(conn->hcon)) {
2362 conn->disc_reason = 0x05;
2363 result = L2CAP_CR_SEC_BLOCK;
2367 result = L2CAP_CR_NO_MEM;
2369 /* Check for backlog size */
2370 if (sk_acceptq_is_full(parent)) {
2371 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2375 chan = pchan->ops->new_connection(pchan->data);
2381 write_lock_bh(&conn->chan_lock);
2383 /* Check if we already have channel with that dcid */
2384 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2385 write_unlock_bh(&conn->chan_lock);
/* Duplicate source CID: tear the fresh child channel back down */
2386 sock_set_flag(sk, SOCK_ZAPPED);
2387 chan->ops->close(chan->data);
2391 hci_conn_hold(conn->hcon);
2393 bacpy(&bt_sk(sk)->src, conn->src);
2394 bacpy(&bt_sk(sk)->dst, conn->dst);
2398 bt_accept_enqueue(parent, sk);
2400 __l2cap_chan_add(conn, chan);
2404 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Remember the request ident for a possibly deferred response */
2406 chan->ident = cmd->ident;
2408 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2409 if (l2cap_check_security(chan)) {
/* Userspace asked to defer accept: report authorization pending */
2410 if (bt_sk(sk)->defer_setup) {
2411 l2cap_state_change(chan, BT_CONNECT2);
2412 result = L2CAP_CR_PEND;
2413 status = L2CAP_CS_AUTHOR_PEND;
2414 parent->sk_data_ready(parent, 0);
2416 l2cap_state_change(chan, BT_CONFIG);
2417 result = L2CAP_CR_SUCCESS;
2418 status = L2CAP_CS_NO_INFO;
2421 l2cap_state_change(chan, BT_CONNECT2);
2422 result = L2CAP_CR_PEND;
2423 status = L2CAP_CS_AUTHEN_PEND;
/* Feature mask not known yet: answer pending, info exchange below */
2426 l2cap_state_change(chan, BT_CONNECT2);
2427 result = L2CAP_CR_PEND;
2428 status = L2CAP_CS_NO_INFO;
2431 write_unlock_bh(&conn->chan_lock);
2434 bh_unlock_sock(parent);
2437 rsp.scid = cpu_to_le16(scid);
2438 rsp.dcid = cpu_to_le16(dcid);
2439 rsp.result = cpu_to_le16(result);
2440 rsp.status = cpu_to_le16(status);
2441 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending with no info: kick off the feature-mask Information Request */
2443 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2444 struct l2cap_info_req info;
2445 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2447 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2448 conn->info_ident = l2cap_get_ident(conn);
2450 mod_timer(&conn->info_timer, jiffies +
2451 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2453 l2cap_send_cmd(conn, conn->info_ident,
2454 L2CAP_INFO_REQ, sizeof(info), &info);
/* Immediate success: start configuration right away (once) */
2457 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2458 result == L2CAP_CR_SUCCESS) {
2460 set_bit(CONF_REQ_SENT, &chan->conf_state);
2461 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2462 l2cap_build_conf_req(chan, buf), buf);
2463 chan->num_conf_req++;
/*
 * Handle an incoming Connect Response. Locate the channel by scid (or by
 * ident while the scid is still unknown), then: success -> move to
 * BT_CONFIG and send the first Configure Request; pending -> mark
 * CONF_CONNECT_PEND; anything else -> tear the channel down (deferred if
 * the socket is locked by userspace).
 * NOTE(review): excerpted listing — the result switch header, the
 * pending case label, the default/failure case label, and unlock/return
 * lines are among the original lines not shown here.
 */
2469 static int l2cap_connect_rsp(struct l2cap_conn *conn,
2470 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2473 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2474 u16 scid, dcid, result, status;
2475 struct l2cap_chan *chan;
2479 if (cmd_len < sizeof(*rsp))
2482 scid = __le16_to_cpu(rsp->scid);
2483 dcid = __le16_to_cpu(rsp->dcid);
2484 result = __le16_to_cpu(rsp->result);
2485 status = __le16_to_cpu(rsp->status);
2487 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2490 chan = l2cap_get_chan_by_scid(conn, scid);
/* No scid yet (zero in the response): fall back to matching by ident */
2494 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2502 case L2CAP_CR_SUCCESS:
2503 l2cap_state_change(chan, BT_CONFIG);
2506 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Avoid sending a duplicate Configure Request */
2508 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2511 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2512 l2cap_build_conf_req(chan, req), req);
2513 chan->num_conf_req++;
2517 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2521 /* don't delete l2cap channel if sk is owned by user */
2522 if (sock_owned_by_user(sk)) {
/* Defer the teardown: mark disconnecting and let the timer finish it */
2523 l2cap_state_change(chan, BT_DISCONN);
2524 __clear_chan_timer(chan);
2525 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2529 l2cap_chan_del(chan, ECONNREFUSED);
2537 static inline void set_default_fcs(struct l2cap_chan *chan)
2539 /* FCS is enabled only in ERTM or streaming mode, if one or both
2542 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2543 chan->fcs = L2CAP_FCS_NONE;
/* Neither side asked to disable FCS, so default to CRC16 checking */
2544 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2545 chan->fcs = L2CAP_FCS_CRC16;
/*
 * Handle an incoming Configure Request. Rejects requests for channels in
 * the wrong state, accumulates multi-fragment option lists in
 * chan->conf_req, and on the final fragment parses the options, replies,
 * and — when both config directions are done — brings the channel up
 * (initializing ERTM state if that mode was negotiated). Also sends our
 * own first Configure Request if we haven't yet.
 * NOTE(review): excerpted listing — the chan NULL check, unlock/return
 * lines, the conf_len reset, and buffer declarations (rsp/buf) are
 * among the original lines not shown here.
 */
2548 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2550 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2553 struct l2cap_chan *chan;
2557 if (cmd_len < sizeof(*req))
2560 dcid = __le16_to_cpu(req->dcid);
2561 flags = __le16_to_cpu(req->flags);
2563 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2565 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only legal in BT_CONFIG/BT_CONNECT2; otherwise reject CID */
2571 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2572 struct l2cap_cmd_rej_cid rej;
2574 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2575 rej.scid = cpu_to_le16(chan->scid);
2576 rej.dcid = cpu_to_le16(chan->dcid);
2578 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2583 /* Reject if config buffer is too small. */
2584 len = cmd_len - sizeof(*req);
2585 if (chan->conf_len + len > sizeof(chan->conf_req)) {
2586 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2587 l2cap_build_conf_rsp(chan, rsp,
2588 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment's options for later parsing */
2593 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2594 chan->conf_len += len;
/* Continuation flag set: more fragments follow, just ack this one */
2596 if (flags & 0x0001) {
2597 /* Incomplete config. Send empty response. */
2598 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2599 l2cap_build_conf_rsp(chan, rsp,
2600 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2604 /* Complete config. */
2605 len = l2cap_parse_conf_req(chan, rsp);
/* Unresolvable option conflict: disconnect rather than loop forever */
2607 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2611 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2612 chan->num_conf_rsp++;
2614 /* Reset config buffer. */
2617 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: the channel is ready for data */
2620 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2621 set_default_fcs(chan);
2623 l2cap_state_change(chan, BT_CONNECTED);
2625 chan->next_tx_seq = 0;
2626 chan->expected_tx_seq = 0;
2627 skb_queue_head_init(&chan->tx_q);
2628 if (chan->mode == L2CAP_MODE_ERTM)
2629 l2cap_ertm_init(chan);
2631 l2cap_chan_ready(sk);
/* We haven't configured our direction yet: send our request now */
2635 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2637 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2638 l2cap_build_conf_req(chan, buf), buf);
2639 chan->num_conf_req++;
/*
 * Handle an incoming Configure Response. Success latches the negotiated
 * RFC parameters; "unacceptable parameters" triggers renegotiation via
 * l2cap_parse_conf_rsp() up to L2CAP_CONF_MAX_CONF_RSP attempts; other
 * results disconnect. When no continuation flag remains and our output
 * side is done, the channel transitions to BT_CONNECTED.
 * NOTE(review): excerpted listing — the chan NULL check, the result
 * switch header, break statements, the continuation-flag check before
 * CONF_INPUT_DONE, and unlock/return lines are not shown here.
 */
2647 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
2648 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2651 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2652 u16 scid, flags, result;
2653 struct l2cap_chan *chan;
2655 int len = cmd_len - sizeof(*rsp);
2657 if (cmd_len < sizeof(*rsp))
2660 scid = __le16_to_cpu(rsp->scid);
2661 flags = __le16_to_cpu(rsp->flags);
2662 result = __le16_to_cpu(rsp->result);
2664 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2665 scid, flags, result);
2667 chan = l2cap_get_chan_by_scid(conn, scid);
2674 case L2CAP_CONF_SUCCESS:
2675 l2cap_conf_rfc_get(chan, rsp->data, len);
2678 case L2CAP_CONF_UNACCEPT:
/* Retry with adjusted options, but only a bounded number of times */
2679 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Peer's option list must fit our request buffer */
2682 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2683 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2687 /* throw out any old stored conf requests */
2688 result = L2CAP_CONF_SUCCESS;
2689 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2692 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2696 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2697 L2CAP_CONF_REQ, len, req);
2698 chan->num_conf_req++;
2699 if (result != L2CAP_CONF_SUCCESS)
/* Reject or retry limit exhausted: give up on the channel */
2705 sk->sk_err = ECONNRESET;
2706 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
2707 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2714 set_bit(CONF_INPUT_DONE, &chan->conf_state);
/* Both directions done: bring the channel up */
2716 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2717 set_default_fcs(chan);
2719 l2cap_state_change(chan, BT_CONNECTED);
2720 chan->next_tx_seq = 0;
2721 chan->expected_tx_seq = 0;
2722 skb_queue_head_init(&chan->tx_q);
2723 if (chan->mode == L2CAP_MODE_ERTM)
2724 l2cap_ertm_init(chan);
2726 l2cap_chan_ready(sk);
/*
 * Handle an incoming Disconnect Request: acknowledge it with a
 * Disconnect Response, shut the socket down, and delete the channel —
 * deferring deletion via the disconnect timer if userspace currently
 * holds the socket lock.
 * NOTE(review): excerpted listing — the chan NULL check, lock/unlock
 * lines, and return statements are not shown here.
 */
2734 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
2735 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2738 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2739 struct l2cap_disconn_rsp rsp;
2741 struct l2cap_chan *chan;
2744 if (cmd_len != sizeof(*req))
2747 scid = __le16_to_cpu(req->scid);
2748 dcid = __le16_to_cpu(req->dcid);
2750 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid, so look the channel up by dcid */
2752 chan = l2cap_get_chan_by_scid(conn, dcid);
2758 rsp.dcid = cpu_to_le16(chan->scid);
2759 rsp.scid = cpu_to_le16(chan->dcid);
2760 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2762 sk->sk_shutdown = SHUTDOWN_MASK;
2764 /* don't delete l2cap channel if sk is owned by user */
2765 if (sock_owned_by_user(sk)) {
/* Deletion deferred: the disconnect timer completes the teardown */
2766 l2cap_state_change(chan, BT_DISCONN);
2767 __clear_chan_timer(chan);
2768 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2773 l2cap_chan_del(chan, ECONNRESET);
2776 chan->ops->close(chan->data);
/*
 * Handle an incoming Disconnect Response: the peer confirmed our
 * disconnect, so delete the channel (deferring via the disconnect timer
 * if userspace holds the socket lock). Error code 0: this is a clean,
 * locally-initiated disconnect.
 * NOTE(review): excerpted listing — the chan NULL check, lock/unlock
 * lines, and return statements are not shown here.
 */
2780 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
2781 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2784 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2786 struct l2cap_chan *chan;
2789 if (cmd_len != sizeof(*rsp))
2792 scid = __le16_to_cpu(rsp->scid);
2793 dcid = __le16_to_cpu(rsp->dcid);
2795 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2797 chan = l2cap_get_chan_by_scid(conn, scid);
2803 /* don't delete l2cap channel if sk is owned by user */
2804 if (sock_owned_by_user(sk)) {
2805 l2cap_state_change(chan,BT_DISCONN);
2806 __clear_chan_timer(chan);
2807 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* err == 0: normal completion of a disconnect we initiated */
2812 l2cap_chan_del(chan, 0);
2815 chan->ops->close(chan->data);
/*
 * Handle an incoming Information Request. Answers the feature-mask and
 * fixed-channels queries with success responses built on the stack;
 * any other type gets a NOTSUPP response.
 * NOTE(review): excerpted listing — the buf declarations for the two
 * success branches and the return statement are not shown here.
 */
2819 static inline int l2cap_information_req(struct l2cap_conn *conn,
2820 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2823 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2826 if (cmd_len != sizeof(*req))
2829 type = __le16_to_cpu(req->type);
2831 BT_DBG("type 0x%4.4x", type);
2833 if (type == L2CAP_IT_FEAT_MASK) {
2835 u32 feat_mask = l2cap_feat_mask;
2836 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2837 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2838 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming on top of the static base mask */
2840 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2842 put_unaligned_le32(feat_mask, rsp->data);
2843 l2cap_send_cmd(conn, cmd->ident,
2844 L2CAP_INFO_RSP, sizeof(buf), buf);
2845 } else if (type == L2CAP_IT_FIXED_CHAN) {
2847 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2848 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2849 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel bitmap follows the 4-byte response header */
2850 memcpy(buf + 4, l2cap_fixed_chan, 8);
2851 l2cap_send_cmd(conn, cmd->ident,
2852 L2CAP_INFO_RSP, sizeof(buf), buf);
2854 struct l2cap_info_rsp rsp;
2855 rsp.type = cpu_to_le16(type);
2856 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2857 l2cap_send_cmd(conn, cmd->ident,
2858 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an incoming Information Response matching our outstanding
 * request. A feature-mask answer may chain into a fixed-channels query
 * (when the peer advertises fixed-channel support); once the exchange
 * finishes (or fails), mark it done and start the pending channels.
 * NOTE(review): excerpted listing — return statements between the
 * branches are not shown here.
 */
2864 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
2865 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2868 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2871 if (cmd_len != sizeof(*rsp))
2874 type = __le16_to_cpu(rsp->type);
2875 result = __le16_to_cpu(rsp->result);
2877 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2879 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2880 if (cmd->ident != conn->info_ident ||
2881 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2884 del_timer(&conn->info_timer);
/* Peer refused the query: proceed as if the exchange completed */
2886 if (result != L2CAP_IR_SUCCESS) {
2887 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2888 conn->info_ident = 0;
2890 l2cap_conn_start(conn);
2895 if (type == L2CAP_IT_FEAT_MASK) {
2896 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Chain a fixed-channels query before declaring the exchange done */
2898 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2899 struct l2cap_info_req req;
2900 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2902 conn->info_ident = l2cap_get_ident(conn);
2904 l2cap_send_cmd(conn, conn->info_ident,
2905 L2CAP_INFO_REQ, sizeof(req), &req);
2907 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2908 conn->info_ident = 0;
2910 l2cap_conn_start(conn);
2912 } else if (type == L2CAP_IT_FIXED_CHAN) {
2913 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2914 conn->info_ident = 0;
2916 l2cap_conn_start(conn);
/*
 * Validate LE connection-parameter-update values against the allowed
 * ranges (interval 6..3200, timeout multiplier 10..3200, and a latency
 * bound derived from the timeout/interval ratio). Returns 0 when the
 * parameters are acceptable (error returns not visible in this excerpt).
 * NOTE(review): excerpted listing — the -EINVAL returns and the
 * max_latency declaration are not shown here.
 */
2922 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2927 if (min > max || min < 6 || max > 3200)
2930 if (to_multiplier < 10 || to_multiplier > 3200)
/* Supervision timeout must exceed the maximum connection interval */
2933 if (max >= to_multiplier * 8)
2936 max_latency = (to_multiplier * 8 / max) - 1;
2937 if (latency > 499 || latency > max_latency)
/*
 * Handle an LE Connection Parameter Update Request (only valid when we
 * are master). Validate the proposed parameters, answer with
 * accepted/rejected, and on acceptance actually push the update to the
 * controller via hci_le_conn_update().
 * NOTE(review): excerpted listing — the rejection returns and the final
 * return are not shown here.
 */
2943 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2944 struct l2cap_cmd_hdr *cmd, u8 *data)
2946 struct hci_conn *hcon = conn->hcon;
2947 struct l2cap_conn_param_update_req *req;
2948 struct l2cap_conn_param_update_rsp rsp;
2949 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may apply connection parameter updates */
2952 if (!(hcon->link_mode & HCI_LM_MASTER))
2955 cmd_len = __le16_to_cpu(cmd->len);
2956 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2959 req = (struct l2cap_conn_param_update_req *) data;
2960 min = __le16_to_cpu(req->min);
2961 max = __le16_to_cpu(req->max);
2962 latency = __le16_to_cpu(req->latency);
2963 to_multiplier = __le16_to_cpu(req->to_multiplier);
2965 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2966 min, max, latency, to_multiplier);
2968 memset(&rsp, 0, sizeof(rsp));
2970 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2972 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2974 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2976 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Parameters accepted: apply them on the HCI connection */
2980 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/*
 * Dispatch one BR/EDR signalling command to its handler. Echo requests
 * are answered inline by mirroring the payload; unknown opcodes are
 * logged (the error return for them is not visible in this excerpt).
 */
2985 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2986 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2990 switch (cmd->code) {
2991 case L2CAP_COMMAND_REJ:
2992 l2cap_command_rej(conn, cmd, cmd_len, data);
2995 case L2CAP_CONN_REQ:
2996 err = l2cap_connect_req(conn, cmd, cmd_len, data);
2999 case L2CAP_CONN_RSP:
3000 err = l2cap_connect_rsp(conn, cmd, cmd_len, data);
3003 case L2CAP_CONF_REQ:
3004 err = l2cap_config_req(conn, cmd, cmd_len, data);
3007 case L2CAP_CONF_RSP:
3008 err = l2cap_config_rsp(conn, cmd, cmd_len, data);
3011 case L2CAP_DISCONN_REQ:
3012 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
3015 case L2CAP_DISCONN_RSP:
3016 err = l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
3019 case L2CAP_ECHO_REQ:
/* Echo: reply with the same ident and payload, no handler needed */
3020 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3023 case L2CAP_ECHO_RSP:
3026 case L2CAP_INFO_REQ:
3027 err = l2cap_information_req(conn, cmd, cmd_len, data);
3030 case L2CAP_INFO_RSP:
3031 err = l2cap_information_rsp(conn, cmd, cmd_len, data);
3035 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/*
 * Dispatch one LE signalling command. Only the connection-parameter
 * update request has a real handler; command-reject and update-response
 * are accepted silently, anything else is logged as unknown (return
 * values on those paths not visible in this excerpt).
 */
3043 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3044 struct l2cap_cmd_hdr *cmd, u8 *data)
3046 switch (cmd->code) {
3047 case L2CAP_COMMAND_REJ:
3050 case L2CAP_CONN_PARAM_UPDATE_REQ:
3051 return l2cap_conn_param_update_req(conn, cmd, data);
3053 case L2CAP_CONN_PARAM_UPDATE_RSP:
3057 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/*
 * Parse the signalling channel payload of an incoming frame.  The
 * payload may carry several commands back-to-back; each is copied into
 * a local l2cap_cmd_hdr, validated (declared length must fit in the
 * remaining data and ident must be non-zero), then routed to the LE or
 * BR/EDR dispatcher depending on the link type.  A dispatcher error is
 * answered with an L2CAP_COMMAND_REJ.
 */
3062 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3063 struct sk_buff *skb)
3065 u8 *data = skb->data;
3067 struct l2cap_cmd_hdr cmd;
/* Deliver a copy to raw (sniffer) sockets before parsing. */
3070 l2cap_raw_recv(conn, skb);
3072 while (len >= L2CAP_CMD_HDR_SIZE) {
3074 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3075 data += L2CAP_CMD_HDR_SIZE;
3076 len -= L2CAP_CMD_HDR_SIZE;
3078 cmd_len = le16_to_cpu(cmd.len);
3080 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Bounds check before touching the command body: the advertised
 * length may not exceed what is left in the skb, and ident 0 is
 * reserved/invalid per the L2CAP spec. */
3082 if (cmd_len > len || !cmd.ident) {
3083 BT_DBG("corrupted command");
3087 if (conn->hcon->type == LE_LINK)
3088 err = l2cap_le_sig_cmd(conn, &cmd, data);
3090 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3093 struct l2cap_cmd_rej_unk rej;
3095 BT_ERR("Wrong link type (%d)", err);
3097 /* FIXME: Map err to a valid reason */
3098 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3099 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/*
 * Verify the 16-bit FCS trailing an ERTM/streaming frame when the
 * channel uses CRC16.  The received FCS is trimmed off the skb, then
 * the CRC is recomputed over the L2CAP header (which sits just before
 * skb->data after the earlier pull) plus the remaining payload and
 * compared against the received value.
 */
3109 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3111 u16 our_fcs, rcv_fcs;
/* Basic L2CAP header + 2-byte control field precede skb->data. */
3112 int hdr_size = L2CAP_HDR_SIZE + 2;
3114 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Drop the 2 FCS bytes, then read them from just past the new end. */
3115 skb_trim(skb, skb->len - 2);
3116 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3117 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3119 if (our_fcs != rcv_fcs)
/*
 * After receiving a poll, answer with whichever frame type is
 * appropriate: an RNR if we are locally busy, otherwise retransmit /
 * send pending I-frames, and fall back to an RR if nothing was sent
 * (frames_sent stayed 0) so the peer still gets an acknowledgement.
 */
3125 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3129 chan->frames_sent = 0;
3131 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3133 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3134 control |= L2CAP_SUPER_RCV_NOT_READY;
3135 l2cap_send_sframe(chan, control);
3136 set_bit(CONN_RNR_SENT, &chan->conn_state);
3139 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3140 l2cap_retransmit_frames(chan);
3142 l2cap_ertm_send(chan);
/* Nothing went out and we are not busy: send a plain RR ack. */
3144 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3145 chan->frames_sent == 0) {
3146 control |= L2CAP_SUPER_RCV_READY;
3147 l2cap_send_sframe(chan, control);
/*
 * Insert an out-of-sequence I-frame into the SREJ hold queue, keeping
 * the queue ordered by tx_seq distance from buffer_seq (offsets are
 * computed modulo 64, the ERTM sequence space).  A frame whose tx_seq
 * already exists in the queue is treated as a duplicate (the elided
 * path presumably rejects it — return value not visible here).
 */
3151 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
3153 struct sk_buff *next_skb;
3154 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequencing metadata in the skb control block for later
 * reassembly. */
3156 bt_cb(skb)->tx_seq = tx_seq;
3157 bt_cb(skb)->sar = sar;
3159 next_skb = skb_peek(&chan->srej_q);
/* Empty queue: trivial append. */
3161 __skb_queue_tail(&chan->srej_q, skb);
/* Normalise the modulo-64 distance from buffer_seq to a
 * non-negative offset. */
3165 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3166 if (tx_seq_offset < 0)
3167 tx_seq_offset += 64;
3170 if (bt_cb(next_skb)->tx_seq == tx_seq)
3173 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3174 chan->buffer_seq) % 64;
3175 if (next_tx_seq_offset < 0)
3176 next_tx_seq_offset += 64;
/* Found the first queued frame that sorts after us: insert before
 * it to keep the queue ordered. */
3178 if (next_tx_seq_offset > tx_seq_offset) {
3179 __skb_queue_before(&chan->srej_q, next_skb, skb);
3183 if (skb_queue_is_last(&chan->srej_q, next_skb))
3186 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
/* Larger than everything queued: append at the tail. */
3188 __skb_queue_tail(&chan->srej_q, skb);
/*
 * Chain @new_frag onto @skb's frag_list and update the aggregate
 * length/truesize accounting.  @last_frag caches the current tail so
 * appends are O(1) instead of walking the list.
 */
3193 static void append_skb_frag(struct sk_buff *skb,
3194 struct sk_buff *new_frag, struct sk_buff **last_frag)
3196 /* skb->len reflects data in skb as well as all fragments
3197 * skb->data_len reflects only data in fragments
/* First fragment: start the frag_list. */
3199 if (!skb_has_frag_list(skb))
3200 skb_shinfo(skb)->frag_list = new_frag;
3202 new_frag->next = NULL;
3204 (*last_frag)->next = new_frag;
3205 *last_frag = new_frag;
3207 skb->len += new_frag->len;
3208 skb->data_len += new_frag->len;
3209 skb->truesize += new_frag->truesize;
/*
 * Reassemble a (possibly segmented) SDU from an I-frame, driven by the
 * SAR bits of the control field:
 *  - UNSEGMENTED: deliver the skb to the channel's recv op directly.
 *  - START: read the 16-bit SDU length, sanity-check it against imtu,
 *    and remember the skb as the reassembly head.
 *  - CONTINUE: append to the in-progress SDU via append_skb_frag().
 *  - END (elided case label): append, check the accumulated length
 *    matches sdu_len exactly, and deliver the complete SDU.
 * The final lines drop the partial SDU on error paths.  NOTE(review):
 * several case labels, returns and the error handling between the
 * visible lines are elided from this listing.
 */
3212 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3216 switch (control & L2CAP_CTRL_SAR) {
3217 case L2CAP_SDU_UNSEGMENTED:
3221 err = chan->ops->recv(chan->data, skb);
3224 case L2CAP_SDU_START:
/* First two payload bytes carry the total SDU length. */
3228 chan->sdu_len = get_unaligned_le16(skb->data);
3231 if (chan->sdu_len > chan->imtu) {
3236 if (skb->len >= chan->sdu_len)
3240 chan->sdu_last_frag = skb;
3246 case L2CAP_SDU_CONTINUE:
3250 append_skb_frag(chan->sdu, skb,
3251 &chan->sdu_last_frag);
3254 if (chan->sdu->len >= chan->sdu_len)
3264 append_skb_frag(chan->sdu, skb,
3265 &chan->sdu_last_frag);
/* End fragment: total length must match exactly what START
 * announced. */
3268 if (chan->sdu->len != chan->sdu_len)
3271 err = chan->ops->recv(chan->data, chan->sdu)
3274 /* Reassembly complete */
3276 chan->sdu_last_frag = NULL;
/* Error path: discard the partial SDU. */
3284 kfree_skb(chan->sdu);
3286 chan->sdu_last_frag = NULL;
/*
 * Enter the ERTM local-busy state: flag the channel busy, tell the
 * peer with an RNR S-frame acknowledging up to buffer_seq, remember
 * that the RNR was sent, and stop the ack timer (no more acks until we
 * exit busy).
 */
3293 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3297 BT_DBG("chan %p, Enter local busy", chan);
3299 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3301 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3302 control |= L2CAP_SUPER_RCV_NOT_READY;
3303 l2cap_send_sframe(chan, control);
3305 set_bit(CONN_RNR_SENT, &chan->conn_state);
3307 __clear_ack_timer(chan);
/*
 * Leave the ERTM local-busy state.  If we previously sent an RNR,
 * announce readiness with an RR carrying the poll bit and arm the
 * monitor timer while waiting for the F-bit response (CONN_WAIT_F);
 * finally clear the busy/RNR flags.
 */
3310 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
/* Nothing to undo if the peer was never told we were busy. */
3314 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3317 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3318 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3319 l2cap_send_sframe(chan, control);
3320 chan->retry_count = 1;
3322 __clear_retrans_timer(chan);
3323 __set_monitor_timer(chan);
3325 set_bit(CONN_WAIT_F, &chan->conn_state);
3328 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3329 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3331 BT_DBG("chan %p, Exit local busy", chan);
/*
 * Public toggle for local-busy state, used by upper layers (e.g. the
 * socket layer when receive buffers fill).  Only meaningful for ERTM
 * channels; @busy selects enter vs. exit.
 */
3334 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3336 if (chan->mode == L2CAP_MODE_ERTM) {
3338 l2cap_ertm_enter_local_busy(chan);
3340 l2cap_ertm_exit_local_busy(chan);
/*
 * Drain the SREJ hold queue once a missing frame has arrived: deliver
 * queued frames in-order (stopping at the first gap or if we go
 * locally busy), advancing buffer_seq_srej and tx_seq modulo 64 for
 * each delivered frame.  A reassembly error tears the channel down
 * with a disconnect request.
 */
3344 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3346 struct sk_buff *skb;
3349 while ((skb = skb_peek(&chan->srej_q)) &&
3350 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Stop at the first hole in the sequence. */
3353 if (bt_cb(skb)->tx_seq != tx_seq)
3356 skb = skb_dequeue(&chan->srej_q);
3357 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3358 err = l2cap_reassemble_sdu(chan, skb, control);
3361 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3365 chan->buffer_seq_srej =
3366 (chan->buffer_seq_srej + 1) % 64;
3367 tx_seq = (tx_seq + 1) % 64;
/*
 * Walk the pending-SREJ list and re-send an SREJ S-frame for every
 * entry up to (and including, per the elided loop body) @tx_seq,
 * re-queueing each entry at the tail so it stays tracked until the
 * retransmission arrives.
 */
3371 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3373 struct srej_list *l, *tmp;
3376 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3377 if (l->tx_seq == tx_seq) {
3382 control = L2CAP_SUPER_SELECT_REJECT;
3383 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3384 l2cap_send_sframe(chan, control);
3386 list_add_tail(&l->list, &chan->srej_l);
/*
 * Send an SREJ S-frame for every sequence number between the expected
 * tx_seq and the one actually received, recording each requested
 * sequence in chan->srej_l, then advance expected_tx_seq past the
 * received frame.
 *
 * NOTE(review): the kzalloc(GFP_ATOMIC) result is dereferenced on the
 * very next line without a NULL check — an atomic allocation failure
 * here is a NULL-pointer dereference.  Upstream later changed this
 * function to return -ENOMEM on allocation failure; confirm against
 * the current tree before relying on this code.
 */
3390 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3392 struct srej_list *new;
3395 while (tx_seq != chan->expected_tx_seq) {
3396 control = L2CAP_SUPER_SELECT_REJECT;
3397 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3398 l2cap_send_sframe(chan, control);
3400 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3401 new->tx_seq = chan->expected_tx_seq;
3402 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3403 list_add_tail(&new->list, &chan->srej_l);
/* Skip over the frame that did arrive. */
3405 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/*
 * Core ERTM I-frame receive path.  In order of the visible logic:
 *  1. An F-bit while in WAIT_F clears the monitor timer and re-arms
 *     the retransmission timer if frames remain unacked.
 *  2. The piggybacked ReqSeq acks our outstanding frames.
 *  3. tx_seq is validated against the transmit window (offsets mod 64);
 *     a frame outside the window forces a disconnect.
 *  4. Out-of-sequence frames are parked in the SREJ queue; sequence
 *     gaps trigger SREJ state entry and SREJ frames for the missing
 *     numbers; duplicates are detected via the offset comparison.
 *  5. In-sequence frames are reassembled and acked every
 *     num_to_ack (= tx_win/6 + 1) frames.
 * NOTE(review): this listing elides many connective lines (returns,
 * labels such as the expected/default paths, and braces); the summary
 * above reflects only what is visible.
 */
3408 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3410 u8 tx_seq = __get_txseq(rx_control);
3411 u8 req_seq = __get_reqseq(rx_control);
3412 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3413 int tx_seq_offset, expected_tx_seq_offset;
/* Ack threshold: roughly a sixth of the window, at least 1. */
3414 int num_to_ack = (chan->tx_win/6) + 1;
3417 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3418 tx_seq, rx_control);
3420 if (L2CAP_CTRL_FINAL & rx_control &&
3421 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3422 __clear_monitor_timer(chan);
3423 if (chan->unacked_frames > 0)
3424 __set_retrans_timer(chan);
3425 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* ReqSeq acknowledges our transmitted frames. */
3428 chan->expected_ack_seq = req_seq;
3429 l2cap_drop_acked_frames(chan);
3431 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3432 if (tx_seq_offset < 0)
3433 tx_seq_offset += 64;
3435 /* invalid tx_seq */
3436 if (tx_seq_offset >= chan->tx_win) {
3437 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3441 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3444 if (tx_seq == chan->expected_tx_seq)
3447 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3448 struct srej_list *first;
3450 first = list_first_entry(&chan->srej_l,
3451 struct srej_list, list);
/* This is the frame our oldest SREJ asked for. */
3452 if (tx_seq == first->tx_seq) {
3453 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3454 l2cap_check_srej_gap(chan, tx_seq);
3456 list_del(&first->list);
/* All SREJs satisfied: leave SREJ_SENT state. */
3459 if (list_empty(&chan->srej_l)) {
3460 chan->buffer_seq = chan->buffer_seq_srej;
3461 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3462 l2cap_send_ack(chan);
3463 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3466 struct srej_list *l;
3468 /* duplicated tx_seq */
3469 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3472 list_for_each_entry(l, &chan->srej_l, list) {
3473 if (l->tx_seq == tx_seq) {
3474 l2cap_resend_srejframe(chan, tx_seq);
3478 l2cap_send_srejframe(chan, tx_seq);
3481 expected_tx_seq_offset =
3482 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3483 if (expected_tx_seq_offset < 0)
3484 expected_tx_seq_offset += 64;
3486 /* duplicated tx_seq */
3487 if (tx_seq_offset < expected_tx_seq_offset)
/* First gap detected: enter SREJ state and request the missing
 * frames. */
3490 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3492 BT_DBG("chan %p, Enter SREJ", chan);
3494 INIT_LIST_HEAD(&chan->srej_l);
3495 chan->buffer_seq_srej = chan->buffer_seq;
3497 __skb_queue_head_init(&chan->srej_q);
3498 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3500 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3502 l2cap_send_srejframe(chan, tx_seq);
3504 __clear_ack_timer(chan);
/* Expected (in-sequence) frame path. */
3509 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3511 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3512 bt_cb(skb)->tx_seq = tx_seq;
3513 bt_cb(skb)->sar = sar;
3514 __skb_queue_tail(&chan->srej_q, skb);
3518 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3519 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3521 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3525 if (rx_control & L2CAP_CTRL_FINAL) {
3526 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3527 l2cap_retransmit_frames(chan);
3530 __set_ack_timer(chan);
/* Ack proactively every num_to_ack frames instead of waiting for
 * the ack timer. */
3532 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3533 if (chan->num_acked == num_to_ack - 1)
3534 l2cap_send_ack(chan);
/*
 * Handle a Receiver Ready (RR) S-frame.  The ReqSeq always acks our
 * outstanding frames.  A P-bit demands a response: either the SREJ
 * tail or an I/RR/RNR depending on state.  An F-bit clears
 * remote-busy and may trigger retransmission.  Otherwise remote-busy
 * is cleared and pending I-frames are (re)sent.
 */
3543 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3545 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3548 chan->expected_ack_seq = __get_reqseq(rx_control);
3549 l2cap_drop_acked_frames(chan);
3551 if (rx_control & L2CAP_CTRL_POLL) {
/* Poll: we must answer with the F-bit set. */
3552 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3553 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3554 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3555 (chan->unacked_frames > 0))
3556 __set_retrans_timer(chan);
3558 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3559 l2cap_send_srejtail(chan);
3561 l2cap_send_i_or_rr_or_rnr(chan);
3564 } else if (rx_control & L2CAP_CTRL_FINAL) {
3565 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Only retransmit if no REJ action is already pending. */
3567 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3568 l2cap_retransmit_frames(chan);
3571 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3572 (chan->unacked_frames > 0))
3573 __set_retrans_timer(chan);
3575 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3576 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3577 l2cap_send_ack(chan);
3579 l2cap_ertm_send(chan);
/*
 * Handle a Reject (REJ) S-frame: the ReqSeq acks frames up to the
 * rejected one, then everything from there is retransmitted.  With an
 * F-bit set, retransmission is skipped when a REJ action was already
 * pending; without it, a retransmit always happens and a pending
 * WAIT_F marks the REJ action active.
 */
3583 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3585 u8 tx_seq = __get_reqseq(rx_control);
3587 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3589 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3591 chan->expected_ack_seq = tx_seq;
3592 l2cap_drop_acked_frames(chan);
3594 if (rx_control & L2CAP_CTRL_FINAL) {
3595 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3596 l2cap_retransmit_frames(chan);
3598 l2cap_retransmit_frames(chan);
3600 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3601 set_bit(CONN_REJ_ACT, &chan->conn_state);
/*
 * Handle a Selective Reject (SREJ) S-frame — the peer asks for one
 * specific frame.  Three sub-cases on the P/F bits:
 *  - P set: ack up to tx_seq, retransmit the requested frame with the
 *    F-bit, then resume normal sending; remember the ReqSeq if we are
 *    in WAIT_F so a matching F-response can be recognised.
 *  - F set: only clears a pending SREJ action when the saved ReqSeq
 *    matches, otherwise (elided else) retransmits.
 *  - neither: plain single-frame retransmission, with WAIT_F
 *    bookkeeping as above.
 */
3604 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3606 u8 tx_seq = __get_reqseq(rx_control);
3608 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3610 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3612 if (rx_control & L2CAP_CTRL_POLL) {
3613 chan->expected_ack_seq = tx_seq;
3614 l2cap_drop_acked_frames(chan);
3616 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3617 l2cap_retransmit_one_frame(chan, tx_seq);
3619 l2cap_ertm_send(chan);
3621 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3622 chan->srej_save_reqseq = tx_seq;
3623 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3625 } else if (rx_control & L2CAP_CTRL_FINAL) {
3626 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3627 chan->srej_save_reqseq == tx_seq)
3628 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3630 l2cap_retransmit_one_frame(chan, tx_seq);
3632 l2cap_retransmit_one_frame(chan, tx_seq);
3633 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3634 chan->srej_save_reqseq = tx_seq;
3635 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/*
 * Handle a Receiver Not Ready (RNR) S-frame: mark the peer busy, ack
 * up to ReqSeq, and stop retransmitting.  A P-bit still requires an
 * F-bit response — either an RR/RNR, or (in SREJ state) the SREJ tail;
 * without the P-bit in SREJ state, a plain RR is sent.
 */
3640 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3642 u8 tx_seq = __get_reqseq(rx_control);
3644 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3646 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3647 chan->expected_ack_seq = tx_seq;
3648 l2cap_drop_acked_frames(chan);
3650 if (rx_control & L2CAP_CTRL_POLL)
3651 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3653 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
/* Peer is busy: no point running the retransmission timer. */
3654 __clear_retrans_timer(chan);
3655 if (rx_control & L2CAP_CTRL_POLL)
3656 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3660 if (rx_control & L2CAP_CTRL_POLL)
3661 l2cap_send_srejtail(chan);
3663 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
/*
 * Demultiplex an S-frame to the RR/REJ/SREJ/RNR handler.  As in the
 * I-frame path, an F-bit while in WAIT_F first clears the monitor
 * timer and re-arms retransmission if frames remain unacked.
 */
3666 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3668 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3670 if (L2CAP_CTRL_FINAL & rx_control &&
3671 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3672 __clear_monitor_timer(chan);
3673 if (chan->unacked_frames > 0)
3674 __set_retrans_timer(chan);
3675 clear_bit(CONN_WAIT_F, &chan->conn_state);
3678 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3679 case L2CAP_SUPER_RCV_READY:
3680 l2cap_data_channel_rrframe(chan, rx_control);
3683 case L2CAP_SUPER_REJECT:
3684 l2cap_data_channel_rejframe(chan, rx_control);
3687 case L2CAP_SUPER_SELECT_REJECT:
3688 l2cap_data_channel_srejframe(chan, rx_control);
3691 case L2CAP_SUPER_RCV_NOT_READY:
3692 l2cap_data_channel_rnrframe(chan, rx_control);
/*
 * Entry point for a received ERTM frame on a channel: check the FCS
 * (bad frames are silently dropped — the ERTM recovery machinery will
 * request retransmission), validate the payload length against MPS and
 * the ReqSeq against the window of frames we actually sent, then
 * dispatch to the I-frame or S-frame handler.  Protocol violations
 * (oversized payload, ReqSeq beyond next_tx_seq, wrong frame length
 * for the type — elided checks) disconnect the channel.
 */
3700 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3702 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3705 int len, next_tx_seq_offset, req_seq_offset;
3707 control = get_unaligned_le16(skb->data);
3712 * We can just drop the corrupted I-frame here.
3713 * Receiver will miss it and start proper recovery
3714 * procedures and ask retransmission.
3716 if (l2cap_check_fcs(chan, skb))
/* SAR-start I-frames carry an extra 2-byte SDU length field. */
3719 if (__is_sar_start(control) && __is_iframe(control))
3722 if (chan->fcs == L2CAP_FCS_CRC16)
3725 if (len > chan->mps) {
3726 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3730 req_seq = __get_reqseq(control);
3731 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3732 if (req_seq_offset < 0)
3733 req_seq_offset += 64;
3735 next_tx_seq_offset =
3736 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3737 if (next_tx_seq_offset < 0)
3738 next_tx_seq_offset += 64;
3740 /* check for invalid req-seq */
3741 if (req_seq_offset > next_tx_seq_offset) {
3742 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3746 if (__is_iframe(control)) {
/* Zero-length I-frame (elided check) is a protocol error. */
3748 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3752 l2cap_data_channel_iframe(chan, control, skb);
3756 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3760 l2cap_data_channel_sframe(chan, control, skb);
/*
 * Receive path for a connection-oriented data channel, looked up by
 * source CID.  Per-mode handling:
 *  - BASIC: drop frames that exceed imtu (no flow control exists to do
 *    better) and hand the skb to the channel's recv op.
 *  - ERTM: run the receive state machine directly when the socket is
 *    not user-locked, otherwise defer via the socket backlog.
 *  - STREAMING: FCS-check, length-check, and reassemble; a tx_seq gap
 *    simply discards any partial SDU (streaming never retransmits).
 */
3770 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3772 struct l2cap_chan *chan;
3773 struct sock *sk = NULL;
3778 chan = l2cap_get_chan_by_scid(conn, cid);
3780 BT_DBG("unknown cid 0x%4.4x", cid);
3786 BT_DBG("chan %p, len %d", chan, skb->len);
3788 if (chan->state != BT_CONNECTED)
3791 switch (chan->mode) {
3792 case L2CAP_MODE_BASIC:
3793 /* If socket recv buffers overflows we drop data here
3794 * which is *bad* because L2CAP has to be reliable.
3795 * But we don't have any other choice. L2CAP doesn't
3796 * provide flow control mechanism. */
3798 if (chan->imtu < skb->len)
3801 if (!chan->ops->recv(chan->data, skb))
3805 case L2CAP_MODE_ERTM:
3806 if (!sock_owned_by_user(sk)) {
3807 l2cap_ertm_data_rcv(sk, skb);
/* Socket busy: queue on the backlog for later processing. */
3809 if (sk_add_backlog(sk, skb))
3815 case L2CAP_MODE_STREAMING:
3816 control = get_unaligned_le16(skb->data);
3820 if (l2cap_check_fcs(chan, skb))
3823 if (__is_sar_start(control))
3826 if (chan->fcs == L2CAP_FCS_CRC16)
3829 if (len > chan->mps || len < 0 || __is_sframe(control))
3832 tx_seq = __get_txseq(control);
3834 if (chan->expected_tx_seq != tx_seq) {
3835 /* Frame(s) missing - must discard partial SDU */
3836 kfree_skb(chan->sdu);
3838 chan->sdu_last_frag = NULL;
3841 /* TODO: Notify userland of missing data */
3844 chan->expected_tx_seq = (tx_seq + 1) % 64;
3846 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
3847 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3852 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/*
 * Deliver a connectionless (CID 0x0002) frame to the global channel
 * listening on the given PSM.  The frame is dropped unless a matching
 * channel exists, is BOUND or CONNECTED, and the payload fits its imtu.
 */
3866 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3868 struct sock *sk = NULL;
3869 struct l2cap_chan *chan;
3871 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3879 BT_DBG("sk %p, len %d", sk, skb->len);
3881 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3884 if (chan->imtu < skb->len)
3887 if (!chan->ops->recv(chan->data, skb))
/*
 * Deliver an ATT (LE data CID) frame to the global channel bound to
 * that fixed CID.  Same validation pattern as the connectionless path:
 * channel must exist, be BOUND or CONNECTED, and the payload must fit
 * within imtu.
 */
3899 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3901 struct sock *sk = NULL;
3902 struct l2cap_chan *chan;
3904 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3912 BT_DBG("sk %p, len %d", sk, skb->len);
3914 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3917 if (chan->imtu < skb->len)
3920 if (!chan->ops->recv(chan->data, skb))
/*
 * Top-level demultiplexer for a complete, reassembled L2CAP frame:
 * strip the basic header, verify the declared length against the
 * actual payload, then route by destination CID — signalling,
 * connectionless (PSM-addressed), LE data/ATT, SMP (elided CID case),
 * or a connection-oriented data channel as the default.
 */
3932 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3934 struct l2cap_hdr *lh = (void *) skb->data;
3938 skb_pull(skb, L2CAP_HDR_SIZE);
3939 cid = __le16_to_cpu(lh->cid);
3940 len = __le16_to_cpu(lh->len);
/* Header length must match the remaining payload exactly. */
3942 if (len != skb->len) {
3947 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3950 case L2CAP_CID_LE_SIGNALING:
3951 case L2CAP_CID_SIGNALING:
3952 l2cap_sig_channel(conn, skb);
3955 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM in the first 2 bytes. */
3956 psm = get_unaligned_le16(skb->data);
3958 l2cap_conless_channel(conn, psm, skb);
3961 case L2CAP_CID_LE_DATA:
3962 l2cap_att_channel(conn, cid, skb);
/* (elided case) SMP channel: a failure tears the connection down. */
3966 if (smp_sig_channel(conn, skb))
3967 l2cap_conn_del(conn->hcon, EACCES);
3971 l2cap_data_channel(conn, cid, skb);
3976 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * HCI connect-indication callback: decide whether to accept an
 * incoming ACL link.  Walks all listening channels; those bound to
 * this adapter's own address contribute to lm1 (an exact match),
 * wildcard (BDADDR_ANY) listeners to lm2.  Returns the exact-match
 * link-mode mask when one exists, otherwise the wildcard mask.
 */
3978 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3980 int exact = 0, lm1 = 0, lm2 = 0;
3981 struct l2cap_chan *c;
3983 if (type != ACL_LINK)
3986 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3988 /* Find listening sockets and check their link_mode */
3989 read_lock(&chan_list_lock);
3990 list_for_each_entry(c, &chan_list, global_l) {
3991 struct sock *sk = c->sk;
3993 if (c->state != BT_LISTEN)
3996 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3997 lm1 |= HCI_LM_ACCEPT;
3999 lm1 |= HCI_LM_MASTER;
4001 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4002 lm2 |= HCI_LM_ACCEPT;
4004 lm2 |= HCI_LM_MASTER;
4007 read_unlock(&chan_list_lock);
4009 return exact ? lm1 : lm2;
/*
 * HCI connect-confirmation callback: on success, create/attach the
 * l2cap_conn for the new ACL or LE link and mark it ready; on failure,
 * tear down any existing L2CAP state with the translated error.
 */
4012 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4014 struct l2cap_conn *conn;
4016 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4018 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4022 conn = l2cap_conn_add(hcon, status);
4024 l2cap_conn_ready(conn);
4026 l2cap_conn_del(hcon, bt_to_errno(status));
/*
 * HCI disconnect-indication callback: report the L2CAP-level reason
 * code recorded on the connection (disc_reason) so HCI can use it in
 * the Disconnect command.
 */
4031 static int l2cap_disconn_ind(struct hci_conn *hcon)
4033 struct l2cap_conn *conn = hcon->l2cap_data;
4035 BT_DBG("hcon %p", hcon);
4037 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4040 return conn->disc_reason;
/*
 * HCI disconnect-confirmation callback: the link is gone, so tear down
 * all L2CAP state for it with the HCI reason mapped to an errno.
 */
4043 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4045 BT_DBG("hcon %p reason %d", hcon, reason);
4047 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4050 l2cap_conn_del(hcon, bt_to_errno(reason));
/*
 * React to an encryption change on a connection-oriented channel.
 * Encryption dropped: MEDIUM-security channels get a grace timer
 * (L2CAP_ENC_TIMEOUT) to re-encrypt, HIGH-security channels are closed
 * immediately.  Encryption (re)established: cancel the MEDIUM grace
 * timer.
 */
4055 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4057 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4060 if (encrypt == 0x00) {
4061 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4062 __clear_chan_timer(chan);
4063 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4064 } else if (chan->sec_level == BT_SECURITY_HIGH)
4065 l2cap_chan_close(chan, ECONNREFUSED);
4067 if (chan->sec_level == BT_SECURITY_MEDIUM)
4068 __clear_chan_timer(chan);
/*
 * HCI security-confirmation callback, invoked when
 * authentication/encryption completes (status 0) or fails.  LE links
 * kick off SMP key distribution and cancel the security timer.  For
 * each channel on the connection:
 *  - LE data channels become ready when encryption succeeded;
 *  - already-connected channels just get their encryption state
 *    rechecked;
 *  - channels in BT_CONNECT send their deferred Connect Request;
 *  - channels in BT_CONNECT2 answer the pending Connect Response:
 *    pending/authorization-pending if accept is deferred to userspace,
 *    success otherwise, or security-block on failure.
 * NOTE(review): several guard lines (e.g. the status checks between
 * the visible branches) are elided from this listing.
 */
4072 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4074 struct l2cap_conn *conn = hcon->l2cap_data;
4075 struct l2cap_chan *chan;
4080 BT_DBG("conn %p", conn);
4082 if (hcon->type == LE_LINK) {
4083 smp_distribute_keys(conn, 0);
4084 del_timer(&conn->security_timer);
4087 read_lock(&conn->chan_lock);
4089 list_for_each_entry(chan, &conn->chan_l, list) {
4090 struct sock *sk = chan->sk;
4094 BT_DBG("chan->scid %d", chan->scid);
4096 if (chan->scid == L2CAP_CID_LE_DATA) {
4097 if (!status && encrypt) {
/* Encryption now matches the link: channel is usable. */
4098 chan->sec_level = hcon->sec_level;
4099 l2cap_chan_ready(sk);
4106 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4111 if (!status && (chan->state == BT_CONNECTED ||
4112 chan->state == BT_CONFIG)) {
4113 l2cap_check_encryption(chan, encrypt);
4118 if (chan->state == BT_CONNECT) {
/* Security done: send the Connect Request we held back. */
4120 struct l2cap_conn_req req;
4121 req.scid = cpu_to_le16(chan->scid);
4122 req.psm = chan->psm;
4124 chan->ident = l2cap_get_ident(conn);
4125 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4127 l2cap_send_cmd(conn, chan->ident,
4128 L2CAP_CONN_REQ, sizeof(req), &req);
4130 __clear_chan_timer(chan);
4131 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4133 } else if (chan->state == BT_CONNECT2) {
4134 struct l2cap_conn_rsp rsp;
4138 if (bt_sk(sk)->defer_setup) {
/* Userspace must accept(): report authorization pending
 * and wake the listening parent. */
4139 struct sock *parent = bt_sk(sk)->parent;
4140 res = L2CAP_CR_PEND;
4141 stat = L2CAP_CS_AUTHOR_PEND;
4143 parent->sk_data_ready(parent, 0);
4145 l2cap_state_change(chan, BT_CONFIG);
4146 res = L2CAP_CR_SUCCESS;
4147 stat = L2CAP_CS_NO_INFO;
/* (elided else) security failed: reject and schedule
 * disconnect. */
4150 l2cap_state_change(chan, BT_DISCONN);
4151 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4152 res = L2CAP_CR_SEC_BLOCK;
4153 stat = L2CAP_CS_NO_INFO;
4156 rsp.scid = cpu_to_le16(chan->dcid);
4157 rsp.dcid = cpu_to_le16(chan->scid);
4158 rsp.result = cpu_to_le16(res);
4159 rsp.status = cpu_to_le16(stat);
4160 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4167 read_unlock(&conn->chan_lock);
/*
 * HCI ACL-data callback: reassemble L2CAP frames that HCI delivers as
 * start (!ACL_CONT) + continuation fragments.
 *  - Start fragment: must at least hold the basic L2CAP header; if it
 *    carries the whole frame, process it immediately, otherwise
 *    allocate conn->rx_skb for the declared total length, validate the
 *    length against the destination channel's imtu, and copy the first
 *    piece in.
 *  - Continuation fragment: append into rx_skb, tracking the
 *    outstanding byte count in conn->rx_len; when it reaches zero the
 *    complete frame goes to l2cap_recv_frame().
 * Any framing inconsistency (unexpected start/continuation, over-long
 * fragment) discards the partial frame and marks the connection
 * unreliable with ECOMM.
 */
4172 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4174 struct l2cap_conn *conn = hcon->l2cap_data;
4177 conn = l2cap_conn_add(hcon, 0);
4182 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4184 if (!(flags & ACL_CONT)) {
4185 struct l2cap_hdr *hdr;
4186 struct l2cap_chan *chan;
/* A start fragment while a previous frame is still pending
 * means we lost data: drop the stale partial frame. */
4191 BT_ERR("Unexpected start frame (len %d)", skb->len);
4192 kfree_skb(conn->rx_skb);
4193 conn->rx_skb = NULL;
4195 l2cap_conn_unreliable(conn, ECOMM);
4198 /* Start fragment always begin with Basic L2CAP header */
4199 if (skb->len < L2CAP_HDR_SIZE) {
4200 BT_ERR("Frame is too short (len %d)", skb->len);
4201 l2cap_conn_unreliable(conn, ECOMM);
4205 hdr = (struct l2cap_hdr *) skb->data;
4206 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4207 cid = __le16_to_cpu(hdr->cid);
4209 if (len == skb->len) {
4210 /* Complete frame received */
4211 l2cap_recv_frame(conn, skb);
4215 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4217 if (skb->len > len) {
4218 BT_ERR("Frame is too long (len %d, expected len %d)",
4220 l2cap_conn_unreliable(conn, ECOMM);
/* Pre-check the full frame length against the receiving
 * channel's MTU before allocating the reassembly buffer. */
4224 chan = l2cap_get_chan_by_scid(conn, cid);
4226 if (chan && chan->sk) {
4227 struct sock *sk = chan->sk;
4229 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4230 BT_ERR("Frame exceeding recv MTU (len %d, "
4234 l2cap_conn_unreliable(conn, ECOMM);
4240 /* Allocate skb for the complete frame (with header) */
4241 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4245 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4247 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4249 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4251 if (!conn->rx_len) {
4252 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4253 l2cap_conn_unreliable(conn, ECOMM);
4257 if (skb->len > conn->rx_len) {
4258 BT_ERR("Fragment is too long (len %d, expected %d)",
4259 skb->len, conn->rx_len);
4260 kfree_skb(conn->rx_skb);
4261 conn->rx_skb = NULL;
4263 l2cap_conn_unreliable(conn, ECOMM);
4267 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4269 conn->rx_len -= skb->len;
4271 if (!conn->rx_len) {
4272 /* Complete frame received */
4273 l2cap_recv_frame(conn, conn->rx_skb);
4274 conn->rx_skb = NULL;
/*
 * seq_file show callback for /sys/kernel/debug/bluetooth/l2cap: dump
 * one line per global channel (addresses, state, PSM, CIDs, MTUs,
 * security level, mode) under the channel-list read lock.
 */
4283 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4285 struct l2cap_chan *c;
4287 read_lock_bh(&chan_list_lock);
4289 list_for_each_entry(c, &chan_list, global_l) {
4290 struct sock *sk = c->sk;
4292 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4293 batostr(&bt_sk(sk)->src),
4294 batostr(&bt_sk(sk)->dst),
4295 c->state, __le16_to_cpu(c->psm),
4296 c->scid, c->dcid, c->imtu, c->omtu,
4297 c->sec_level, c->mode);
4300 read_unlock_bh(&chan_list_lock);
/* debugfs open callback: standard single_open() seq_file wiring. */
4305 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4307 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (seq_file based). */
4310 static const struct file_operations l2cap_debugfs_fops = {
4311 .open = l2cap_debugfs_open,
4313 .llseek = seq_lseek,
4314 .release = single_release,
/* Handle of the debugfs entry, kept for removal in l2cap_exit(). */
4317 static struct dentry *l2cap_debugfs;
/*
 * Registration block hooking L2CAP into the HCI core: connection
 * accept/confirm, disconnect, security and ACL-data callbacks.
 */
4319 static struct hci_proto l2cap_hci_proto = {
4321 .id = HCI_PROTO_L2CAP,
4322 .connect_ind = l2cap_connect_ind,
4323 .connect_cfm = l2cap_connect_cfm,
4324 .disconn_ind = l2cap_disconn_ind,
4325 .disconn_cfm = l2cap_disconn_cfm,
4326 .security_cfm = l2cap_security_cfm,
4327 .recv_acldata = l2cap_recv_acldata
/*
 * Module init: register the L2CAP socket family, then the HCI protocol
 * handler (unwinding the sockets on failure), and finally create the
 * optional debugfs entry — a debugfs failure is only logged, not
 * fatal.
 */
4330 int __init l2cap_init(void)
4334 err = l2cap_init_sockets();
4338 err = hci_register_proto(&l2cap_hci_proto);
4340 BT_ERR("L2CAP protocol registration failed");
/* Unwind the socket registration before bailing out. */
4341 bt_sock_unregister(BTPROTO_L2CAP);
4346 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4347 bt_debugfs, NULL, &l2cap_debugfs_fops);
4349 BT_ERR("Failed to create L2CAP debug file");
4355 l2cap_cleanup_sockets();
/*
 * Module exit: tear down in reverse order of l2cap_init() — debugfs
 * entry, HCI protocol handler, then the socket family.
 */
4359 void l2cap_exit(void)
4361 debugfs_remove(l2cap_debugfs);
4363 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4364 BT_ERR("L2CAP protocol unregistration failed");
4366 l2cap_cleanup_sockets();
/* Runtime knob (writable at 0644) to turn off ERTM support. */
4369 module_param(disable_ertm, bool, 0644);
4370 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");