2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
/* Extended-feature mask we advertise in L2CAP information responses. */
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Bitmap of supported fixed channels (bit 1 = signalling). */
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
/* Global list of all L2CAP channels, guarded by chan_list_lock. */
64 static LIST_HEAD(chan_list);
65 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for the signalling helpers defined later in this file. */
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
69 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77 /* ---- L2CAP channels ---- */
/* Take a reference on the channel (paired with chan_put()). */
79 static inline void chan_hold(struct l2cap_chan *c)
81 	atomic_inc(&c->refcnt);
/* Drop a channel reference; the last put frees the channel. */
84 static inline void chan_put(struct l2cap_chan *c)
86 	if (atomic_dec_and_test(&c->refcnt))
/* Look up a channel on @conn by destination CID.
 * Caller must hold conn->chan_lock. */
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
94 	list_for_each_entry(c, &conn->chan_l, list) {
/* Look up a channel on @conn by source CID.
 * Caller must hold conn->chan_lock. */
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104 	struct l2cap_chan *c;
106 	list_for_each_entry(c, &conn->chan_l, list) {
113 /* Find channel with given SCID.
114  * Returns locked socket */
/* Locked wrapper around __l2cap_get_chan_by_scid(): takes conn->chan_lock
 * for the duration of the list walk. */
115 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
117 	struct l2cap_chan *c;
119 	read_lock(&conn->chan_lock);
120 	c = __l2cap_get_chan_by_scid(conn, cid);
123 	read_unlock(&conn->chan_lock);
/* Look up a channel on @conn by the signalling command identifier it owns.
 * Caller must hold conn->chan_lock. */
127 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
129 	struct l2cap_chan *c;
131 	list_for_each_entry(c, &conn->chan_l, list) {
132 		if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
138 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
140 	struct l2cap_chan *c;
142 	read_lock(&conn->chan_lock);
143 	c = __l2cap_get_chan_by_ident(conn, ident);
146 	read_unlock(&conn->chan_lock);
/* Find a channel in the global list bound to (@psm, @src).
 * Caller must hold chan_list_lock; used to detect PSM collisions. */
150 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
152 	struct l2cap_chan *c;
154 	list_for_each_entry(c, &chan_list, global_l) {
155 		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to @psm on source address @src.
 * A non-zero @psm that is already bound is rejected; with psm == 0 a free
 * dynamic PSM is scanned for in the odd range 0x1001..0x10ff (step 2 keeps
 * the PSM odd, as the spec requires). Returns 0 or a negative errno —
 * TODO confirm exact error codes, the failure lines are not visible here. */
164 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
168 	write_lock_bh(&chan_list_lock);
170 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
183 	for (p = 0x1001; p < 0x1100; p += 2)
184 		if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
185 			chan->psm   = cpu_to_le16(p);
186 			chan->sport = cpu_to_le16(p);
193 	write_unlock_bh(&chan_list_lock);
/* Assign a fixed source CID to @chan under chan_list_lock. */
197 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
199 	write_lock_bh(&chan_list_lock);
203 	write_unlock_bh(&chan_list_lock);
/* Allocate the first unused dynamic source CID on @conn.
 * Caller must hold conn->chan_lock (uses the unlocked lookup). */
208 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
210 	u16 cid = L2CAP_CID_DYN_START;
212 	for (; cid < L2CAP_CID_DYN_END; cid++) {
213 		if (!__l2cap_get_chan_by_scid(conn, cid))
/* (Re)arm @timer to fire @timeout ms from now.
 * NOTE(review): the BT_DBG format says "chan %p" but passes chan->sk —
 * looks like it should print chan itself; confirm against upstream. */
220 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
222 	BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
/* mod_timer() returns 0 when the timer was not already pending. */
224 	if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
/* Cancel @timer if it is still pending. */
228 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
230 	BT_DBG("chan %p state %d", chan, chan->state);
232 	if (timer_pending(timer) && del_timer(timer))
/* Move @chan to @state and notify the owner via its state_change callback. */
236 static void l2cap_state_change(struct l2cap_chan *chan, int state)
239 	chan->ops->state_change(chan->data, state);
/* Channel timer callback: close the channel when its timer expires.
 * If the socket is locked by a user context, retry after the disconnect
 * timeout instead of racing with it. */
242 static void l2cap_chan_timeout(unsigned long arg)
244 	struct l2cap_chan *chan = (struct l2cap_chan *) arg;
245 	struct sock *sk = chan->sk;
248 	BT_DBG("chan %p state %d", chan, chan->state);
252 	if (sock_owned_by_user(sk)) {
253 		/* sk is owned by user. Try again later */
254 		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Pick the errno reported to the owner based on the channel state. */
260 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
261 		reason = ECONNREFUSED;
262 	else if (chan->state == BT_CONNECT &&
263 			chan->sec_level != BT_SECURITY_SDP)
264 		reason = ECONNREFUSED;
268 	l2cap_chan_close(chan, reason);
272 	chan->ops->close(chan->data);
/* Allocate a new channel for @sk, register it in the global channel list,
 * arm its timer callback and initialise it to BT_OPEN with one reference.
 * Returns the channel, or NULL on allocation failure (allocation check not
 * visible in this view — confirm). */
276 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
278 	struct l2cap_chan *chan;
280 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
286 	write_lock_bh(&chan_list_lock);
287 	list_add(&chan->global_l, &chan_list);
288 	write_unlock_bh(&chan_list_lock);
290 	setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
292 	chan->state = BT_OPEN;
294 	atomic_set(&chan->refcnt, 1);
/* Unlink @chan from the global channel list (final teardown). */
299 void l2cap_chan_destroy(struct l2cap_chan *chan)
301 	write_lock_bh(&chan_list_lock);
302 	list_del(&chan->global_l);
303 	write_unlock_bh(&chan_list_lock);
/* Attach @chan to @conn, picking its CIDs/MTU by channel type, and add it
 * to the connection's channel list. Caller must hold conn->chan_lock. */
308 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
310 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
311 			chan->psm, chan->dcid);
/* 0x13: remote user terminated connection (default disconnect reason). */
313 	conn->disc_reason = 0x13;
317 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
318 		if (conn->hcon->type == LE_LINK) {
/* LE links use the fixed LE data channel on both ends. */
320 			chan->omtu = L2CAP_LE_DEFAULT_MTU;
321 			chan->scid = L2CAP_CID_LE_DATA;
322 			chan->dcid = L2CAP_CID_LE_DATA;
324 			/* Alloc CID for connection-oriented socket */
325 			chan->scid = l2cap_alloc_cid(conn);
326 			chan->omtu = L2CAP_DEFAULT_MTU;
328 	} else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
329 		/* Connectionless socket */
330 		chan->scid = L2CAP_CID_CONN_LESS;
331 		chan->dcid = L2CAP_CID_CONN_LESS;
332 		chan->omtu = L2CAP_DEFAULT_MTU;
334 		/* Raw socket can send/recv signalling messages only */
335 		chan->scid = L2CAP_CID_SIGNALING;
336 		chan->dcid = L2CAP_CID_SIGNALING;
337 		chan->omtu = L2CAP_DEFAULT_MTU;
342 	list_add(&chan->list, &conn->chan_l);
/* Detach @chan from its connection and tear down its transmit state.
346  * Must be called on the locked socket. */
347 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
349 	struct sock *sk = chan->sk;
350 	struct l2cap_conn *conn = chan->conn;
351 	struct sock *parent = bt_sk(sk)->parent;
353 	__clear_chan_timer(chan);
355 	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
358 	/* Delete from channel list */
359 	write_lock_bh(&conn->chan_lock);
360 	list_del(&chan->list);
361 	write_unlock_bh(&conn->chan_lock);
365 	hci_conn_put(conn->hcon);
368 	l2cap_state_change(chan, BT_CLOSED);
369 	sock_set_flag(sk, SOCK_ZAPPED);
/* If the socket was pending on a listening parent, unlink it and wake
 * the parent; otherwise just signal the state change on sk. */
375 		bt_accept_unlink(sk);
376 		parent->sk_data_ready(parent, 0);
378 		sk->sk_state_change(sk);
380 	if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
381 			test_bit(CONF_INPUT_DONE, &chan->conf_state)))
/* Drop any queued outbound frames. */
384 	skb_queue_purge(&chan->tx_q);
386 	if (chan->mode == L2CAP_MODE_ERTM) {
387 		struct srej_list *l, *tmp;
/* ERTM teardown: stop all retransmission machinery and free the
 * SREJ queue/list. */
389 		__clear_retrans_timer(chan);
390 		__clear_monitor_timer(chan);
391 		__clear_ack_timer(chan);
393 		skb_queue_purge(&chan->srej_q);
395 		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every not-yet-accepted child channel of listening socket @parent. */
402 static void l2cap_chan_cleanup_listen(struct sock *parent)
406 	BT_DBG("parent %p", parent);
408 	/* Close not yet accepted channels */
409 	while ((sk = bt_accept_dequeue(parent, NULL))) {
410 		struct l2cap_chan *chan = l2cap_pi(sk)->chan;
411 		__clear_chan_timer(chan);
413 		l2cap_chan_close(chan, ECONNRESET);
415 		chan->ops->close(chan->data);
/* Close @chan, running the state-appropriate shutdown procedure:
 * listening channels first reap their pending children; connected/config
 * ACL channels send a disconnect request; CONNECT2 ACL channels answer the
 * pending connect request with a reject before deletion. */
419 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
421 	struct l2cap_conn *conn = chan->conn;
422 	struct sock *sk = chan->sk;
424 	BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
426 	switch (chan->state) {
428 		l2cap_chan_cleanup_listen(sk);
430 		l2cap_state_change(chan, BT_CLOSED);
431 		sock_set_flag(sk, SOCK_ZAPPED);
/* Connected/config state on ACL: initiate L2CAP disconnect and keep
 * the channel alive (re-armed timer) until the response arrives. */
436 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
437 					conn->hcon->type == ACL_LINK) {
438 			__clear_chan_timer(chan);
439 			__set_chan_timer(chan, sk->sk_sndtimeo);
440 			l2cap_send_disconn_req(conn, chan, reason);
442 			l2cap_chan_del(chan, reason);
/* CONNECT2: a remote connect request is pending — reject it. */
446 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
447 					conn->hcon->type == ACL_LINK) {
448 			struct l2cap_conn_rsp rsp;
/* SEC_BLOCK when deferred setup was in progress, BAD_PSM otherwise —
 * TODO confirm, the surrounding branch lines are not visible here. */
451 			if (bt_sk(sk)->defer_setup)
452 				result = L2CAP_CR_SEC_BLOCK;
454 				result = L2CAP_CR_BAD_PSM;
455 			l2cap_state_change(chan, BT_DISCONN);
457 			rsp.scid   = cpu_to_le16(chan->dcid);
458 			rsp.dcid   = cpu_to_le16(chan->scid);
459 			rsp.result = cpu_to_le16(result);
460 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
461 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
465 		l2cap_chan_del(chan, reason);
470 		l2cap_chan_del(chan, reason);
/* Default: nothing to signal, just mark the socket dead. */
474 		sock_set_flag(sk, SOCK_ZAPPED);
/* Map the channel type / PSM / security level to an HCI authentication
 * requirement. Raw channels ask for dedicated bonding, PSM 0x0001 (SDP)
 * never bonds (and is floored to BT_SECURITY_SDP), everything else uses
 * general bonding. */
479 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
481 	if (chan->chan_type == L2CAP_CHAN_RAW) {
482 		switch (chan->sec_level) {
483 		case BT_SECURITY_HIGH:
484 			return HCI_AT_DEDICATED_BONDING_MITM;
485 		case BT_SECURITY_MEDIUM:
486 			return HCI_AT_DEDICATED_BONDING;
488 			return HCI_AT_NO_BONDING;
490 	} else if (chan->psm == cpu_to_le16(0x0001)) {
491 		if (chan->sec_level == BT_SECURITY_LOW)
492 			chan->sec_level = BT_SECURITY_SDP;
494 		if (chan->sec_level == BT_SECURITY_HIGH)
495 			return HCI_AT_NO_BONDING_MITM;
497 			return HCI_AT_NO_BONDING;
499 		switch (chan->sec_level) {
500 		case BT_SECURITY_HIGH:
501 			return HCI_AT_GENERAL_BONDING_MITM;
502 		case BT_SECURITY_MEDIUM:
503 			return HCI_AT_GENERAL_BONDING;
505 			return HCI_AT_NO_BONDING;
510 /* Service level security */
/* Ask HCI to enforce the channel's security level on the underlying link. */
511 static inline int l2cap_check_security(struct l2cap_chan *chan)
513 	struct l2cap_conn *conn = chan->conn;
516 	auth_type = l2cap_get_auth_type(chan);
518 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier for @conn, wrapping
 * within the kernel-owned range. */
521 static u8 l2cap_get_ident(struct l2cap_conn *conn)
525 	/* Get next available identificator.
526 	 *    1 - 128 are used by kernel.
527 	 *  129 - 199 are reserved.
528 	 *  200 - 254 are used by utilities like l2ping, etc.
531 	spin_lock_bh(&conn->lock);
/* Wrap back to the start of the kernel range past 128. */
533 	if (++conn->tx_ident > 128)
538 	spin_unlock_bh(&conn->lock);
/* Build a signalling command PDU and send it on the connection's ACL link.
 * Uses non-flushable ACL packets when the controller supports them. */
543 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
545 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
548 	BT_DBG("code 0x%2.2x", code);
553 	if (lmp_no_flush_capable(conn->hcon->hdev))
554 		flags = ACL_START_NO_FLUSH;
/* Signalling always forces the link out of sniff/park. */
558 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
560 	hci_send_acl(conn->hcon, skb, flags);
/* Build and transmit an ERTM supervisory frame (S-frame) carrying
 * @control, optionally appending an FCS. Only sent while connected. */
563 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
566 	struct l2cap_hdr *lh;
567 	struct l2cap_conn *conn = chan->conn;
/* Header + 16-bit control field; FCS adds 2 more below when enabled. */
568 	int count, hlen = L2CAP_HDR_SIZE + 2;
571 	if (chan->state != BT_CONNECTED)
574 	if (chan->fcs == L2CAP_FCS_CRC16)
577 	BT_DBG("chan %p, control 0x%2.2x", chan, control);
579 	count = min_t(unsigned int, conn->mtu, hlen);
580 	control |= L2CAP_CTRL_FRAME_TYPE;
/* Piggy-back pending F/P bits onto this frame. */
582 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
583 		control |= L2CAP_CTRL_FINAL;
585 	if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
586 		control |= L2CAP_CTRL_POLL;
588 	skb = bt_skb_alloc(count, GFP_ATOMIC);
592 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
593 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
594 	lh->cid = cpu_to_le16(chan->dcid);
595 	put_unaligned_le16(control, skb_put(skb, 2));
597 	if (chan->fcs == L2CAP_FCS_CRC16) {
/* FCS covers everything before the FCS field itself. */
598 		u16 fcs = crc16(0, (u8 *)lh, count - 2);
599 		put_unaligned_le16(fcs, skb_put(skb, 2));
602 	if (lmp_no_flush_capable(conn->hcon->hdev))
603 		flags = ACL_START_NO_FLUSH;
607 	bt_cb(skb)->force_active = chan->force_active;
609 	hci_send_acl(chan->conn->hcon, skb, flags);
/* Send a Receiver-Ready or Receiver-Not-Ready S-frame depending on the
 * local-busy state, acknowledging up to buffer_seq. */
612 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
614 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
615 		control |= L2CAP_SUPER_RCV_NOT_READY;
616 		set_bit(CONN_RNR_SENT, &chan->conn_state);
618 		control |= L2CAP_SUPER_RCV_READY;
620 	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
622 	l2cap_send_sframe(chan, control);
/* True when no connect request from this channel is outstanding. */
625 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
627 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Start channel establishment on @conn. If the remote feature mask is
 * already known, send the connect request (after the security check);
 * otherwise fire an information request first and wait for it. */
630 static void l2cap_do_start(struct l2cap_chan *chan)
632 	struct l2cap_conn *conn = chan->conn;
634 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange in flight — the connect will be retried once done. */
635 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
638 		if (l2cap_check_security(chan) &&
639 				__l2cap_no_conn_pending(chan)) {
640 			struct l2cap_conn_req req;
641 			req.scid = cpu_to_le16(chan->scid);
644 			chan->ident = l2cap_get_ident(conn);
645 			set_bit(CONF_CONNECT_PEND, &chan->conf_state);
647 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
/* No feature mask yet: request it and arm the info timer. */
651 		struct l2cap_info_req req;
652 		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
654 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
655 		conn->info_ident = l2cap_get_ident(conn);
657 		mod_timer(&conn->info_timer, jiffies +
658 					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
660 		l2cap_send_cmd(conn, conn->info_ident,
661 					L2CAP_INFO_REQ, sizeof(req), &req);
/* Non-zero when @mode (ERTM/streaming) is supported both locally and by
 * the remote feature mask @feat_mask. */
665 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
667 	u32 local_feat_mask = l2cap_feat_mask;
669 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
672 	case L2CAP_MODE_ERTM:
673 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
674 	case L2CAP_MODE_STREAMING:
675 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP disconnect request for @chan and move it to BT_DISCONN,
 * first quiescing the ERTM retransmission timers. */
681 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
684 	struct l2cap_disconn_req req;
691 	if (chan->mode == L2CAP_MODE_ERTM) {
692 		__clear_retrans_timer(chan);
693 		__clear_monitor_timer(chan);
694 		__clear_ack_timer(chan);
697 	req.dcid = cpu_to_le16(chan->dcid);
698 	req.scid = cpu_to_le16(chan->scid);
699 	l2cap_send_cmd(conn, l2cap_get_ident(conn),
700 			L2CAP_DISCONN_REQ, sizeof(req), &req);
702 	l2cap_state_change(chan, BT_DISCONN);
706 /* ---- L2CAP connections ---- */
/* Kick channel establishment for every connection-oriented channel on
 * @conn: send connect requests for BT_CONNECT channels and answer pending
 * remote requests for BT_CONNECT2 channels. Walks conn->chan_l under the
 * read lock, dropping it around l2cap_chan_close() which unlinks entries. */
707 static void l2cap_conn_start(struct l2cap_conn *conn)
709 	struct l2cap_chan *chan, *tmp;
711 	BT_DBG("conn %p", conn);
713 	read_lock(&conn->chan_lock);
715 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
716 		struct sock *sk = chan->sk;
720 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
725 		if (chan->state == BT_CONNECT) {
726 			struct l2cap_conn_req req;
728 			if (!l2cap_check_security(chan) ||
729 					!__l2cap_no_conn_pending(chan)) {
/* Channel requires a mode the peer does not support and cannot
 * fall back (STATE2_DEVICE) — abort it. */
734 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
735 					&& test_bit(CONF_STATE2_DEVICE,
736 					&chan->conf_state)) {
737 				/* l2cap_chan_close() calls list_del(chan)
738 				 * so release the lock */
739 				read_unlock(&conn->chan_lock);
740 				l2cap_chan_close(chan, ECONNRESET);
741 				read_lock(&conn->chan_lock);
746 			req.scid = cpu_to_le16(chan->scid);
749 			chan->ident = l2cap_get_ident(conn);
750 			set_bit(CONF_CONNECT_PEND, &chan->conf_state);
752 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
755 		} else if (chan->state == BT_CONNECT2) {
756 			struct l2cap_conn_rsp rsp;
758 			rsp.scid = cpu_to_le16(chan->dcid);
759 			rsp.dcid = cpu_to_le16(chan->scid);
761 			if (l2cap_check_security(chan)) {
/* Deferred setup: report "pending, authorization" and wake
 * the listening parent so userspace can accept. */
762 				if (bt_sk(sk)->defer_setup) {
763 					struct sock *parent = bt_sk(sk)->parent;
764 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
765 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
767 					parent->sk_data_ready(parent, 0);
770 					l2cap_state_change(chan, BT_CONFIG);
771 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
772 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not yet satisfied: pending, authentication. */
775 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
776 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
779 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only start configuration once, and only on success. */
782 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
783 					rsp.result != L2CAP_CR_SUCCESS) {
788 			set_bit(CONF_REQ_SENT, &chan->conf_state);
789 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
790 						l2cap_build_conf_req(chan, buf), buf);
791 			chan->num_conf_req++;
797 	read_unlock(&conn->chan_lock);
800 /* Find socket with cid and source bdaddr.
801  * Returns closest match, locked.
/* Exact (cid, src) match wins immediately; otherwise the last BDADDR_ANY
 * listener with a matching cid is remembered as the closest match. */
803 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
805 	struct l2cap_chan *c, *c1 = NULL;
807 	read_lock(&chan_list_lock);
809 	list_for_each_entry(c, &chan_list, global_l) {
810 		struct sock *sk = c->sk;
812 		if (state && c->state != state)
815 		if (c->scid == cid) {
817 			if (!bacmp(&bt_sk(sk)->src, src)) {
818 				read_unlock(&chan_list_lock);
823 			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
828 	read_unlock(&chan_list_lock);
/* Handle a new incoming LE link: if a socket is listening on the LE data
 * CID, spawn a child channel, attach it to @conn and wake the parent. */
833 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
835 	struct sock *parent, *sk;
836 	struct l2cap_chan *chan, *pchan;
840 	/* Check if we have socket listening on cid */
841 	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
848 	bh_lock_sock(parent);
850 	/* Check for backlog size */
851 	if (sk_acceptq_is_full(parent)) {
852 		BT_DBG("backlog full %d", parent->sk_ack_backlog);
856 	chan = pchan->ops->new_connection(pchan->data);
862 	write_lock_bh(&conn->chan_lock);
/* Keep the HCI link alive while the channel exists. */
864 	hci_conn_hold(conn->hcon);
865 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
867 	bacpy(&bt_sk(sk)->src, conn->src);
868 	bacpy(&bt_sk(sk)->dst, conn->dst);
870 	bt_accept_enqueue(parent, sk);
872 	__l2cap_chan_add(conn, chan);
874 	__set_chan_timer(chan, sk->sk_sndtimeo);
876 	l2cap_state_change(chan, BT_CONNECTED);
877 	parent->sk_data_ready(parent, 0);
879 	write_unlock_bh(&conn->chan_lock);
882 	bh_unlock_sock(parent);
/* Mark the channel fully established: clear configuration state and the
 * channel timer, go BT_CONNECTED, and notify the socket (and its parent,
 * for accepted children). */
885 static void l2cap_chan_ready(struct sock *sk)
887 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
888 	struct sock *parent = bt_sk(sk)->parent;
890 	BT_DBG("sk %p, parent %p", sk, parent);
892 	chan->conf_state = 0;
893 	__clear_chan_timer(chan);
895 	l2cap_state_change(chan, BT_CONNECTED);
896 	sk->sk_state_change(sk);
899 		parent->sk_data_ready(parent, 0);
/* The HCI link for @conn is up: handle LE accept/security, then walk all
 * channels — non-connection-oriented ones go straight to BT_CONNECTED,
 * BT_CONNECT ones start the L2CAP connect procedure. */
902 static void l2cap_conn_ready(struct l2cap_conn *conn)
904 	struct l2cap_chan *chan;
905 	struct hci_conn *hcon = conn->hcon;
907 	BT_DBG("conn %p", conn);
/* Incoming LE link: hand off to the LE accept path. */
909 	if (!hcon->out && hcon->type == LE_LINK)
910 		l2cap_le_conn_ready(conn);
/* Outgoing LE link: start SMP pairing at the pending level. */
912 	if (hcon->out && hcon->type == LE_LINK)
913 		smp_conn_security(hcon, hcon->pending_sec_level);
915 	read_lock(&conn->chan_lock);
917 	list_for_each_entry(chan, &conn->chan_l, list) {
918 		struct sock *sk = chan->sk;
922 		if (hcon->type == LE_LINK) {
923 			if (smp_conn_security(hcon, chan->sec_level))
924 				l2cap_chan_ready(sk);
926 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
927 			__clear_chan_timer(chan);
928 			l2cap_state_change(chan, BT_CONNECTED);
929 			sk->sk_state_change(sk);
931 		} else if (chan->state == BT_CONNECT)
932 			l2cap_do_start(chan);
937 	read_unlock(&conn->chan_lock);
940 /* Notify sockets that we cannot guaranty reliability anymore */
/* Report @err to every channel on @conn that requested reliable delivery. */
941 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
943 	struct l2cap_chan *chan;
945 	BT_DBG("conn %p", conn);
947 	read_lock(&conn->chan_lock);
949 	list_for_each_entry(chan, &conn->chan_l, list) {
950 		struct sock *sk = chan->sk;
952 		if (chan->force_reliable)
956 	read_unlock(&conn->chan_lock);
/* Information-request timer expired: give up on the feature exchange,
 * mark it done and proceed with channel establishment anyway. */
959 static void l2cap_info_timeout(unsigned long arg)
961 	struct l2cap_conn *conn = (void *) arg;
963 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
964 	conn->info_ident = 0;
966 	l2cap_conn_start(conn);
/* Tear down the L2CAP connection attached to @hcon: delete every channel
 * with error @err, stop the info/security timers, destroy SMP state and
 * detach from the HCI connection. */
969 static void l2cap_conn_del(struct hci_conn *hcon, int err)
971 	struct l2cap_conn *conn = hcon->l2cap_data;
972 	struct l2cap_chan *chan, *l;
978 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop a partially reassembled inbound frame, if any. */
980 	kfree_skb(conn->rx_skb);
983 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
986 		l2cap_chan_del(chan, err);
988 		chan->ops->close(chan->data);
991 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
992 		del_timer_sync(&conn->info_timer);
994 	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
995 		del_timer(&conn->security_timer);
996 		smp_chan_destroy(conn);
999 	hcon->l2cap_data = NULL;
/* SMP security timer expired: drop the whole connection with ETIMEDOUT. */
1003 static void security_timeout(unsigned long arg)
1005 	struct l2cap_conn *conn = (void *) arg;
1007 	l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) L2CAP connection object for @hcon and
 * initialise its MTU, addresses, locks, channel list and per-type timer
 * (security timer for LE, info timer for ACL). */
1010 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1012 	struct l2cap_conn *conn = hcon->l2cap_data;
1017 	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1021 	hcon->l2cap_data = conn;
1024 	BT_DBG("hcon %p conn %p", hcon, conn);
/* LE links may have their own MTU; fall back to the ACL MTU. */
1026 	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1027 		conn->mtu = hcon->hdev->le_mtu;
1029 		conn->mtu = hcon->hdev->acl_mtu;
1031 	conn->src = &hcon->hdev->bdaddr;
1032 	conn->dst = &hcon->dst;
1034 	conn->feat_mask = 0;
1036 	spin_lock_init(&conn->lock);
1037 	rwlock_init(&conn->chan_lock);
1039 	INIT_LIST_HEAD(&conn->chan_l);
1041 	if (hcon->type == LE_LINK)
1042 		setup_timer(&conn->security_timer, security_timeout,
1043 						(unsigned long) conn);
1045 		setup_timer(&conn->info_timer, l2cap_info_timeout,
1046 						(unsigned long) conn);
/* 0x13: remote user terminated connection (default disconnect reason). */
1048 	conn->disc_reason = 0x13;
/* Locked wrapper around __l2cap_chan_add(). */
1053 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1055 	write_lock_bh(&conn->chan_lock);
1056 	__l2cap_chan_add(conn, chan);
1057 	write_unlock_bh(&conn->chan_lock);
1060 /* ---- Socket interface ---- */
1062 /* Find socket with psm and source bdaddr.
1063  * Returns closest match.
/* Exact (psm, src) match wins immediately; otherwise the last BDADDR_ANY
 * listener with a matching psm is remembered as the closest match. */
1065 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1067 	struct l2cap_chan *c, *c1 = NULL;
1069 	read_lock(&chan_list_lock);
1071 	list_for_each_entry(c, &chan_list, global_l) {
1072 		struct sock *sk = c->sk;
1074 		if (state && c->state != state)
1077 		if (c->psm == psm) {
1079 			if (!bacmp(&bt_sk(sk)->src, src)) {
1080 				read_unlock(&chan_list_lock);
1085 			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1090 	read_unlock(&chan_list_lock);
/* Initiate an outgoing connection for @chan: route to a local adapter,
 * create the HCI link (LE when dcid is the LE data CID, ACL otherwise),
 * attach the channel and either start L2CAP setup immediately (link
 * already up) or wait for the connect-complete event. Returns 0 or a
 * negative errno. */
1095 int l2cap_chan_connect(struct l2cap_chan *chan)
1097 	struct sock *sk = chan->sk;
1098 	bdaddr_t *src = &bt_sk(sk)->src;
1099 	bdaddr_t *dst = &bt_sk(sk)->dst;
1100 	struct l2cap_conn *conn;
1101 	struct hci_conn *hcon;
1102 	struct hci_dev *hdev;
1106 	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1109 	hdev = hci_get_route(dst, src);
1111 		return -EHOSTUNREACH;
1113 	hci_dev_lock_bh(hdev);
1115 	auth_type = l2cap_get_auth_type(chan);
1117 	if (chan->dcid == L2CAP_CID_LE_DATA)
1118 		hcon = hci_connect(hdev, LE_LINK, dst,
1119 					chan->sec_level, auth_type);
1121 		hcon = hci_connect(hdev, ACL_LINK, dst,
1122 					chan->sec_level, auth_type);
1125 		err = PTR_ERR(hcon);
1129 	conn = l2cap_conn_add(hcon, 0);
1136 	/* Update source addr of the socket */
1137 	bacpy(src, conn->src);
1139 	l2cap_chan_add(conn, chan);
1141 	l2cap_state_change(chan, BT_CONNECT);
1142 	__set_chan_timer(chan, sk->sk_sndtimeo);
1144 	if (hcon->state == BT_CONNECTED) {
/* Raw/connectionless channels are ready as soon as the link is. */
1145 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1146 			__clear_chan_timer(chan);
1147 			if (l2cap_check_security(chan))
1148 				l2cap_state_change(chan, BT_CONNECTED);
1150 			l2cap_do_start(chan);
1156 	hci_dev_unlock_bh(hdev);
/* Sleep (interruptibly) until every transmitted ERTM frame has been
 * acknowledged or the connection goes away. Returns 0, a socket error,
 * or the signal errno. Caller holds the socket lock — TODO confirm
 * whether it is released around schedule_timeout() (not visible here). */
1161 int __l2cap_wait_ack(struct sock *sk)
1163 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1164 	DECLARE_WAITQUEUE(wait, current);
1168 	add_wait_queue(sk_sleep(sk), &wait);
1169 	set_current_state(TASK_INTERRUPTIBLE);
1170 	while (chan->unacked_frames > 0 && chan->conn) {
1174 		if (signal_pending(current)) {
1175 			err = sock_intr_errno(timeo);
1180 		timeo = schedule_timeout(timeo);
1182 		set_current_state(TASK_INTERRUPTIBLE);
1184 		err = sock_error(sk);
1188 	set_current_state(TASK_RUNNING);
1189 	remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: the peer has not responded to our poll. Abort the
 * channel once remote_max_tx retries are exhausted, otherwise poll again. */
1193 static void l2cap_monitor_timeout(unsigned long arg)
1195 	struct l2cap_chan *chan = (void *) arg;
1196 	struct sock *sk = chan->sk;
1198 	BT_DBG("chan %p", chan);
1201 	if (chan->retry_count >= chan->remote_max_tx) {
1202 		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1207 	chan->retry_count++;
1208 	__set_monitor_timer(chan);
1210 	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer: no acknowledgement arrived in time. Start the
 * monitor/poll procedure (retry 1, WAIT_F set, poll the peer). */
1214 static void l2cap_retrans_timeout(unsigned long arg)
1216 	struct l2cap_chan *chan = (void *) arg;
1217 	struct sock *sk = chan->sk;
1219 	BT_DBG("chan %p", chan);
1222 	chan->retry_count = 1;
1223 	__set_monitor_timer(chan);
1225 	set_bit(CONN_WAIT_F, &chan->conn_state);
1227 	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Free frames at the head of tx_q that the peer has acknowledged, i.e.
 * everything before expected_ack_seq; stop the retransmission timer once
 * nothing remains unacknowledged. */
1231 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1233 	struct sk_buff *skb;
1235 	while ((skb = skb_peek(&chan->tx_q)) &&
1236 			chan->unacked_frames) {
/* Head frame is the first unacknowledged one — done. */
1237 		if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1240 		skb = skb_dequeue(&chan->tx_q);
1243 		chan->unacked_frames--;
1246 	if (!chan->unacked_frames)
1247 		__clear_retrans_timer(chan);
/* Hand a fully built L2CAP frame to HCI for transmission, choosing
 * non-flushable ACL packets when the channel is not flushable and the
 * controller supports it. */
1250 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1252 	struct hci_conn *hcon = chan->conn->hcon;
1255 	BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1257 	if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1258 		flags = ACL_START_NO_FLUSH;
1262 	bt_cb(skb)->force_active = chan->force_active;
1263 	hci_send_acl(hcon, skb, flags);
/* Streaming mode transmit: drain tx_q, stamping each frame with the next
 * TX sequence number (modulo-64 window) and an FCS when enabled. No
 * retransmission state is kept in this mode. */
1266 static void l2cap_streaming_send(struct l2cap_chan *chan)
1268 	struct sk_buff *skb;
1271 	while ((skb = skb_dequeue(&chan->tx_q))) {
1272 		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1273 		control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1274 		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1276 		if (chan->fcs == L2CAP_FCS_CRC16) {
/* FCS covers the frame up to (but not including) its own 2 bytes. */
1277 			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1278 			put_unaligned_le16(fcs, skb->data + skb->len - 2);
1281 		l2cap_do_send(chan, skb);
1283 		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* Retransmit the queued I-frame whose TX sequence number is @tx_seq.
 * The frame is cloned, its control field rebuilt (SAR bits kept, ReqSeq
 * refreshed, F bit piggy-backed) and the FCS recomputed. Aborts the
 * channel if the frame already hit remote_max_tx retries. */
1287 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1289 	struct sk_buff *skb, *tx_skb;
/* Linear scan of tx_q for the frame with the requested sequence number. */
1292 	skb = skb_peek(&chan->tx_q);
1297 		if (bt_cb(skb)->tx_seq == tx_seq)
1300 		if (skb_queue_is_last(&chan->tx_q, skb))
1303 	} while ((skb = skb_queue_next(&chan->tx_q, skb)));
1305 	if (chan->remote_max_tx &&
1306 			bt_cb(skb)->retries == chan->remote_max_tx) {
1307 		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Clone shares the data buffer, so the control/FCS rewrites below are
 * visible through both skb and tx_skb. */
1311 	tx_skb = skb_clone(skb, GFP_ATOMIC);
1312 	bt_cb(skb)->retries++;
1313 	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1314 	control &= L2CAP_CTRL_SAR;
1316 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1317 		control |= L2CAP_CTRL_FINAL;
1319 	control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1320 			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1322 	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1324 	if (chan->fcs == L2CAP_FCS_CRC16) {
1325 		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1326 		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1329 	l2cap_do_send(chan, tx_skb);
/* ERTM transmit loop: send frames from tx_send_head while the transmit
 * window has room, stamping control fields, FCS and sequence numbers,
 * arming the retransmission timer and tracking unacked/sent counters.
 * Returns a count/status — exact return value lines not visible here. */
1332 static int l2cap_ertm_send(struct l2cap_chan *chan)
1334 	struct sk_buff *skb, *tx_skb;
1338 	if (chan->state != BT_CONNECTED)
1341 	while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1343 		if (chan->remote_max_tx &&
1344 				bt_cb(skb)->retries == chan->remote_max_tx) {
1345 			l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1349 		tx_skb = skb_clone(skb, GFP_ATOMIC);
1351 		bt_cb(skb)->retries++;
1353 		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1354 		control &= L2CAP_CTRL_SAR;
1356 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1357 			control |= L2CAP_CTRL_FINAL;
1359 		control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1360 				| (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1361 		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1364 		if (chan->fcs == L2CAP_FCS_CRC16) {
/* NOTE(review): reads/writes via skb->data while sending tx_skb —
 * correct only because skb_clone() shares the data buffer; confirm
 * against upstream, which uses tx_skb->data here. */
1365 			fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1366 			put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1369 		l2cap_do_send(chan, tx_skb);
1371 		__set_retrans_timer(chan);
1373 		bt_cb(skb)->tx_seq = chan->next_tx_seq;
1374 		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* First transmission of this frame: it is now awaiting an ack. */
1376 		if (bt_cb(skb)->retries == 1)
1377 			chan->unacked_frames++;
1379 		chan->frames_sent++;
1381 		if (skb_queue_is_last(&chan->tx_q, skb))
1382 			chan->tx_send_head = NULL;
1384 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind transmission to the first unacknowledged frame and resend the
 * whole window via l2cap_ertm_send(). */
1392 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1396 	if (!skb_queue_empty(&chan->tx_q))
1397 		chan->tx_send_head = chan->tx_q.next;
1399 	chan->next_tx_seq = chan->expected_ack_seq;
1400 	ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR when locally busy; otherwise try
 * to piggy-back the ack on pending I-frames, falling back to an explicit
 * RR S-frame when nothing was sent. */
1404 static void l2cap_send_ack(struct l2cap_chan *chan)
1408 	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1410 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1411 		control |= L2CAP_SUPER_RCV_NOT_READY;
1412 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1413 		l2cap_send_sframe(chan, control);
/* Data went out — the I-frames carry the acknowledgement. */
1417 	if (l2cap_ertm_send(chan) > 0)
1420 	control |= L2CAP_SUPER_RCV_READY;
1421 	l2cap_send_sframe(chan, control);
/* Send a Select-Reject S-frame (with F bit) for the last entry on the
 * SREJ list. Assumes srej_l is non-empty — TODO confirm callers. */
1424 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1426 	struct srej_list *tail;
1429 	control = L2CAP_SUPER_SELECT_REJECT;
1430 	control |= L2CAP_CTRL_FINAL;
1432 	tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1433 	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1435 	l2cap_send_sframe(chan, control);
/* Copy @len bytes of user iovec data into @skb: the first @count bytes go
 * into the skb head, the remainder into a chain of MTU-sized fragment
 * skbs hung off frag_list. */
1438 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1440 	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1441 	struct sk_buff **frag;
1444 	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1450 	/* Continuation fragments (no L2CAP header) */
1451 	frag = &skb_shinfo(skb)->frag_list;
1453 		count = min_t(unsigned int, conn->mtu, len);
1455 		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1458 		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1464 		frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM field
 * followed by @len bytes copied from the user's iovec. Returns the skb or
 * an ERR_PTR. */
1470 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1472 	struct sock *sk = chan->sk;
1473 	struct l2cap_conn *conn = chan->conn;
1474 	struct sk_buff *skb;
/* Header + 2 bytes for the PSM prefix of connectionless data. */
1475 	int err, count, hlen = L2CAP_HDR_SIZE + 2;
1476 	struct l2cap_hdr *lh;
1478 	BT_DBG("sk %p len %d", sk, (int)len);
1480 	count = min_t(unsigned int, (conn->mtu - hlen), len);
1481 	skb = bt_skb_send_alloc(sk, count + hlen,
1482 			msg->msg_flags & MSG_DONTWAIT, &err);
1484 		return ERR_PTR(err);
1486 	/* Create L2CAP header */
1487 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1488 	lh->cid = cpu_to_le16(chan->dcid);
1489 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1490 	put_unaligned_le16(chan->psm, skb_put(skb, 2));
1492 	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1493 	if (unlikely(err < 0)) {
1495 		return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: bare L2CAP header followed by @len
 * bytes copied from the user's iovec. Returns the skb or an ERR_PTR. */
1500 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1502 	struct sock *sk = chan->sk;
1503 	struct l2cap_conn *conn = chan->conn;
1504 	struct sk_buff *skb;
1505 	int err, count, hlen = L2CAP_HDR_SIZE;
1506 	struct l2cap_hdr *lh;
1508 	BT_DBG("sk %p len %d", sk, (int)len);
1510 	count = min_t(unsigned int, (conn->mtu - hlen), len);
1511 	skb = bt_skb_send_alloc(sk, count + hlen,
1512 			msg->msg_flags & MSG_DONTWAIT, &err);
1514 		return ERR_PTR(err);
1516 	/* Create L2CAP header */
1517 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1518 	lh->cid = cpu_to_le16(chan->dcid);
1519 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1521 	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1522 	if (unlikely(err < 0)) {
1524 		return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 16-bit control word,
 * optional 16-bit SDU length (for SAR start frames), payload from the
 * user's iovec, and a zero FCS placeholder (filled in at transmit time).
 * Returns the skb or an ERR_PTR. */
1529 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1530 						struct msghdr *msg, size_t len,
1531 						u16 control, u16 sdulen)
1533 	struct sock *sk = chan->sk;
1534 	struct l2cap_conn *conn = chan->conn;
1535 	struct sk_buff *skb;
/* Header + control field; SDU-length and FCS adjust hlen below. */
1536 	int err, count, hlen = L2CAP_HDR_SIZE + 2;
1537 	struct l2cap_hdr *lh;
1539 	BT_DBG("sk %p len %d", sk, (int)len);
1542 		return ERR_PTR(-ENOTCONN);
1547 	if (chan->fcs == L2CAP_FCS_CRC16)
1550 	count = min_t(unsigned int, (conn->mtu - hlen), len);
1551 	skb = bt_skb_send_alloc(sk, count + hlen,
1552 			msg->msg_flags & MSG_DONTWAIT, &err);
1554 		return ERR_PTR(err);
1556 	/* Create L2CAP header */
1557 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1558 	lh->cid = cpu_to_le16(chan->dcid);
1559 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1560 	put_unaligned_le16(control, skb_put(skb, 2));
/* SAR start frames carry the total SDU length after the control word. */
1562 		put_unaligned_le16(sdulen, skb_put(skb, 2));
1564 	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1565 	if (unlikely(err < 0)) {
1567 		return ERR_PTR(err);
/* Reserve space for the FCS; the real value is computed when sent. */
1570 	if (chan->fcs == L2CAP_FCS_CRC16)
1571 		put_unaligned_le16(0, skb_put(skb, 2));
1573 	bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than the remote MPS into a chain of I-frames
 * (SDU_START carrying the total length, then SDU_CONTINUE frames, then
 * SDU_END) built on a private queue so a mid-segmentation failure can
 * discard everything; on success the queue is spliced onto chan->tx_q.
 * NOTE(review): the surrounding while-loop, size/buflen declarations and
 * final return are elided from this listing.
 */
1577 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1579 struct sk_buff *skb;
1580 struct sk_buff_head sar_queue;
1584 skb_queue_head_init(&sar_queue);
1585 control = L2CAP_SDU_START;
/* Start frame: payload = remote MPS, sdulen = total SDU length */
1586 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1588 return PTR_ERR(skb);
1590 __skb_queue_tail(&sar_queue, skb);
1591 len -= chan->remote_mps;
1592 size += chan->remote_mps;
/* Middle vs. final segment selection (loop header elided) */
1597 if (len > chan->remote_mps) {
1598 control = L2CAP_SDU_CONTINUE;
1599 buflen = chan->remote_mps;
1601 control = L2CAP_SDU_END;
1605 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* On failure drop every segment built so far — all-or-nothing */
1607 skb_queue_purge(&sar_queue);
1608 return PTR_ERR(skb);
1611 __skb_queue_tail(&sar_queue, skb);
1615 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
/* If nothing was pending, transmission starts at the first new frame */
1616 if (chan->tx_send_head == NULL)
1617 chan->tx_send_head = sar_queue.next;
/*
 * Top-level channel transmit entry point.  Dispatches on channel type and
 * mode: connectionless channels and basic mode send a single PDU
 * immediately; ERTM/streaming queue one I-frame or SAR-segment the SDU,
 * then kick the appropriate transmit engine.
 * Returns bytes accepted or a negative errno (returns/EMSGSIZE paths are
 * elided in this listing).
 */
1622 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1624 struct sk_buff *skb;
1628 /* Connectionless channel */
1629 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1630 skb = l2cap_create_connless_pdu(chan, msg, len);
1632 return PTR_ERR(skb);
1634 l2cap_do_send(chan, skb);
1638 switch (chan->mode) {
1639 case L2CAP_MODE_BASIC:
1640 /* Check outgoing MTU */
1641 if (len > chan->omtu)
1644 /* Create a basic PDU */
1645 skb = l2cap_create_basic_pdu(chan, msg, len);
1647 return PTR_ERR(skb);
1649 l2cap_do_send(chan, skb);
1653 case L2CAP_MODE_ERTM:
1654 case L2CAP_MODE_STREAMING:
1655 /* Entire SDU fits into one PDU */
1656 if (len <= chan->remote_mps) {
1657 control = L2CAP_SDU_UNSEGMENTED;
1658 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1661 return PTR_ERR(skb);
1663 __skb_queue_tail(&chan->tx_q, skb);
1665 if (chan->tx_send_head == NULL)
1666 chan->tx_send_head = skb;
1669 /* Segment SDU into multiples PDUs */
1670 err = l2cap_sar_segment_sdu(chan, msg, len);
/* Streaming mode pushes frames out immediately, no acknowledgements */
1675 if (chan->mode == L2CAP_MODE_STREAMING) {
1676 l2cap_streaming_send(chan);
/* ERTM: hold off while remote is busy or we await an F-bit response */
1681 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1682 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1687 err = l2cap_ertm_send(chan);
1694 BT_DBG("bad state %1.1x", chan->mode);
1701 /* Copy frame to all raw sockets on that connection */
/*
 * Clone an incoming frame to every RAW-type channel on this connection
 * (used for signalling sniffing).  Runs under the connection channel-list
 * read lock; clones use GFP_ATOMIC accordingly.
 */
1702 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1704 struct sk_buff *nskb;
1705 struct l2cap_chan *chan;
1707 BT_DBG("conn %p", conn);
1709 read_lock(&conn->chan_lock);
1710 list_for_each_entry(chan, &conn->chan_l, list) {
1711 struct sock *sk = chan->sk;
/* Only raw channels get a copy; others are skipped (continue elided) */
1712 if (chan->chan_type != L2CAP_CHAN_RAW)
1715 /* Don't send frame to the socket it came from */
1718 nskb = skb_clone(skb, GFP_ATOMIC);
/* If the channel refuses the clone it must be freed (kfree_skb elided) */
1722 if (chan->ops->recv(chan->data, nskb))
1725 read_unlock(&conn->chan_lock);
1728 /* ---- L2CAP signalling commands ---- */
/*
 * Allocate and fill an skb for a signalling command: L2CAP header (CID
 * chosen by link type: LE vs. BR/EDR signalling channel), command header,
 * then the payload — fragmented across frag_list entries when it exceeds
 * the connection MTU.  Returns NULL on allocation failure (guards and the
 * fragment loop header are elided in this listing).
 */
1729 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1730 u8 code, u8 ident, u16 dlen, void *data)
1732 struct sk_buff *skb, **frag;
1733 struct l2cap_cmd_hdr *cmd;
1734 struct l2cap_hdr *lh;
1737 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1738 conn, code, ident, dlen);
1740 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1741 count = min_t(unsigned int, conn->mtu, len);
1743 skb = bt_skb_alloc(count, GFP_ATOMIC);
1747 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1748 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling CID differs between LE and BR/EDR links */
1750 if (conn->hcon->type == LE_LINK)
1751 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1753 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1755 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1758 cmd->len = cpu_to_le16(dlen);
/* First fragment: whatever payload fits after the two headers */
1761 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1762 memcpy(skb_put(skb, count), data, count);
1768 /* Continuation fragments (no L2CAP header) */
1769 frag = &skb_shinfo(skb)->frag_list;
1771 count = min_t(unsigned int, conn->mtu, len);
1773 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1777 memcpy(skb_put(*frag, count), data, count);
1782 frag = &(*frag)->next;
/*
 * Decode one configuration option from *ptr, returning its encoded length
 * (L2CAP_CONF_OPT_SIZE + opt->len) so callers can walk the option list.
 * 1/2/4-byte values are read unaligned; larger options return a pointer
 * to the raw bytes in *val.  The switch on opt->len and *ptr advance are
 * elided in this listing.
 */
1792 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1794 struct l2cap_conf_opt *opt = *ptr;
1797 len = L2CAP_CONF_OPT_SIZE + opt->len;
1805 *val = *((u8 *) opt->val);
1809 *val = get_unaligned_le16(opt->val);
1813 *val = get_unaligned_le32(opt->val);
/* Option longer than 4 bytes: hand back a pointer, not a value */
1817 *val = (unsigned long) opt->val;
1821 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option at *ptr and advance *ptr past it.
 * For len 1/2/4 `val` is the value itself (stored unaligned); otherwise
 * `val` is treated as a pointer to `len` bytes to copy.  The switch on
 * len and the opt->type/len assignments are elided in this listing.
 */
1825 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1827 struct l2cap_conf_opt *opt = *ptr;
1829 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1836 *((u8 *) opt->val) = val;
1840 put_unaligned_le16(val, opt->val);
1844 put_unaligned_le32(val, opt->val);
/* val is a pointer for variable-length options (e.g. RFC struct) */
1848 memcpy(opt->val, (void *) val, len);
/* Advance the write cursor past header + payload */
1852 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * ERTM acknowledgement timer callback: when the timer fires, send any
 * pending acknowledgement for received I-frames.  `arg` is the channel
 * pointer stashed by setup_timer() in l2cap_ertm_init().  Runs in timer
 * (softirq) context, hence the bh socket lock.
 */
1855 static void l2cap_ack_timeout(unsigned long arg)
1857 struct l2cap_chan *chan = (void *) arg;
1859 bh_lock_sock(chan->sk);
1860 l2cap_send_ack(chan);
1861 bh_unlock_sock(chan->sk);
/*
 * Initialise per-channel ERTM state when a channel enters ERTM mode:
 * reset sequence/ack bookkeeping, arm the retransmission, monitor and
 * ack timers with the channel as callback argument, initialise the
 * selective-reject queue/list, and route socket backlog processing
 * through the ERTM receive path.
 */
1864 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1866 struct sock *sk = chan->sk;
1868 chan->expected_ack_seq = 0;
1869 chan->unacked_frames = 0;
1870 chan->buffer_seq = 0;
1871 chan->num_acked = 0;
1872 chan->frames_sent = 0;
1874 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1875 (unsigned long) chan);
1876 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1877 (unsigned long) chan);
1878 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
/* Out-of-sequence frames held here until the SREJ'd frame arrives */
1880 skb_queue_head_init(&chan->srej_q);
1882 INIT_LIST_HEAD(&chan->srej_l);
/* Frames queued on the socket backlog go through the ERTM state machine */
1885 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/*
 * Pick the channel mode to actually use: keep ERTM/streaming only if the
 * remote's feature mask supports it, otherwise fall back to basic mode.
 * The switch statement and the "return mode" success path are elided in
 * this listing.
 */
1888 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1891 case L2CAP_MODE_STREAMING:
1892 case L2CAP_MODE_ERTM:
1893 if (l2cap_mode_supported(mode, remote_feat_mask))
1897 return L2CAP_MODE_BASIC;
/*
 * Build an outgoing Configuration Request into `data`: optionally an MTU
 * option (when imtu differs from the default) and, per mode, an RFC
 * option describing basic/ERTM/streaming parameters, plus an FCS option
 * when we want to disable CRC16 and the remote advertised the feature.
 * Returns the total request length (return ptr - data, elided).
 */
1901 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1903 struct l2cap_conf_req *req = data;
1904 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1905 void *ptr = req->data;
1907 BT_DBG("chan %p", chan);
/* Only (re)negotiate the mode on the very first request */
1909 if (chan->num_conf_req || chan->num_conf_rsp)
1912 switch (chan->mode) {
1913 case L2CAP_MODE_STREAMING:
1914 case L2CAP_MODE_ERTM:
/* A "state 2" device insists on its configured mode */
1915 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
1920 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
1925 if (chan->imtu != L2CAP_DEFAULT_MTU)
1926 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1928 switch (chan->mode) {
1929 case L2CAP_MODE_BASIC:
/* Remote knows neither ERTM nor streaming: RFC option is pointless */
1930 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1931 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1934 rfc.mode = L2CAP_MODE_BASIC;
1936 rfc.max_transmit = 0;
1937 rfc.retrans_timeout = 0;
1938 rfc.monitor_timeout = 0;
1939 rfc.max_pdu_size = 0;
1941 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1942 (unsigned long) &rfc);
1945 case L2CAP_MODE_ERTM:
1946 rfc.mode = L2CAP_MODE_ERTM;
1947 rfc.txwin_size = chan->tx_win;
1948 rfc.max_transmit = chan->max_tx;
1949 rfc.retrans_timeout = 0;
1950 rfc.monitor_timeout = 0;
1951 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Clamp PDU size so frame + overhead (10 bytes) fits the link MTU */
1952 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1953 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1955 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1956 (unsigned long) &rfc);
1958 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Ask to drop the FCS if we don't want it and remote agreed before */
1961 if (chan->fcs == L2CAP_FCS_NONE ||
1962 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1963 chan->fcs = L2CAP_FCS_NONE;
1964 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1968 case L2CAP_MODE_STREAMING:
1969 rfc.mode = L2CAP_MODE_STREAMING;
1971 rfc.max_transmit = 0;
1972 rfc.retrans_timeout = 0;
1973 rfc.monitor_timeout = 0;
1974 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1975 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1976 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1978 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1979 (unsigned long) &rfc);
1981 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1984 if (chan->fcs == L2CAP_FCS_NONE ||
1985 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1986 chan->fcs = L2CAP_FCS_NONE;
1987 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1992 req->dcid = cpu_to_le16(chan->dcid);
1993 req->flags = cpu_to_le16(0);
/*
 * Parse the accumulated Configuration Request (chan->conf_req) and build
 * the response into `data`.  First pass: walk the option list recording
 * MTU/flush-timeout/RFC/FCS and flagging unknown non-hint options.  Then
 * resolve the channel mode against the remote's proposal, and on success
 * fill in our accepted output options (MTU echo, RFC with clamped MPS and
 * default timeouts).  Returns the response length (ptr - data, elided).
 */
1998 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2000 struct l2cap_conf_rsp *rsp = data;
2001 void *ptr = rsp->data;
2002 void *req = chan->conf_req;
2003 int len = chan->conf_len;
2004 int type, hint, olen;
2006 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2007 u16 mtu = L2CAP_DEFAULT_MTU;
2008 u16 result = L2CAP_CONF_SUCCESS;
2010 BT_DBG("chan %p", chan);
2012 while (len >= L2CAP_CONF_OPT_SIZE) {
2013 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; mandatory unknowns force CONF_UNKNOWN */
2015 hint = type & L2CAP_CONF_HINT;
2016 type &= L2CAP_CONF_MASK;
2019 case L2CAP_CONF_MTU:
2023 case L2CAP_CONF_FLUSH_TO:
2024 chan->flush_to = val;
2027 case L2CAP_CONF_QOS:
2030 case L2CAP_CONF_RFC:
2031 if (olen == sizeof(rfc))
2032 memcpy(&rfc, (void *) val, olen);
2035 case L2CAP_CONF_FCS:
2036 if (val == L2CAP_FCS_NONE)
2037 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2045 result = L2CAP_CONF_UNKNOWN;
/* Echo the unknown option type back in the reject list */
2046 *((u8 *) ptr++) = type;
2051 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2054 switch (chan->mode) {
2055 case L2CAP_MODE_STREAMING:
2056 case L2CAP_MODE_ERTM:
2057 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2058 chan->mode = l2cap_select_mode(rfc.mode,
2059 chan->conn->feat_mask);
/* State-2 device: mismatching remote mode is fatal */
2063 if (chan->mode != rfc.mode)
2064 return -ECONNREFUSED;
2070 if (chan->mode != rfc.mode) {
2071 result = L2CAP_CONF_UNACCEPT;
2072 rfc.mode = chan->mode;
/* Second disagreement on mode => give up on the connection */
2074 if (chan->num_conf_rsp == 1)
2075 return -ECONNREFUSED;
2077 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2078 sizeof(rfc), (unsigned long) &rfc);
2082 if (result == L2CAP_CONF_SUCCESS) {
2083 /* Configure output options and let the other side know
2084 * which ones we don't like. */
2086 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2087 result = L2CAP_CONF_UNACCEPT;
2090 set_bit(CONF_MTU_DONE, &chan->conf_state);
2092 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2095 case L2CAP_MODE_BASIC:
2096 chan->fcs = L2CAP_FCS_NONE;
2097 set_bit(CONF_MODE_DONE, &chan->conf_state);
2100 case L2CAP_MODE_ERTM:
2101 chan->remote_tx_win = rfc.txwin_size;
2102 chan->remote_max_tx = rfc.max_transmit;
/* Clamp remote MPS to what fits in the link MTU (10 bytes overhead) */
2104 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2105 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2107 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* NOTE(review): le16_to_cpu on host constants looks inverted — the
 * values are stored into a little-endian wire struct, so cpu_to_le16
 * would be expected here; verify against upstream. */
2109 rfc.retrans_timeout =
2110 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2111 rfc.monitor_timeout =
2112 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2114 set_bit(CONF_MODE_DONE, &chan->conf_state);
2116 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2117 sizeof(rfc), (unsigned long) &rfc);
2121 case L2CAP_MODE_STREAMING:
2122 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2123 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2125 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2127 set_bit(CONF_MODE_DONE, &chan->conf_state);
2129 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2130 sizeof(rfc), (unsigned long) &rfc);
2135 result = L2CAP_CONF_UNACCEPT;
2137 memset(&rfc, 0, sizeof(rfc));
2138 rfc.mode = chan->mode;
2141 if (result == L2CAP_CONF_SUCCESS)
2142 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
/* Response carries our dcid as the remote's scid */
2144 rsp->scid = cpu_to_le16(chan->dcid);
2145 rsp->result = cpu_to_le16(result);
2146 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse a Configuration Response and build the follow-up request into
 * `data`: accept or adjust MTU, mirror flush-timeout and RFC options,
 * and adopt the remote's negotiated ERTM/streaming timeouts and MPS when
 * the result is SUCCESS.  Returns the new request length (elided).
 */
2151 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2153 struct l2cap_conf_req *req = data;
2154 void *ptr = req->data;
2157 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2159 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2161 while (len >= L2CAP_CONF_OPT_SIZE) {
2162 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2165 case L2CAP_CONF_MTU:
/* Remote proposed an MTU below the spec minimum: counter with minimum */
2166 if (val < L2CAP_DEFAULT_MIN_MTU) {
2167 *result = L2CAP_CONF_UNACCEPT;
2168 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2171 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2174 case L2CAP_CONF_FLUSH_TO:
2175 chan->flush_to = val;
2176 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2180 case L2CAP_CONF_RFC:
2181 if (olen == sizeof(rfc))
2182 memcpy(&rfc, (void *)val, olen);
/* State-2 device refuses any mode change suggested by the remote */
2184 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2185 rfc.mode != chan->mode)
2186 return -ECONNREFUSED;
2190 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2191 sizeof(rfc), (unsigned long) &rfc);
2196 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2197 return -ECONNREFUSED;
2199 chan->mode = rfc.mode;
2201 if (*result == L2CAP_CONF_SUCCESS) {
2203 case L2CAP_MODE_ERTM:
/* Adopt the timeouts/MPS the remote accepted */
2204 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2205 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2206 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2208 case L2CAP_MODE_STREAMING:
2209 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2213 req->dcid = cpu_to_le16(chan->dcid);
2214 req->flags = cpu_to_le16(0x0000);
/*
 * Fill a minimal Configuration Response (no options) with the given
 * result and flags.  Returns the response size (return line elided —
 * presumably sizeof(*rsp) / ptr - data).
 */
2219 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2221 struct l2cap_conf_rsp *rsp = data;
2222 void *ptr = rsp->data;
2224 BT_DBG("chan %p", chan);
/* Our dcid is the remote's source CID */
2226 rsp->scid = cpu_to_le16(chan->dcid);
2227 rsp->result = cpu_to_le16(result);
2228 rsp->flags = cpu_to_le16(flags);
/*
 * Send the deferred Connection Response (success) for a channel whose
 * acceptance was postponed (defer_setup), then kick off configuration by
 * sending our Configuration Request unless one was already sent.
 */
2233 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2235 struct l2cap_conn_rsp rsp;
2236 struct l2cap_conn *conn = chan->conn;
2239 rsp.scid = cpu_to_le16(chan->dcid);
2240 rsp.dcid = cpu_to_le16(chan->scid);
2241 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2242 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Reuse the ident from the original Connection Request */
2243 l2cap_send_cmd(conn, chan->ident,
2244 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Already sent a config request earlier: nothing more to do */
2246 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2249 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2250 l2cap_build_conf_req(chan, buf), buf);
2251 chan->num_conf_req++;
/*
 * Extract the RFC option from a successful Configuration Response and
 * store the negotiated ERTM/streaming timeouts and MPS on the channel.
 * If the remote omitted the RFC option entirely, fall back to sane
 * defaults rather than leaving the channel half-configured.
 */
2254 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2258 struct l2cap_conf_rfc rfc;
2260 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM/streaming carry RFC timing parameters */
2262 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2265 while (len >= L2CAP_CONF_OPT_SIZE) {
2266 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2268 if (type != L2CAP_CONF_RFC)
2271 if (olen != sizeof(rfc))
/* Found a well-formed RFC option: use it (goto done elided) */
2274 memcpy(&rfc, (void *)val, olen);
2278 /* Use sane default values in case a misbehaving remote device
2279 * did not send an RFC option.
2281 rfc.mode = chan->mode;
2282 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2283 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2284 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2286 BT_ERR("Expected RFC option was not found, using defaults");
2290 case L2CAP_MODE_ERTM:
2291 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2292 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2293 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2295 case L2CAP_MODE_STREAMING:
2296 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle an incoming Command Reject.  If it rejects our outstanding
 * Information Request (matched by ident), treat feature discovery as
 * done and proceed with starting queued connections.
 */
2300 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2302 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2304 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2307 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2308 cmd->ident == conn->info_ident) {
2309 del_timer(&conn->info_timer);
/* Remote doesn't understand info requests: mark discovery complete */
2311 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2312 conn->info_ident = 0;
2314 l2cap_conn_start(conn);
/*
 * Handle an incoming Connection Request: find a listening channel for
 * the PSM, enforce link security (except for SDP), check the accept
 * backlog, create the child channel, register it on the connection, and
 * answer with SUCCESS / PEND / error.  When feature discovery has not
 * completed yet, also fire off an Information Request.
 * Several error-goto labels and unlock lines are elided in this listing.
 */
2320 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2322 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2323 struct l2cap_conn_rsp rsp;
2324 struct l2cap_chan *chan = NULL, *pchan;
2325 struct sock *parent, *sk = NULL;
2326 int result, status = L2CAP_CS_NO_INFO;
2328 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2329 __le16 psm = req->psm;
2331 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2333 /* Check if we have socket listening on psm */
2334 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2336 result = L2CAP_CR_BAD_PSM;
2342 bh_lock_sock(parent);
2344 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check */
2345 if (psm != cpu_to_le16(0x0001) &&
2346 !hci_conn_check_link_mode(conn->hcon)) {
2347 conn->disc_reason = 0x05;
2348 result = L2CAP_CR_SEC_BLOCK;
2352 result = L2CAP_CR_NO_MEM;
2354 /* Check for backlog size */
2355 if (sk_acceptq_is_full(parent)) {
2356 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2360 chan = pchan->ops->new_connection(pchan->data);
2366 write_lock_bh(&conn->chan_lock);
2368 /* Check if we already have channel with that dcid */
2369 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2370 write_unlock_bh(&conn->chan_lock);
2371 sock_set_flag(sk, SOCK_ZAPPED);
2372 chan->ops->close(chan->data);
2376 hci_conn_hold(conn->hcon);
2378 bacpy(&bt_sk(sk)->src, conn->src);
2379 bacpy(&bt_sk(sk)->dst, conn->dst);
2383 bt_accept_enqueue(parent, sk);
2385 __l2cap_chan_add(conn, chan);
2389 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Remember the request ident for a possibly deferred response */
2391 chan->ident = cmd->ident;
2393 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2394 if (l2cap_check_security(chan)) {
/* Userspace wants to approve: answer PEND until accept() */
2395 if (bt_sk(sk)->defer_setup) {
2396 l2cap_state_change(chan, BT_CONNECT2);
2397 result = L2CAP_CR_PEND;
2398 status = L2CAP_CS_AUTHOR_PEND;
2399 parent->sk_data_ready(parent, 0);
2401 l2cap_state_change(chan, BT_CONFIG);
2402 result = L2CAP_CR_SUCCESS;
2403 status = L2CAP_CS_NO_INFO;
2406 l2cap_state_change(chan, BT_CONNECT2);
2407 result = L2CAP_CR_PEND;
2408 status = L2CAP_CS_AUTHEN_PEND;
/* Feature discovery still running: respond PEND for now */
2411 l2cap_state_change(chan, BT_CONNECT2);
2412 result = L2CAP_CR_PEND;
2413 status = L2CAP_CS_NO_INFO;
2416 write_unlock_bh(&conn->chan_lock);
2419 bh_unlock_sock(parent);
2422 rsp.scid = cpu_to_le16(scid);
2423 rsp.dcid = cpu_to_le16(dcid);
2424 rsp.result = cpu_to_le16(result);
2425 rsp.status = cpu_to_le16(status);
2426 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2428 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2429 struct l2cap_info_req info;
2430 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2432 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2433 conn->info_ident = l2cap_get_ident(conn);
2435 mod_timer(&conn->info_timer, jiffies +
2436 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2438 l2cap_send_cmd(conn, conn->info_ident,
2439 L2CAP_INFO_REQ, sizeof(info), &info);
/* Connection accepted outright: start configuration immediately */
2442 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2443 result == L2CAP_CR_SUCCESS) {
2445 set_bit(CONF_REQ_SENT, &chan->conf_state);
2446 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2447 l2cap_build_conf_req(chan, buf), buf);
2448 chan->num_conf_req++;
/*
 * Handle an incoming Connection Response: locate the channel by scid (or
 * by ident for a pending response), then on SUCCESS move to BT_CONFIG
 * and send our Configuration Request; on PEND just mark the channel as
 * pending; otherwise tear the channel down with ECONNREFUSED, deferring
 * deletion if userspace currently owns the socket lock.
 */
2454 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2456 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2457 u16 scid, dcid, result, status;
2458 struct l2cap_chan *chan;
2462 scid = __le16_to_cpu(rsp->scid);
2463 dcid = __le16_to_cpu(rsp->dcid);
2464 result = __le16_to_cpu(rsp->result);
2465 status = __le16_to_cpu(rsp->status);
2467 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* scid of 0 means the remote never assigned one: fall back to ident */
2470 chan = l2cap_get_chan_by_scid(conn, scid);
2474 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2482 case L2CAP_CR_SUCCESS:
2483 l2cap_state_change(chan, BT_CONFIG);
2486 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2488 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2491 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2492 l2cap_build_conf_req(chan, req), req);
2493 chan->num_conf_req++;
2497 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2501 /* don't delete l2cap channel if sk is owned by user */
2502 if (sock_owned_by_user(sk)) {
/* Let the disconnect timer finish the teardown later */
2503 l2cap_state_change(chan, BT_DISCONN);
2504 __clear_chan_timer(chan);
2505 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2509 l2cap_chan_del(chan, ECONNREFUSED);
/*
 * Apply the FCS default once configuration completes: CRC16 is only
 * meaningful in ERTM/streaming mode, and is kept on unless the remote
 * explicitly asked to disable it (CONF_NO_FCS_RECV).
 */
2517 static inline void set_default_fcs(struct l2cap_chan *chan)
2519 /* FCS is enabled only in ERTM or streaming mode, if one or both
2522 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2523 chan->fcs = L2CAP_FCS_NONE;
2524 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2525 chan->fcs = L2CAP_FCS_CRC16;
/*
 * Handle an incoming Configuration Request.  Rejects requests for
 * channels in the wrong state (INVALID_CID) or with over-long option
 * data, accumulates multi-fragment requests (continuation flag 0x0001)
 * in chan->conf_req, and once complete parses them, replies, and — when
 * both directions are configured — brings the channel up, initialising
 * ERTM state if needed.
 */
2528 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2530 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2533 struct l2cap_chan *chan;
2537 dcid = __le16_to_cpu(req->dcid);
2538 flags = __le16_to_cpu(req->flags);
2540 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2542 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config only valid while connecting/configuring */
2548 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2549 struct l2cap_cmd_rej_cid rej;
2551 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2552 rej.scid = cpu_to_le16(chan->scid);
2553 rej.dcid = cpu_to_le16(chan->dcid);
2555 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2560 /* Reject if config buffer is too small. */
2561 len = cmd_len - sizeof(*req);
2562 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2563 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2564 l2cap_build_conf_rsp(chan, rsp,
2565 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment into the per-channel buffer */
2570 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2571 chan->conf_len += len;
2573 if (flags & 0x0001) {
2574 /* Incomplete config. Send empty response. */
2575 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2576 l2cap_build_conf_rsp(chan, rsp,
2577 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2581 /* Complete config. */
2582 len = l2cap_parse_conf_req(chan, rsp);
2584 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2588 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2589 chan->num_conf_rsp++;
2591 /* Reset config buffer. */
2594 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: the channel is now connected */
2597 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2598 set_default_fcs(chan);
2600 l2cap_state_change(chan, BT_CONNECTED);
2602 chan->next_tx_seq = 0;
2603 chan->expected_tx_seq = 0;
2604 skb_queue_head_init(&chan->tx_q);
2605 if (chan->mode == L2CAP_MODE_ERTM)
2606 l2cap_ertm_init(chan);
2608 l2cap_chan_ready(sk);
/* We had not sent our own request yet: do it now */
2612 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2614 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2615 l2cap_build_conf_req(chan, buf), buf);
2616 chan->num_conf_req++;
/*
 * Handle an incoming Configuration Response.  On SUCCESS pull the RFC
 * parameters; on UNACCEPT retry with a renegotiated request (bounded by
 * L2CAP_CONF_MAX_CONF_RSP attempts and the request buffer size);
 * anything else aborts the connection.  When both directions finish,
 * bring the channel up and initialise ERTM if selected.
 * NOTE(review): len is taken from cmd->len here, while l2cap_config_req
 * receives a separate cmd_len parameter — verify byte order handling of
 * cmd->len against the caller (l2cap_sig_channel converts it already).
 */
2624 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2626 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2627 u16 scid, flags, result;
2628 struct l2cap_chan *chan;
2630 int len = cmd->len - sizeof(*rsp);
2632 scid = __le16_to_cpu(rsp->scid);
2633 flags = __le16_to_cpu(rsp->flags);
2634 result = __le16_to_cpu(rsp->result);
2636 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2637 scid, flags, result);
2639 chan = l2cap_get_chan_by_scid(conn, scid);
2646 case L2CAP_CONF_SUCCESS:
2647 l2cap_conf_rfc_get(chan, rsp->data, len);
2650 case L2CAP_CONF_UNACCEPT:
2651 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Response options must fit in our request scratch buffer */
2654 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2655 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2659 /* throw out any old stored conf requests */
2660 result = L2CAP_CONF_SUCCESS;
2661 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2664 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2668 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2669 L2CAP_CONF_REQ, len, req);
2670 chan->num_conf_req++;
2671 if (result != L2CAP_CONF_SUCCESS)
/* Rejection or too many retries: force a disconnect */
2677 sk->sk_err = ECONNRESET;
2678 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
2679 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2686 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2688 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2689 set_default_fcs(chan);
2691 l2cap_state_change(chan, BT_CONNECTED);
2692 chan->next_tx_seq = 0;
2693 chan->expected_tx_seq = 0;
2694 skb_queue_head_init(&chan->tx_q);
2695 if (chan->mode == L2CAP_MODE_ERTM)
2696 l2cap_ertm_init(chan);
2698 l2cap_chan_ready(sk);
/*
 * Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, shut the socket down, and remove the channel —
 * or, if userspace holds the socket lock, defer removal to the
 * disconnect timer.
 */
2706 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2708 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2709 struct l2cap_disconn_rsp rsp;
2711 struct l2cap_chan *chan;
2714 scid = __le16_to_cpu(req->scid);
2715 dcid = __le16_to_cpu(req->dcid);
2717 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* Remote's dcid is our scid: look the channel up by it */
2719 chan = l2cap_get_chan_by_scid(conn, dcid);
2725 rsp.dcid = cpu_to_le16(chan->scid);
2726 rsp.scid = cpu_to_le16(chan->dcid);
2727 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2729 sk->sk_shutdown = SHUTDOWN_MASK;
2731 /* don't delete l2cap channel if sk is owned by user */
2732 if (sock_owned_by_user(sk)) {
2733 l2cap_state_change(chan, BT_DISCONN);
2734 __clear_chan_timer(chan);
2735 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2740 l2cap_chan_del(chan, ECONNRESET);
2743 chan->ops->close(chan->data);
/*
 * Handle an incoming Disconnection Response (remote confirmed our
 * disconnect): remove the channel, with the same deferred-teardown
 * dance as l2cap_disconnect_req when userspace owns the socket.
 */
2747 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2749 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2751 struct l2cap_chan *chan;
2754 scid = __le16_to_cpu(rsp->scid);
2755 dcid = __le16_to_cpu(rsp->dcid);
2757 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2759 chan = l2cap_get_chan_by_scid(conn, scid);
2765 /* don't delete l2cap channel if sk is owned by user */
2766 if (sock_owned_by_user(sk)) {
2767 l2cap_state_change(chan,BT_DISCONN);
2768 __clear_chan_timer(chan);
2769 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* err == 0: this is an orderly, locally-initiated close */
2774 l2cap_chan_del(chan, 0);
2777 chan->ops->close(chan->data);
/*
 * Handle an incoming Information Request.  Answers FEAT_MASK with our
 * feature mask (ERTM/streaming added conditionally — the enabling test
 * is elided here), FIXED_CHAN with the fixed-channel bitmap, and
 * anything else with NOTSUPP.
 */
2781 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2783 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2786 type = __le16_to_cpu(req->type);
2788 BT_DBG("type 0x%4.4x", type);
2790 if (type == L2CAP_IT_FEAT_MASK) {
2792 u32 feat_mask = l2cap_feat_mask;
2793 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2794 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2795 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* NOTE(review): guard (likely "if (!disable_ertm)") elided before this */
2797 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2799 put_unaligned_le32(feat_mask, rsp->data);
2800 l2cap_send_cmd(conn, cmd->ident,
2801 L2CAP_INFO_RSP, sizeof(buf), buf);
2802 } else if (type == L2CAP_IT_FIXED_CHAN) {
2804 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2805 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2806 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed channel map follows the 4-byte rsp header */
2807 memcpy(buf + 4, l2cap_fixed_chan, 8);
2808 l2cap_send_cmd(conn, cmd->ident,
2809 L2CAP_INFO_RSP, sizeof(buf), buf);
2811 struct l2cap_info_rsp rsp;
2812 rsp.type = cpu_to_le16(type);
2813 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2814 l2cap_send_cmd(conn, cmd->ident,
2815 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an incoming Information Response.  Matches it to our pending
 * request by ident, records the remote feature mask, and chains a
 * FIXED_CHAN query when the remote advertises fixed channels; once
 * discovery is complete, start queued connections.
 */
2821 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2823 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2826 type = __le16_to_cpu(rsp->type);
2827 result = __le16_to_cpu(rsp->result);
2829 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2831 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2832 if (cmd->ident != conn->info_ident ||
2833 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2836 del_timer(&conn->info_timer);
2838 if (result != L2CAP_IR_SUCCESS) {
/* Failure still counts as "done": proceed without feature info */
2839 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2840 conn->info_ident = 0;
2842 l2cap_conn_start(conn);
2847 if (type == L2CAP_IT_FEAT_MASK) {
2848 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Remote supports fixed channels: ask which ones */
2850 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2851 struct l2cap_info_req req;
2852 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2854 conn->info_ident = l2cap_get_ident(conn);
2856 l2cap_send_cmd(conn, conn->info_ident,
2857 L2CAP_INFO_REQ, sizeof(req), &req);
2859 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2860 conn->info_ident = 0;
2862 l2cap_conn_start(conn);
2864 } else if (type == L2CAP_IT_FIXED_CHAN) {
2865 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2866 conn->info_ident = 0;
2868 l2cap_conn_start(conn);
2874 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2879 if (min > max || min < 6 || max > 3200)
2882 if (to_multiplier < 10 || to_multiplier > 3200)
2885 if (max >= to_multiplier * 8)
2888 max_latency = (to_multiplier * 8 / max) - 1;
2889 if (latency > 499 || latency > max_latency)
/*
 * Handle an LE Connection Parameter Update Request (slave asks the
 * master to change connection parameters).  Only valid when we are the
 * master and the command length matches; the parameters are validated
 * with l2cap_check_conn_param(), a response is sent either way, and on
 * acceptance the HCI connection update is issued.
 */
2895 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2896 struct l2cap_cmd_hdr *cmd, u8 *data)
2898 struct hci_conn *hcon = conn->hcon;
2899 struct l2cap_conn_param_update_req *req;
2900 struct l2cap_conn_param_update_rsp rsp;
2901 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may apply parameter updates */
2904 if (!(hcon->link_mode & HCI_LM_MASTER))
2907 cmd_len = __le16_to_cpu(cmd->len);
2908 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2911 req = (struct l2cap_conn_param_update_req *) data;
2912 min = __le16_to_cpu(req->min);
2913 max = __le16_to_cpu(req->max);
2914 latency = __le16_to_cpu(req->latency);
2915 to_multiplier = __le16_to_cpu(req->to_multiplier);
2917 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2918 min, max, latency, to_multiplier);
2920 memset(&rsp, 0, sizeof(rsp));
2922 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2924 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2926 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2928 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the new parameters down to the controller */
2932 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/*
 * Dispatch one BR/EDR signalling command to its handler.  Echo requests
 * are answered inline; unknown opcodes are logged (and rejected by the
 * caller via the returned error — return statement elided here).
 */
2937 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2938 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2942 switch (cmd->code) {
2943 case L2CAP_COMMAND_REJ:
2944 l2cap_command_rej(conn, cmd, data);
2947 case L2CAP_CONN_REQ:
2948 err = l2cap_connect_req(conn, cmd, data);
2951 case L2CAP_CONN_RSP:
2952 err = l2cap_connect_rsp(conn, cmd, data);
2955 case L2CAP_CONF_REQ:
2956 err = l2cap_config_req(conn, cmd, cmd_len, data);
2959 case L2CAP_CONF_RSP:
2960 err = l2cap_config_rsp(conn, cmd, data);
2963 case L2CAP_DISCONN_REQ:
2964 err = l2cap_disconnect_req(conn, cmd, data);
2967 case L2CAP_DISCONN_RSP:
2968 err = l2cap_disconnect_rsp(conn, cmd, data);
2971 case L2CAP_ECHO_REQ:
/* Echo: bounce the payload straight back with the same ident */
2972 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2975 case L2CAP_ECHO_RSP:
2978 case L2CAP_INFO_REQ:
2979 err = l2cap_information_req(conn, cmd, data);
2982 case L2CAP_INFO_RSP:
2983 err = l2cap_information_rsp(conn, cmd, data);
2987 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/*
 * Dispatch one LE signalling command.  Only the Connection Parameter
 * Update Request does real work; Command Reject and Update Response are
 * accepted silently, anything else is logged as unknown (error return
 * elided here).
 */
2995 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2996 struct l2cap_cmd_hdr *cmd, u8 *data)
2998 switch (cmd->code) {
2999 case L2CAP_COMMAND_REJ:
3002 case L2CAP_CONN_PARAM_UPDATE_REQ:
3003 return l2cap_conn_param_update_req(conn, cmd, data);
3005 case L2CAP_CONN_PARAM_UPDATE_RSP:
3009 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/*
 * Process a frame received on the signalling channel: mirror it to raw
 * sockets, then iterate the packed command headers, validating each
 * length/ident, dispatching to the LE or BR/EDR handler, and replying
 * with Command Reject on handler failure.  The data/len advance at the
 * loop tail and the final kfree_skb are elided in this listing.
 */
3014 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3015 struct sk_buff *skb)
3017 u8 *data = skb->data;
3019 struct l2cap_cmd_hdr cmd;
/* Raw sockets see every signalling frame */
3022 l2cap_raw_recv(conn, skb);
3024 while (len >= L2CAP_CMD_HDR_SIZE) {
3026 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3027 data += L2CAP_CMD_HDR_SIZE;
3028 len -= L2CAP_CMD_HDR_SIZE;
3030 cmd_len = le16_to_cpu(cmd.len);
3032 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* ident 0 is reserved; an over-long cmd means a corrupt frame */
3034 if (cmd_len > len || !cmd.ident) {
3035 BT_DBG("corrupted command");
3039 if (conn->hcon->type == LE_LINK)
3040 err = l2cap_le_sig_cmd(conn, &cmd, data);
3042 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3045 struct l2cap_cmd_rej_unk rej;
3047 BT_ERR("Wrong link type (%d)", err);
3049 /* FIXME: Map err to a valid reason */
3050 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3051 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the 2-byte CRC16 FCS that trails an ERTM/streaming frame.
 * The CRC covers the basic L2CAP header plus control field (hdr_size)
 * and the payload.  Returns nonzero on mismatch (return statements not
 * visible in this extract). */
3061 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3063 u16 our_fcs, rcv_fcs;
3064 int hdr_size = L2CAP_HDR_SIZE + 2;
3066 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Drop the FCS from the logical length; the bytes are still in the
 * buffer, so data + len now points at the received FCS. */
3067 skb_trim(skb, skb->len - 2);
3068 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
/* skb->data was already pulled past the header, so back up hdr_size
 * bytes to include it in the CRC. */
3069 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3071 if (our_fcs != rcv_fcs)
/* Answer a poll: send pending I-frames if possible, otherwise an RR,
 * or an RNR when we are locally busy.  Always acknowledges with the
 * current buffer_seq. */
3077 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3081 chan->frames_sent = 0;
3083 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3085 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Locally busy: tell the peer to stop sending (RNR) and remember
 * that we did. */
3086 control |= L2CAP_SUPER_RCV_NOT_READY;
3087 l2cap_send_sframe(chan, control);
3088 set_bit(CONN_RNR_SENT, &chan->conn_state);
3091 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3092 l2cap_retransmit_frames(chan);
3094 l2cap_ertm_send(chan);
/* If nothing was (re)transmitted, the F-bit ack must still go out as
 * a bare RR. */
3096 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3097 chan->frames_sent == 0) {
3098 control |= L2CAP_SUPER_RCV_READY;
3099 l2cap_send_sframe(chan, control);
/* Insert an out-of-sequence I-frame into the SREJ reordering queue,
 * keeping the queue sorted by tx_seq distance from buffer_seq (modulo
 * the 64-frame ERTM sequence space).  Duplicate tx_seq entries are
 * detected and not re-queued. */
3103 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
3105 struct sk_buff *next_skb;
3106 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequence and SAR bits in the skb control block for reassembly. */
3108 bt_cb(skb)->tx_seq = tx_seq;
3109 bt_cb(skb)->sar = sar;
3111 next_skb = skb_peek(&chan->srej_q);
/* Empty queue: just append. */
3113 __skb_queue_tail(&chan->srej_q, skb);
/* Offsets are computed relative to buffer_seq so that modulo-64
 * wraparound compares correctly. */
3117 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3118 if (tx_seq_offset < 0)
3119 tx_seq_offset += 64;
3122 if (bt_cb(next_skb)->tx_seq == tx_seq)
3125 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3126 chan->buffer_seq) % 64;
3127 if (next_tx_seq_offset < 0)
3128 next_tx_seq_offset += 64;
/* Found the first queued frame that sorts after us: insert before it. */
3130 if (next_tx_seq_offset > tx_seq_offset) {
3131 __skb_queue_before(&chan->srej_q, next_skb, skb);
3135 if (skb_queue_is_last(&chan->srej_q, next_skb))
3138 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
/* Larger than everything queued: append at the tail. */
3140 __skb_queue_tail(&chan->srej_q, skb);
/* Append new_frag to skb's frag_list (zero-copy SDU reassembly) and
 * keep *last_frag pointing at the list tail so appends stay O(1).
 * Updates len/data_len/truesize accounting on the head skb. */
3145 static void append_skb_frag(struct sk_buff *skb,
3146 struct sk_buff *new_frag, struct sk_buff **last_frag)
3148 /* skb->len reflects data in skb as well as all fragments
3149 * skb->data_len reflects only data in fragments
/* First fragment starts the frag_list; later ones chain off the
 * cached tail. */
3151 if (!skb_has_frag_list(skb))
3152 skb_shinfo(skb)->frag_list = new_frag;
3154 new_frag->next = NULL;
3156 (*last_frag)->next = new_frag;
3157 *last_frag = new_frag;
3159 skb->len += new_frag->len;
3160 skb->data_len += new_frag->len;
3161 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from I-frames according to the SAR bits in the
 * control field.  Unsegmented frames are delivered directly; START
 * frames begin a new chan->sdu, CONTINUE/END frames are appended via
 * append_skb_frag() and the completed SDU is handed to the channel's
 * recv callback.  On error the partial SDU is freed. */
3164 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3168 switch (control & L2CAP_CTRL_SAR) {
3169 case L2CAP_SDU_UNSEGMENTED:
3173 err = chan->ops->recv(chan->data, skb);
3176 case L2CAP_SDU_START:
/* START frames carry the total SDU length first. */
3180 chan->sdu_len = get_unaligned_le16(skb->data);
/* Reject SDUs the channel's receive MTU cannot hold. */
3183 if (chan->sdu_len > chan->imtu) {
3188 if (skb->len >= chan->sdu_len)
3192 chan->sdu_last_frag = skb;
3198 case L2CAP_SDU_CONTINUE:
3202 append_skb_frag(chan->sdu, skb,
3203 &chan->sdu_last_frag);
/* A CONTINUE frame must not reach the declared SDU length. */
3206 if (chan->sdu->len >= chan->sdu_len)
3216 append_skb_frag(chan->sdu, skb,
3217 &chan->sdu_last_frag);
/* END frame: total must match exactly what START announced. */
3220 if (chan->sdu->len != chan->sdu_len)
3223 err = chan->ops->recv(chan->data, chan->sdu);
3226 /* Reassembly complete */
3228 chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU. */
3236 kfree_skb(chan->sdu);
3238 chan->sdu_last_frag = NULL;
/* Enter the ERTM local-busy state: flag it, send an RNR S-frame
 * acknowledging buffer_seq so the peer stops transmitting, and stop
 * the ack timer (no acks while busy). */
3245 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3249 BT_DBG("chan %p, Enter local busy", chan);
3251 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3253 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3254 control |= L2CAP_SUPER_RCV_NOT_READY;
3255 l2cap_send_sframe(chan, control);
3257 set_bit(CONN_RNR_SENT, &chan->conn_state);
3259 __clear_ack_timer(chan);
/* Leave the ERTM local-busy state.  If we previously sent an RNR,
 * send an RR with the P-bit set to poll the peer, arm the monitor
 * timer and wait for the F-bit response (WAIT_F). */
3262 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
/* Nothing to undo if no RNR ever went out. */
3266 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3269 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3270 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3271 l2cap_send_sframe(chan, control);
3272 chan->retry_count = 1;
/* Poll sent: swap retransmission timer for the monitor timer. */
3274 __clear_retrans_timer(chan);
3275 __set_monitor_timer(chan);
3277 set_bit(CONN_WAIT_F, &chan->conn_state);
3280 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3281 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3283 BT_DBG("chan %p, Exit local busy", chan);
/* Public entry point for the socket layer to toggle receive-side busy
 * state; only meaningful in ERTM mode. */
3286 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3288 if (chan->mode == L2CAP_MODE_ERTM) {
3290 l2cap_ertm_enter_local_busy(chan);
3292 l2cap_ertm_exit_local_busy(chan);
/* After a missing frame arrives, drain the SREJ queue: deliver every
 * consecutively-sequenced buffered frame (starting at tx_seq) to the
 * reassembler, stopping at the next gap or if we go locally busy. */
3296 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3298 struct sk_buff *skb;
3301 while ((skb = skb_peek(&chan->srej_q)) &&
3302 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Queue is sorted; a mismatch means the run of in-order frames ended. */
3305 if (bt_cb(skb)->tx_seq != tx_seq)
3308 skb = skb_dequeue(&chan->srej_q);
3309 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3310 err = l2cap_reassemble_sdu(chan, skb, control);
/* Reassembly failure is fatal for the channel. */
3313 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3317 chan->buffer_seq_srej =
3318 (chan->buffer_seq_srej + 1) % 64;
3319 tx_seq = (tx_seq + 1) % 64;
/* Re-send SREJ S-frames for every outstanding request up to and
 * including tx_seq, rotating each entry to the tail of srej_l so the
 * list keeps its request order. */
3323 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3325 struct srej_list *l, *tmp;
3328 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3329 if (l->tx_seq == tx_seq) {
3334 control = L2CAP_SUPER_SELECT_REJECT;
3335 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3336 l2cap_send_sframe(chan, control);
/* Move the still-pending request to the list tail. */
3338 list_add_tail(&l->list, &chan->srej_l);
/* Send a SREJ for each sequence number between expected_tx_seq and the
 * received tx_seq (the gap), recording each request in srej_l.
 * NOTE(review): the kzalloc(GFP_ATOMIC) result is dereferenced on the
 * next visible line without a NULL check — confirm against upstream
 * whether error handling exists in the elided lines. */
3342 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3344 struct srej_list *new;
3347 while (tx_seq != chan->expected_tx_seq) {
3348 control = L2CAP_SUPER_SELECT_REJECT;
3349 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3350 l2cap_send_sframe(chan, control);
3352 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3353 new->tx_seq = chan->expected_tx_seq;
3354 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3355 list_add_tail(&new->list, &chan->srej_l);
/* Skip past the frame that triggered the SREJs. */
3357 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/* Core ERTM I-frame receive state machine.  Validates tx_seq against
 * the receive window, handles the F-bit (WAIT_F exit), processes acks
 * carried in req_seq, manages SREJ-based recovery for out-of-sequence
 * frames, and delivers in-sequence frames to the reassembler.  Sends
 * an ack every tx_win/6 frames received. */
3360 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3362 u8 tx_seq = __get_txseq(rx_control);
3363 u8 req_seq = __get_reqseq(rx_control);
3364 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3365 int tx_seq_offset, expected_tx_seq_offset;
/* Ack roughly six times per window. */
3366 int num_to_ack = (chan->tx_win/6) + 1;
3369 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3370 tx_seq, rx_control);
/* F-bit answers our poll: stop the monitor timer and leave WAIT_F;
 * re-arm retransmission if frames are still unacked. */
3372 if (L2CAP_CTRL_FINAL & rx_control &&
3373 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3374 __clear_monitor_timer(chan);
3375 if (chan->unacked_frames > 0)
3376 __set_retrans_timer(chan);
3377 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* Piggybacked ack: drop everything up to req_seq from the tx queue. */
3380 chan->expected_ack_seq = req_seq;
3381 l2cap_drop_acked_frames(chan);
3383 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3384 if (tx_seq_offset < 0)
3385 tx_seq_offset += 64;
3387 /* invalid tx_seq */
3388 if (tx_seq_offset >= chan->tx_win) {
3389 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3393 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3396 if (tx_seq == chan->expected_tx_seq)
/* Out-of-sequence frame while SREJ recovery is already active. */
3399 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3400 struct srej_list *first;
3402 first = list_first_entry(&chan->srej_l,
3403 struct srej_list, list);
/* This is the frame we SREJ'd first: queue it and flush the run
 * of buffered in-order frames behind it. */
3404 if (tx_seq == first->tx_seq) {
3405 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3406 l2cap_check_srej_gap(chan, tx_seq);
3408 list_del(&first->list);
/* All SREJs satisfied: leave recovery and resync buffer_seq. */
3411 if (list_empty(&chan->srej_l)) {
3412 chan->buffer_seq = chan->buffer_seq_srej;
3413 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3414 l2cap_send_ack(chan);
3415 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3418 struct srej_list *l;
3420 /* duplicated tx_seq */
3421 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
/* Frame answers a later pending SREJ: re-issue the earlier ones. */
3424 list_for_each_entry(l, &chan->srej_l, list) {
3425 if (l->tx_seq == tx_seq) {
3426 l2cap_resend_srejframe(chan, tx_seq);
/* A brand-new gap beyond the known ones. */
3430 l2cap_send_srejframe(chan, tx_seq);
3433 expected_tx_seq_offset =
3434 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3435 if (expected_tx_seq_offset < 0)
3436 expected_tx_seq_offset += 64;
3438 /* duplicated tx_seq */
3439 if (tx_seq_offset < expected_tx_seq_offset)
/* First gap detected: enter SREJ recovery. */
3442 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3444 BT_DBG("chan %p, Enter SREJ", chan);
3446 INIT_LIST_HEAD(&chan->srej_l);
3447 chan->buffer_seq_srej = chan->buffer_seq;
3449 __skb_queue_head_init(&chan->srej_q);
3450 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3452 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3454 l2cap_send_srejframe(chan, tx_seq);
3456 __clear_ack_timer(chan);
/* In-sequence path: advance the expectation and deliver. */
3461 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/* During recovery even in-order frames are buffered, keeping
 * delivery order with the SREJ'd ones. */
3463 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3464 bt_cb(skb)->tx_seq = tx_seq;
3465 bt_cb(skb)->sar = sar;
3466 __skb_queue_tail(&chan->srej_q, skb);
3470 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3471 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3473 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3477 if (rx_control & L2CAP_CTRL_FINAL) {
3478 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3479 l2cap_retransmit_frames(chan);
3482 __set_ack_timer(chan);
3484 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3485 if (chan->num_acked == num_to_ack - 1)
3486 l2cap_send_ack(chan);
/* Handle a Receiver Ready S-frame: process the carried ack, answer a
 * P-bit poll with an F-bit response, clear remote-busy on F-bit, and
 * resume transmission. */
3495 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3497 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3500 chan->expected_ack_seq = __get_reqseq(rx_control);
3501 l2cap_drop_acked_frames(chan);
3503 if (rx_control & L2CAP_CTRL_POLL) {
/* Peer polled us: next frame out must carry the F-bit. */
3504 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3505 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3506 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3507 (chan->unacked_frames > 0))
3508 __set_retrans_timer(chan);
3510 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* In SREJ recovery, answer the poll with the tail SREJ. */
3511 l2cap_send_srejtail(chan);
3513 l2cap_send_i_or_rr_or_rnr(chan);
3516 } else if (rx_control & L2CAP_CTRL_FINAL) {
3517 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* F-bit without an active REJ means we must retransmit ourselves. */
3519 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3520 l2cap_retransmit_frames(chan);
3523 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3524 (chan->unacked_frames > 0))
3525 __set_retrans_timer(chan);
3527 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3528 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3529 l2cap_send_ack(chan);
3531 l2cap_ertm_send(chan);
/* Handle a Reject S-frame: ack frames up to req_seq, then retransmit
 * from there.  While waiting for an F-bit (WAIT_F) the retransmission
 * is deferred and flagged via CONN_REJ_ACT instead. */
3535 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3537 u8 tx_seq = __get_reqseq(rx_control);
3539 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3541 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3543 chan->expected_ack_seq = tx_seq;
3544 l2cap_drop_acked_frames(chan);
3546 if (rx_control & L2CAP_CTRL_FINAL) {
/* F-bit REJ: only retransmit if no REJ action was already pending. */
3547 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3548 l2cap_retransmit_frames(chan);
3550 l2cap_retransmit_frames(chan);
3552 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3553 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a Selective Reject S-frame: retransmit exactly the frame the
 * peer asked for (tx_seq).  P-bit variants also ack and require an
 * F-bit response; during WAIT_F the request sequence is saved so a
 * duplicate F-bit SREJ is not retransmitted twice. */
3556 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3558 u8 tx_seq = __get_reqseq(rx_control);
3560 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3562 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3564 if (rx_control & L2CAP_CTRL_POLL) {
3565 chan->expected_ack_seq = tx_seq;
3566 l2cap_drop_acked_frames(chan);
3568 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3569 l2cap_retransmit_one_frame(chan, tx_seq);
3571 l2cap_ertm_send(chan);
3573 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3574 chan->srej_save_reqseq = tx_seq;
3575 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3577 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* F-bit SREJ matching the saved poll request: already handled. */
3578 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3579 chan->srej_save_reqseq == tx_seq)
3580 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3582 l2cap_retransmit_one_frame(chan, tx_seq);
3584 l2cap_retransmit_one_frame(chan, tx_seq);
3585 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3586 chan->srej_save_reqseq = tx_seq;
3587 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a Receiver Not Ready S-frame: mark the peer busy, process the
 * carried ack, and answer a P-bit poll appropriately (RR/RNR normally,
 * SREJ tail while in SREJ recovery). */
3592 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3594 u8 tx_seq = __get_reqseq(rx_control);
3596 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3598 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3599 chan->expected_ack_seq = tx_seq;
3600 l2cap_drop_acked_frames(chan);
3602 if (rx_control & L2CAP_CTRL_POLL)
3603 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3605 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
/* Peer is busy: no point retrying, stop the retransmission timer. */
3606 __clear_retrans_timer(chan);
3607 if (rx_control & L2CAP_CTRL_POLL)
3608 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3612 if (rx_control & L2CAP_CTRL_POLL)
3613 l2cap_send_srejtail(chan);
3615 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
/* Demultiplex a supervisory frame (RR/REJ/SREJ/RNR) to its handler.
 * Shares the WAIT_F/F-bit exit logic with the I-frame path. */
3618 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3620 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
/* F-bit answers our poll: leave WAIT_F and swap monitor timer for
 * the retransmission timer if frames remain unacked. */
3622 if (L2CAP_CTRL_FINAL & rx_control &&
3623 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3624 __clear_monitor_timer(chan);
3625 if (chan->unacked_frames > 0)
3626 __set_retrans_timer(chan);
3627 clear_bit(CONN_WAIT_F, &chan->conn_state);
3630 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3631 case L2CAP_SUPER_RCV_READY:
3632 l2cap_data_channel_rrframe(chan, rx_control);
3635 case L2CAP_SUPER_REJECT:
3636 l2cap_data_channel_rejframe(chan, rx_control);
3639 case L2CAP_SUPER_SELECT_REJECT:
3640 l2cap_data_channel_srejframe(chan, rx_control);
3643 case L2CAP_SUPER_RCV_NOT_READY:
3644 l2cap_data_channel_rnrframe(chan, rx_control);
/* Entry point for one received ERTM PDU: verify FCS, sanity-check the
 * payload length against MPS and the req_seq against the send window,
 * then route I-frames and S-frames to their handlers.  Violations tear
 * the channel down with a disconnect request. */
3652 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3654 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3657 int len, next_tx_seq_offset, req_seq_offset;
3659 control = get_unaligned_le16(skb->data);
3664 * We can just drop the corrupted I-frame here.
3665 * Receiver will miss it and start proper recovery
3666 * procedures and ask retransmission.
3668 if (l2cap_check_fcs(chan, skb))
/* SAR-start I-frames carry a 2-byte SDU length after the control
 * field; account for it in the payload length. */
3671 if (__is_sar_start(control) && __is_iframe(control))
3674 if (chan->fcs == L2CAP_FCS_CRC16)
3677 if (len > chan->mps) {
3678 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3682 req_seq = __get_reqseq(control);
/* Modulo-64 window math: req_seq must not ack beyond next_tx_seq. */
3683 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3684 if (req_seq_offset < 0)
3685 req_seq_offset += 64;
3687 next_tx_seq_offset =
3688 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3689 if (next_tx_seq_offset < 0)
3690 next_tx_seq_offset += 64;
3692 /* check for invalid req-seq */
3693 if (req_seq_offset > next_tx_seq_offset) {
3694 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3698 if (__is_iframe(control)) {
3700 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3704 l2cap_data_channel_iframe(chan, control, skb);
3708 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3712 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver a data PDU to the channel bound to cid, dispatching on the
 * channel mode: basic (direct delivery, MTU-checked), ERTM (full state
 * machine, backlogged if the socket is owned by user context), or
 * streaming (FCS check, drop-on-gap, no retransmission). */
3722 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3724 struct l2cap_chan *chan;
3725 struct sock *sk = NULL;
3730 chan = l2cap_get_chan_by_scid(conn, cid);
3732 BT_DBG("unknown cid 0x%4.4x", cid);
3738 BT_DBG("chan %p, len %d", chan, skb->len);
3740 if (chan->state != BT_CONNECTED)
3743 switch (chan->mode) {
3744 case L2CAP_MODE_BASIC:
3745 /* If socket recv buffers overflows we drop data here
3746 * which is *bad* because L2CAP has to be reliable.
3747 * But we don't have any other choice. L2CAP doesn't
3748 * provide flow control mechanism. */
3750 if (chan->imtu < skb->len)
3753 if (!chan->ops->recv(chan->data, skb))
3757 case L2CAP_MODE_ERTM:
/* Process inline unless user context holds the socket lock, in
 * which case the frame goes to the backlog. */
3758 if (!sock_owned_by_user(sk)) {
3759 l2cap_ertm_data_rcv(sk, skb);
3761 if (sk_add_backlog(sk, skb))
3767 case L2CAP_MODE_STREAMING:
3768 control = get_unaligned_le16(skb->data);
3772 if (l2cap_check_fcs(chan, skb))
3775 if (__is_sar_start(control))
3778 if (chan->fcs == L2CAP_FCS_CRC16)
/* Streaming never carries S-frames; oversize or S-frame is junk. */
3781 if (len > chan->mps || len < 0 || __is_sframe(control))
3784 tx_seq = __get_txseq(control);
3786 if (chan->expected_tx_seq != tx_seq) {
3787 /* Frame(s) missing - must discard partial SDU */
3788 kfree_skb(chan->sdu);
3790 chan->sdu_last_frag = NULL;
3793 /* TODO: Notify userland of missing data */
3796 chan->expected_tx_seq = (tx_seq + 1) % 64;
3798 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
3799 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3804 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (group) PDU: look up a channel listening on
 * this PSM, check state and MTU, and hand the skb to its recv op. */
3818 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3820 struct sock *sk = NULL;
3821 struct l2cap_chan *chan;
3823 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3831 BT_DBG("sk %p, len %d", sk, skb->len);
3833 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3836 if (chan->imtu < skb->len)
3839 if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT fixed-channel PDU (LE): look up the channel bound to
 * the attribute CID, check state and MTU, and hand the skb over. */
3851 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3853 struct sock *sk = NULL;
3854 struct l2cap_chan *chan;
3856 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3864 BT_DBG("sk %p, len %d", sk, skb->len);
3866 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3869 if (chan->imtu < skb->len)
3872 if (!chan->ops->recv(chan->data, skb))
/* Top-level demux for one complete L2CAP frame: strip the basic
 * header, verify the length, and route by CID — signaling channels,
 * connectionless, LE attribute, SMP, or a connection-oriented data
 * channel. */
3884 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3886 struct l2cap_hdr *lh = (void *) skb->data;
3890 skb_pull(skb, L2CAP_HDR_SIZE);
3891 cid = __le16_to_cpu(lh->cid);
3892 len = __le16_to_cpu(lh->len);
/* Header length must match the remaining payload exactly. */
3894 if (len != skb->len) {
3899 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3902 case L2CAP_CID_LE_SIGNALING:
3903 case L2CAP_CID_SIGNALING:
3904 l2cap_sig_channel(conn, skb);
3907 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM in the first two bytes. */
3908 psm = get_unaligned_le16(skb->data);
3910 l2cap_conless_channel(conn, psm, skb);
3913 case L2CAP_CID_LE_DATA:
3914 l2cap_att_channel(conn, cid, skb);
/* SMP rejection is fatal for the whole LE connection. */
3918 if (smp_sig_channel(conn, skb))
3919 l2cap_conn_del(conn->hcon, EACCES);
3923 l2cap_data_channel(conn, cid, skb);
3928 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from
 * bdaddr?  Scans listening channels; an exact local-address match
 * (lm1) takes precedence over wildcard listeners (lm2).  Returns the
 * HCI link-mode flags (ACCEPT/MASTER) or 0. */
3930 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3932 int exact = 0, lm1 = 0, lm2 = 0;
3933 struct l2cap_chan *c;
/* Only BR/EDR ACL links are subject to this policy. */
3935 if (type != ACL_LINK)
3938 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3940 /* Find listening sockets and check their link_mode */
3941 read_lock(&chan_list_lock);
3942 list_for_each_entry(c, &chan_list, global_l) {
3943 struct sock *sk = c->sk;
3945 if (c->state != BT_LISTEN)
3948 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3949 lm1 |= HCI_LM_ACCEPT;
3951 lm1 |= HCI_LM_MASTER;
3953 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3954 lm2 |= HCI_LM_ACCEPT;
3956 lm2 |= HCI_LM_MASTER;
3959 read_unlock(&chan_list_lock);
3961 return exact ? lm1 : lm2;
/* HCI callback: a link came up (or failed).  On success create/attach
 * the l2cap_conn and mark it ready; on failure tear it down with the
 * mapped errno. */
3964 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3966 struct l2cap_conn *conn;
3968 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3970 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3974 conn = l2cap_conn_add(hcon, status);
3976 l2cap_conn_ready(conn);
3978 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: supply the L2CAP-recorded disconnect reason for an
 * outgoing disconnect on this link. */
3983 static int l2cap_disconn_ind(struct hci_conn *hcon)
3985 struct l2cap_conn *conn = hcon->l2cap_data;
3987 BT_DBG("hcon %p", hcon);
3989 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
3992 return conn->disc_reason;
/* HCI callback: the link is gone — tear down the whole l2cap_conn with
 * the HCI reason mapped to an errno. */
3995 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3997 BT_DBG("hcon %p reason %d", hcon, reason);
3999 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4002 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption closes HIGH-security channels immediately and
 * starts a grace timer for MEDIUM; regaining it cancels that timer. */
4007 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4009 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4012 if (encrypt == 0x00) {
4013 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4014 __clear_chan_timer(chan);
4015 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4016 } else if (chan->sec_level == BT_SECURITY_HIGH)
4017 l2cap_chan_close(chan, ECONNREFUSED);
4019 if (chan->sec_level == BT_SECURITY_MEDIUM)
4020 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption completed with `status` and
 * new `encrypt` state.  For LE, finish SMP key distribution; for each
 * channel on the connection, either resume a pending connect (send the
 * deferred CONN_REQ), answer a held CONNECT2 with the security verdict,
 * or just propagate the encryption change. */
4024 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4026 struct l2cap_conn *conn = hcon->l2cap_data;
4027 struct l2cap_chan *chan;
4032 BT_DBG("conn %p", conn);
4034 if (hcon->type == LE_LINK) {
4035 smp_distribute_keys(conn, 0);
4036 del_timer(&conn->security_timer);
4039 read_lock(&conn->chan_lock);
4041 list_for_each_entry(chan, &conn->chan_l, list) {
4042 struct sock *sk = chan->sk;
4046 BT_DBG("chan->scid %d", chan->scid);
/* LE data channel: success promotes the channel's security level
 * and makes it ready immediately. */
4048 if (chan->scid == L2CAP_CID_LE_DATA) {
4049 if (!status && encrypt) {
4050 chan->sec_level = hcon->sec_level;
4051 l2cap_chan_ready(sk);
4058 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4063 if (!status && (chan->state == BT_CONNECTED ||
4064 chan->state == BT_CONFIG)) {
4065 l2cap_check_encryption(chan, encrypt);
/* Security was the gate for our outgoing connect: send it now. */
4070 if (chan->state == BT_CONNECT) {
4072 struct l2cap_conn_req req;
4073 req.scid = cpu_to_le16(chan->scid);
4074 req.psm = chan->psm;
4076 chan->ident = l2cap_get_ident(conn);
4077 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4079 l2cap_send_cmd(conn, chan->ident,
4080 L2CAP_CONN_REQ, sizeof(req), &req);
4082 __clear_chan_timer(chan);
4083 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Incoming connect was held for security: answer it now. */
4085 } else if (chan->state == BT_CONNECT2) {
4086 struct l2cap_conn_rsp rsp;
4090 if (bt_sk(sk)->defer_setup) {
4091 struct sock *parent = bt_sk(sk)->parent;
4092 res = L2CAP_CR_PEND;
4093 stat = L2CAP_CS_AUTHOR_PEND;
/* Wake the listening socket for user authorization. */
4095 parent->sk_data_ready(parent, 0);
4097 l2cap_state_change(chan, BT_CONFIG);
4098 res = L2CAP_CR_SUCCESS;
4099 stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse and schedule disconnect. */
4102 l2cap_state_change(chan, BT_DISCONN);
4103 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4104 res = L2CAP_CR_SEC_BLOCK;
4105 stat = L2CAP_CS_NO_INFO;
4108 rsp.scid = cpu_to_le16(chan->dcid);
4109 rsp.dcid = cpu_to_le16(chan->scid);
4110 rsp.result = cpu_to_le16(res);
4111 rsp.status = cpu_to_le16(stat);
4112 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4119 read_unlock(&conn->chan_lock);
/* HCI callback: one ACL fragment arrived.  Start fragments (no
 * ACL_CONT flag) are either complete frames, dispatched immediately,
 * or begin reassembly into conn->rx_skb; continuation fragments are
 * appended until conn->rx_len reaches zero.  Any framing violation
 * marks the connection unreliable (ECOMM). */
4124 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4126 struct l2cap_conn *conn = hcon->l2cap_data;
/* Lazily create the l2cap_conn on first data. */
4129 conn = l2cap_conn_add(hcon, 0);
4134 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4136 if (!(flags & ACL_CONT)) {
4137 struct l2cap_hdr *hdr;
4138 struct l2cap_chan *chan;
/* A new start while reassembly is in progress means the previous
 * frame was truncated — drop it. */
4143 BT_ERR("Unexpected start frame (len %d)", skb->len);
4144 kfree_skb(conn->rx_skb);
4145 conn->rx_skb = NULL;
4147 l2cap_conn_unreliable(conn, ECOMM);
4150 /* Start fragment always begin with Basic L2CAP header */
4151 if (skb->len < L2CAP_HDR_SIZE) {
4152 BT_ERR("Frame is too short (len %d)", skb->len);
4153 l2cap_conn_unreliable(conn, ECOMM);
4157 hdr = (struct l2cap_hdr *) skb->data;
4158 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4159 cid = __le16_to_cpu(hdr->cid);
4161 if (len == skb->len) {
4162 /* Complete frame received */
4163 l2cap_recv_frame(conn, skb);
4167 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4169 if (skb->len > len) {
4170 BT_ERR("Frame is too long (len %d, expected len %d)",
4172 l2cap_conn_unreliable(conn, ECOMM);
/* Pre-check the destination channel's MTU before committing to an
 * allocation for the full frame. */
4176 chan = l2cap_get_chan_by_scid(conn, cid);
4178 if (chan && chan->sk) {
4179 struct sock *sk = chan->sk;
4181 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4182 BT_ERR("Frame exceeding recv MTU (len %d, "
4186 l2cap_conn_unreliable(conn, ECOMM);
4192 /* Allocate skb for the complete frame (with header) */
4193 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4197 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4199 conn->rx_len = len - skb->len;
4201 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with no reassembly in progress is a protocol error. */
4203 if (!conn->rx_len) {
4204 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4205 l2cap_conn_unreliable(conn, ECOMM);
4209 if (skb->len > conn->rx_len) {
4210 BT_ERR("Fragment is too long (len %d, expected %d)",
4211 skb->len, conn->rx_len);
4212 kfree_skb(conn->rx_skb);
4213 conn->rx_skb = NULL;
4215 l2cap_conn_unreliable(conn, ECOMM);
4219 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4221 conn->rx_len -= skb->len;
4223 if (!conn->rx_len) {
4224 /* Complete frame received */
4225 l2cap_recv_frame(conn, conn->rx_skb);
4226 conn->rx_skb = NULL;
/* debugfs: print one line per registered channel (addresses, state,
 * PSM, CIDs, MTUs, security level, mode) under chan_list_lock. */
4235 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4237 struct l2cap_chan *c;
4239 read_lock_bh(&chan_list_lock);
4241 list_for_each_entry(c, &chan_list, global_l) {
4242 struct sock *sk = c->sk;
4244 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4245 batostr(&bt_sk(sk)->src),
4246 batostr(&bt_sk(sk)->dst),
4247 c->state, __le16_to_cpu(c->psm),
4248 c->scid, c->dcid, c->imtu, c->omtu,
4249 c->sec_level, c->mode);
4252 read_unlock_bh(&chan_list_lock);
/* debugfs open: standard single_open wrapper around the show routine. */
4257 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4259 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (seq_file based). */
4262 static const struct file_operations l2cap_debugfs_fops = {
4263 .open = l2cap_debugfs_open,
4265 .llseek = seq_lseek,
4266 .release = single_release,
4269 static struct dentry *l2cap_debugfs;
/* Registration record hooking L2CAP into the HCI core: connection
 * accept policy, link up/down notifications, security results, and
 * inbound ACL data. */
4271 static struct hci_proto l2cap_hci_proto = {
4273 .id = HCI_PROTO_L2CAP,
4274 .connect_ind = l2cap_connect_ind,
4275 .connect_cfm = l2cap_connect_cfm,
4276 .disconn_ind = l2cap_disconn_ind,
4277 .disconn_cfm = l2cap_disconn_cfm,
4278 .security_cfm = l2cap_security_cfm,
4279 .recv_acldata = l2cap_recv_acldata
/* Module init: register the socket family, then the HCI protocol
 * (unwinding the sockets on failure), and finally create the optional
 * debugfs entry (failure there is only logged). */
4282 int __init l2cap_init(void)
4286 err = l2cap_init_sockets();
4290 err = hci_register_proto(&l2cap_hci_proto);
4292 BT_ERR("L2CAP protocol registration failed");
/* Unwind the socket registration done above. */
4293 bt_sock_unregister(BTPROTO_L2CAP);
4298 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4299 bt_debugfs, NULL, &l2cap_debugfs_fops);
4301 BT_ERR("Failed to create L2CAP debug file");
4307 l2cap_cleanup_sockets();
/* Module exit: remove debugfs, unregister from HCI (log on failure),
 * and tear down the socket family — reverse order of l2cap_init(). */
4311 void l2cap_exit(void)
4313 debugfs_remove(l2cap_debugfs);
4315 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4316 BT_ERR("L2CAP protocol unregistration failed");
4318 l2cap_cleanup_sockets();
/* Runtime-tunable module parameter to disable ERTM support. */
4321 module_param(disable_ertm, bool, 0644);
4322 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");