BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Copyright (C) 2010 Google Inc.

Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
/* NOTE(review): this file is a numbered listing — the leading integer on
 * every line is listing noise, and many original source lines (braces,
 * returns, blank lines, else-branches) are missing, as the gaps in that
 * numbering show.  Comments below document only what the surviving
 * fragments establish; anything else is hedged. */

/* Local L2CAP feature mask advertised in Information Responses. */
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Supported fixed-channel bitmap; 0x02 presumably flags the signalling
 * channel — confirm against the Core Spec fixed-channel assignments. */
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
/* Workqueue backing l2cap_busy_work (ERTM local-busy handling). */
63 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, guarded by its embedded rwlock. */
65 struct bt_sock_list l2cap_sk_list = {
66 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for helpers defined later in this file. */
69 static void l2cap_busy_work(struct work_struct *work);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
73 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77 /* ---- L2CAP channels ---- */

/* Linear scan of conn->chan_l for the channel whose destination CID
 * matches.  Lock-free variant: caller must hold conn->chan_lock. */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 list_for_each_entry(c, &conn->chan_l, list) {
83 struct sock *s = c->sk;
84 if (l2cap_pi(s)->dcid == cid)

/* As above, but matching on the source CID.  Caller holds chan_lock. */
91 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
95 list_for_each_entry(c, &conn->chan_l, list) {
96 struct sock *s = c->sk;
97 if (l2cap_pi(s)->scid == cid)

103 /* Find channel with given SCID.
104 * Returns locked socket */
/* Locked wrapper: takes the chan_lock read side around the SCID lookup.
 * NOTE(review): the missing lines (106, 108, 111-112, 114-115) presumably
 * lock the found socket before releasing chan_lock, per the comment above. */
105 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
107 struct l2cap_chan *c;
109 read_lock(&conn->chan_lock);
110 c = __l2cap_get_chan_by_scid(conn, cid);
113 read_unlock(&conn->chan_lock);

/* Find the channel whose outstanding signalling ident matches.
 * Caller must hold conn->chan_lock. */
117 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
119 struct l2cap_chan *c;
121 list_for_each_entry(c, &conn->chan_l, list) {
122 if (c->ident == ident)

/* Locked wrapper around the ident lookup (chan_lock read side). */
128 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
130 struct l2cap_chan *c;
132 read_lock(&conn->chan_lock);
133 c = __l2cap_get_chan_by_ident(conn, ident);
136 read_unlock(&conn->chan_lock);

/* Allocate a free dynamic source CID for this connection by scanning
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END) for an unused value. */
140 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
142 u16 cid = L2CAP_CID_DYN_START;
144 for (; cid < L2CAP_CID_DYN_END; cid++) {
145 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Allocate a zeroed l2cap_chan for @sk.  GFP_ATOMIC because callers may
 * run in softirq/locked context. */
152 static struct l2cap_chan *l2cap_chan_alloc(struct sock *sk)
154 struct l2cap_chan *chan;
156 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);

/* Attach @chan to @conn and assign CIDs/MTU according to socket type:
 * SEQPACKET/STREAM get a dynamic CID (or the fixed LE data CID on LE
 * links), DGRAM uses the connectionless CID, and raw sockets are pinned
 * to the signalling CID.  Caller must hold conn->chan_lock for writing
 * (see l2cap_chan_add below). */
165 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
167 struct sock *sk = chan->sk;
169 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
170 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 = "remote user terminated connection" default disconnect reason. */
172 conn->disc_reason = 0x13;
174 l2cap_pi(sk)->conn = conn;
176 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
177 if (conn->hcon->type == LE_LINK) {
179 l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
180 l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
181 l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
183 /* Alloc CID for connection-oriented socket */
184 l2cap_pi(sk)->scid = l2cap_alloc_cid(conn);
185 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
187 } else if (sk->sk_type == SOCK_DGRAM) {
188 /* Connectionless socket */
189 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
190 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
191 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
193 /* Raw socket can send/recv signalling messages only */
194 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
195 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
196 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
/* Finally link the channel onto the connection's channel list. */
201 list_add(&chan->list, &conn->chan_l);
205 * Must be called on the locked socket. */
/* Tear down @chan: unlink it from the connection, drop the hci_conn
 * reference, mark the socket closed/zapped, wake any listener, and for
 * ERTM channels stop all timers and purge the SREJ/busy queues. */
206 void l2cap_chan_del(struct l2cap_chan *chan, int err)
208 struct sock *sk = chan->sk;
209 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
210 struct sock *parent = bt_sk(sk)->parent;
212 l2cap_sock_clear_timer(sk);
214 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
217 /* Delete from channel list */
218 write_lock_bh(&conn->chan_lock);
219 list_del(&chan->list);
220 write_unlock_bh(&conn->chan_lock);
222 l2cap_pi(sk)->conn = NULL;
/* Release the reference taken when the channel was attached. */
224 hci_conn_put(conn->hcon);
227 sk->sk_state = BT_CLOSED;
228 sock_set_flag(sk, SOCK_ZAPPED);
/* Child of a listening socket: detach from the accept queue and poke
 * the parent so accept() notices. */
234 bt_accept_unlink(sk);
235 parent->sk_data_ready(parent, 0);
237 sk->sk_state_change(sk);
/* Nothing more to clean up unless configuration completed both ways
 * (only then were ERTM resources set up). */
239 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE &&
240 l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE))
243 skb_queue_purge(TX_QUEUE(sk));
245 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
246 struct srej_list *l, *tmp;
248 del_timer(&chan->retrans_timer);
249 del_timer(&chan->monitor_timer);
250 del_timer(&chan->ack_timer);
252 skb_queue_purge(&chan->srej_q);
253 skb_queue_purge(&chan->busy_q);
/* Free every pending SREJ entry (freeing presumably happens on the
 * missing lines inside this loop). */
255 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Map socket type + requested security level to the HCI authentication
 * requirement used when establishing the ACL link:
 *  - raw sockets (dedicated bonding, e.g. pairing tools),
 *  - PSM 0x0001 (SDP: downgraded to SDP-level security, never bonding),
 *  - everything else (general bonding). */
265 static inline u8 l2cap_get_auth_type(struct sock *sk)
267 if (sk->sk_type == SOCK_RAW) {
268 switch (l2cap_pi(sk)->sec_level) {
269 case BT_SECURITY_HIGH:
270 return HCI_AT_DEDICATED_BONDING_MITM;
271 case BT_SECURITY_MEDIUM:
272 return HCI_AT_DEDICATED_BONDING;
274 return HCI_AT_NO_BONDING;
276 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
/* SDP never needs real security; quietly lower LOW to SDP level. */
277 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
278 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
280 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
281 return HCI_AT_NO_BONDING_MITM;
283 return HCI_AT_NO_BONDING;
285 switch (l2cap_pi(sk)->sec_level) {
286 case BT_SECURITY_HIGH:
287 return HCI_AT_GENERAL_BONDING_MITM;
288 case BT_SECURITY_MEDIUM:
289 return HCI_AT_GENERAL_BONDING;
291 return HCI_AT_NO_BONDING;

296 /* Service level security */
/* Ask the HCI layer to enforce the socket's security level on the
 * underlying ACL link; returns whatever hci_conn_security() reports. */
297 static inline int l2cap_check_security(struct sock *sk)
299 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
302 auth_type = l2cap_get_auth_type(sk);
304 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for @conn.
 * Serialized by conn->lock; wraps within the kernel's 1-128 range
 * (the wrap itself is on a missing line after the check below). */
308 u8 l2cap_get_ident(struct l2cap_conn *conn)
312 /* Get next available identificator.
313 * 1 - 128 are used by kernel.
314 * 129 - 199 are reserved.
315 * 200 - 254 are used by utilities like l2ping, etc.
318 spin_lock_bh(&conn->lock);
320 if (++conn->tx_ident > 128)
325 spin_unlock_bh(&conn->lock);

/* Build a signalling command and push it out on the ACL link, using
 * the non-flushable start flag when the controller supports it. */
330 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
332 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
335 BT_DBG("code 0x%2.2x", code);
340 if (lmp_no_flush_capable(conn->hcon->hdev))
341 flags = ACL_START_NO_FLUSH;
345 hci_send_acl(conn->hcon, skb, flags);
/* Build and transmit an ERTM S-frame carrying @control.  Consumes the
 * pending F-bit/P-bit requests from chan->conn_state, appends a CRC16
 * FCS when the channel negotiated it, and drops the frame silently if
 * the socket is not connected. */
348 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
351 struct l2cap_hdr *lh;
352 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
353 struct l2cap_conn *conn = pi->conn;
/* l2cap_pinfo embeds the sock, so the cast recovers the socket. */
354 struct sock *sk = (struct sock *)pi;
/* Header + 2 bytes of control; FCS adds 2 more on a missing line. */
355 int count, hlen = L2CAP_HDR_SIZE + 2;
358 if (sk->sk_state != BT_CONNECTED)
361 if (pi->fcs == L2CAP_FCS_CRC16)
364 BT_DBG("chan %p, control 0x%2.2x", chan, control);
366 count = min_t(unsigned int, conn->mtu, hlen);
367 control |= L2CAP_CTRL_FRAME_TYPE;
/* Piggyback a pending Final bit, then clear the request. */
369 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
370 control |= L2CAP_CTRL_FINAL;
371 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Same for a pending Poll bit. */
374 if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
375 control |= L2CAP_CTRL_POLL;
376 chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
379 skb = bt_skb_alloc(count, GFP_ATOMIC);
383 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
384 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
385 lh->cid = cpu_to_le16(pi->dcid);
386 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything before the trailing 2 FCS bytes. */
388 if (pi->fcs == L2CAP_FCS_CRC16) {
389 u16 fcs = crc16(0, (u8 *)lh, count - 2);
390 put_unaligned_le16(fcs, skb_put(skb, 2));
393 if (lmp_no_flush_capable(conn->hcon->hdev))
394 flags = ACL_START_NO_FLUSH;
398 hci_send_acl(pi->conn->hcon, skb, flags);

/* Send RR, or RNR while we are locally busy (recording that an RNR
 * went out so the busy condition can later be cleared properly). */
401 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
403 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
404 control |= L2CAP_SUPER_RCV_NOT_READY;
405 chan->conn_state |= L2CAP_CONN_RNR_SENT;
407 control |= L2CAP_SUPER_RCV_READY;
409 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
411 l2cap_send_sframe(chan, control);

/* True while no Connection Request is outstanding for this socket. */
414 static inline int __l2cap_no_conn_pending(struct sock *sk)
416 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment.  If the remote feature mask is
 * already known (or being fetched), send a Connection Request once
 * security passes; otherwise first issue an Information Request for
 * the feature mask, guarded by the info timer. */
419 static void l2cap_do_start(struct l2cap_chan *chan)
421 struct sock *sk = chan->sk;
422 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
424 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight — wait for its completion. */
425 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
428 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
429 struct l2cap_conn_req req;
430 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
431 req.psm = l2cap_pi(sk)->psm;
433 chan->ident = l2cap_get_ident(conn);
434 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
436 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
/* No feature-mask request sent yet: do that first. */
440 struct l2cap_info_req req;
441 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
443 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
444 conn->info_ident = l2cap_get_ident(conn);
446 mod_timer(&conn->info_timer, jiffies +
447 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
449 l2cap_send_cmd(conn, conn->info_ident,
450 L2CAP_INFO_REQ, sizeof(req), &req);

/* Check @mode against both the remote feature mask and our own
 * (local mask is force-extended with ERTM/streaming — presumably
 * gated by a config/module condition on the missing line 457). */
454 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
456 u32 local_feat_mask = l2cap_feat_mask;
458 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
461 case L2CAP_MODE_ERTM:
462 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
463 case L2CAP_MODE_STREAMING:
464 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Abort the channel: drop all queued TX data, stop ERTM timers, send a
 * Disconnection Request to the peer and move the socket to BT_DISCONN.
 * @err is presumably stored as sk_err on a missing line — confirm. */
470 void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
473 struct l2cap_disconn_req req;
480 skb_queue_purge(TX_QUEUE(sk));
482 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
483 del_timer(&chan->retrans_timer);
484 del_timer(&chan->monitor_timer);
485 del_timer(&chan->ack_timer);
488 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
489 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
490 l2cap_send_cmd(conn, l2cap_get_ident(conn),
491 L2CAP_DISCONN_REQ, sizeof(req), &req);
493 sk->sk_state = BT_DISCONN;
497 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel forward once the link-level
 * feature exchange has finished: BT_CONNECT channels get a Connection
 * Request (or are closed if their mode is unsupported), BT_CONNECT2
 * channels get a Connection Response plus, on success, the first
 * Configure Request. */
498 static void l2cap_conn_start(struct l2cap_conn *conn)
500 struct l2cap_chan *chan, *tmp;
502 BT_DBG("conn %p", conn);
504 read_lock(&conn->chan_lock);
506 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
507 struct sock *sk = chan->sk;
/* Only SEQPACKET/STREAM sockets take part in connect/config. */
511 if (sk->sk_type != SOCK_SEQPACKET &&
512 sk->sk_type != SOCK_STREAM) {
517 if (sk->sk_state == BT_CONNECT) {
518 struct l2cap_conn_req req;
520 if (!l2cap_check_security(sk) ||
521 !__l2cap_no_conn_pending(sk)) {
/* Device insists on a mode the remote can't do: give up. */
526 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
528 && l2cap_pi(sk)->conf_state &
529 L2CAP_CONF_STATE2_DEVICE) {
530 /* __l2cap_sock_close() calls list_del(chan)
531 * so release the lock */
/* NOTE(review): _bh unlock/relock here versus the plain read_lock taken
 * above — looks inconsistent; verify against the locking context. */
532 read_unlock_bh(&conn->chan_lock);
533 __l2cap_sock_close(sk, ECONNRESET);
534 read_lock_bh(&conn->chan_lock);
539 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
540 req.psm = l2cap_pi(sk)->psm;
542 chan->ident = l2cap_get_ident(conn);
543 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
545 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
548 } else if (sk->sk_state == BT_CONNECT2) {
549 struct l2cap_conn_rsp rsp;
/* Response carries our scid as dcid and vice versa. */
551 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
552 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
554 if (l2cap_check_security(sk)) {
555 if (bt_sk(sk)->defer_setup) {
/* Userspace must accept() first: report authorization pending. */
556 struct sock *parent = bt_sk(sk)->parent;
557 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
558 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
559 parent->sk_data_ready(parent, 0);
562 sk->sk_state = BT_CONFIG;
563 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
564 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not settled yet: authentication pending. */
567 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
568 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
571 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Skip config if one was already sent or we didn't accept. */
574 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
575 rsp.result != L2CAP_CR_SUCCESS) {
580 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
581 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
582 l2cap_build_conf_req(chan, buf), buf);
583 chan->num_conf_req++;
589 read_unlock(&conn->chan_lock);
592 /* Find socket with cid and source bdaddr.
593 * Returns closest match, locked.
/* Walk the global socket list for a socket (optionally in @state) bound
 * to @cid: an exact source-address match wins; a BDADDR_ANY binding is
 * remembered (presumably in sk1, as in l2cap_get_sock_by_psm) as the
 * closest-match fallback. */
595 static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
597 struct sock *s, *sk = NULL, *sk1 = NULL;
598 struct hlist_node *node;
600 read_lock(&l2cap_sk_list.lock);
602 sk_for_each(sk, node, &l2cap_sk_list.head) {
603 if (state && sk->sk_state != state)
606 if (l2cap_pi(sk)->scid == cid) {
608 if (!bacmp(&bt_sk(sk)->src, src))
612 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
619 read_unlock(&l2cap_sk_list.lock);

/* An incoming LE link came up: if somebody is listening on the LE data
 * CID, clone a child socket, attach a fresh channel to the connection
 * and immediately mark it connected (LE data channels need no config). */
624 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
626 struct sock *parent, *sk;
627 struct l2cap_chan *chan;
631 /* Check if we have socket listening on cid */
632 parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
637 /* Check for backlog size */
638 if (sk_acceptq_is_full(parent)) {
639 BT_DBG("backlog full %d", parent->sk_ack_backlog);
643 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
647 chan = l2cap_chan_alloc(sk);
653 write_lock_bh(&conn->chan_lock);
/* Hold the ACL while this channel exists (released in chan_del). */
655 hci_conn_hold(conn->hcon);
657 l2cap_sock_init(sk, parent);
659 bacpy(&bt_sk(sk)->src, conn->src);
660 bacpy(&bt_sk(sk)->dst, conn->dst);
662 bt_accept_enqueue(parent, sk);
664 __l2cap_chan_add(conn, chan);
666 l2cap_pi(sk)->chan = chan;
668 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
670 sk->sk_state = BT_CONNECTED;
671 parent->sk_data_ready(parent, 0);
673 write_unlock_bh(&conn->chan_lock);
676 bh_unlock_sock(parent);
/* The HCI link is up: handle an incoming LE link, then walk every
 * channel — LE and non-connection-oriented sockets become connected
 * immediately, while connecting SEQPACKET/STREAM channels proceed via
 * l2cap_do_start(). */
679 static void l2cap_conn_ready(struct l2cap_conn *conn)
681 struct l2cap_chan *chan;
683 BT_DBG("conn %p", conn);
685 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
686 l2cap_le_conn_ready(conn);
688 read_lock(&conn->chan_lock);
690 list_for_each_entry(chan, &conn->chan_l, list) {
691 struct sock *sk = chan->sk;
695 if (conn->hcon->type == LE_LINK) {
696 l2cap_sock_clear_timer(sk);
697 sk->sk_state = BT_CONNECTED;
698 sk->sk_state_change(sk);
/* Raw/DGRAM sockets have no config phase — connected right away. */
701 if (sk->sk_type != SOCK_SEQPACKET &&
702 sk->sk_type != SOCK_STREAM) {
703 l2cap_sock_clear_timer(sk);
704 sk->sk_state = BT_CONNECTED;
705 sk->sk_state_change(sk);
706 } else if (sk->sk_state == BT_CONNECT)
707 l2cap_do_start(chan);
712 read_unlock(&conn->chan_lock);

715 /* Notify sockets that we cannot guaranty reliability anymore */
/* Presumably sets sk_err = err for force_reliable sockets on the
 * missing line inside the loop — confirm. */
716 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
718 struct l2cap_chan *chan;
720 BT_DBG("conn %p", conn);
722 read_lock(&conn->chan_lock);
724 list_for_each_entry(chan, &conn->chan_l, list) {
725 struct sock *sk = chan->sk;
727 if (l2cap_pi(sk)->force_reliable)
731 read_unlock(&conn->chan_lock);

/* Info-request timer expiry: give up on the feature-mask exchange and
 * let the pending channels proceed with defaults. */
734 static void l2cap_info_timeout(unsigned long arg)
736 struct l2cap_conn *conn = (void *) arg;
738 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
739 conn->info_ident = 0;
741 l2cap_conn_start(conn);
/* Create (or return the existing) l2cap_conn for @hcon: pick the MTU
 * from the controller (LE MTU when available on LE links, else ACL
 * MTU), initialise locks/lists and arm the info timer for BR/EDR. */
744 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
746 struct l2cap_conn *conn = hcon->l2cap_data;
751 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
755 hcon->l2cap_data = conn;
758 BT_DBG("hcon %p conn %p", hcon, conn);
760 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
761 conn->mtu = hcon->hdev->le_mtu;
763 conn->mtu = hcon->hdev->acl_mtu;
765 conn->src = &hcon->hdev->bdaddr;
766 conn->dst = &hcon->dst;
770 spin_lock_init(&conn->lock);
771 rwlock_init(&conn->chan_lock);
773 INIT_LIST_HEAD(&conn->chan_l);
/* LE links do the feature exchange differently; only BR/EDR needs
 * the information-request timeout. */
775 if (hcon->type != LE_LINK)
776 setup_timer(&conn->info_timer, l2cap_info_timeout,
777 (unsigned long) conn);
/* Default disconnect reason: remote user terminated connection. */
779 conn->disc_reason = 0x13;

/* Destroy the l2cap_conn on @hcon: free any partial reassembly skb,
 * delete every channel with @err, stop the info timer if it was armed,
 * and detach from the hci_conn. */
784 static void l2cap_conn_del(struct hci_conn *hcon, int err)
786 struct l2cap_conn *conn = hcon->l2cap_data;
787 struct l2cap_chan *chan, *l;
793 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
795 kfree_skb(conn->rx_skb);
798 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
801 l2cap_chan_del(chan, err);
806 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
807 del_timer_sync(&conn->info_timer);
809 hcon->l2cap_data = NULL;

/* Locked wrapper for __l2cap_chan_add (chan_lock write side). */
813 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
815 write_lock_bh(&conn->chan_lock);
816 __l2cap_chan_add(conn, chan);
817 write_unlock_bh(&conn->chan_lock);
820 /* ---- Socket interface ---- */
822 /* Find socket with psm and source bdaddr.
823 * Returns closest match.
/* Exact source-address match returns immediately via @node; a socket
 * bound to BDADDR_ANY is kept in sk1 as the fallback. */
825 static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
827 struct sock *sk = NULL, *sk1 = NULL;
828 struct hlist_node *node;
830 read_lock(&l2cap_sk_list.lock);
832 sk_for_each(sk, node, &l2cap_sk_list.head) {
833 if (state && sk->sk_state != state)
836 if (l2cap_pi(sk)->psm == psm) {
838 if (!bacmp(&bt_sk(sk)->src, src))
842 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
847 read_unlock(&l2cap_sk_list.lock);
/* node != NULL means the loop broke on an exact match. */
849 return node ? sk : sk1;

/* Initiate an outgoing L2CAP connection for @sk: route to a local
 * adapter, create the ACL (or LE) link with the proper auth type,
 * attach a channel, and either finish immediately if the link is
 * already up or wait for l2cap_conn_ready(). */
852 int l2cap_do_connect(struct sock *sk)
854 bdaddr_t *src = &bt_sk(sk)->src;
855 bdaddr_t *dst = &bt_sk(sk)->dst;
856 struct l2cap_conn *conn;
857 struct l2cap_chan *chan;
858 struct hci_conn *hcon;
859 struct hci_dev *hdev;
863 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
866 hdev = hci_get_route(dst, src);
868 return -EHOSTUNREACH;
870 hci_dev_lock_bh(hdev);
872 auth_type = l2cap_get_auth_type(sk);
/* The LE data CID selects an LE link instead of classic ACL. */
874 if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
875 hcon = hci_connect(hdev, LE_LINK, dst,
876 l2cap_pi(sk)->sec_level, auth_type);
878 hcon = hci_connect(hdev, ACL_LINK, dst,
879 l2cap_pi(sk)->sec_level, auth_type);
886 conn = l2cap_conn_add(hcon, 0);
893 chan = l2cap_chan_alloc(sk);
900 /* Update source addr of the socket */
901 bacpy(src, conn->src);
903 l2cap_chan_add(conn, chan);
905 l2cap_pi(sk)->chan = chan;
907 sk->sk_state = BT_CONNECT;
908 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
910 if (hcon->state == BT_CONNECTED) {
/* Raw/DGRAM: no L2CAP handshake needed once security passes. */
911 if (sk->sk_type != SOCK_SEQPACKET &&
912 sk->sk_type != SOCK_STREAM) {
913 l2cap_sock_clear_timer(sk);
914 if (l2cap_check_security(sk))
915 sk->sk_state = BT_CONNECTED;
917 l2cap_do_start(chan);
923 hci_dev_unlock_bh(hdev);
/* Sleep (interruptibly) until every transmitted ERTM frame has been
 * acknowledged or the connection goes away; bails out on pending
 * signals or a socket error. */
928 int __l2cap_wait_ack(struct sock *sk)
930 DECLARE_WAITQUEUE(wait, current);
934 add_wait_queue(sk_sleep(sk), &wait);
935 while ((l2cap_pi(sk)->chan->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
936 set_current_state(TASK_INTERRUPTIBLE);
941 if (signal_pending(current)) {
942 err = sock_intr_errno(timeo);
947 timeo = schedule_timeout(timeo);
950 err = sock_error(sk);
954 set_current_state(TASK_RUNNING);
955 remove_wait_queue(sk_sleep(sk), &wait);

/* ERTM monitor timer: the peer stopped responding to our polls.
 * After remote_max_tx retries give up and disconnect, otherwise
 * re-arm the monitor and poll again. */
959 static void l2cap_monitor_timeout(unsigned long arg)
961 struct l2cap_chan *chan = (void *) arg;
962 struct sock *sk = chan->sk;
964 BT_DBG("chan %p", chan);
967 if (chan->retry_count >= chan->remote_max_tx) {
968 l2cap_send_disconn_req(l2cap_pi(sk)->conn, chan, ECONNABORTED);
974 __mod_monitor_timer();
976 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);

/* ERTM retransmission timer: an I-frame went unacknowledged.  Start
 * the poll/monitor sequence and wait for the peer's Final bit. */
980 static void l2cap_retrans_timeout(unsigned long arg)
982 struct l2cap_chan *chan = (void *) arg;
983 struct sock *sk = chan->sk;
985 BT_DBG("chan %p", chan);
988 chan->retry_count = 1;
989 __mod_monitor_timer();
991 chan->conn_state |= L2CAP_CONN_WAIT_F;
993 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Release TX-queue frames the peer has acknowledged: dequeue from the
 * head until the frame with expected_ack_seq is reached (that one is
 * kept), and stop the retransmission timer once nothing is in flight. */
997 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
999 struct sock *sk = chan->sk;
1000 struct sk_buff *skb;
1002 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1003 chan->unacked_frames) {
1004 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1007 skb = skb_dequeue(TX_QUEUE(sk));
1010 chan->unacked_frames--;
1013 if (!chan->unacked_frames)
1014 del_timer(&chan->retrans_timer);

/* Push a ready-made L2CAP frame onto the ACL link, marking it
 * non-flushable when the socket didn't ask for flushable data and the
 * controller supports it. */
1017 void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1019 struct l2cap_pinfo *pi = l2cap_pi(sk);
1020 struct hci_conn *hcon = pi->conn->hcon;
1023 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1025 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
1026 flags = ACL_START_NO_FLUSH;
1030 hci_send_acl(hcon, skb, flags);

/* Streaming mode TX: drain the whole queue, stamping each frame with
 * the next TxSeq (mod-64) and recomputing the trailing FCS in place
 * when CRC16 is in use.  No retransmission bookkeeping. */
1033 void l2cap_streaming_send(struct l2cap_chan *chan)
1035 struct sock *sk = chan->sk;
1036 struct sk_buff *skb;
1037 struct l2cap_pinfo *pi = l2cap_pi(sk);
1040 while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
1041 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1042 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1043 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1045 if (pi->fcs == L2CAP_FCS_CRC16) {
1046 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1047 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1050 l2cap_do_send(sk, skb);
1052 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* Retransmit the single I-frame with sequence number @tx_seq: locate it
 * in the TX queue, clone it, refresh ReqSeq/Final bits and FCS, and
 * send the clone.  Disconnects if the frame already hit remote_max_tx. */
1056 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1058 struct sock *sk = chan->sk;
1059 struct l2cap_pinfo *pi = l2cap_pi(sk);
1060 struct sk_buff *skb, *tx_skb;
1063 skb = skb_peek(TX_QUEUE(sk));
1068 if (bt_cb(skb)->tx_seq == tx_seq)
1071 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1074 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1076 if (chan->remote_max_tx &&
1077 bt_cb(skb)->retries == chan->remote_max_tx) {
1078 l2cap_send_disconn_req(pi->conn, chan, ECONNABORTED);
/* Clone shares the data buffer; the retry count lives on the original. */
1082 tx_skb = skb_clone(skb, GFP_ATOMIC);
1083 bt_cb(skb)->retries++;
1084 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1086 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1087 control |= L2CAP_CTRL_FINAL;
1088 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1091 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1092 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1094 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1096 if (pi->fcs == L2CAP_FCS_CRC16) {
1097 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1098 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1101 l2cap_do_send(sk, tx_skb);

/* ERTM TX engine: send frames from sk_send_head while the TX window
 * has room, stamping ReqSeq/TxSeq/Final and FCS on a clone of each
 * frame, arming the retransmission timer and tracking unacked/sent
 * counters.  Returns (on missing lines) presumably the number of
 * frames sent — confirm against callers like l2cap_send_ack. */
1104 int l2cap_ertm_send(struct l2cap_chan *chan)
1106 struct sk_buff *skb, *tx_skb;
1107 struct sock *sk = chan->sk;
1108 struct l2cap_pinfo *pi = l2cap_pi(sk);
1112 if (sk->sk_state != BT_CONNECTED)
1115 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(chan))) {
1117 if (chan->remote_max_tx &&
1118 bt_cb(skb)->retries == chan->remote_max_tx) {
1119 l2cap_send_disconn_req(pi->conn, chan, ECONNABORTED);
1123 tx_skb = skb_clone(skb, GFP_ATOMIC);
1125 bt_cb(skb)->retries++;
/* Preserve only the SAR bits of the stored control field. */
1127 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1128 control &= L2CAP_CTRL_SAR;
1130 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1131 control |= L2CAP_CTRL_FINAL;
1132 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1134 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1135 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1136 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* skb->data and tx_skb->data alias the same buffer (skb_clone),
 * so writing the FCS through skb also updates the clone. */
1139 if (pi->fcs == L2CAP_FCS_CRC16) {
1140 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1141 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1144 l2cap_do_send(sk, tx_skb);
1146 __mod_retrans_timer();
1148 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1149 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* Only the first transmission counts toward the unacked window. */
1151 if (bt_cb(skb)->retries == 1)
1152 chan->unacked_frames++;
1154 chan->frames_sent++;
1156 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1157 sk->sk_send_head = NULL;
1159 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Go-Back-N style retransmission: rewind sk_send_head to the start of
 * the TX queue, reset next_tx_seq to the last acknowledged sequence
 * and replay everything through l2cap_ertm_send(). */
1167 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1169 struct sock *sk = chan->sk;
1172 if (!skb_queue_empty(TX_QUEUE(sk)))
1173 sk->sk_send_head = TX_QUEUE(sk)->next;
1175 chan->next_tx_seq = chan->expected_ack_seq;
1176 ret = l2cap_ertm_send(chan);

/* Acknowledge received data: while locally busy send RNR; otherwise
 * prefer piggybacking the ack on pending I-frames (ertm_send > 0),
 * falling back to an explicit RR S-frame. */
1180 static void l2cap_send_ack(struct l2cap_chan *chan)
1184 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1186 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1187 control |= L2CAP_SUPER_RCV_NOT_READY;
1188 chan->conn_state |= L2CAP_CONN_RNR_SENT;
1189 l2cap_send_sframe(chan, control);
1193 if (l2cap_ertm_send(chan) > 0)
1196 control |= L2CAP_SUPER_RCV_READY;
1197 l2cap_send_sframe(chan, control);

/* Send an SREJ with the Final bit for the last (most recent) entry on
 * the pending selective-reject list. */
1200 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1202 struct srej_list *tail;
1205 control = L2CAP_SUPER_SELECT_REJECT;
1206 control |= L2CAP_CTRL_FINAL;
1208 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1209 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1211 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user iovec data into @skb: the first @count bytes
 * go into the linear part, the remainder into a chain of frag_list
 * continuation skbs, each at most conn->mtu bytes. */
1214 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1216 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1217 struct sk_buff **frag;
1220 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1226 /* Continuation fragments (no L2CAP header) */
1227 frag = &skb_shinfo(skb)->frag_list;
1229 count = min_t(unsigned int, conn->mtu, len);
1231 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1234 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1240 frag = &(*frag)->next;

/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM
 * followed by the user payload.  Returns ERR_PTR on failure. */
1246 struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1248 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1249 struct sk_buff *skb;
/* hlen = L2CAP header + PSM field. */
1250 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1251 struct l2cap_hdr *lh;
1253 BT_DBG("sk %p len %d", sk, (int)len);
1255 count = min_t(unsigned int, (conn->mtu - hlen), len);
1256 skb = bt_skb_send_alloc(sk, count + hlen,
1257 msg->msg_flags & MSG_DONTWAIT, &err);
1259 return ERR_PTR(err);
1261 /* Create L2CAP header */
1262 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1263 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1264 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1265 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1267 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1268 if (unlikely(err < 0)) {
1270 return ERR_PTR(err);

/* Build a basic-mode (B-frame) PDU: bare L2CAP header + payload. */
1275 struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1277 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1278 struct sk_buff *skb;
1279 int err, count, hlen = L2CAP_HDR_SIZE;
1280 struct l2cap_hdr *lh;
1282 BT_DBG("sk %p len %d", sk, (int)len);
1284 count = min_t(unsigned int, (conn->mtu - hlen), len);
1285 skb = bt_skb_send_alloc(sk, count + hlen,
1286 msg->msg_flags & MSG_DONTWAIT, &err);
1288 return ERR_PTR(err);
1290 /* Create L2CAP header */
1291 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1292 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1293 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1295 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1296 if (unlikely(err < 0)) {
1298 return ERR_PTR(err);

/* Build an ERTM/streaming I-frame PDU: L2CAP header, 16-bit control
 * word, optional 2-byte SDU length (for SAR start frames — presumably
 * gated on @sdulen on the missing line before 1333), payload, and a
 * zero FCS placeholder filled in at transmit time. */
1303 struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1305 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1306 struct sk_buff *skb;
1307 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1308 struct l2cap_hdr *lh;
1310 BT_DBG("sk %p len %d", sk, (int)len);
1313 return ERR_PTR(-ENOTCONN);
1318 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1321 count = min_t(unsigned int, (conn->mtu - hlen), len);
1322 skb = bt_skb_send_alloc(sk, count + hlen,
1323 msg->msg_flags & MSG_DONTWAIT, &err);
1325 return ERR_PTR(err);
1327 /* Create L2CAP header */
1328 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1329 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1330 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1331 put_unaligned_le16(control, skb_put(skb, 2));
1333 put_unaligned_le16(sdulen, skb_put(skb, 2));
1335 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1336 if (unlikely(err < 0)) {
1338 return ERR_PTR(err);
1341 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1342 put_unaligned_le16(0, skb_put(skb, 2));
1344 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START frame
 * (carrying the total SDU length), CONTINUE frames of remote_mps
 * bytes, and a final END frame; splice the lot onto the TX queue and
 * prime sk_send_head if transmission isn't already in progress. */
1348 int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1350 struct sock *sk = chan->sk;
1351 struct sk_buff *skb;
1352 struct sk_buff_head sar_queue;
1356 skb_queue_head_init(&sar_queue);
1357 control = L2CAP_SDU_START;
/* START frame: sdulen argument carries the full SDU length. */
1358 skb = l2cap_create_iframe_pdu(sk, msg, chan->remote_mps, control, len);
1360 return PTR_ERR(skb);
1362 __skb_queue_tail(&sar_queue, skb);
1363 len -= chan->remote_mps;
1364 size += chan->remote_mps;
1369 if (len > chan->remote_mps) {
1370 control = L2CAP_SDU_CONTINUE;
1371 buflen = chan->remote_mps;
1373 control = L2CAP_SDU_END;
1377 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* Failure mid-SDU: drop everything built so far. */
1379 skb_queue_purge(&sar_queue);
1380 return PTR_ERR(skb);
1383 __skb_queue_tail(&sar_queue, skb);
1387 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1388 if (sk->sk_send_head == NULL)
1389 sk->sk_send_head = sar_queue.next;

/* Channel fully configured: clear config state and the setup timer,
 * then wake the connecting socket, or (for an accepted channel —
 * branch condition on a missing line) notify the listening parent. */
1394 static void l2cap_chan_ready(struct sock *sk)
1396 struct sock *parent = bt_sk(sk)->parent;
1398 BT_DBG("sk %p, parent %p", sk, parent);
1400 l2cap_pi(sk)->conf_state = 0;
1401 l2cap_sock_clear_timer(sk);
1404 /* Outgoing channel.
1405 * Wake up socket sleeping on connect.
1407 sk->sk_state = BT_CONNECTED;
1408 sk->sk_state_change(sk);
1410 /* Incoming channel.
1411 * Wake up socket sleeping on accept.
1413 parent->sk_data_ready(parent, 0);
1417 /* Copy frame to all raw sockets on that connection */
/* Clone @skb to every SOCK_RAW L2CAP socket on @conn (except its
 * origin), dropping clones the receive queue rejects. */
1418 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1420 struct sk_buff *nskb;
1421 struct l2cap_chan *chan;
1423 BT_DBG("conn %p", conn);
1425 read_lock(&conn->chan_lock);
1426 list_for_each_entry(chan, &conn->chan_l, list) {
1427 struct sock *sk = chan->sk;
1428 if (sk->sk_type != SOCK_RAW)
1431 /* Don't send frame to the socket it came from */
1434 nskb = skb_clone(skb, GFP_ATOMIC);
1438 if (sock_queue_rcv_skb(sk, nskb))
1441 read_unlock(&conn->chan_lock);

1444 /* ---- L2CAP signalling commands ---- */
/* Assemble a signalling PDU: L2CAP header on the proper signalling CID
 * (LE or BR/EDR), command header, then @dlen bytes of @data — spilling
 * into frag_list continuation skbs when it exceeds conn->mtu. */
1445 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1446 u8 code, u8 ident, u16 dlen, void *data)
1448 struct sk_buff *skb, **frag;
1449 struct l2cap_cmd_hdr *cmd;
1450 struct l2cap_hdr *lh;
1453 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1454 conn, code, ident, dlen);
1456 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1457 count = min_t(unsigned int, conn->mtu, len);
1459 skb = bt_skb_alloc(count, GFP_ATOMIC);
1463 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1464 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1466 if (conn->hcon->type == LE_LINK)
1467 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1469 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1471 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1474 cmd->len = cpu_to_le16(dlen);
/* First chunk of payload fits in the linear skb. */
1477 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1478 memcpy(skb_put(skb, count), data, count);
1484 /* Continuation fragments (no L2CAP header) */
1485 frag = &skb_shinfo(skb)->frag_list;
1487 count = min_t(unsigned int, conn->mtu, len);
1489 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1493 memcpy(skb_put(*frag, count), data, count);
1498 frag = &(*frag)->next;
/* Parse one configuration option TLV at *ptr, returning its type/len
 * and the value widened to unsigned long (1-, 2-, 4-byte values read
 * in place; larger values returned as a pointer).  *ptr advance and
 * the return value are on missing lines. */
1508 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1510 struct l2cap_conf_opt *opt = *ptr;
1513 len = L2CAP_CONF_OPT_SIZE + opt->len;
1521 *val = *((u8 *) opt->val);
1525 *val = get_unaligned_le16(opt->val);
1529 *val = get_unaligned_le32(opt->val);
/* Opaque/large option: hand back a pointer into the buffer. */
1533 *val = (unsigned long) opt->val;
1537 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);

/* Append one configuration option TLV at *ptr (1/2/4-byte values
 * stored directly, larger ones memcpy'd) and advance the cursor. */
1541 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1543 struct l2cap_conf_opt *opt = *ptr;
1545 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1552 *((u8 *) opt->val) = val;
1556 put_unaligned_le16(val, opt->val);
1560 put_unaligned_le32(val, opt->val);
1564 memcpy(opt->val, (void *) val, len);
1568 *ptr += L2CAP_CONF_OPT_SIZE + len;

/* ERTM ack timer expiry: send a pending acknowledgement under the
 * socket's BH lock. */
1571 static void l2cap_ack_timeout(unsigned long arg)
1573 struct l2cap_chan *chan = (void *) arg;
1575 bh_lock_sock(chan->sk);
1576 l2cap_send_ack(chan);
1577 bh_unlock_sock(chan->sk);
1580 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1582 struct sock *sk = chan->sk;
1584 chan->expected_ack_seq = 0;
1585 chan->unacked_frames = 0;
1586 chan->buffer_seq = 0;
1587 chan->num_acked = 0;
1588 chan->frames_sent = 0;
1590 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1591 (unsigned long) chan);
1592 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1593 (unsigned long) chan);
1594 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1596 skb_queue_head_init(&chan->srej_q);
1597 skb_queue_head_init(&chan->busy_q);
1599 INIT_LIST_HEAD(&chan->srej_l);
1601 INIT_WORK(&chan->busy_work, l2cap_busy_work);
1603 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1606 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1609 case L2CAP_MODE_STREAMING:
1610 case L2CAP_MODE_ERTM:
1611 if (l2cap_mode_supported(mode, remote_feat_mask))
1615 return L2CAP_MODE_BASIC;
/* Build an outgoing Configure Request for @chan into @data and
 * return its length (in the complete source).
 *
 * NOTE(review): this listing is a sampled excerpt — switch headers,
 * break statements and closing braces are elided, so the code is
 * annotated in place rather than restructured.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
struct l2cap_conf_req *req = data;
/* Start from the locally configured mode; may be downgraded below. */
struct l2cap_conf_rfc rfc = { .mode = pi->mode };
void *ptr = req->data;
BT_DBG("chan %p", chan);
/* Only (re)select the mode on the first request of the round. */
if (chan->num_conf_req || chan->num_conf_rsp)
case L2CAP_MODE_STREAMING:
case L2CAP_MODE_ERTM:
/* STATE2_DEVICE: the mode is mandated locally — keep it as-is. */
if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
/* Otherwise fall back to a mode the peer's feature mask supports. */
pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
/* Only advertise the MTU option when it differs from the default. */
if (pi->imtu != L2CAP_DEFAULT_MTU)
l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
case L2CAP_MODE_BASIC:
/* If the peer supports neither ERTM nor streaming, a bare request
 * implies basic mode and no RFC option is needed. */
if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
rfc.mode = L2CAP_MODE_BASIC;
rfc.max_transmit = 0;
rfc.retrans_timeout = 0;
rfc.monitor_timeout = 0;
rfc.max_pdu_size = 0;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
(unsigned long) &rfc);
case L2CAP_MODE_ERTM:
rfc.mode = L2CAP_MODE_ERTM;
rfc.txwin_size = pi->tx_win;
rfc.max_transmit = pi->max_tx;
/* Timeouts are chosen by the acceptor; request zero here. */
rfc.retrans_timeout = 0;
rfc.monitor_timeout = 0;
rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Cap the PDU so a full frame (+10 bytes headers/FCS) fits the ACL MTU. */
if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
(unsigned long) &rfc);
if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
/* Offer to drop FCS if we do not need it or the peer asked for none. */
if (pi->fcs == L2CAP_FCS_NONE ||
pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
pi->fcs = L2CAP_FCS_NONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
case L2CAP_MODE_STREAMING:
rfc.mode = L2CAP_MODE_STREAMING;
/* Streaming mode: no retransmissions, so these fields are zero. */
rfc.max_transmit = 0;
rfc.retrans_timeout = 0;
rfc.monitor_timeout = 0;
rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
(unsigned long) &rfc);
if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
if (pi->fcs == L2CAP_FCS_NONE ||
pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
pi->fcs = L2CAP_FCS_NONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
/* Fill in the request header last; dcid is the peer's source CID. */
req->dcid = cpu_to_le16(pi->dcid);
req->flags = cpu_to_le16(0);
/* Parse the accumulated Configure Request in chan->conf_req and build
 * the Configure Response into @data, accepting or adjusting each
 * option.  Returns the response length, or -ECONNREFUSED when the
 * requested mode cannot be negotiated.
 *
 * NOTE(review): sampled excerpt — switch headers, breaks and braces
 * are elided; annotated in place.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
struct l2cap_conf_rsp *rsp = data;
void *ptr = rsp->data;
void *req = chan->conf_req;
int len = chan->conf_len;
int type, hint, olen;
/* Defaults used when the peer sends no RFC/MTU option at all. */
struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
u16 mtu = L2CAP_DEFAULT_MTU;
u16 result = L2CAP_CONF_SUCCESS;
BT_DBG("chan %p", chan);
/* First pass: walk every option in the request. */
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; non-hints we don't know are rejected. */
hint = type & L2CAP_CONF_HINT;
type &= L2CAP_CONF_MASK;
case L2CAP_CONF_MTU:
case L2CAP_CONF_FLUSH_TO:
case L2CAP_CONF_QOS:
case L2CAP_CONF_RFC:
if (olen == sizeof(rfc))
memcpy(&rfc, (void *) val, olen);
case L2CAP_CONF_FCS:
/* Remember that the peer asked for no FCS. */
if (val == L2CAP_FCS_NONE)
pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: echo its type back as unaccepted. */
result = L2CAP_CONF_UNKNOWN;
*((u8 *) ptr++) = type;
/* Mode is only negotiable on the first exchange. */
if (chan->num_conf_rsp || chan->num_conf_req > 1)
case L2CAP_MODE_STREAMING:
case L2CAP_MODE_ERTM:
if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
pi->mode = l2cap_select_mode(rfc.mode,
pi->conn->feat_mask);
if (pi->mode != rfc.mode)
return -ECONNREFUSED;
if (pi->mode != rfc.mode) {
result = L2CAP_CONF_UNACCEPT;
rfc.mode = pi->mode;
/* Second disagreement on mode ends the negotiation. */
if (chan->num_conf_rsp == 1)
return -ECONNREFUSED;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
if (result == L2CAP_CONF_SUCCESS) {
/* Configure output options and let the other side know
* which ones we don't like. */
if (mtu < L2CAP_DEFAULT_MIN_MTU)
result = L2CAP_CONF_UNACCEPT;
pi->conf_state |= L2CAP_CONF_MTU_DONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
case L2CAP_MODE_BASIC:
/* Basic mode never uses an FCS. */
pi->fcs = L2CAP_FCS_NONE;
pi->conf_state |= L2CAP_CONF_MODE_DONE;
case L2CAP_MODE_ERTM:
chan->remote_tx_win = rfc.txwin_size;
chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's PDU size to what the ACL MTU can carry. */
if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* NOTE(review): le16_to_cpu() on a host-order constant looks like
 * it should be cpu_to_le16() — these fields are sent on the wire;
 * verify against the upstream endianness fix. */
rfc.retrans_timeout =
le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
rfc.monitor_timeout =
le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
pi->conf_state |= L2CAP_CONF_MODE_DONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
case L2CAP_MODE_STREAMING:
if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
pi->conf_state |= L2CAP_CONF_MODE_DONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
/* Unsupported mode: reject, echoing our mode back. */
result = L2CAP_CONF_UNACCEPT;
memset(&rfc, 0, sizeof(rfc));
rfc.mode = pi->mode;
if (result == L2CAP_CONF_SUCCESS)
pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
rsp->scid = cpu_to_le16(pi->dcid);
rsp->result = cpu_to_le16(result);
rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configure Response (@rsp/@len) and build a new
 * Configure Request into @data with any adjusted options, updating
 * *result.  Returns the new request length or -ECONNREFUSED.
 *
 * NOTE(review): sampled excerpt — several lines are elided.
 */
static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_req *req = data;
void *ptr = req->data;
/* NOTE(review): rfc is read below (pi->mode = rfc.mode, timeouts)
 * even when the response carries no RFC option — it looks like an
 * uninitialized-stack read; upstream later initialized it. Confirm. */
struct l2cap_conf_rfc rfc;
BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: refuse it but
 * re-request our minimum acceptable value. */
if (val < L2CAP_DEFAULT_MIN_MTU) {
*result = L2CAP_CONF_UNACCEPT;
pi->imtu = L2CAP_DEFAULT_MIN_MTU;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
case L2CAP_CONF_FLUSH_TO:
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
case L2CAP_CONF_RFC:
if (olen == sizeof(rfc))
memcpy(&rfc, (void *)val, olen);
/* A locally mandated mode cannot be renegotiated by the peer. */
if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
rfc.mode != pi->mode)
return -ECONNREFUSED;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
/* Basic mode cannot be upgraded by the responder. */
if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
return -ECONNREFUSED;
pi->mode = rfc.mode;
if (*result == L2CAP_CONF_SUCCESS) {
/* Adopt the timeouts/MPS the peer granted. */
case L2CAP_MODE_ERTM:
pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
pi->mps = le16_to_cpu(rfc.max_pdu_size);
case L2CAP_MODE_STREAMING:
pi->mps = le16_to_cpu(rfc.max_pdu_size);
req->dcid = cpu_to_le16(pi->dcid);
req->flags = cpu_to_le16(0x0000);
1940 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1942 struct l2cap_conf_rsp *rsp = data;
1943 void *ptr = rsp->data;
1945 BT_DBG("sk %p", sk);
1947 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1948 rsp->result = cpu_to_le16(result);
1949 rsp->flags = cpu_to_le16(flags);
1954 void __l2cap_connect_rsp_defer(struct sock *sk)
1956 struct l2cap_conn_rsp rsp;
1957 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1958 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1961 sk->sk_state = BT_CONFIG;
1963 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1964 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1965 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1966 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1967 l2cap_send_cmd(conn, chan->ident,
1968 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1970 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
1973 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1974 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1975 l2cap_build_conf_req(chan, buf), buf);
1976 chan->num_conf_req++;
1979 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
1981 struct l2cap_pinfo *pi = l2cap_pi(sk);
1984 struct l2cap_conf_rfc rfc;
1986 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
1988 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
1991 while (len >= L2CAP_CONF_OPT_SIZE) {
1992 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1995 case L2CAP_CONF_RFC:
1996 if (olen == sizeof(rfc))
1997 memcpy(&rfc, (void *)val, olen);
2004 case L2CAP_MODE_ERTM:
2005 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2006 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2007 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2009 case L2CAP_MODE_STREAMING:
2010 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2014 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2016 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2018 if (rej->reason != 0x0000)
2021 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2022 cmd->ident == conn->info_ident) {
2023 del_timer(&conn->info_timer);
2025 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2026 conn->info_ident = 0;
2028 l2cap_conn_start(conn);
/* Handle an incoming Connect Request: find a listening socket for the
 * PSM, run security/backlog checks, allocate the child socket and
 * channel, and send the Connect Response (possibly pending on
 * authorization or feature discovery).
 *
 * NOTE(review): sampled excerpt — goto labels, error paths and braces
 * are elided; annotated in place.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
struct l2cap_conn_rsp rsp;
struct l2cap_chan *chan = NULL;
struct sock *parent, *sk = NULL;
int result, status = L2CAP_CS_NO_INFO;
u16 dcid = 0, scid = __le16_to_cpu(req->scid);
/* psm stays little-endian; it is compared/stored as __le16. */
__le16 psm = req->psm;
BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
/* Check if we have socket listening on psm */
parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
result = L2CAP_CR_BAD_PSM;
bh_lock_sock(parent);
/* Check if the ACL is secure enough (if not SDP) */
if (psm != cpu_to_le16(0x0001) &&
!hci_conn_check_link_mode(conn->hcon)) {
conn->disc_reason = 0x05;
result = L2CAP_CR_SEC_BLOCK;
/* Default failure for the allocation paths below. */
result = L2CAP_CR_NO_MEM;
/* Check for backlog size */
if (sk_acceptq_is_full(parent)) {
BT_DBG("backlog full %d", parent->sk_ack_backlog);
sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
chan = l2cap_chan_alloc(sk);
l2cap_sock_kill(sk);
write_lock_bh(&conn->chan_lock);
/* Check if we already have channel with that dcid */
if (__l2cap_get_chan_by_dcid(conn, scid)) {
write_unlock_bh(&conn->chan_lock);
sock_set_flag(sk, SOCK_ZAPPED);
l2cap_sock_kill(sk);
/* Hold the ACL for the lifetime of the channel. */
hci_conn_hold(conn->hcon);
l2cap_sock_init(sk, parent);
bacpy(&bt_sk(sk)->src, conn->src);
bacpy(&bt_sk(sk)->dst, conn->dst);
l2cap_pi(sk)->psm = psm;
l2cap_pi(sk)->dcid = scid;
bt_accept_enqueue(parent, sk);
__l2cap_chan_add(conn, chan);
l2cap_pi(sk)->chan = chan;
/* Our locally allocated source CID becomes the peer's dcid. */
dcid = l2cap_pi(sk)->scid;
l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
chan->ident = cmd->ident;
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
if (l2cap_check_security(sk)) {
/* defer_setup: wake the listener and answer "pending". */
if (bt_sk(sk)->defer_setup) {
sk->sk_state = BT_CONNECT2;
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHOR_PEND;
parent->sk_data_ready(parent, 0);
sk->sk_state = BT_CONFIG;
result = L2CAP_CR_SUCCESS;
status = L2CAP_CS_NO_INFO;
/* Security procedure still running. */
sk->sk_state = BT_CONNECT2;
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHEN_PEND;
/* Feature discovery not finished yet. */
sk->sk_state = BT_CONNECT2;
result = L2CAP_CR_PEND;
status = L2CAP_CS_NO_INFO;
write_unlock_bh(&conn->chan_lock);
bh_unlock_sock(parent);
rsp.scid = cpu_to_le16(scid);
rsp.dcid = cpu_to_le16(dcid);
rsp.result = cpu_to_le16(result);
rsp.status = cpu_to_le16(status);
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* First connection on this link: start feature discovery now. */
if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
struct l2cap_info_req info;
info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
conn->info_ident = l2cap_get_ident(conn);
mod_timer(&conn->info_timer, jiffies +
msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
l2cap_send_cmd(conn, conn->info_ident,
L2CAP_INFO_REQ, sizeof(info), &info);
/* On immediate success, start configuration right away. */
if (chan && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
result == L2CAP_CR_SUCCESS) {
l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
l2cap_build_conf_req(chan, buf), buf);
chan->num_conf_req++;
/* Handle an incoming Connect Response: on success move the channel to
 * BT_CONFIG and send the first Configure Request; on pending just set
 * the flag; otherwise tear the channel down with ECONNREFUSED.
 *
 * NOTE(review): sampled excerpt — lookup failure paths, unlock calls
 * and braces are elided; annotated in place.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
u16 scid, dcid, result, status;
struct l2cap_chan *chan;
scid = __le16_to_cpu(rsp->scid);
dcid = __le16_to_cpu(rsp->dcid);
result = __le16_to_cpu(rsp->result);
status = __le16_to_cpu(rsp->status);
BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* A pending response carries scid 0, so fall back to the ident. */
chan = l2cap_get_chan_by_scid(conn, scid);
chan = l2cap_get_chan_by_ident(conn, cmd->ident);
case L2CAP_CR_SUCCESS:
sk->sk_state = BT_CONFIG;
l2cap_pi(sk)->dcid = dcid;
l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
/* Do not send a second Configure Request. */
if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
l2cap_build_conf_req(chan, req), req);
chan->num_conf_req++;
/* L2CAP_CR_PEND: remember we are still waiting. */
l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
/* don't delete l2cap channel if sk is owned by user */
if (sock_owned_by_user(sk)) {
sk->sk_state = BT_DISCONN;
l2cap_sock_clear_timer(sk);
/* Retry deletion shortly once user context releases the sock. */
l2cap_sock_set_timer(sk, HZ / 5);
l2cap_chan_del(chan, ECONNREFUSED);
2238 static inline void set_default_fcs(struct l2cap_pinfo *pi)
2240 /* FCS is enabled only in ERTM or streaming mode, if one or both
2243 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2244 pi->fcs = L2CAP_FCS_NONE;
2245 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
2246 pi->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request: accumulate (possibly
 * fragmented) options in chan->conf_req, then parse them, answer with
 * a Configure Response and, when both directions are done, mark the
 * channel connected.
 *
 * NOTE(review): sampled excerpt — lookup failure paths, braces and
 * some statements are elided; annotated in place.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
struct l2cap_chan *chan;
dcid = __le16_to_cpu(req->dcid);
flags = __le16_to_cpu(req->flags);
BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
chan = l2cap_get_chan_by_scid(conn, dcid);
/* Configuration is only legal in BT_CONFIG; reject otherwise. */
if (sk->sk_state != BT_CONFIG) {
struct l2cap_cmd_rej rej;
rej.reason = cpu_to_le16(0x0002);
l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Reject if config buffer is too small. */
len = cmd_len - sizeof(*req);
if (chan->conf_len + len > sizeof(chan->conf_req)) {
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
l2cap_build_conf_rsp(sk, rsp,
L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment of the option list. */
memcpy(chan->conf_req + chan->conf_len, req->data, len);
chan->conf_len += len;
if (flags & 0x0001) {
/* Incomplete config. Send empty response. */
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
l2cap_build_conf_rsp(sk, rsp,
L2CAP_CONF_SUCCESS, 0x0001), rsp);
/* Complete config. */
len = l2cap_parse_conf_req(chan, rsp);
/* Negative length means the negotiation failed — disconnect. */
l2cap_send_disconn_req(conn, chan, ECONNRESET);
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
chan->num_conf_rsp++;
/* Reset config buffer. */
if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: the channel is ready. */
if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
set_default_fcs(l2cap_pi(sk));
sk->sk_state = BT_CONNECTED;
chan->next_tx_seq = 0;
chan->expected_tx_seq = 0;
__skb_queue_head_init(TX_QUEUE(sk));
if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
l2cap_ertm_init(chan);
l2cap_chan_ready(sk);
/* We answered the peer but never sent our own request — do it now. */
if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
l2cap_build_conf_req(chan, buf), buf);
chan->num_conf_req++;
/* Handle an incoming Configure Response: adopt granted parameters on
 * success, renegotiate on UNACCEPT (bounded by MAX_CONF_RSP), and
 * disconnect on any other result.
 *
 * NOTE(review): sampled excerpt — braces, gotos and some statements
 * are elided; annotated in place.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
u16 scid, flags, result;
struct l2cap_chan *chan;
/* NOTE(review): cmd->len is a wire-order __le16 used directly in
 * arithmetic — looks like it should be le16_to_cpu(cmd->len);
 * confirm against the upstream endianness fix. */
int len = cmd->len - sizeof(*rsp);
scid = __le16_to_cpu(rsp->scid);
flags = __le16_to_cpu(rsp->flags);
result = __le16_to_cpu(rsp->result);
BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
scid, flags, result);
chan = l2cap_get_chan_by_scid(conn, scid);
case L2CAP_CONF_SUCCESS:
l2cap_conf_rfc_get(sk, rsp->data, len);
case L2CAP_CONF_UNACCEPT:
/* Renegotiate, but give up after too many rejected rounds. */
if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
l2cap_send_disconn_req(conn, chan, ECONNRESET);
/* throw out any old stored conf requests */
result = L2CAP_CONF_SUCCESS;
len = l2cap_parse_conf_rsp(sk, rsp->data,
l2cap_send_disconn_req(conn, chan, ECONNRESET);
l2cap_send_cmd(conn, l2cap_get_ident(conn),
L2CAP_CONF_REQ, len, req);
chan->num_conf_req++;
if (result != L2CAP_CONF_SUCCESS)
/* Any other result: report the reset and disconnect. */
sk->sk_err = ECONNRESET;
l2cap_sock_set_timer(sk, HZ * 5);
l2cap_send_disconn_req(conn, chan, ECONNRESET);
l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
/* Both directions configured: bring the channel up. */
if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
set_default_fcs(l2cap_pi(sk));
sk->sk_state = BT_CONNECTED;
chan->next_tx_seq = 0;
chan->expected_tx_seq = 0;
__skb_queue_head_init(TX_QUEUE(sk));
if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
l2cap_ertm_init(chan);
l2cap_chan_ready(sk);
2425 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2427 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2428 struct l2cap_disconn_rsp rsp;
2430 struct l2cap_chan *chan;
2433 scid = __le16_to_cpu(req->scid);
2434 dcid = __le16_to_cpu(req->dcid);
2436 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2438 chan = l2cap_get_chan_by_scid(conn, dcid);
2444 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2445 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2446 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2448 sk->sk_shutdown = SHUTDOWN_MASK;
2450 /* don't delete l2cap channel if sk is owned by user */
2451 if (sock_owned_by_user(sk)) {
2452 sk->sk_state = BT_DISCONN;
2453 l2cap_sock_clear_timer(sk);
2454 l2cap_sock_set_timer(sk, HZ / 5);
2459 l2cap_chan_del(chan, ECONNRESET);
2462 l2cap_sock_kill(sk);
2466 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2468 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2470 struct l2cap_chan *chan;
2473 scid = __le16_to_cpu(rsp->scid);
2474 dcid = __le16_to_cpu(rsp->dcid);
2476 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2478 chan = l2cap_get_chan_by_scid(conn, scid);
2484 /* don't delete l2cap channel if sk is owned by user */
2485 if (sock_owned_by_user(sk)) {
2486 sk->sk_state = BT_DISCONN;
2487 l2cap_sock_clear_timer(sk);
2488 l2cap_sock_set_timer(sk, HZ / 5);
2493 l2cap_chan_del(chan, 0);
2496 l2cap_sock_kill(sk);
2500 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2502 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2505 type = __le16_to_cpu(req->type);
2507 BT_DBG("type 0x%4.4x", type);
2509 if (type == L2CAP_IT_FEAT_MASK) {
2511 u32 feat_mask = l2cap_feat_mask;
2512 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2513 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2514 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2516 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2518 put_unaligned_le32(feat_mask, rsp->data);
2519 l2cap_send_cmd(conn, cmd->ident,
2520 L2CAP_INFO_RSP, sizeof(buf), buf);
2521 } else if (type == L2CAP_IT_FIXED_CHAN) {
2523 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2524 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2525 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2526 memcpy(buf + 4, l2cap_fixed_chan, 8);
2527 l2cap_send_cmd(conn, cmd->ident,
2528 L2CAP_INFO_RSP, sizeof(buf), buf);
2530 struct l2cap_info_rsp rsp;
2531 rsp.type = cpu_to_le16(type);
2532 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2533 l2cap_send_cmd(conn, cmd->ident,
2534 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2540 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2542 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2545 type = __le16_to_cpu(rsp->type);
2546 result = __le16_to_cpu(rsp->result);
2548 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2550 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2551 if (cmd->ident != conn->info_ident ||
2552 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2555 del_timer(&conn->info_timer);
2557 if (result != L2CAP_IR_SUCCESS) {
2558 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2559 conn->info_ident = 0;
2561 l2cap_conn_start(conn);
2566 if (type == L2CAP_IT_FEAT_MASK) {
2567 conn->feat_mask = get_unaligned_le32(rsp->data);
2569 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2570 struct l2cap_info_req req;
2571 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2573 conn->info_ident = l2cap_get_ident(conn);
2575 l2cap_send_cmd(conn, conn->info_ident,
2576 L2CAP_INFO_REQ, sizeof(req), &req);
2578 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2579 conn->info_ident = 0;
2581 l2cap_conn_start(conn);
2583 } else if (type == L2CAP_IT_FIXED_CHAN) {
2584 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2585 conn->info_ident = 0;
2587 l2cap_conn_start(conn);
2593 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2598 if (min > max || min < 6 || max > 3200)
2601 if (to_multiplier < 10 || to_multiplier > 3200)
2604 if (max >= to_multiplier * 8)
2607 max_latency = (to_multiplier * 8 / max) - 1;
2608 if (latency > 499 || latency > max_latency)
2614 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2615 struct l2cap_cmd_hdr *cmd, u8 *data)
2617 struct hci_conn *hcon = conn->hcon;
2618 struct l2cap_conn_param_update_req *req;
2619 struct l2cap_conn_param_update_rsp rsp;
2620 u16 min, max, latency, to_multiplier, cmd_len;
2623 if (!(hcon->link_mode & HCI_LM_MASTER))
2626 cmd_len = __le16_to_cpu(cmd->len);
2627 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2630 req = (struct l2cap_conn_param_update_req *) data;
2631 min = __le16_to_cpu(req->min);
2632 max = __le16_to_cpu(req->max);
2633 latency = __le16_to_cpu(req->latency);
2634 to_multiplier = __le16_to_cpu(req->to_multiplier);
2636 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2637 min, max, latency, to_multiplier);
2639 memset(&rsp, 0, sizeof(rsp));
2641 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2643 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2645 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2647 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
2651 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
2656 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2657 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2661 switch (cmd->code) {
2662 case L2CAP_COMMAND_REJ:
2663 l2cap_command_rej(conn, cmd, data);
2666 case L2CAP_CONN_REQ:
2667 err = l2cap_connect_req(conn, cmd, data);
2670 case L2CAP_CONN_RSP:
2671 err = l2cap_connect_rsp(conn, cmd, data);
2674 case L2CAP_CONF_REQ:
2675 err = l2cap_config_req(conn, cmd, cmd_len, data);
2678 case L2CAP_CONF_RSP:
2679 err = l2cap_config_rsp(conn, cmd, data);
2682 case L2CAP_DISCONN_REQ:
2683 err = l2cap_disconnect_req(conn, cmd, data);
2686 case L2CAP_DISCONN_RSP:
2687 err = l2cap_disconnect_rsp(conn, cmd, data);
2690 case L2CAP_ECHO_REQ:
2691 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2694 case L2CAP_ECHO_RSP:
2697 case L2CAP_INFO_REQ:
2698 err = l2cap_information_req(conn, cmd, data);
2701 case L2CAP_INFO_RSP:
2702 err = l2cap_information_rsp(conn, cmd, data);
2706 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2714 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2715 struct l2cap_cmd_hdr *cmd, u8 *data)
2717 switch (cmd->code) {
2718 case L2CAP_COMMAND_REJ:
2721 case L2CAP_CONN_PARAM_UPDATE_REQ:
2722 return l2cap_conn_param_update_req(conn, cmd, data);
2724 case L2CAP_CONN_PARAM_UPDATE_RSP:
2728 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2733 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2734 struct sk_buff *skb)
2736 u8 *data = skb->data;
2738 struct l2cap_cmd_hdr cmd;
2741 l2cap_raw_recv(conn, skb);
2743 while (len >= L2CAP_CMD_HDR_SIZE) {
2745 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2746 data += L2CAP_CMD_HDR_SIZE;
2747 len -= L2CAP_CMD_HDR_SIZE;
2749 cmd_len = le16_to_cpu(cmd.len);
2751 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
2753 if (cmd_len > len || !cmd.ident) {
2754 BT_DBG("corrupted command");
2758 if (conn->hcon->type == LE_LINK)
2759 err = l2cap_le_sig_cmd(conn, &cmd, data);
2761 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
2764 struct l2cap_cmd_rej rej;
2766 BT_ERR("Wrong link type (%d)", err);
2768 /* FIXME: Map err to a valid reason */
2769 rej.reason = cpu_to_le16(0);
2770 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
2780 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
2782 u16 our_fcs, rcv_fcs;
2783 int hdr_size = L2CAP_HDR_SIZE + 2;
2785 if (pi->fcs == L2CAP_FCS_CRC16) {
2786 skb_trim(skb, skb->len - 2);
2787 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2788 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
2790 if (our_fcs != rcv_fcs)
2796 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
2800 chan->frames_sent = 0;
2802 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2804 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
2805 control |= L2CAP_SUPER_RCV_NOT_READY;
2806 l2cap_send_sframe(chan, control);
2807 chan->conn_state |= L2CAP_CONN_RNR_SENT;
2810 if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
2811 l2cap_retransmit_frames(chan);
2813 l2cap_ertm_send(chan);
2815 if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
2816 chan->frames_sent == 0) {
2817 control |= L2CAP_SUPER_RCV_READY;
2818 l2cap_send_sframe(chan, control);
2822 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
2824 struct sk_buff *next_skb;
2825 int tx_seq_offset, next_tx_seq_offset;
2827 bt_cb(skb)->tx_seq = tx_seq;
2828 bt_cb(skb)->sar = sar;
2830 next_skb = skb_peek(&chan->srej_q);
2832 __skb_queue_tail(&chan->srej_q, skb);
2836 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
2837 if (tx_seq_offset < 0)
2838 tx_seq_offset += 64;
2841 if (bt_cb(next_skb)->tx_seq == tx_seq)
2844 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
2845 chan->buffer_seq) % 64;
2846 if (next_tx_seq_offset < 0)
2847 next_tx_seq_offset += 64;
2849 if (next_tx_seq_offset > tx_seq_offset) {
2850 __skb_queue_before(&chan->srej_q, next_skb, skb);
2854 if (skb_queue_is_last(&chan->srej_q, next_skb))
2857 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
2859 __skb_queue_tail(&chan->srej_q, skb);
/* ERTM SAR reassembly: handle one I-frame according to its SAR bits
 * (unsegmented / start / continue / end), building up chan->sdu and
 * queueing the completed SDU on the socket.
 *
 * NOTE(review): sampled excerpt — case labels, gotos, error paths and
 * braces are elided; annotated in place.
 */
static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
struct sk_buff *_skb;
switch (control & L2CAP_CTRL_SAR) {
case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame in the middle of a SAR sequence is a
 * protocol error. */
if (chan->conn_state & L2CAP_CONN_SAR_SDU)
err = sock_queue_rcv_skb(chan->sk, skb);
case L2CAP_SDU_START:
if (chan->conn_state & L2CAP_CONN_SAR_SDU)
/* First two payload bytes carry the total SDU length. */
chan->sdu_len = get_unaligned_le16(skb->data);
if (chan->sdu_len > pi->imtu)
chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
/* pull sdu_len bytes only after alloc, because of Local Busy
* condition we have to be sure that this will be executed
* only once, i.e., when alloc does not fail */
memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
chan->conn_state |= L2CAP_CONN_SAR_SDU;
chan->partial_sdu_len = skb->len;
case L2CAP_SDU_CONTINUE:
/* Continuation without a preceding start is a protocol error. */
if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
chan->partial_sdu_len += skb->len;
if (chan->partial_sdu_len > chan->sdu_len)
memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
/* L2CAP_SDU_END path follows. */
if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
/* SAR_RETRY: the final segment was already accumulated on a
 * previous attempt that failed at the queueing step. */
if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
chan->partial_sdu_len += skb->len;
if (chan->partial_sdu_len > pi->imtu)
if (chan->partial_sdu_len != chan->sdu_len)
memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
_skb = skb_clone(chan->sdu, GFP_ATOMIC);
/* Clone failed: remember the retry state and report busy. */
chan->conn_state |= L2CAP_CONN_SAR_RETRY;
err = sock_queue_rcv_skb(chan->sk, _skb);
/* Receive queue full: keep the SDU and retry later. */
chan->conn_state |= L2CAP_CONN_SAR_RETRY;
chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
kfree_skb(chan->sdu);
/* Error path: drop any partial SDU and disconnect the channel. */
kfree_skb(chan->sdu);
l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
2972 static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
2974 struct sk_buff *skb;
2978 while ((skb = skb_dequeue(&chan->busy_q))) {
2979 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
2980 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
2982 skb_queue_head(&chan->busy_q, skb);
2986 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
2989 if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
2992 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2993 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
2994 l2cap_send_sframe(chan, control);
2995 chan->retry_count = 1;
2997 del_timer(&chan->retrans_timer);
2998 __mod_monitor_timer();
3000 chan->conn_state |= L2CAP_CONN_WAIT_F;
3003 chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3004 chan->conn_state &= ~L2CAP_CONN_RNR_SENT;
3006 BT_DBG("chan %p, Exit local busy", chan);
/* Workqueue handler run while the channel is in local-busy state: sleeps
 * on the socket waitqueue in HZ/5 slices, repeatedly retrying
 * l2cap_try_push_rx_skb() until busy_q drains.  Gives up after
 * L2CAP_LOCAL_BUSY_TRIES attempts by requesting a disconnect, and also
 * bails out on a pending signal or a socket error.
 * NOTE(review): listing is elided; error/break paths between the visible
 * lines are not shown. */
3011 static void l2cap_busy_work(struct work_struct *work)
3013 DECLARE_WAITQUEUE(wait, current);
3014 struct l2cap_chan *chan =
3015 container_of(work, struct l2cap_chan, busy_work);
3016 struct sock *sk = chan->sk;
3017 int n_tries = 0, timeo = HZ/5, err;
3018 struct sk_buff *skb;
3022 add_wait_queue(sk_sleep(sk), &wait);
3023 while ((skb = skb_peek(&chan->busy_q))) {
3024 set_current_state(TASK_INTERRUPTIBLE);
/* too many retries: the peer cannot be unblocked, tear the link down */
3026 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3028 l2cap_send_disconn_req(l2cap_pi(sk)->conn, chan, EBUSY);
3035 if (signal_pending(current)) {
3036 err = sock_intr_errno(timeo);
3041 timeo = schedule_timeout(timeo);
3044 err = sock_error(sk);
/* 0 from try_push means the queue drained and local busy was exited */
3048 if (l2cap_try_push_rx_skb(chan) == 0)
3052 set_current_state(TASK_RUNNING);
3053 remove_wait_queue(sk_sleep(sk), &wait);
/* Deliver an in-sequence I-frame.  If already locally busy, queue the frame
 * and retry the backlog.  Otherwise attempt reassembly directly; if that
 * reports a busy condition, enter local-busy: queue the frame, send an RNR
 * S-frame, stop acking, and kick the busy workqueue.
 * NOTE(review): listing is elided; the branch structure between visible
 * lines is not fully shown. */
3058 static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3062 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
/* stash the SAR bits so reassembly can be replayed later */
3063 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3064 __skb_queue_tail(&chan->busy_q, skb);
3065 return l2cap_try_push_rx_skb(chan);
3070 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3072 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3076 /* Busy Condition */
3077 BT_DBG("chan %p, Enter local busy", chan);
3079 chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3080 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3081 __skb_queue_tail(&chan->busy_q, skb);
/* tell the peer we cannot receive: RNR with our current buffer_seq */
3083 sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3084 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3085 l2cap_send_sframe(chan, sctrl);
3087 chan->conn_state |= L2CAP_CONN_RNR_SENT;
/* no point in ack aggregation while busy */
3089 del_timer(&chan->ack_timer);
/* _busy_wq is the module-global singlethread workqueue (see l2cap_init) */
3091 queue_work(_busy_wq, &chan->busy_work);
/* Reassemble SDUs in Streaming mode from the SAR bits of the control
 * field.  Unlike ERTM there is no retransmission: oversized or
 * out-of-protocol SDUs are simply freed and the partial state reset.
 * NOTE(review): listing is elided; drop/exit paths between the visible
 * lines are not shown. */
3096 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3098 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
3099 struct sk_buff *_skb;
3103 * TODO: We have to notify the userland if some data is lost with the
3107 switch (control & L2CAP_CTRL_SAR) {
3108 case L2CAP_SDU_UNSEGMENTED:
/* a complete SDU aborts any half-assembled one */
3109 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3110 kfree_skb(chan->sdu);
3114 err = sock_queue_rcv_skb(chan->sk, skb);
3120 case L2CAP_SDU_START:
3121 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3122 kfree_skb(chan->sdu);
/* first two payload bytes of a START frame carry the total SDU length */
3126 chan->sdu_len = get_unaligned_le16(skb->data);
3129 if (chan->sdu_len > pi->imtu) {
3134 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3140 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3142 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3143 chan->partial_sdu_len = skb->len;
3147 case L2CAP_SDU_CONTINUE:
/* CONTINUE without a preceding START is a protocol violation */
3148 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3151 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3153 chan->partial_sdu_len += skb->len;
3154 if (chan->partial_sdu_len > chan->sdu_len)
3155 kfree_skb(chan->sdu);
3162 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3165 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3167 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3168 chan->partial_sdu_len += skb->len;
3170 if (chan->partial_sdu_len > pi->imtu)
/* only deliver when the accumulated length matches the announced one */
3173 if (chan->partial_sdu_len == chan->sdu_len) {
3174 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3175 err = sock_queue_rcv_skb(chan->sk, _skb);
3182 kfree_skb(chan->sdu);
/* After a missing frame has been recovered via SREJ, flush the run of
 * now-in-sequence frames from srej_q through reassembly, advancing both
 * buffer_seq_srej and the expected tx_seq (modulo 64) until the next gap. */
3190 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3192 struct sk_buff *skb;
3195 while ((skb = skb_peek(&chan->srej_q))) {
/* stop at the first frame that is still out of sequence */
3196 if (bt_cb(skb)->tx_seq != tx_seq)
3199 skb = skb_dequeue(&chan->srej_q);
3200 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3201 l2cap_ertm_reassembly_sdu(chan, skb, control);
3202 chan->buffer_seq_srej =
3203 (chan->buffer_seq_srej + 1) % 64;
3204 tx_seq = (tx_seq + 1) % 64;
/* Re-send SREJ S-frames for every entry on the pending SREJ list up to and
 * including tx_seq; each resent entry is moved to the tail of srej_l so
 * list order continues to reflect request order.
 * NOTE(review): listing is elided — the handling of the matching entry
 * (l->tx_seq == tx_seq) is not fully visible here. */
3208 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3210 struct srej_list *l, *tmp;
3213 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3214 if (l->tx_seq == tx_seq) {
3219 control = L2CAP_SUPER_SELECT_REJECT;
3220 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3221 l2cap_send_sframe(chan, control);
3223 list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ per missing sequence number between expected_tx_seq and
 * the received tx_seq, recording each request on srej_l so the responses
 * can be matched later.  Finally skips expected_tx_seq past tx_seq itself.
 * NOTE(review): the kzalloc() result is dereferenced without a NULL
 * check — under GFP_ATOMIC this can oops on allocation failure; fix when
 * the full function is in view. */
3227 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3229 struct srej_list *new;
3232 while (tx_seq != chan->expected_tx_seq) {
3233 control = L2CAP_SUPER_SELECT_REJECT;
3234 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3235 l2cap_send_sframe(chan, control);
3237 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3238 new->tx_seq = chan->expected_tx_seq;
3239 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3240 list_add_tail(&new->list, &chan->srej_l);
/* account for the frame that triggered the SREJ run */
3242 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/* ERTM I-frame receive path: validates the tx_seq window, handles the
 * F-bit while waiting for a poll response, drives SREJ-based selective
 * recovery for out-of-sequence frames, and acks roughly every
 * tx_win/6 + 1 in-sequence frames.
 * NOTE(review): listing is elided — goto labels and several branch
 * closers between the visible lines are not shown. */
3245 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3247 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
3248 u8 tx_seq = __get_txseq(rx_control);
3249 u8 req_seq = __get_reqseq(rx_control);
3250 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3251 int tx_seq_offset, expected_tx_seq_offset;
/* ack frequency heuristic: at most ~6 unacked frames per window */
3252 int num_to_ack = (pi->tx_win/6) + 1;
3255 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3256 tx_seq, rx_control);
/* F-bit answers our earlier poll: stop the monitor timer */
3258 if (L2CAP_CTRL_FINAL & rx_control &&
3259 chan->conn_state & L2CAP_CONN_WAIT_F) {
3260 del_timer(&chan->monitor_timer);
3261 if (chan->unacked_frames > 0)
3262 __mod_retrans_timer();
3263 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
/* piggy-backed ack: drop everything up to req_seq from the tx queue */
3266 chan->expected_ack_seq = req_seq;
3267 l2cap_drop_acked_frames(chan);
3269 if (tx_seq == chan->expected_tx_seq)
3272 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3273 if (tx_seq_offset < 0)
3274 tx_seq_offset += 64;
3276 /* invalid tx_seq */
3277 if (tx_seq_offset >= pi->tx_win) {
3278 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
/* NOTE(review): equality test on a bitmask field — this only matches when
 * LOCAL_BUSY is the sole flag set; likely should be a '&' test. Confirm
 * against the full file before changing. */
3282 if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY)
3285 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3286 struct srej_list *first;
3288 first = list_first_entry(&chan->srej_l,
3289 struct srej_list, list);
/* this frame fills the oldest outstanding SREJ gap */
3290 if (tx_seq == first->tx_seq) {
3291 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3292 l2cap_check_srej_gap(chan, tx_seq);
3294 list_del(&first->list);
/* all gaps recovered: leave SREJ_SENT and resync buffer_seq */
3297 if (list_empty(&chan->srej_l)) {
3298 chan->buffer_seq = chan->buffer_seq_srej;
3299 chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3300 l2cap_send_ack(chan);
3301 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3304 struct srej_list *l;
3306 /* duplicated tx_seq */
3307 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3310 list_for_each_entry(l, &chan->srej_l, list) {
3311 if (l->tx_seq == tx_seq) {
3312 l2cap_resend_srejframe(chan, tx_seq);
3316 l2cap_send_srejframe(chan, tx_seq);
3319 expected_tx_seq_offset =
3320 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3321 if (expected_tx_seq_offset < 0)
3322 expected_tx_seq_offset += 64;
3324 /* duplicated tx_seq */
3325 if (tx_seq_offset < expected_tx_seq_offset)
/* first out-of-sequence frame: enter SREJ recovery */
3328 chan->conn_state |= L2CAP_CONN_SREJ_SENT;
3330 BT_DBG("chan %p, Enter SREJ", chan);
3332 INIT_LIST_HEAD(&chan->srej_l);
3333 chan->buffer_seq_srej = chan->buffer_seq;
3335 __skb_queue_head_init(&chan->srej_q);
3336 __skb_queue_head_init(&chan->busy_q);
3337 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3339 chan->conn_state |= L2CAP_CONN_SEND_PBIT;
3341 l2cap_send_srejframe(chan, tx_seq);
3343 del_timer(&chan->ack_timer);
/* expected path: in-sequence frame */
3348 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3350 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3351 bt_cb(skb)->tx_seq = tx_seq;
3352 bt_cb(skb)->sar = sar;
3353 __skb_queue_tail(&chan->srej_q, skb);
3357 err = l2cap_push_rx_skb(chan, skb, rx_control);
3361 if (rx_control & L2CAP_CTRL_FINAL) {
3362 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3363 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3365 l2cap_retransmit_frames(chan);
3370 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3371 if (chan->num_acked == num_to_ack - 1)
3372 l2cap_send_ack(chan);
/* Handle a Receiver Ready (RR) S-frame: ack outstanding frames, answer a
 * poll (P-bit) with either an SREJ tail or an I/RR/RNR carrying the F-bit,
 * and on a final (F-bit) response resume retransmission or normal send.
 * NOTE(review): listing is elided; else-branch structure is inferred from
 * the visible lines only. */
3381 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3383 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3386 chan->expected_ack_seq = __get_reqseq(rx_control);
3387 l2cap_drop_acked_frames(chan);
3389 if (rx_control & L2CAP_CTRL_POLL) {
/* peer polled us: our next frame must carry the F-bit */
3390 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3391 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3392 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3393 (chan->unacked_frames > 0))
3394 __mod_retrans_timer();
3396 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3397 l2cap_send_srejtail(chan);
3399 l2cap_send_i_or_rr_or_rnr(chan);
3402 } else if (rx_control & L2CAP_CTRL_FINAL) {
3403 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* first F-bit after a REJ clears REJ_ACT instead of retransmitting */
3405 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3406 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3408 l2cap_retransmit_frames(chan);
3411 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3412 (chan->unacked_frames > 0))
3413 __mod_retrans_timer();
3415 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3416 if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
3417 l2cap_send_ack(chan);
3419 l2cap_ertm_send(chan);
/* Handle a Reject (REJ) S-frame: ack up to req_seq, then retransmit from
 * there.  With the F-bit set, a retransmission already triggered via
 * REJ_ACT is not repeated; while waiting for a poll response, REJ_ACT is
 * latched so the eventual F-bit can be reconciled. */
3423 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3425 u8 tx_seq = __get_reqseq(rx_control);
3427 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3429 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3431 chan->expected_ack_seq = tx_seq;
3432 l2cap_drop_acked_frames(chan);
3434 if (rx_control & L2CAP_CTRL_FINAL) {
3435 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3436 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3438 l2cap_retransmit_frames(chan);
3440 l2cap_retransmit_frames(chan);
3442 if (chan->conn_state & L2CAP_CONN_WAIT_F)
3443 chan->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective Reject (SREJ) S-frame: retransmit exactly the frame
 * named by req_seq.  P-bit requests also ack and demand an F-bit reply;
 * an F-bit reply is matched against the saved poll req_seq (SREJ_ACT) to
 * avoid double retransmission. */
3446 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3448 u8 tx_seq = __get_reqseq(rx_control);
3450 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3452 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3454 if (rx_control & L2CAP_CTRL_POLL) {
3455 chan->expected_ack_seq = tx_seq;
3456 l2cap_drop_acked_frames(chan);
3458 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3459 l2cap_retransmit_one_frame(chan, tx_seq);
3461 l2cap_ertm_send(chan);
/* remember which seq the poll referenced so the F-bit can match it */
3463 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3464 chan->srej_save_reqseq = tx_seq;
3465 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
3467 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* already retransmitted for this seq under SREJ_ACT: just clear it */
3468 if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
3469 chan->srej_save_reqseq == tx_seq)
3470 chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3472 l2cap_retransmit_one_frame(chan, tx_seq);
3474 l2cap_retransmit_one_frame(chan, tx_seq);
3475 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3476 chan->srej_save_reqseq = tx_seq;
3477 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver Not Ready (RNR) S-frame: mark the peer busy, ack up
 * to req_seq, and stop retransmitting (unless SREJ recovery is in
 * progress).  A P-bit demands an immediate F-bit reply (RR/RNR or the
 * SREJ tail). */
3482 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3484 u8 tx_seq = __get_reqseq(rx_control);
3486 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3488 chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3489 chan->expected_ack_seq = tx_seq;
3490 l2cap_drop_acked_frames(chan);
3492 if (rx_control & L2CAP_CTRL_POLL)
3493 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3495 if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
/* peer is busy: retransmitting would be pointless */
3496 del_timer(&chan->retrans_timer);
3497 if (rx_control & L2CAP_CTRL_POLL)
3498 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3502 if (rx_control & L2CAP_CTRL_POLL)
3503 l2cap_send_srejtail(chan);
3505 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
/* Dispatch an S-frame to the RR/REJ/SREJ/RNR handler based on the
 * supervisory bits.  An F-bit received while waiting for a poll response
 * stops the monitor timer and (if frames are unacked) restarts the
 * retransmission timer. */
3508 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3510 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3512 if (L2CAP_CTRL_FINAL & rx_control &&
3513 chan->conn_state & L2CAP_CONN_WAIT_F) {
3514 del_timer(&chan->monitor_timer);
3515 if (chan->unacked_frames > 0)
3516 __mod_retrans_timer();
3517 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3520 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3521 case L2CAP_SUPER_RCV_READY:
3522 l2cap_data_channel_rrframe(chan, rx_control);
3525 case L2CAP_SUPER_REJECT:
3526 l2cap_data_channel_rejframe(chan, rx_control);
3529 case L2CAP_SUPER_SELECT_REJECT:
3530 l2cap_data_channel_srejframe(chan, rx_control);
3533 case L2CAP_SUPER_RCV_NOT_READY:
3534 l2cap_data_channel_rnrframe(chan, rx_control);
/* ERTM entry point for a received PDU (also run from the socket backlog):
 * verifies FCS, strips the control field (and SDU length for SAR-start
 * I-frames), validates length against MPS and req_seq against the
 * unacked-frame window, then dispatches to the I-frame or S-frame handler.
 * NOTE(review): listing is elided; the skb_pull/len computations between
 * the visible lines are not shown. */
3542 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3544 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3545 struct l2cap_pinfo *pi = l2cap_pi(sk);
3548 int len, next_tx_seq_offset, req_seq_offset;
3550 control = get_unaligned_le16(skb->data);
3555 * We can just drop the corrupted I-frame here.
3556 * Receiver will miss it and start proper recovery
3557 * procedures and ask retransmission.
3559 if (l2cap_check_fcs(pi, skb))
3562 if (__is_sar_start(control) && __is_iframe(control))
3565 if (pi->fcs == L2CAP_FCS_CRC16)
/* payload longer than the negotiated MPS: fatal protocol error */
3568 if (len > pi->mps) {
3569 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3573 req_seq = __get_reqseq(control);
3574 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3575 if (req_seq_offset < 0)
3576 req_seq_offset += 64;
3578 next_tx_seq_offset =
3579 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3580 if (next_tx_seq_offset < 0)
3581 next_tx_seq_offset += 64;
3583 /* check for invalid req-seq */
/* req_seq must not acknowledge frames we never sent */
3584 if (req_seq_offset > next_tx_seq_offset) {
3585 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3589 if (__is_iframe(control)) {
3591 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3595 l2cap_data_channel_iframe(chan, control, skb);
3599 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3603 l2cap_data_channel_sframe(chan, control, skb);
/* Route an incoming PDU to the channel identified by scid and process it
 * according to the channel mode: Basic (direct queue to the socket, may
 * drop on overflow since Basic mode has no flow control), ERTM (direct
 * receive or socket backlog if the socket is owned by user context), or
 * Streaming (FCS check + tx_seq resync + SDU reassembly).
 * NOTE(review): listing is elided; drop/done exit paths and locking
 * around the visible lines are not shown. */
3613 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3615 struct l2cap_chan *chan;
3617 struct l2cap_pinfo *pi;
3622 chan = l2cap_get_chan_by_scid(conn, cid);
3624 BT_DBG("unknown cid 0x%4.4x", cid);
3631 BT_DBG("chan %p, len %d", chan, skb->len);
3633 if (sk->sk_state != BT_CONNECTED)
3637 case L2CAP_MODE_BASIC:
3638 /* If socket recv buffers overflows we drop data here
3639 * which is *bad* because L2CAP has to be reliable.
3640 * But we don't have any other choice. L2CAP doesn't
3641 * provide flow control mechanism. */
3643 if (pi->imtu < skb->len)
3646 if (!sock_queue_rcv_skb(sk, skb))
3650 case L2CAP_MODE_ERTM:
/* if userspace holds the socket lock, defer via the backlog */
3651 if (!sock_owned_by_user(sk)) {
3652 l2cap_ertm_data_rcv(sk, skb);
3654 if (sk_add_backlog(sk, skb))
3660 case L2CAP_MODE_STREAMING:
3661 control = get_unaligned_le16(skb->data);
3665 if (l2cap_check_fcs(pi, skb))
3668 if (__is_sar_start(control))
3671 if (pi->fcs == L2CAP_FCS_CRC16)
3674 if (len > pi->mps || len < 0 || __is_sframe(control))
3677 tx_seq = __get_txseq(control);
/* streaming mode tolerates loss: just resync on the received tx_seq */
3679 if (chan->expected_tx_seq == tx_seq)
3680 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3682 chan->expected_tx_seq = (tx_seq + 1) % 64;
3684 l2cap_streaming_reassembly_sdu(chan, skb, control);
3689 BT_DBG("chan %p: bad mode 0x%2.2x", chan, pi->mode);
/* Deliver a connectionless-channel frame to the socket bound to the given
 * PSM, provided the socket is bound/connected and the payload fits its
 * receive MTU. */
3703 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3707 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3713 BT_DBG("sk %p, len %d", sk, skb->len);
3715 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3718 if (l2cap_pi(sk)->imtu < skb->len)
3721 if (!sock_queue_rcv_skb(sk, skb))
/* Demultiplex one complete L2CAP frame: strip the basic header, validate
 * the announced length against the skb, and dispatch by CID to the
 * signalling, connectionless, or data-channel handlers. */
3733 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3735 struct l2cap_hdr *lh = (void *) skb->data;
3739 skb_pull(skb, L2CAP_HDR_SIZE);
3740 cid = __le16_to_cpu(lh->cid);
3741 len = __le16_to_cpu(lh->len);
/* header length must match the actual payload exactly */
3743 if (len != skb->len) {
3748 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3751 case L2CAP_CID_LE_SIGNALING:
3752 case L2CAP_CID_SIGNALING:
3753 l2cap_sig_channel(conn, skb);
3756 case L2CAP_CID_CONN_LESS:
/* connectionless frames carry the PSM in the first payload bytes */
3757 psm = get_unaligned_le16(skb->data);
3759 l2cap_conless_channel(conn, psm, skb);
3763 l2cap_data_channel(conn, cid, skb);
3768 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection by
 * scanning listening L2CAP sockets.  lm1 accumulates link-mode bits from
 * sockets bound to this adapter's address, lm2 from wildcard-bound ones;
 * an exact-address match takes precedence.
 * NOTE(review): the statement that sets 'exact' is elided from this
 * listing — presumably on the exact-address branch; confirm in the full
 * file. */
3770 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3772 int exact = 0, lm1 = 0, lm2 = 0;
3773 register struct sock *sk;
3774 struct hlist_node *node;
3776 if (type != ACL_LINK)
3779 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3781 /* Find listening sockets and check their link_mode */
3782 read_lock(&l2cap_sk_list.lock);
3783 sk_for_each(sk, node, &l2cap_sk_list.head) {
3784 if (sk->sk_state != BT_LISTEN)
3787 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3788 lm1 |= HCI_LM_ACCEPT;
3789 if (l2cap_pi(sk)->role_switch)
3790 lm1 |= HCI_LM_MASTER;
3792 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3793 lm2 |= HCI_LM_ACCEPT;
3794 if (l2cap_pi(sk)->role_switch)
3795 lm2 |= HCI_LM_MASTER;
3798 read_unlock(&l2cap_sk_list.lock);
3800 return exact ? lm1 : lm2;
/* HCI callback: ACL/LE link establishment completed.  On success, create
 * (or look up) the l2cap_conn and mark it ready; on failure, tear down
 * any existing conn with the mapped errno. */
3803 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3805 struct l2cap_conn *conn;
3807 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3809 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3813 conn = l2cap_conn_add(hcon, status);
3815 l2cap_conn_ready(conn);
3817 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: report the reason code to use for a pending disconnect of
 * this ACL link, as recorded on the l2cap_conn. */
3822 static int l2cap_disconn_ind(struct hci_conn *hcon)
3824 struct l2cap_conn *conn = hcon->l2cap_data;
3826 BT_DBG("hcon %p", hcon);
3828 if (hcon->type != ACL_LINK || !conn)
3831 return conn->disc_reason;
/* HCI callback: the ACL/LE link is gone — tear down the l2cap_conn with
 * the HCI reason mapped to an errno. */
3834 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3836 BT_DBG("hcon %p reason %d", hcon, reason);
3838 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3841 l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on a connected SEQPACKET/STREAM socket:
 * losing encryption gives MEDIUM-security sockets a 5 s grace timer and
 * closes HIGH-security ones; regaining it clears the grace timer. */
3846 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3848 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3851 if (encrypt == 0x00) {
3852 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
/* encryption may come back (e.g. role switch): give it 5 seconds */
3853 l2cap_sock_clear_timer(sk);
3854 l2cap_sock_set_timer(sk, HZ * 5);
3855 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3856 __l2cap_sock_close(sk, ECONNREFUSED);
3858 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3859 l2cap_sock_clear_timer(sk);
/* HCI callback: security (authentication/encryption) procedure finished.
 * Walk every channel on the connection: connected/configuring channels
 * get an encryption check; channels in BT_CONNECT (re)send the Connect
 * Request; channels in BT_CONNECT2 answer the pending Connect Response
 * with success or a security block depending on status.
 * NOTE(review): listing is elided; per-socket bh_lock/unlock and the
 * status checks between the visible lines are not shown. */
3863 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3865 struct l2cap_conn *conn = hcon->l2cap_data;
3866 struct l2cap_chan *chan;
3871 BT_DBG("conn %p", conn);
3873 read_lock(&conn->chan_lock);
3875 list_for_each_entry(chan, &conn->chan_l, list) {
3876 struct sock *sk = chan->sk;
/* connect already pending: this event is not for us */
3880 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3885 if (!status && (sk->sk_state == BT_CONNECTED ||
3886 sk->sk_state == BT_CONFIG)) {
3887 l2cap_check_encryption(sk, encrypt);
3892 if (sk->sk_state == BT_CONNECT) {
3894 struct l2cap_conn_req req;
3895 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3896 req.psm = l2cap_pi(sk)->psm;
3898 chan->ident = l2cap_get_ident(conn);
3899 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3901 l2cap_send_cmd(conn, chan->ident,
3902 L2CAP_CONN_REQ, sizeof(req), &req);
3904 l2cap_sock_clear_timer(sk);
3905 l2cap_sock_set_timer(sk, HZ / 10);
3907 } else if (sk->sk_state == BT_CONNECT2) {
3908 struct l2cap_conn_rsp rsp;
3912 sk->sk_state = BT_CONFIG;
3913 result = L2CAP_CR_SUCCESS;
/* security failed: refuse the pending connection */
3915 sk->sk_state = BT_DISCONN;
3916 l2cap_sock_set_timer(sk, HZ / 10);
3917 result = L2CAP_CR_SEC_BLOCK;
3920 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3921 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3922 rsp.result = cpu_to_le16(result);
3923 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3924 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
3931 read_unlock(&conn->chan_lock);
/* HCI callback: reassemble ACL fragments into complete L2CAP frames.
 * A start fragment (no ACL_CONT) either is a complete frame (dispatched
 * immediately) or allocates conn->rx_skb sized from the L2CAP header;
 * continuation fragments are appended until rx_len reaches zero.  Any
 * inconsistency (short header, over-long frame/fragment, unexpected
 * start/continuation) marks the connection unreliable with ECOMM.
 * NOTE(review): listing is elided; drop labels and the rx_skb NULL checks
 * between the visible lines are not shown. */
3936 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3938 struct l2cap_conn *conn = hcon->l2cap_data;
3941 conn = l2cap_conn_add(hcon, 0);
3946 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3948 if (!(flags & ACL_CONT)) {
3949 struct l2cap_hdr *hdr;
3950 struct l2cap_chan *chan;
/* a new start while a frame is still being reassembled is a protocol error */
3955 BT_ERR("Unexpected start frame (len %d)", skb->len);
3956 kfree_skb(conn->rx_skb);
3957 conn->rx_skb = NULL;
3959 l2cap_conn_unreliable(conn, ECOMM);
3962 /* Start fragment always begin with Basic L2CAP header */
3963 if (skb->len < L2CAP_HDR_SIZE) {
3964 BT_ERR("Frame is too short (len %d)", skb->len);
3965 l2cap_conn_unreliable(conn, ECOMM);
3969 hdr = (struct l2cap_hdr *) skb->data;
3970 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3971 cid = __le16_to_cpu(hdr->cid);
3973 if (len == skb->len) {
3974 /* Complete frame received */
3975 l2cap_recv_frame(conn, skb);
3979 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3981 if (skb->len > len) {
3982 BT_ERR("Frame is too long (len %d, expected len %d)",
3984 l2cap_conn_unreliable(conn, ECOMM);
/* pre-check the destination channel's MTU before buffering the frame */
3988 chan = l2cap_get_chan_by_scid(conn, cid);
3990 if (chan && chan->sk) {
3991 struct sock *sk = chan->sk;
3993 if (l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
3994 BT_ERR("Frame exceeding recv MTU (len %d, "
3996 l2cap_pi(sk)->imtu);
3998 l2cap_conn_unreliable(conn, ECOMM);
4004 /* Allocate skb for the complete frame (with header) */
4005 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4009 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4011 conn->rx_len = len - skb->len;
4013 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* continuation with nothing outstanding */
4015 if (!conn->rx_len) {
4016 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4017 l2cap_conn_unreliable(conn, ECOMM);
4021 if (skb->len > conn->rx_len) {
4022 BT_ERR("Fragment is too long (len %d, expected %d)",
4023 skb->len, conn->rx_len);
4024 kfree_skb(conn->rx_skb);
4025 conn->rx_skb = NULL;
4027 l2cap_conn_unreliable(conn, ECOMM);
4031 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4033 conn->rx_len -= skb->len;
4035 if (!conn->rx_len) {
4036 /* Complete frame received */
4037 l2cap_recv_frame(conn, conn->rx_skb);
4038 conn->rx_skb = NULL;
/* seq_file show callback for /sys/kernel/debug/bluetooth/l2cap: one line
 * per L2CAP socket with addresses, state, PSM, CIDs, MTUs and security
 * level, under the socket-list read lock. */
4047 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4050 struct hlist_node *node;
4052 read_lock_bh(&l2cap_sk_list.lock);
4054 sk_for_each(sk, node, &l2cap_sk_list.head) {
4055 struct l2cap_pinfo *pi = l2cap_pi(sk);
4057 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4058 batostr(&bt_sk(sk)->src),
4059 batostr(&bt_sk(sk)->dst),
4060 sk->sk_state, __le16_to_cpu(pi->psm),
4062 pi->imtu, pi->omtu, pi->sec_level,
4066 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open: bind the single-record seq_file show routine. */
4071 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4073 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (standard single_open
 * seq_file pattern).  NOTE(review): the .read member is elided from this
 * listing — presumably seq_read; confirm in the full file. */
4076 static const struct file_operations l2cap_debugfs_fops = {
4077 .open = l2cap_debugfs_open,
4079 .llseek = seq_lseek,
4080 .release = single_release,
/* dentry for the debugfs file, created in l2cap_init() */
4083 static struct dentry *l2cap_debugfs;
/* Registration record hooking L2CAP into the HCI core: connection
 * accept/confirm, disconnect, security and ACL data callbacks. */
4085 static struct hci_proto l2cap_hci_proto = {
4087 .id = HCI_PROTO_L2CAP,
4088 .connect_ind = l2cap_connect_ind,
4089 .connect_cfm = l2cap_connect_cfm,
4090 .disconn_ind = l2cap_disconn_ind,
4091 .disconn_cfm = l2cap_disconn_cfm,
4092 .security_cfm = l2cap_security_cfm,
4093 .recv_acldata = l2cap_recv_acldata
/* Module init: register the L2CAP socket family, create the local-busy
 * workqueue, register with the HCI core, and (best-effort) create the
 * debugfs file.  On HCI registration failure the socket registration is
 * rolled back; the error path also destroys the workqueue.
 * NOTE(review): listing is elided; goto labels between the visible lines
 * are not shown. */
4096 int __init l2cap_init(void)
4100 err = l2cap_init_sockets();
/* serialises all l2cap_busy_work items on one kernel thread */
4104 _busy_wq = create_singlethread_workqueue("l2cap");
4110 err = hci_register_proto(&l2cap_hci_proto);
4112 BT_ERR("L2CAP protocol registration failed");
4113 bt_sock_unregister(BTPROTO_L2CAP);
/* debugfs failure is logged but not fatal */
4118 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4119 bt_debugfs, NULL, &l2cap_debugfs_fops);
4121 BT_ERR("Failed to create L2CAP debug file");
4127 destroy_workqueue(_busy_wq);
4128 l2cap_cleanup_sockets();
/* Module exit: remove debugfs entry, flush and destroy the busy
 * workqueue, unregister from HCI, and release the socket family —
 * reverse order of l2cap_init(). */
4132 void l2cap_exit(void)
4134 debugfs_remove(l2cap_debugfs);
/* drain pending busy_work items before the workqueue goes away */
4136 flush_workqueue(_busy_wq);
4137 destroy_workqueue(_busy_wq);
4139 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4140 BT_ERR("L2CAP protocol unregistration failed");
4142 l2cap_cleanup_sockets();
/* Module parameter: allows disabling ERTM support at load time. */
4145 module_param(disable_ertm, bool, 0644);
4146 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");