2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
/* ---- Module-wide state and forward declarations ---- */

/* Extended feature mask advertised in L2CAP information responses. */
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Supported fixed-channel bitmap; 0x02 presumably marks the signalling
 * channel — confirm against the L2CAP spec fixed-channel assignments. */
62 static u8 l2cap_fixed_chan[8] = { 0x02, };

/* Global list of every L2CAP channel, protected by chan_list_lock. */
64 static LIST_HEAD(chan_list);
65 static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for signalling helpers used before their
 * definitions later in this file. */
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
69 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77 /* ---- L2CAP channels ---- */
/* Take a reference on the channel; released via chan_put(). */
79 static inline void chan_hold(struct l2cap_chan *c)
81 atomic_inc(&c->refcnt);
/* Drop a channel reference; the final put frees the channel
 * (the free call itself is not visible in this view). */
84 static inline void chan_put(struct l2cap_chan *c)
86 if (atomic_dec_and_test(&c->refcnt))
/* Look up a channel on @conn by destination CID.
 * Caller must hold conn->chan_lock. */
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
94 list_for_each_entry(c, &conn->chan_l, list) {
/* Look up a channel on @conn by source CID.
 * Caller must hold conn->chan_lock. */
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104 struct l2cap_chan *c;
106 list_for_each_entry(c, &conn->chan_l, list) {
113 /* Find channel with given SCID.
114 * Returns locked socket */
/* Locking wrapper: takes conn->chan_lock for reading around the
 * unlocked SCID lookup above. */
115 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
117 struct l2cap_chan *c;
119 read_lock(&conn->chan_lock);
120 c = __l2cap_get_chan_by_scid(conn, cid);
123 read_unlock(&conn->chan_lock);
/* Look up a channel on @conn by the signalling-command identifier it
 * last used.  Caller must hold conn->chan_lock. */
127 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
129 struct l2cap_chan *c;
131 list_for_each_entry(c, &conn->chan_l, list) {
132 if (c->ident == ident)
/* Locking wrapper around __l2cap_get_chan_by_ident(). */
138 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
140 struct l2cap_chan *c;
142 read_lock(&conn->chan_lock);
143 c = __l2cap_get_chan_by_ident(conn, ident);
146 read_unlock(&conn->chan_lock);
/* Find a channel in the global list bound to source PSM @psm on local
 * address @src.  Caller must hold chan_list_lock. */
150 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
152 struct l2cap_chan *c;
154 list_for_each_entry(c, &chan_list, global_l) {
155 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to @psm on @src.  A zero PSM requests automatic
 * assignment from the dynamic range 0x1001-0x10ff (odd values only,
 * per the L2CAP PSM encoding rules).  Fails if the PSM is already
 * bound on this address. */
164 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
168 write_lock_bh(&chan_list_lock);
170 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Automatic allocation: probe odd PSMs in the dynamic range. */
183 for (p = 0x1001; p < 0x1100; p += 2)
184 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
185 chan->psm = cpu_to_le16(p);
186 chan->sport = cpu_to_le16(p);
193 write_unlock_bh(&chan_list_lock);
/* Bind @chan to a fixed source CID (assignment elided in this view);
 * serialized against the global channel list. */
197 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
199 write_lock_bh(&chan_list_lock);
203 write_unlock_bh(&chan_list_lock);
/* Allocate the first free dynamic source CID on @conn.
 * Caller must hold conn->chan_lock (lookup helper is unlocked). */
208 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
210 u16 cid = L2CAP_CID_DYN_START;
212 for (; cid < L2CAP_CID_DYN_END; cid++) {
213 if (!__l2cap_get_chan_by_scid(conn, cid))
220 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
222 BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
224 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
/* Deactivate @timer and, when it was actually pending, drop the
 * channel reference taken by l2cap_set_timer() (put elided here). */
228 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
230 BT_DBG("chan %p state %d", chan, chan->state);
232 if (timer_pending(timer) && del_timer(timer))
/* Move @chan to @state via the registered state_change callback, so
 * socket-layer bookkeeping stays in sync with the channel state. */
236 static void l2cap_state_change(struct l2cap_chan *chan, int state)
239 chan->ops->state_change(chan->data, state);
/* Channel timer expiry handler (runs in timer/softirq context).
 * Picks a close reason from the current state, closes the channel and
 * notifies the socket layer. */
242 static void l2cap_chan_timeout(unsigned long arg)
244 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
245 struct sock *sk = chan->sk;
248 BT_DBG("chan %p state %d", chan, chan->state);
252 if (sock_owned_by_user(sk)) {
253 /* sk is owned by user. Try again later */
254 __set_chan_timer(chan, HZ / 5);
/* Map the state at expiry to an errno reported to the socket. */
260 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
261 reason = ECONNREFUSED;
262 else if (chan->state == BT_CONNECT &&
263 chan->sec_level != BT_SECURITY_SDP)
264 reason = ECONNREFUSED;
268 l2cap_chan_close(chan, reason);
272 chan->ops->close(chan->data);
/* Allocate and initialise a new channel bound to socket @sk, link it
 * into the global channel list and arm its lifecycle timer.
 * Returns the channel (NULL-check path elided in this view). */
276 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
278 struct l2cap_chan *chan;
280 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
286 write_lock_bh(&chan_list_lock);
287 list_add(&chan->global_l, &chan_list);
288 write_unlock_bh(&chan_list_lock);
290 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
292 chan->state = BT_OPEN;
/* Initial reference owned by the creator; dropped via chan_put(). */
294 atomic_set(&chan->refcnt, 1);
/* Unlink @chan from the global channel list; the memory is released
 * through the refcount (final chan_put elided in this view). */
299 void l2cap_chan_destroy(struct l2cap_chan *chan)
301 write_lock_bh(&chan_list_lock);
302 list_del(&chan->global_l);
303 write_unlock_bh(&chan_list_lock);
/* Attach @chan to connection @conn: assign CIDs and default MTU
 * according to channel type and link type, then add it to the
 * connection's channel list.  Caller must hold conn->chan_lock. */
308 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
310 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
311 chan->psm, chan->dcid);
/* 0x13: default disconnect reason — presumably "remote user
 * terminated connection"; confirm against the HCI error codes. */
313 conn->disc_reason = 0x13;
317 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
318 if (conn->hcon->type == LE_LINK) {
/* LE links use the fixed LE data channel CID on both ends. */
320 chan->omtu = L2CAP_LE_DEFAULT_MTU;
321 chan->scid = L2CAP_CID_LE_DATA;
322 chan->dcid = L2CAP_CID_LE_DATA;
324 /* Alloc CID for connection-oriented socket */
325 chan->scid = l2cap_alloc_cid(conn);
326 chan->omtu = L2CAP_DEFAULT_MTU;
328 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
329 /* Connectionless socket */
330 chan->scid = L2CAP_CID_CONN_LESS;
331 chan->dcid = L2CAP_CID_CONN_LESS;
332 chan->omtu = L2CAP_DEFAULT_MTU;
334 /* Raw socket can send/recv signalling messages only */
335 chan->scid = L2CAP_CID_SIGNALING;
336 chan->dcid = L2CAP_CID_SIGNALING;
337 chan->omtu = L2CAP_DEFAULT_MTU;
342 list_add(&chan->list, &conn->chan_l);
/* Detach @chan from its connection and tear down its state.
346 * Must be called on the locked socket. */
347 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
349 struct sock *sk = chan->sk;
350 struct l2cap_conn *conn = chan->conn;
351 struct sock *parent = bt_sk(sk)->parent;
353 __clear_chan_timer(chan);
355 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
358 /* Delete from channel list */
359 write_lock_bh(&conn->chan_lock);
360 list_del(&chan->list);
361 write_unlock_bh(&conn->chan_lock);
/* Drop the ACL reference this channel held on the HCI link. */
365 hci_conn_put(conn->hcon);
368 l2cap_state_change(chan, BT_CLOSED);
369 sock_set_flag(sk, SOCK_ZAPPED);
/* Not-yet-accepted child: unlink from the accept queue and wake
 * the listening parent; otherwise just wake the socket itself. */
375 bt_accept_unlink(sk);
376 parent->sk_data_ready(parent, 0);
378 sk->sk_state_change(sk);
380 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
381 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
384 skb_queue_purge(&chan->tx_q);
/* ERTM keeps extra per-channel state: stop its timers and free
 * the retransmission and SREJ bookkeeping. */
386 if (chan->mode == L2CAP_MODE_ERTM) {
387 struct srej_list *l, *tmp;
389 __clear_retrans_timer(chan);
390 __clear_monitor_timer(chan);
391 __clear_ack_timer(chan);
393 skb_queue_purge(&chan->srej_q);
395 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every connection still sitting on @parent's accept queue
 * (channels accepted by the kernel but never by userspace). */
402 static void l2cap_chan_cleanup_listen(struct sock *parent)
406 BT_DBG("parent %p", parent);
408 /* Close not yet accepted channels */
409 while ((sk = bt_accept_dequeue(parent, NULL))) {
410 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
411 __clear_chan_timer(chan);
413 l2cap_chan_close(chan, ECONNRESET);
415 chan->ops->close(chan->data);
/* Close @chan with errno @reason, performing the state-appropriate
 * signalling: a disconnect request from connected states, a negative
 * connect response from BT_CONNECT2, or a plain teardown otherwise. */
419 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
421 struct l2cap_conn *conn = chan->conn;
422 struct sock *sk = chan->sk;
424 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
426 switch (chan->state) {
428 l2cap_chan_cleanup_listen(sk);
430 l2cap_state_change(chan, BT_CLOSED);
431 sock_set_flag(sk, SOCK_ZAPPED);
/* Connected/configuring ACL channel: send a disconnect request
 * and wait (bounded by the channel timer) for the response. */
436 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
437 conn->hcon->type == ACL_LINK) {
438 __clear_chan_timer(chan);
439 __set_chan_timer(chan, sk->sk_sndtimeo);
440 l2cap_send_disconn_req(conn, chan, reason);
442 l2cap_chan_del(chan, reason);
/* BT_CONNECT2: a connect request is pending our answer — reject
 * it explicitly before deleting the channel. */
446 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
447 conn->hcon->type == ACL_LINK) {
448 struct l2cap_conn_rsp rsp;
451 if (bt_sk(sk)->defer_setup)
452 result = L2CAP_CR_SEC_BLOCK;
454 result = L2CAP_CR_BAD_PSM;
455 l2cap_state_change(chan, BT_DISCONN);
457 rsp.scid = cpu_to_le16(chan->dcid);
458 rsp.dcid = cpu_to_le16(chan->scid);
459 rsp.result = cpu_to_le16(result);
460 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
461 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
465 l2cap_chan_del(chan, reason);
470 l2cap_chan_del(chan, reason);
474 sock_set_flag(sk, SOCK_ZAPPED);
/* Translate the channel type / PSM / security level into the HCI
 * authentication requirement used when securing the link. */
479 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
481 if (chan->chan_type == L2CAP_CHAN_RAW) {
482 switch (chan->sec_level) {
483 case BT_SECURITY_HIGH:
484 return HCI_AT_DEDICATED_BONDING_MITM;
485 case BT_SECURITY_MEDIUM:
486 return HCI_AT_DEDICATED_BONDING;
488 return HCI_AT_NO_BONDING;
/* PSM 0x0001 is SDP: never require bonding, and downgrade the
 * default security level to the dedicated SDP level. */
490 } else if (chan->psm == cpu_to_le16(0x0001)) {
491 if (chan->sec_level == BT_SECURITY_LOW)
492 chan->sec_level = BT_SECURITY_SDP;
494 if (chan->sec_level == BT_SECURITY_HIGH)
495 return HCI_AT_NO_BONDING_MITM;
497 return HCI_AT_NO_BONDING;
499 switch (chan->sec_level) {
500 case BT_SECURITY_HIGH:
501 return HCI_AT_GENERAL_BONDING_MITM;
502 case BT_SECURITY_MEDIUM:
503 return HCI_AT_GENERAL_BONDING;
505 return HCI_AT_NO_BONDING;
510 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level on the
 * underlying link; non-zero means security is already satisfied. */
511 static inline int l2cap_check_security(struct l2cap_chan *chan)
513 struct l2cap_conn *conn = chan->conn;
516 auth_type = l2cap_get_auth_type(chan);
518 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for @conn,
 * wrapping within the kernel-reserved range. */
521 static u8 l2cap_get_ident(struct l2cap_conn *conn)
525 /* Get next available identificator.
526 * 1 - 128 are used by kernel.
527 * 129 - 199 are reserved.
528 * 200 - 254 are used by utilities like l2ping, etc.
531 spin_lock_bh(&conn->lock);
533 if (++conn->tx_ident > 128)
538 spin_unlock_bh(&conn->lock);
/* Build a signalling command PDU and push it out on the ACL link,
 * using the non-flushable flag when the controller supports it. */
543 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
545 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
548 BT_DBG("code 0x%2.2x", code);
553 if (lmp_no_flush_capable(conn->hcon->hdev))
554 flags = ACL_START_NO_FLUSH;
/* Signalling traffic always forces the link into active mode. */
558 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
560 hci_send_acl(conn->hcon, skb, flags);
/* Build and transmit an ERTM supervisory frame (S-frame) carrying
 * @control, folding in pending F-bit / P-bit state and an optional
 * trailing FCS. */
563 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
566 struct l2cap_hdr *lh;
567 struct l2cap_conn *conn = chan->conn;
568 int count, hlen = L2CAP_HDR_SIZE + 2;
571 if (chan->state != BT_CONNECTED)
574 if (chan->fcs == L2CAP_FCS_CRC16)
577 BT_DBG("chan %p, control 0x%2.2x", chan, control);
579 count = min_t(unsigned int, conn->mtu, hlen);
580 control |= L2CAP_CTRL_FRAME_TYPE;
/* Consume any queued Final/Poll bit exactly once. */
582 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
583 control |= L2CAP_CTRL_FINAL;
585 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
586 control |= L2CAP_CTRL_POLL;
588 skb = bt_skb_alloc(count, GFP_ATOMIC);
592 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
593 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
594 lh->cid = cpu_to_le16(chan->dcid);
595 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers the frame up to (but excluding) the FCS field. */
597 if (chan->fcs == L2CAP_FCS_CRC16) {
598 u16 fcs = crc16(0, (u8 *)lh, count - 2);
599 put_unaligned_le16(fcs, skb_put(skb, 2));
602 if (lmp_no_flush_capable(conn->hcon->hdev))
603 flags = ACL_START_NO_FLUSH;
607 bt_cb(skb)->force_active = chan->force_active;
609 hci_send_acl(chan->conn->hcon, skb, flags);
/* Send a Receiver-Ready or Receiver-Not-Ready S-frame depending on
 * our local-busy state, tagged with the current receive sequence. */
612 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
614 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
615 control |= L2CAP_SUPER_RCV_NOT_READY;
/* Remember we told the peer we are busy, so we can later RR. */
616 set_bit(CONN_RNR_SENT, &chan->conn_state);
618 control |= L2CAP_SUPER_RCV_READY;
620 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
622 l2cap_send_sframe(chan, control);
/* True when no connect request is currently outstanding for @chan. */
625 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
627 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Kick off channel establishment: if the remote feature mask is known
 * send a connect request (once security passes), otherwise first issue
 * an information request to learn the peer's features. */
630 static void l2cap_do_start(struct l2cap_chan *chan)
632 struct l2cap_conn *conn = chan->conn;
634 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
635 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
638 if (l2cap_check_security(chan) &&
639 __l2cap_no_conn_pending(chan)) {
640 struct l2cap_conn_req req;
641 req.scid = cpu_to_le16(chan->scid);
644 chan->ident = l2cap_get_ident(conn);
645 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
647 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
/* Feature mask unknown yet: query it and (re)arm the info timer. */
651 struct l2cap_info_req req;
652 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
654 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
655 conn->info_ident = l2cap_get_ident(conn);
657 mod_timer(&conn->info_timer, jiffies +
658 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
660 l2cap_send_cmd(conn, conn->info_ident,
661 L2CAP_INFO_REQ, sizeof(req), &req);
/* Non-zero when @mode is supported by both the local stack and the
 * remote feature mask @feat_mask. */
665 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
667 u32 local_feat_mask = l2cap_feat_mask;
669 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
672 case L2CAP_MODE_ERTM:
673 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
674 case L2CAP_MODE_STREAMING:
675 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP disconnect request for @chan, stopping all ERTM
 * timers first, and move the channel to BT_DISCONN with errno @err. */
681 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
684 struct l2cap_disconn_req req;
691 if (chan->mode == L2CAP_MODE_ERTM) {
692 __clear_retrans_timer(chan);
693 __clear_monitor_timer(chan);
694 __clear_ack_timer(chan);
697 req.dcid = cpu_to_le16(chan->dcid);
698 req.scid = cpu_to_le16(chan->scid);
699 l2cap_send_cmd(conn, l2cap_get_ident(conn),
700 L2CAP_DISCONN_REQ, sizeof(req), &req);
702 l2cap_state_change(chan, BT_DISCONN);
706 /* ---- L2CAP connections ---- */
/* Drive every channel on @conn forward: issue connect requests for
 * channels in BT_CONNECT, answer pending incoming requests for
 * channels in BT_CONNECT2, and start configuration where due. */
707 static void l2cap_conn_start(struct l2cap_conn *conn)
709 struct l2cap_chan *chan, *tmp;
711 BT_DBG("conn %p", conn);
713 read_lock(&conn->chan_lock);
715 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
716 struct sock *sk = chan->sk;
720 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
725 if (chan->state == BT_CONNECT) {
726 struct l2cap_conn_req req;
728 if (!l2cap_check_security(chan) ||
729 !__l2cap_no_conn_pending(chan)) {
/* Requested mode unsupported and mode fallback disabled:
 * abort the channel instead of connecting. */
734 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
735 && test_bit(CONF_STATE2_DEVICE,
736 &chan->conf_state)) {
737 /* l2cap_chan_close() calls list_del(chan)
738 * so release the lock */
739 read_unlock(&conn->chan_lock);
740 l2cap_chan_close(chan, ECONNRESET);
741 read_lock(&conn->chan_lock);
746 req.scid = cpu_to_le16(chan->scid);
749 chan->ident = l2cap_get_ident(conn);
750 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
752 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
755 } else if (chan->state == BT_CONNECT2) {
756 struct l2cap_conn_rsp rsp;
758 rsp.scid = cpu_to_le16(chan->dcid);
759 rsp.dcid = cpu_to_le16(chan->scid);
761 if (l2cap_check_security(chan)) {
/* Deferred setup: report "pending, authorization" and
 * let userspace accept() decide. */
762 if (bt_sk(sk)->defer_setup) {
763 struct sock *parent = bt_sk(sk)->parent;
764 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
765 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
767 parent->sk_data_ready(parent, 0);
770 l2cap_state_change(chan, BT_CONFIG);
771 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
772 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
775 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
776 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
779 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Start configuration only once, and only on success. */
782 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
783 rsp.result != L2CAP_CR_SUCCESS) {
788 set_bit(CONF_REQ_SENT, &chan->conf_state);
789 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
790 l2cap_build_conf_req(chan, buf), buf);
791 chan->num_conf_req++;
797 read_unlock(&conn->chan_lock);
800 /* Find socket with cid and source bdaddr.
801 * Returns closest match, locked.
/* Exact address match wins immediately; a BDADDR_ANY listener is kept
 * as fallback (returned via the elided tail of this function). */
803 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
805 struct l2cap_chan *c, *c1 = NULL;
807 read_lock(&chan_list_lock);
809 list_for_each_entry(c, &chan_list, global_l) {
810 struct sock *sk = c->sk;
812 if (state && c->state != state)
815 if (c->scid == cid) {
817 if (!bacmp(&bt_sk(sk)->src, src)) {
818 read_unlock(&chan_list_lock);
823 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
828 read_unlock(&chan_list_lock);
/* Handle an incoming LE link: find a listener on the LE data CID,
 * spawn a child channel, attach it to @conn and signal the parent. */
833 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
835 struct sock *parent, *sk;
836 struct l2cap_chan *chan, *pchan;
840 /* Check if we have socket listening on cid */
841 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
848 bh_lock_sock(parent);
850 /* Check for backlog size */
851 if (sk_acceptq_is_full(parent)) {
852 BT_DBG("backlog full %d", parent->sk_ack_backlog);
856 chan = pchan->ops->new_connection(pchan->data);
862 write_lock_bh(&conn->chan_lock);
/* Pin the HCI link for the lifetime of this channel. */
864 hci_conn_hold(conn->hcon);
866 bacpy(&bt_sk(sk)->src, conn->src);
867 bacpy(&bt_sk(sk)->dst, conn->dst);
869 bt_accept_enqueue(parent, sk);
871 __l2cap_chan_add(conn, chan);
873 __set_chan_timer(chan, sk->sk_sndtimeo);
875 l2cap_state_change(chan, BT_CONNECTED);
876 parent->sk_data_ready(parent, 0);
878 write_unlock_bh(&conn->chan_lock);
881 bh_unlock_sock(parent);
/* Mark the channel fully established: clear configuration state and
 * its timer, then wake the socket (and listening parent, if any). */
884 static void l2cap_chan_ready(struct sock *sk)
886 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
887 struct sock *parent = bt_sk(sk)->parent;
889 BT_DBG("sk %p, parent %p", sk, parent);
/* Configuration is complete; reset the per-channel config flags. */
891 chan->conf_state = 0;
892 __clear_chan_timer(chan);
894 l2cap_state_change(chan, BT_CONNECTED);
895 sk->sk_state_change(sk);
898 parent->sk_data_ready(parent, 0);
/* The underlying HCI link is up: progress each channel — LE channels
 * become ready once SMP security passes, connectionless/raw channels
 * connect immediately, and pending ACL channels start signalling. */
901 static void l2cap_conn_ready(struct l2cap_conn *conn)
903 struct l2cap_chan *chan;
905 BT_DBG("conn %p", conn);
/* Incoming LE link: dispatch to the LE listener path first. */
907 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
908 l2cap_le_conn_ready(conn);
910 read_lock(&conn->chan_lock);
912 list_for_each_entry(chan, &conn->chan_l, list) {
913 struct sock *sk = chan->sk;
917 if (conn->hcon->type == LE_LINK) {
918 if (smp_conn_security(conn, chan->sec_level))
919 l2cap_chan_ready(sk);
921 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
922 __clear_chan_timer(chan);
923 l2cap_state_change(chan, BT_CONNECTED);
924 sk->sk_state_change(sk);
926 } else if (chan->state == BT_CONNECT)
927 l2cap_do_start(chan);
932 read_unlock(&conn->chan_lock);
935 /* Notify sockets that we cannot guaranty reliability anymore */
/* Walk all channels on @conn and report @err on those that requested
 * reliable delivery (error delivery itself elided in this view). */
936 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
938 struct l2cap_chan *chan;
940 BT_DBG("conn %p", conn);
942 read_lock(&conn->chan_lock);
944 list_for_each_entry(chan, &conn->chan_l, list) {
945 struct sock *sk = chan->sk;
947 if (chan->force_reliable)
951 read_unlock(&conn->chan_lock);
/* Information-request timer expired: treat feature discovery as done
 * (with no data) and resume pending channel establishment. */
954 static void l2cap_info_timeout(unsigned long arg)
956 struct l2cap_conn *conn = (void *) arg;
958 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
959 conn->info_ident = 0;
961 l2cap_conn_start(conn);
/* Tear down the L2CAP state attached to @hcon: delete every channel
 * with errno @err, stop connection-level timers and detach from the
 * HCI connection. */
964 static void l2cap_conn_del(struct hci_conn *hcon, int err)
966 struct l2cap_conn *conn = hcon->l2cap_data;
967 struct l2cap_chan *chan, *l;
973 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled inbound frame. */
975 kfree_skb(conn->rx_skb);
978 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
981 l2cap_chan_del(chan, err);
983 chan->ops->close(chan->data);
986 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
987 del_timer_sync(&conn->info_timer);
989 if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
990 del_timer(&conn->security_timer);
992 hcon->l2cap_data = NULL;
/* LE security-procedure timer expired: drop the whole connection. */
996 static void security_timeout(unsigned long arg)
998 struct l2cap_conn *conn = (void *) arg;
1000 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Get-or-create the L2CAP connection object for @hcon, initialising
 * MTU from the controller, locks, the channel list and the link-type
 * specific timer (security timer for LE, info timer for ACL). */
1003 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1005 struct l2cap_conn *conn = hcon->l2cap_data;
1010 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1014 hcon->l2cap_data = conn;
1017 BT_DBG("hcon %p conn %p", hcon, conn);
1019 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1020 conn->mtu = hcon->hdev->le_mtu;
1022 conn->mtu = hcon->hdev->acl_mtu;
1024 conn->src = &hcon->hdev->bdaddr;
1025 conn->dst = &hcon->dst;
1027 conn->feat_mask = 0;
1029 spin_lock_init(&conn->lock);
1030 rwlock_init(&conn->chan_lock);
1032 INIT_LIST_HEAD(&conn->chan_l);
1034 if (hcon->type == LE_LINK)
1035 setup_timer(&conn->security_timer, security_timeout,
1036 (unsigned long) conn);
1038 setup_timer(&conn->info_timer, l2cap_info_timeout,
1039 (unsigned long) conn);
/* 0x13: default disconnect reason — presumably "remote user
 * terminated connection"; confirm against the HCI error codes. */
1041 conn->disc_reason = 0x13;
/* Locking wrapper: attach @chan to @conn under conn->chan_lock. */
1046 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1048 write_lock_bh(&conn->chan_lock);
1049 __l2cap_chan_add(conn, chan);
1050 write_unlock_bh(&conn->chan_lock);
1053 /* ---- Socket interface ---- */
1055 /* Find socket with psm and source bdaddr.
1056 * Returns closest match.
/* Exact address match wins immediately; a BDADDR_ANY listener is kept
 * as fallback (returned via the elided tail of this function). */
1058 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1060 struct l2cap_chan *c, *c1 = NULL;
1062 read_lock(&chan_list_lock);
1064 list_for_each_entry(c, &chan_list, global_l) {
1065 struct sock *sk = c->sk;
1067 if (state && c->state != state)
1070 if (c->psm == psm) {
1072 if (!bacmp(&bt_sk(sk)->src, src)) {
1073 read_unlock(&chan_list_lock);
1078 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1083 read_unlock(&chan_list_lock);
/* Initiate an outgoing connection for @chan: route to a local
 * adapter, create the HCI link (LE or ACL depending on the target
 * CID), attach the channel and start L2CAP signalling once the link
 * is up.  Returns 0 or a negative errno. */
1088 int l2cap_chan_connect(struct l2cap_chan *chan)
1090 struct sock *sk = chan->sk;
1091 bdaddr_t *src = &bt_sk(sk)->src;
1092 bdaddr_t *dst = &bt_sk(sk)->dst;
1093 struct l2cap_conn *conn;
1094 struct hci_conn *hcon;
1095 struct hci_dev *hdev;
1099 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1102 hdev = hci_get_route(dst, src);
1104 return -EHOSTUNREACH;
1106 hci_dev_lock_bh(hdev);
1108 auth_type = l2cap_get_auth_type(chan);
/* The fixed LE data CID selects an LE link; everything else ACL. */
1110 if (chan->dcid == L2CAP_CID_LE_DATA)
1111 hcon = hci_connect(hdev, LE_LINK, dst,
1112 chan->sec_level, auth_type);
1114 hcon = hci_connect(hdev, ACL_LINK, dst,
1115 chan->sec_level, auth_type);
1118 err = PTR_ERR(hcon);
1122 conn = l2cap_conn_add(hcon, 0);
1129 /* Update source addr of the socket */
1130 bacpy(src, conn->src);
1132 l2cap_chan_add(conn, chan);
1134 l2cap_state_change(chan, BT_CONNECT);
1135 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Link already up (cached connection): proceed immediately. */
1137 if (hcon->state == BT_CONNECTED) {
1138 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1139 __clear_chan_timer(chan);
1140 if (l2cap_check_security(chan))
1141 l2cap_state_change(chan, BT_CONNECTED);
1143 l2cap_do_start(chan);
1149 hci_dev_unlock_bh(hdev);
/* Sleep (interruptibly) until every outstanding ERTM frame has been
 * acknowledged or the connection goes away; returns 0 or -errno. */
1154 int __l2cap_wait_ack(struct sock *sk)
1156 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1157 DECLARE_WAITQUEUE(wait, current);
1161 add_wait_queue(sk_sleep(sk), &wait);
1162 while ((chan->unacked_frames > 0 && chan->conn)) {
1163 set_current_state(TASK_INTERRUPTIBLE);
1168 if (signal_pending(current)) {
1169 err = sock_intr_errno(timeo);
1174 timeo = schedule_timeout(timeo);
1177 err = sock_error(sk);
1181 set_current_state(TASK_RUNNING);
1182 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: the peer has been silent while we wait for a
 * poll response.  Retry with another Poll S-frame until the remote
 * max-transmit limit is hit, then disconnect. */
1186 static void l2cap_monitor_timeout(unsigned long arg)
1188 struct l2cap_chan *chan = (void *) arg;
1189 struct sock *sk = chan->sk;
1191 BT_DBG("chan %p", chan);
1194 if (chan->retry_count >= chan->remote_max_tx) {
1195 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1200 chan->retry_count++;
1201 __set_monitor_timer(chan);
1203 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer: no ack arrived in time.  Enter the
 * WAIT_F state and poll the peer; the monitor timer takes over the
 * retry counting from here. */
1207 static void l2cap_retrans_timeout(unsigned long arg)
1209 struct l2cap_chan *chan = (void *) arg;
1210 struct sock *sk = chan->sk;
1212 BT_DBG("chan %p", chan);
1215 chan->retry_count = 1;
1216 __set_monitor_timer(chan);
1218 set_bit(CONN_WAIT_F, &chan->conn_state);
1220 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Free frames from the head of the transmit queue that the peer has
 * acknowledged, stopping at expected_ack_seq; clears the retransmit
 * timer once nothing is left unacked. */
1224 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1226 struct sk_buff *skb;
1228 while ((skb = skb_peek(&chan->tx_q)) &&
1229 chan->unacked_frames) {
1230 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1233 skb = skb_dequeue(&chan->tx_q);
1236 chan->unacked_frames--;
1239 if (!chan->unacked_frames)
1240 __clear_retrans_timer(chan);
/* Hand @skb to the HCI layer for transmission on the channel's link,
 * using the non-flushable start flag when the channel is not
 * flushable and the controller supports it. */
1243 void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1245 struct hci_conn *hcon = chan->conn->hcon;
1248 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1250 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1251 flags = ACL_START_NO_FLUSH;
1255 bt_cb(skb)->force_active = chan->force_active;
1256 hci_send_acl(hcon, skb, flags);
/* Streaming mode: drain the transmit queue, stamping each frame with
 * the next TX sequence number (mod 64) and an FCS when enabled.
 * No acknowledgements or retransmissions in this mode. */
1259 void l2cap_streaming_send(struct l2cap_chan *chan)
1261 struct sk_buff *skb;
1264 while ((skb = skb_dequeue(&chan->tx_q))) {
1265 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1266 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1267 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
/* FCS is computed over everything but its own trailing field. */
1269 if (chan->fcs == L2CAP_FCS_CRC16) {
1270 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1271 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1274 l2cap_do_send(chan, skb);
1276 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* Retransmit the queued I-frame whose sequence number is @tx_seq:
 * locate it in tx_q, clone it, refresh the control field (ReqSeq,
 * optional Final bit) and FCS, and send the clone. */
1280 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1282 struct sk_buff *skb, *tx_skb;
1285 skb = skb_peek(&chan->tx_q);
1290 if (bt_cb(skb)->tx_seq == tx_seq)
1293 if (skb_queue_is_last(&chan->tx_q, skb))
1296 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
/* Give up the channel once the peer's MaxTransmit is exhausted. */
1298 if (chan->remote_max_tx &&
1299 bt_cb(skb)->retries == chan->remote_max_tx) {
1300 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1304 tx_skb = skb_clone(skb, GFP_ATOMIC);
1305 bt_cb(skb)->retries++;
1306 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
/* Keep only the SAR bits; sequence/ack fields are rebuilt below. */
1307 control &= L2CAP_CTRL_SAR;
1309 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1310 control |= L2CAP_CTRL_FINAL;
1312 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1313 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1315 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1317 if (chan->fcs == L2CAP_FCS_CRC16) {
1318 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1319 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1322 l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send queued I-frames from tx_send_head while
 * the transmit window has room, stamping control fields and FCS,
 * arming the retransmission timer and tracking unacked frames. */
1325 int l2cap_ertm_send(struct l2cap_chan *chan)
1327 struct sk_buff *skb, *tx_skb;
1331 if (chan->state != BT_CONNECTED)
1334 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1336 if (chan->remote_max_tx &&
1337 bt_cb(skb)->retries == chan->remote_max_tx) {
1338 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1342 tx_skb = skb_clone(skb, GFP_ATOMIC);
1344 bt_cb(skb)->retries++;
1346 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1347 control &= L2CAP_CTRL_SAR;
1349 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1350 control |= L2CAP_CTRL_FINAL;
1352 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1353 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1354 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS is computed/stored via skb->data while the
 * clone tx_skb is what gets sent.  skb_clone() shares the data
 * buffer so this works, but it is inconsistent with
 * l2cap_retransmit_one_frame(), which uses tx_skb->data —
 * consider unifying. */
1357 if (chan->fcs == L2CAP_FCS_CRC16) {
1358 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1359 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1362 l2cap_do_send(chan, tx_skb);
1364 __set_retrans_timer(chan);
1366 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1367 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* First transmission of this frame: it is now awaiting an ack. */
1369 if (bt_cb(skb)->retries == 1)
1370 chan->unacked_frames++;
1372 chan->frames_sent++;
1374 if (skb_queue_is_last(&chan->tx_q, skb))
1375 chan->tx_send_head = NULL;
1377 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the head of the queue and the TX
 * sequence to the last acknowledged one, then resend via
 * l2cap_ertm_send(). */
1385 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1389 if (!skb_queue_empty(&chan->tx_q))
1390 chan->tx_send_head = chan->tx_q.next;
1392 chan->next_tx_seq = chan->expected_ack_seq;
1393 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR when locally busy; otherwise
 * try to piggyback the ack on pending I-frames, falling back to an
 * explicit RR S-frame when nothing was sent. */
1397 static void l2cap_send_ack(struct l2cap_chan *chan)
1401 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1403 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1404 control |= L2CAP_SUPER_RCV_NOT_READY;
1405 set_bit(CONN_RNR_SENT, &chan->conn_state);
1406 l2cap_send_sframe(chan, control);
/* Data went out — the ack rode along on the I-frames. */
1410 if (l2cap_ertm_send(chan) > 0)
1413 control |= L2CAP_SUPER_RCV_READY;
1414 l2cap_send_sframe(chan, control);
/* Send a Select-Reject S-frame (with Final bit) for the last entry on
 * the SREJ list, asking the peer to retransmit that sequence. */
1417 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1419 struct srej_list *tail;
1422 control = L2CAP_SUPER_SELECT_REJECT;
1423 control |= L2CAP_CTRL_FINAL;
1425 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1426 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1428 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user data from @msg into @skb, spilling anything
 * beyond the first @count bytes into MTU-sized fragment skbs chained
 * on skb's frag_list. */
1431 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1433 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1434 struct sk_buff **frag;
1437 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1443 /* Continuation fragments (no L2CAP header) */
1444 frag = &skb_shinfo(skb)->frag_list;
1446 count = min_t(unsigned int, conn->mtu, len);
1448 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1451 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1457 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM +
 * user payload from @msg.  Returns the skb or an ERR_PTR. */
1463 struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1465 struct sock *sk = chan->sk;
1466 struct l2cap_conn *conn = chan->conn;
1467 struct sk_buff *skb;
/* hlen = header + PSM field. */
1468 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1469 struct l2cap_hdr *lh;
1471 BT_DBG("sk %p len %d", sk, (int)len);
1473 count = min_t(unsigned int, (conn->mtu - hlen), len);
1474 skb = bt_skb_send_alloc(sk, count + hlen,
1475 msg->msg_flags & MSG_DONTWAIT, &err);
1477 return ERR_PTR(err);
1479 /* Create L2CAP header */
1480 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1481 lh->cid = cpu_to_le16(chan->dcid);
1482 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1483 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1485 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1486 if (unlikely(err < 0)) {
1488 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header + user payload
 * from @msg.  Returns the skb or an ERR_PTR. */
1493 struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1495 struct sock *sk = chan->sk;
1496 struct l2cap_conn *conn = chan->conn;
1497 struct sk_buff *skb;
1498 int err, count, hlen = L2CAP_HDR_SIZE;
1499 struct l2cap_hdr *lh;
1501 BT_DBG("sk %p len %d", sk, (int)len);
1503 count = min_t(unsigned int, (conn->mtu - hlen), len);
1504 skb = bt_skb_send_alloc(sk, count + hlen,
1505 msg->msg_flags & MSG_DONTWAIT, &err);
1507 return ERR_PTR(err);
1509 /* Create L2CAP header */
1510 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1511 lh->cid = cpu_to_le16(chan->dcid);
1512 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1514 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1515 if (unlikely(err < 0)) {
1517 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 2-byte control
 * field, optional 2-byte SDU length (for SAR start frames), payload,
 * and room for the FCS (filled in at transmit time).
 * Returns the skb or an ERR_PTR. */
1522 struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1524 struct sock *sk = chan->sk;
1525 struct l2cap_conn *conn = chan->conn;
1526 struct sk_buff *skb;
/* hlen = header + control field; FCS/SDU-length adjustments elided. */
1527 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1528 struct l2cap_hdr *lh;
1530 BT_DBG("sk %p len %d", sk, (int)len);
1533 return ERR_PTR(-ENOTCONN);
1538 if (chan->fcs == L2CAP_FCS_CRC16)
1541 count = min_t(unsigned int, (conn->mtu - hlen), len);
1542 skb = bt_skb_send_alloc(sk, count + hlen,
1543 msg->msg_flags & MSG_DONTWAIT, &err);
1545 return ERR_PTR(err);
1547 /* Create L2CAP header */
1548 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1549 lh->cid = cpu_to_le16(chan->dcid);
1550 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1551 put_unaligned_le16(control, skb_put(skb, 2));
1553 put_unaligned_le16(sdulen, skb_put(skb, 2));
1555 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1556 if (unlikely(err < 0)) {
1558 return ERR_PTR(err);
/* Reserve the FCS slot; the real value is written before send. */
1561 if (chan->fcs == L2CAP_FCS_CRC16)
1562 put_unaligned_le16(0, skb_put(skb, 2));
1564 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START frame (which
 * carries the total SDU length), zero or more CONTINUE frames and a
 * final END frame, each at most chan->remote_mps bytes.  Frames are
 * staged on a local queue so a mid-stream failure purges cleanly, then
 * spliced onto chan->tx_q.  Returns 0 or a negative errno. */
1568 int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1570 struct sk_buff *skb;
1571 struct sk_buff_head sar_queue;
1575 skb_queue_head_init(&sar_queue);
/* First PDU: SAR start, sdulen field carries the full SDU length */
1576 control = L2CAP_SDU_START;
1577 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1579 return PTR_ERR(skb);
1581 __skb_queue_tail(&sar_queue, skb);
1582 len -= chan->remote_mps;
1583 size += chan->remote_mps;
/* Middle/last PDUs: continue while more than one MPS remains */
1588 if (len > chan->remote_mps) {
1589 control = L2CAP_SDU_CONTINUE;
1590 buflen = chan->remote_mps;
1592 control = L2CAP_SDU_END;
1596 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* On failure drop everything queued so far for this SDU */
1598 skb_queue_purge(&sar_queue);
1599 return PTR_ERR(skb);
1602 __skb_queue_tail(&sar_queue, skb);
1606 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
/* If nothing was pending, transmission starts from our first frame */
1607 if (chan->tx_send_head == NULL)
1608 chan->tx_send_head = sar_queue.next;
/* Top-level channel send path: dispatch @msg (@len bytes) according to
 * channel type and mode — connectionless G-frame, basic B-frame, or
 * ERTM/streaming I-frames (segmented when the SDU exceeds the remote
 * MPS).  Returns bytes queued/sent or a negative errno. */
1613 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1615 struct sk_buff *skb;
1619 /* Connectionless channel */
1620 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1621 skb = l2cap_create_connless_pdu(chan, msg, len);
1623 return PTR_ERR(skb);
1625 l2cap_do_send(chan, skb);
1629 switch (chan->mode) {
1630 case L2CAP_MODE_BASIC:
1631 /* Check outgoing MTU */
1632 if (len > chan->omtu)
1635 /* Create a basic PDU */
1636 skb = l2cap_create_basic_pdu(chan, msg, len);
1638 return PTR_ERR(skb);
1640 l2cap_do_send(chan, skb);
1644 case L2CAP_MODE_ERTM:
1645 case L2CAP_MODE_STREAMING:
1646 /* Entire SDU fits into one PDU */
1647 if (len <= chan->remote_mps) {
1648 control = L2CAP_SDU_UNSEGMENTED;
1649 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1652 return PTR_ERR(skb);
1654 __skb_queue_tail(&chan->tx_q, skb);
1656 if (chan->tx_send_head == NULL)
1657 chan->tx_send_head = skb;
1660 /* Segment SDU into multiples PDUs */
1661 err = l2cap_sar_segment_sdu(chan, msg, len);
/* Streaming mode: fire-and-forget, no ack bookkeeping */
1666 if (chan->mode == L2CAP_MODE_STREAMING) {
1667 l2cap_streaming_send(chan);
/* ERTM: hold off while the peer is busy or we await an F-bit */
1672 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1673 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1678 err = l2cap_ertm_send(chan);
1685 BT_DBG("bad state %1.1x", chan->mode);
1692 /* Copy frame to all raw sockets on that connection */
1693 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1695 struct sk_buff *nskb;
1696 struct l2cap_chan *chan;
1698 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under the read lock and hand a
 * clone of @skb to every raw-type channel's recv callback. */
1700 read_lock(&conn->chan_lock);
1701 list_for_each_entry(chan, &conn->chan_l, list) {
1702 struct sock *sk = chan->sk;
1703 if (chan->chan_type != L2CAP_CHAN_RAW)
1706 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: we are inside a read-locked section */
1709 nskb = skb_clone(skb, GFP_ATOMIC);
/* recv() takes ownership on success; non-zero means it was refused */
1713 if (chan->ops->recv(chan->data, nskb))
1716 read_unlock(&conn->chan_lock);
1719 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling PDU: L2CAP header (signalling CID
 * chosen by link type), command header (@code/@ident/@dlen) and @dlen
 * bytes of @data.  Payload beyond conn->mtu is chained as fragments. */
1720 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1721 u8 code, u8 ident, u16 dlen, void *data)
1723 struct sk_buff *skb, **frag;
1724 struct l2cap_cmd_hdr *cmd;
1725 struct l2cap_hdr *lh;
1728 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1729 conn, code, ident, dlen);
1731 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1732 count = min_t(unsigned int, conn->mtu, len);
1734 skb = bt_skb_alloc(count, GFP_ATOMIC);
1738 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1739 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use the LE signalling channel, BR/EDR the classic one */
1741 if (conn->hcon->type == LE_LINK)
1742 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1744 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1746 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1749 cmd->len = cpu_to_le16(dlen);
/* First fragment: whatever payload fits after both headers */
1752 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1753 memcpy(skb_put(skb, count), data, count);
1759 /* Continuation fragments (no L2CAP header) */
1760 frag = &skb_shinfo(skb)->frag_list;
1762 count = min_t(unsigned int, conn->mtu, len);
1764 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1768 memcpy(skb_put(*frag, count), data, count);
1773 frag = &(*frag)->next;
/* Parse one configuration option at *ptr: report its @type and @olen,
 * decode 1/2/4-byte values into *val (larger options are returned as a
 * pointer cast into *val).  Returns the total bytes consumed so the
 * caller can advance through the option list. */
1783 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1785 struct l2cap_conf_opt *opt = *ptr;
1788 len = L2CAP_CONF_OPT_SIZE + opt->len;
1796 *val = *((u8 *) opt->val);
1800 *val = get_unaligned_le16(opt->val);
1804 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer to the raw bytes */
1808 *val = (unsigned long) opt->val;
1812 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr, encoding
 * 1/2/4-byte values in little-endian and memcpy'ing larger ones (where
 * @val is a pointer in disguise).  Advances *ptr past the option. */
1816 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1818 struct l2cap_conf_opt *opt = *ptr;
1820 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1827 *((u8 *) opt->val) = val;
1831 put_unaligned_le16(val, opt->val);
1835 put_unaligned_le32(val, opt->val);
/* Larger options: @val carries a pointer to the data to copy */
1839 memcpy(opt->val, (void *) val, len);
1843 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* ERTM ack timer callback: when the delayed-ack timer fires, send the
 * pending acknowledgement under the socket lock.  @arg is the channel
 * pointer smuggled through the timer's unsigned long. */
1846 static void l2cap_ack_timeout(unsigned long arg)
1848 struct l2cap_chan *chan = (void *) arg;
/* bh_lock: timers run in softirq context */
1850 bh_lock_sock(chan->sk);
1851 l2cap_send_ack(chan);
1852 bh_unlock_sock(chan->sk);
/* Initialise per-channel ERTM state: zero the sequence/ack counters,
 * arm the retransmission, monitor and ack timers, set up the SREJ
 * queue/list and install the ERTM backlog receive handler. */
1855 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1857 struct sock *sk = chan->sk;
1859 chan->expected_ack_seq = 0;
1860 chan->unacked_frames = 0;
1861 chan->buffer_seq = 0;
1862 chan->num_acked = 0;
1863 chan->frames_sent = 0;
1865 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1866 (unsigned long) chan);
1867 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1868 (unsigned long) chan);
1869 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
/* Out-of-sequence frames wait here until the SREJ'd frame arrives */
1871 skb_queue_head_init(&chan->srej_q);
1873 INIT_LIST_HEAD(&chan->srej_l);
/* Frames received while the socket is user-locked go through ERTM rx */
1876 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/* Pick the channel mode to actually use: keep ERTM/streaming only if
 * the remote's feature mask supports it, otherwise fall back to basic
 * mode. */
1879 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1882 case L2CAP_MODE_STREAMING:
1883 case L2CAP_MODE_ERTM:
1884 if (l2cap_mode_supported(mode, remote_feat_mask))
/* Default/unsupported: basic mode is always available */
1888 return L2CAP_MODE_BASIC;
/* Build our outgoing Configuration Request into @data: MTU option when
 * non-default, then mode-specific RFC (and FCS) options depending on
 * chan->mode.  Returns the total request length for l2cap_send_cmd(). */
1892 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1894 struct l2cap_conf_req *req = data;
1895 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1896 void *ptr = req->data;
1898 BT_DBG("chan %p", chan);
/* Only (re)negotiate the mode on the first config exchange */
1900 if (chan->num_conf_req || chan->num_conf_rsp)
1903 switch (chan->mode) {
1904 case L2CAP_MODE_STREAMING:
1905 case L2CAP_MODE_ERTM:
/* A "state 2 device" insists on its configured mode; otherwise
 * downgrade to whatever the remote's feature mask allows */
1906 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
1911 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
1916 if (chan->imtu != L2CAP_DEFAULT_MTU)
1917 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1919 switch (chan->mode) {
1920 case L2CAP_MODE_BASIC:
/* No need to send an RFC option if the peer knows neither ERTM
 * nor streaming mode — basic is implied */
1921 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1922 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1925 rfc.mode = L2CAP_MODE_BASIC;
1927 rfc.max_transmit = 0;
1928 rfc.retrans_timeout = 0;
1929 rfc.monitor_timeout = 0;
1930 rfc.max_pdu_size = 0;
1932 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1933 (unsigned long) &rfc);
1936 case L2CAP_MODE_ERTM:
1937 rfc.mode = L2CAP_MODE_ERTM;
1938 rfc.txwin_size = chan->tx_win;
1939 rfc.max_transmit = chan->max_tx;
/* Timeouts are set by the side that accepts the config, not here */
1940 rfc.retrans_timeout = 0;
1941 rfc.monitor_timeout = 0;
1942 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Clamp PDU size so a full frame (incl. per-frame overhead,
 * presumably the 10 bytes of header/control/FCS — TODO confirm)
 * fits in the link MTU */
1943 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1944 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1946 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1947 (unsigned long) &rfc);
1949 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Offer to drop the FCS when we don't want it or the peer
 * already asked for none */
1952 if (chan->fcs == L2CAP_FCS_NONE ||
1953 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1954 chan->fcs = L2CAP_FCS_NONE;
1955 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1959 case L2CAP_MODE_STREAMING:
1960 rfc.mode = L2CAP_MODE_STREAMING;
1962 rfc.max_transmit = 0;
1963 rfc.retrans_timeout = 0;
1964 rfc.monitor_timeout = 0;
1965 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1966 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1967 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1969 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1970 (unsigned long) &rfc);
1972 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1975 if (chan->fcs == L2CAP_FCS_NONE ||
1976 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1977 chan->fcs = L2CAP_FCS_NONE;
1978 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1983 req->dcid = cpu_to_le16(chan->dcid);
1984 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated Configuration Request (chan->conf_req,
 * chan->conf_len) and build our Configuration Response into @data:
 * walk the options, negotiate mode/MTU/FCS, echo back the options we
 * accept or counter-propose, and set result accordingly.  Returns the
 * response length, or -ECONNREFUSED when no agreeable mode exists. */
1989 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1991 struct l2cap_conf_rsp *rsp = data;
1992 void *ptr = rsp->data;
1993 void *req = chan->conf_req;
1994 int len = chan->conf_len;
1995 int type, hint, olen;
1997 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1998 u16 mtu = L2CAP_DEFAULT_MTU;
1999 u16 result = L2CAP_CONF_SUCCESS;
2001 BT_DBG("chan %p", chan);
2003 while (len >= L2CAP_CONF_OPT_SIZE) {
2004 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; mandatory unknown ones are rejected */
2006 hint = type & L2CAP_CONF_HINT;
2007 type &= L2CAP_CONF_MASK;
2010 case L2CAP_CONF_MTU:
2014 case L2CAP_CONF_FLUSH_TO:
2015 chan->flush_to = val;
2018 case L2CAP_CONF_QOS:
2021 case L2CAP_CONF_RFC:
2022 if (olen == sizeof(rfc))
2023 memcpy(&rfc, (void *) val, olen);
2026 case L2CAP_CONF_FCS:
2027 if (val == L2CAP_FCS_NONE)
2028 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
/* Unknown non-hint option: list its type in the response */
2036 result = L2CAP_CONF_UNKNOWN;
2037 *((u8 *) ptr++) = type;
/* Mode negotiation only on the first request/response round */
2042 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2045 switch (chan->mode) {
2046 case L2CAP_MODE_STREAMING:
2047 case L2CAP_MODE_ERTM:
2048 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2049 chan->mode = l2cap_select_mode(rfc.mode,
2050 chan->conn->feat_mask);
2054 if (chan->mode != rfc.mode)
2055 return -ECONNREFUSED;
/* Counter-propose our mode once; give up if it already failed */
2061 if (chan->mode != rfc.mode) {
2062 result = L2CAP_CONF_UNACCEPT;
2063 rfc.mode = chan->mode;
2065 if (chan->num_conf_rsp == 1)
2066 return -ECONNREFUSED;
2068 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2069 sizeof(rfc), (unsigned long) &rfc);
2073 if (result == L2CAP_CONF_SUCCESS) {
2074 /* Configure output options and let the other side know
2075 * which ones we don't like. */
2077 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2078 result = L2CAP_CONF_UNACCEPT;
2081 set_bit(CONF_MTU_DONE, &chan->conf_state);
2083 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2086 case L2CAP_MODE_BASIC:
2087 chan->fcs = L2CAP_FCS_NONE;
2088 set_bit(CONF_MODE_DONE, &chan->conf_state);
2091 case L2CAP_MODE_ERTM:
2092 chan->remote_tx_win = rfc.txwin_size;
2093 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's PDU size to what fits the link MTU */
2095 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2096 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2098 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* NOTE(review): these fields are little-endian on the wire, so
 * converting a host constant with le16_to_cpu() looks inverted —
 * should presumably be cpu_to_le16(); the two only differ on
 * big-endian hosts.  TODO confirm against upstream history. */
2100 rfc.retrans_timeout =
2101 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2102 rfc.monitor_timeout =
2103 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2105 set_bit(CONF_MODE_DONE, &chan->conf_state);
2107 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2108 sizeof(rfc), (unsigned long) &rfc);
2112 case L2CAP_MODE_STREAMING:
2113 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2114 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2116 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2118 set_bit(CONF_MODE_DONE, &chan->conf_state);
2120 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2121 sizeof(rfc), (unsigned long) &rfc);
2126 result = L2CAP_CONF_UNACCEPT;
2128 memset(&rfc, 0, sizeof(rfc));
2129 rfc.mode = chan->mode;
2132 if (result == L2CAP_CONF_SUCCESS)
2133 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
/* From the peer's point of view our dcid is their scid */
2135 rsp->scid = cpu_to_le16(chan->dcid);
2136 rsp->result = cpu_to_le16(result);
2137 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configuration Response (@rsp, @len bytes) and build
 * a follow-up Configuration Request into @data that adopts or adjusts
 * the contested options (MTU, flush timeout, RFC).  *result may be
 * updated.  Returns the new request length or -ECONNREFUSED when the
 * proposed mode is unacceptable. */
2142 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2144 struct l2cap_conf_req *req = data;
2145 void *ptr = req->data;
2148 struct l2cap_conf_rfc rfc;
2150 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2152 while (len >= L2CAP_CONF_OPT_SIZE) {
2153 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2156 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: reject but
 * re-offer the minimum so negotiation can still converge */
2157 if (val < L2CAP_DEFAULT_MIN_MTU) {
2158 *result = L2CAP_CONF_UNACCEPT;
2159 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2162 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2165 case L2CAP_CONF_FLUSH_TO:
2166 chan->flush_to = val;
2167 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2171 case L2CAP_CONF_RFC:
2172 if (olen == sizeof(rfc))
2173 memcpy(&rfc, (void *)val, olen);
/* A state-2 device never accepts a different mode */
2175 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2176 rfc.mode != chan->mode)
2177 return -ECONNREFUSED;
2181 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2182 sizeof(rfc), (unsigned long) &rfc);
2187 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2188 return -ECONNREFUSED;
2190 chan->mode = rfc.mode;
/* On success, latch the negotiated ERTM/streaming parameters */
2192 if (*result == L2CAP_CONF_SUCCESS) {
2194 case L2CAP_MODE_ERTM:
2195 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2196 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2197 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2199 case L2CAP_MODE_STREAMING:
2200 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2204 req->dcid = cpu_to_le16(chan->dcid);
2205 req->flags = cpu_to_le16(0x0000);
/* Build a minimal Configuration Response (no options) with the given
 * @result and continuation @flags.  Returns the response length. */
2210 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2212 struct l2cap_conf_rsp *rsp = data;
2213 void *ptr = rsp->data;
2215 BT_DBG("chan %p", chan);
/* The peer addresses us by our dcid (their source CID) */
2217 rsp->scid = cpu_to_le16(chan->dcid);
2218 rsp->result = cpu_to_le16(result);
2219 rsp->flags = cpu_to_le16(flags);
/* Complete a previously deferred connection: send the success
 * Connection Response for the pending request (chan->ident) and, if we
 * have not already done so, fire off our first Configuration Request. */
2224 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2226 struct l2cap_conn_rsp rsp;
2227 struct l2cap_conn *conn = chan->conn;
2230 rsp.scid = cpu_to_le16(chan->dcid);
2231 rsp.dcid = cpu_to_le16(chan->scid);
2232 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2233 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2234 l2cap_send_cmd(conn, chan->ident,
2235 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* test_and_set ensures exactly one initial config request is sent */
2237 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2240 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2241 l2cap_build_conf_req(chan, buf), buf);
2242 chan->num_conf_req++;
/* Extract the RFC option from a successful Configuration Response and
 * latch the negotiated ERTM/streaming parameters (timeouts, MPS) into
 * the channel.  No-op for basic-mode channels. */
2245 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2249 struct l2cap_conf_rfc rfc;
2251 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM/streaming carry RFC parameters worth extracting */
2253 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2256 while (len >= L2CAP_CONF_OPT_SIZE) {
2257 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2260 case L2CAP_CONF_RFC:
2261 if (olen == sizeof(rfc))
2262 memcpy(&rfc, (void *)val, olen);
2269 case L2CAP_MODE_ERTM:
2270 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2271 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2272 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2274 case L2CAP_MODE_STREAMING:
2275 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it answers our outstanding
 * Information Request (matching ident), treat the feature exchange as
 * done, cancel the info timer and let pending channels proceed. */
2279 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2281 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2283 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2286 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2287 cmd->ident == conn->info_ident) {
2288 del_timer(&conn->info_timer);
2290 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2291 conn->info_ident = 0;
/* Kick channels that were waiting on the feature-mask exchange */
2293 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: locate a listening channel on
 * the PSM, enforce link security and backlog limits, create the child
 * channel, and answer with success/pending/refused.  May also trigger
 * the feature-mask Information exchange or our first Config Request. */
2299 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2301 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2302 struct l2cap_conn_rsp rsp;
2303 struct l2cap_chan *chan = NULL, *pchan;
2304 struct sock *parent, *sk = NULL;
2305 int result, status = L2CAP_CS_NO_INFO;
2307 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2308 __le16 psm = req->psm;
2310 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2312 /* Check if we have socket listening on psm */
2313 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2315 result = L2CAP_CR_BAD_PSM;
2321 bh_lock_sock(parent);
2323 /* Check if the ACL is secure enough (if not SDP) */
/* 0x0001 is the SDP PSM, which is exempt from the security check */
2324 if (psm != cpu_to_le16(0x0001) &&
2325 !hci_conn_check_link_mode(conn->hcon)) {
2326 conn->disc_reason = 0x05;
2327 result = L2CAP_CR_SEC_BLOCK;
2331 result = L2CAP_CR_NO_MEM;
2333 /* Check for backlog size */
2334 if (sk_acceptq_is_full(parent)) {
2335 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2339 chan = pchan->ops->new_connection(pchan->data);
2345 write_lock_bh(&conn->chan_lock);
2347 /* Check if we already have channel with that dcid */
2348 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2349 write_unlock_bh(&conn->chan_lock);
2350 sock_set_flag(sk, SOCK_ZAPPED);
2351 chan->ops->close(chan->data);
2355 hci_conn_hold(conn->hcon);
2357 bacpy(&bt_sk(sk)->src, conn->src);
2358 bacpy(&bt_sk(sk)->dst, conn->dst);
2362 bt_accept_enqueue(parent, sk);
2364 __l2cap_chan_add(conn, chan);
2368 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Remember the request ident so a deferred response can reuse it */
2370 chan->ident = cmd->ident;
2372 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2373 if (l2cap_check_security(chan)) {
/* Userspace asked to defer accept(): report authorization
 * pending and wake the listener */
2374 if (bt_sk(sk)->defer_setup) {
2375 l2cap_state_change(chan, BT_CONNECT2);
2376 result = L2CAP_CR_PEND;
2377 status = L2CAP_CS_AUTHOR_PEND;
2378 parent->sk_data_ready(parent, 0);
2380 l2cap_state_change(chan, BT_CONFIG);
2381 result = L2CAP_CR_SUCCESS;
2382 status = L2CAP_CS_NO_INFO;
2385 l2cap_state_change(chan, BT_CONNECT2);
2386 result = L2CAP_CR_PEND;
2387 status = L2CAP_CS_AUTHEN_PEND;
/* Feature exchange still outstanding: answer pending for now */
2390 l2cap_state_change(chan, BT_CONNECT2);
2391 result = L2CAP_CR_PEND;
2392 status = L2CAP_CS_NO_INFO;
2395 write_unlock_bh(&conn->chan_lock);
2398 bh_unlock_sock(parent);
2401 rsp.scid = cpu_to_le16(scid);
2402 rsp.dcid = cpu_to_le16(dcid);
2403 rsp.result = cpu_to_le16(result);
2404 rsp.status = cpu_to_le16(status);
2405 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Kick off the feature-mask Information exchange if not started */
2407 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2408 struct l2cap_info_req info;
2409 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2411 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2412 conn->info_ident = l2cap_get_ident(conn);
2414 mod_timer(&conn->info_timer, jiffies +
2415 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2417 l2cap_send_cmd(conn, conn->info_ident,
2418 L2CAP_INFO_REQ, sizeof(info), &info);
/* Connection accepted outright: start configuration immediately */
2421 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2422 result == L2CAP_CR_SUCCESS) {
2424 set_bit(CONF_REQ_SENT, &chan->conf_state);
2425 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2426 l2cap_build_conf_req(chan, buf), buf);
2427 chan->num_conf_req++;
/* Handle an incoming Connection Response: find the channel by scid (or
 * by command ident when the peer returned no scid yet), then either
 * move to BT_CONFIG and send our Config Request (success), stay
 * pending, or tear the channel down (refused). */
2433 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2435 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2436 u16 scid, dcid, result, status;
2437 struct l2cap_chan *chan;
2441 scid = __le16_to_cpu(rsp->scid);
2442 dcid = __le16_to_cpu(rsp->dcid);
2443 result = __le16_to_cpu(rsp->result);
2444 status = __le16_to_cpu(rsp->status);
2446 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2449 chan = l2cap_get_chan_by_scid(conn, scid);
/* Fall back to the ident we used for the original request */
2453 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2461 case L2CAP_CR_SUCCESS:
2462 l2cap_state_change(chan, BT_CONFIG);
2465 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2467 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2470 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2471 l2cap_build_conf_req(chan, req), req);
2472 chan->num_conf_req++;
2476 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2480 /* don't delete l2cap channel if sk is owned by user */
/* Defer the teardown via a short timer instead of racing userspace */
2481 if (sock_owned_by_user(sk)) {
2482 l2cap_state_change(chan, BT_DISCONN);
2483 __clear_chan_timer(chan);
2484 __set_chan_timer(chan, HZ / 5);
2488 l2cap_chan_del(chan, ECONNREFUSED);
/* Apply the post-negotiation FCS policy for the channel's final mode. */
2496 static inline void set_default_fcs(struct l2cap_chan *chan)
2498 /* FCS is enabled only in ERTM or streaming mode, if one or both
2501 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2502 chan->fcs = L2CAP_FCS_NONE;
/* Neither side asked to disable it, so CRC16 stays on */
2503 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2504 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configuration Request: accumulate (possibly
 * multi-fragment) option data in chan->conf_req, and once complete,
 * parse it, send our response, and — when both directions are done —
 * bring the channel up (including ERTM init for ERTM mode). */
2507 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2509 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2512 struct l2cap_chan *chan;
2516 dcid = __le16_to_cpu(req->dcid);
2517 flags = __le16_to_cpu(req->flags);
2519 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2521 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config only makes sense while connecting/configuring; otherwise
 * reject with INVALID_CID */
2527 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2528 struct l2cap_cmd_rej_cid rej;
2530 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2531 rej.scid = cpu_to_le16(chan->scid);
2532 rej.dcid = cpu_to_le16(chan->dcid);
2534 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2539 /* Reject if config buffer is too small. */
2540 len = cmd_len - sizeof(*req);
2541 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2542 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2543 l2cap_build_conf_rsp(chan, rsp,
2544 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the accumulated request */
2549 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2550 chan->conf_len += len;
/* Continuation flag set: more fragments follow */
2552 if (flags & 0x0001) {
2553 /* Incomplete config. Send empty response. */
2554 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2555 l2cap_build_conf_rsp(chan, rsp,
2556 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2560 /* Complete config. */
2561 len = l2cap_parse_conf_req(chan, rsp);
2563 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2567 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2568 chan->num_conf_rsp++;
2570 /* Reset config buffer. */
2573 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: the channel is now connected */
2576 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2577 set_default_fcs(chan);
2579 l2cap_state_change(chan, BT_CONNECTED);
2581 chan->next_tx_seq = 0;
2582 chan->expected_tx_seq = 0;
2583 skb_queue_head_init(&chan->tx_q);
2584 if (chan->mode == L2CAP_MODE_ERTM)
2585 l2cap_ertm_init(chan);
2587 l2cap_chan_ready(sk);
/* We responded first; make sure our own Config Request goes out */
2591 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2593 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2594 l2cap_build_conf_req(chan, buf), buf);
2595 chan->num_conf_req++;
/* Handle an incoming Configuration Response: on success latch the RFC
 * parameters; on UNACCEPT, re-negotiate with a new Config Request (up
 * to L2CAP_CONF_MAX_CONF_RSP attempts); otherwise disconnect.  When
 * both directions complete, bring the channel up. */
2603 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2605 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2606 u16 scid, flags, result;
2607 struct l2cap_chan *chan;
2609 int len = cmd->len - sizeof(*rsp);
2611 scid = __le16_to_cpu(rsp->scid);
2612 flags = __le16_to_cpu(rsp->flags);
2613 result = __le16_to_cpu(rsp->result);
2615 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2616 scid, flags, result);
2618 chan = l2cap_get_chan_by_scid(conn, scid);
2625 case L2CAP_CONF_SUCCESS:
2626 l2cap_conf_rfc_get(chan, rsp->data, len);
2629 case L2CAP_CONF_UNACCEPT:
2630 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Options too large for our request buffer: bail out */
2633 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2634 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2638 /* throw out any old stored conf requests */
2639 result = L2CAP_CONF_SUCCESS;
2640 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2643 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2647 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2648 L2CAP_CONF_REQ, len, req);
2649 chan->num_conf_req++;
2650 if (result != L2CAP_CONF_SUCCESS)
/* Rejected/failed: surface ECONNRESET and tear down shortly */
2656 sk->sk_err = ECONNRESET;
2657 __set_chan_timer(chan, HZ * 5);
2658 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2665 set_bit(CONF_INPUT_DONE, &chan->conf_state);
/* Peer accepted our side too: transition to connected */
2667 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2668 set_default_fcs(chan);
2670 l2cap_state_change(chan, BT_CONNECTED);
2671 chan->next_tx_seq = 0;
2672 chan->expected_tx_seq = 0;
2673 skb_queue_head_init(&chan->tx_q);
2674 if (chan->mode == L2CAP_MODE_ERTM)
2675 l2cap_ertm_init(chan);
2677 l2cap_chan_ready(sk);
/* Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, shut the socket down, and delete the channel
 * (deferred via a short timer when userspace holds the socket lock). */
2685 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2687 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2688 struct l2cap_disconn_rsp rsp;
2690 struct l2cap_chan *chan;
2693 scid = __le16_to_cpu(req->scid);
2694 dcid = __le16_to_cpu(req->dcid);
2696 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid: look the channel up by it */
2698 chan = l2cap_get_chan_by_scid(conn, dcid);
2704 rsp.dcid = cpu_to_le16(chan->scid);
2705 rsp.scid = cpu_to_le16(chan->dcid);
2706 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2708 sk->sk_shutdown = SHUTDOWN_MASK;
2710 /* don't delete l2cap channel if sk is owned by user */
2711 if (sock_owned_by_user(sk)) {
2712 l2cap_state_change(chan, BT_DISCONN);
2713 __clear_chan_timer(chan);
2714 __set_chan_timer(chan, HZ / 5);
2719 l2cap_chan_del(chan, ECONNRESET);
2722 chan->ops->close(chan->data);
/* Handle an incoming Disconnection Response: our disconnect completed,
 * so delete the channel (deferred via a short timer when userspace
 * holds the socket lock) and close it. */
2726 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2728 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2730 struct l2cap_chan *chan;
2733 scid = __le16_to_cpu(rsp->scid);
2734 dcid = __le16_to_cpu(rsp->dcid);
2736 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2738 chan = l2cap_get_chan_by_scid(conn, scid);
2744 /* don't delete l2cap channel if sk is owned by user */
2745 if (sock_owned_by_user(sk)) {
2746 l2cap_state_change(chan,BT_DISCONN);
2747 __clear_chan_timer(chan);
2748 __set_chan_timer(chan, HZ / 5);
/* Clean disconnect requested by us: no error reported */
2753 l2cap_chan_del(chan, 0);
2756 chan->ops->close(chan->data);
/* Handle an incoming Information Request: answer with our feature mask
 * (extended with ERTM/streaming support), our fixed-channel map, or
 * NOTSUPP for unknown info types. */
2760 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2762 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2765 type = __le16_to_cpu(req->type);
2767 BT_DBG("type 0x%4.4x", type);
2769 if (type == L2CAP_IT_FEAT_MASK) {
2771 u32 feat_mask = l2cap_feat_mask;
2772 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2773 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2774 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise the modes this stack implements on top of the base mask */
2776 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2778 put_unaligned_le32(feat_mask, rsp->data);
2779 l2cap_send_cmd(conn, cmd->ident,
2780 L2CAP_INFO_RSP, sizeof(buf), buf);
2781 } else if (type == L2CAP_IT_FIXED_CHAN) {
2783 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2784 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2785 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel bitmap follows the 4-byte response header */
2786 memcpy(buf + 4, l2cap_fixed_chan, 8);
2787 l2cap_send_cmd(conn, cmd->ident,
2788 L2CAP_INFO_RSP, sizeof(buf), buf);
2790 struct l2cap_info_rsp rsp;
2791 rsp.type = cpu_to_le16(type);
2792 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2793 l2cap_send_cmd(conn, cmd->ident,
2794 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Information Response to our request: record the
 * remote feature mask, optionally chase it with a fixed-channel query,
 * then mark the exchange done and start pending channels. */
2800 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2802 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2805 type = __le16_to_cpu(rsp->type);
2806 result = __le16_to_cpu(rsp->result);
2808 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2810 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2811 if (cmd->ident != conn->info_ident ||
2812 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2815 del_timer(&conn->info_timer);
/* A failed query still ends the exchange; proceed without features */
2817 if (result != L2CAP_IR_SUCCESS) {
2818 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2819 conn->info_ident = 0;
2821 l2cap_conn_start(conn);
2826 if (type == L2CAP_IT_FEAT_MASK) {
2827 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: query its fixed-channel map next */
2829 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2830 struct l2cap_info_req req;
2831 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2833 conn->info_ident = l2cap_get_ident(conn);
2835 l2cap_send_cmd(conn, conn->info_ident,
2836 L2CAP_INFO_REQ, sizeof(req), &req);
2838 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2839 conn->info_ident = 0;
2841 l2cap_conn_start(conn);
2843 } else if (type == L2CAP_IT_FIXED_CHAN) {
2844 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2845 conn->info_ident = 0;
2847 l2cap_conn_start(conn);
/* Validate LE connection parameters (interval min/max in 1.25ms units,
 * slave latency, supervision timeout in 10ms units) against the ranges
 * the code enforces below.  Returns 0 if acceptable. */
2853 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2858 if (min > max || min < 6 || max > 3200)
2861 if (to_multiplier < 10 || to_multiplier > 3200)
/* Supervision timeout must exceed the maximum connection interval */
2864 if (max >= to_multiplier * 8)
2867 max_latency = (to_multiplier * 8 / max) - 1;
2868 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master role only):
 * validate the proposed parameters, send accept/reject, and on accept
 * ask the controller to update the connection. */
2874 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2875 struct l2cap_cmd_hdr *cmd, u8 *data)
2877 struct hci_conn *hcon = conn->hcon;
2878 struct l2cap_conn_param_update_req *req;
2879 struct l2cap_conn_param_update_rsp rsp;
2880 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may apply a parameter update */
2883 if (!(hcon->link_mode & HCI_LM_MASTER))
2886 cmd_len = __le16_to_cpu(cmd->len);
2887 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2890 req = (struct l2cap_conn_param_update_req *) data;
2891 min = __le16_to_cpu(req->min);
2892 max = __le16_to_cpu(req->max);
2893 latency = __le16_to_cpu(req->latency);
2894 to_multiplier = __le16_to_cpu(req->to_multiplier);
2896 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2897 min, max, latency, to_multiplier);
2899 memset(&rsp, 0, sizeof(rsp));
2901 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2903 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2905 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2907 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Parameters accepted: push the update down to the controller */
2911 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signalling command to its handler by opcode.
 * Echo requests are answered inline; unknown opcodes are logged.
 * Returns the handler's result (0 on success). */
2916 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2917 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2921 switch (cmd->code) {
2922 case L2CAP_COMMAND_REJ:
2923 l2cap_command_rej(conn, cmd, data);
2926 case L2CAP_CONN_REQ:
2927 err = l2cap_connect_req(conn, cmd, data);
2930 case L2CAP_CONN_RSP:
2931 err = l2cap_connect_rsp(conn, cmd, data);
2934 case L2CAP_CONF_REQ:
2935 err = l2cap_config_req(conn, cmd, cmd_len, data);
2938 case L2CAP_CONF_RSP:
2939 err = l2cap_config_rsp(conn, cmd, data);
2942 case L2CAP_DISCONN_REQ:
2943 err = l2cap_disconnect_req(conn, cmd, data);
2946 case L2CAP_DISCONN_RSP:
2947 err = l2cap_disconnect_rsp(conn, cmd, data);
2950 case L2CAP_ECHO_REQ:
/* Echo: bounce the payload straight back with the same ident */
2951 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2954 case L2CAP_ECHO_RSP:
2957 case L2CAP_INFO_REQ:
2958 err = l2cap_information_req(conn, cmd, data);
2961 case L2CAP_INFO_RSP:
2962 err = l2cap_information_rsp(conn, cmd, data);
2966 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signalling command.  Only the connection parameter
 * update request needs handling here; rejects and update responses
 * require no action, and unknown opcodes are logged. */
2974 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2975 struct l2cap_cmd_hdr *cmd, u8 *data)
2977 switch (cmd->code) {
2978 case L2CAP_COMMAND_REJ:
2981 case L2CAP_CONN_PARAM_UPDATE_REQ:
2982 return l2cap_conn_param_update_req(conn, cmd, data);
2984 case L2CAP_CONN_PARAM_UPDATE_RSP:
2988 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process a frame received on the signalling channel: mirror it to raw
 * sockets, then iterate over the packed command headers, dispatching
 * each to the BR/EDR or LE handler and sending a Command Reject when a
 * handler fails. */
2993 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2994 struct sk_buff *skb)
2996 u8 *data = skb->data;
2998 struct l2cap_cmd_hdr cmd;
3001 l2cap_raw_recv(conn, skb);
3003 while (len >= L2CAP_CMD_HDR_SIZE) {
3005 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3006 data += L2CAP_CMD_HDR_SIZE;
3007 len -= L2CAP_CMD_HDR_SIZE;
3009 cmd_len = le16_to_cpu(cmd.len);
3011 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Reject frames whose claimed length overruns the buffer, and
 * commands with the reserved ident 0 */
3013 if (cmd_len > len || !cmd.ident) {
3014 BT_DBG("corrupted command");
3018 if (conn->hcon->type == LE_LINK)
3019 err = l2cap_le_sig_cmd(conn, &cmd, data);
3021 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3024 struct l2cap_cmd_rej_unk rej;
3026 BT_ERR("Wrong link type (%d)", err);
3028 /* FIXME: Map err to a valid reason */
3029 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3030 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 FCS trailer of a received frame (when the channel
 * uses CRC16): trim the 2 FCS bytes off the skb, read them from just
 * past the new end (the data is still there after skb_trim), and
 * compare against a CRC computed from the L2CAP header onward. */
3040 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3042 u16 our_fcs, rcv_fcs;
/* CRC covers the basic header and the 2-byte control field too */
3043 int hdr_size = L2CAP_HDR_SIZE + 2;
3045 if (chan->fcs == L2CAP_FCS_CRC16) {
3046 skb_trim(skb, skb->len - 2);
3047 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3048 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3050 if (our_fcs != rcv_fcs)
/* Answer a poll (P-bit) from the peer: send an RNR if we are locally
 * busy, otherwise push pending I-frames, and fall back to an RR if
 * nothing was transmitted, so the peer always gets its F-bit reply. */
3056 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3060 chan->frames_sent = 0;
3062 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3064 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3065 control |= L2CAP_SUPER_RCV_NOT_READY;
3066 l2cap_send_sframe(chan, control);
3067 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* Peer was busy but is polling us now — retransmit outstanding frames. */
3070 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3071 l2cap_retransmit_frames(chan);
3073 l2cap_ertm_send(chan);
/* Nothing went out and we are not busy: reply with a plain RR. */
3075 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3076 chan->frames_sent == 0) {
3077 control |= L2CAP_SUPER_RCV_READY;
3078 l2cap_send_sframe(chan, control);
/* Insert an out-of-order I-frame into the SREJ reorder queue, keeping
 * the queue sorted by tx_seq distance from buffer_seq (modulo-64
 * sequence space).  A frame whose tx_seq is already queued is treated
 * as a duplicate.  NOTE(review): the duplicate/return paths are partly
 * elided in this chunk. */
3082 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
3084 struct sk_buff *next_skb;
3085 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequence metadata in the skb control block for reassembly. */
3087 bt_cb(skb)->tx_seq = tx_seq;
3088 bt_cb(skb)->sar = sar;
3090 next_skb = skb_peek(&chan->srej_q);
/* Empty queue: just append. */
3092 __skb_queue_tail(&chan->srej_q, skb);
/* Offsets are distances from buffer_seq in modulo-64 space. */
3096 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3097 if (tx_seq_offset < 0)
3098 tx_seq_offset += 64;
3101 if (bt_cb(next_skb)->tx_seq == tx_seq)
3104 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3105 chan->buffer_seq) % 64;
3106 if (next_tx_seq_offset < 0)
3107 next_tx_seq_offset += 64;
/* Found the first queued frame that follows us: insert before it. */
3109 if (next_tx_seq_offset > tx_seq_offset) {
3110 __skb_queue_before(&chan->srej_q, next_skb, skb);
3114 if (skb_queue_is_last(&chan->srej_q, next_skb))
3117 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
/* Larger than everything queued: append at the tail. */
3119 __skb_queue_tail(&chan->srej_q, skb);
/* Reassemble a segmented SDU in ERTM mode from the SAR bits of the
 * control field.  UNSEGMENTED frames go straight up; START allocates
 * chan->sdu and begins accumulation; CONTINUE appends; the (elided)
 * END case finishes the SDU, clones it and delivers it upstream.
 * On protocol violations the channel is disconnected.
 * NOTE(review): several error-path and END-case lines are elided. */
3124 static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3126 struct sk_buff *_skb;
3129 switch (control & L2CAP_CTRL_SAR) {
3130 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame while mid-SDU is a protocol violation. */
3131 if (test_bit(CONN_SAR_SDU, &chan->conn_state))
3134 return chan->ops->recv(chan->data, skb);
3136 case L2CAP_SDU_START:
3137 if (test_bit(CONN_SAR_SDU, &chan->conn_state))
/* First two payload bytes of a START frame carry the total SDU length. */
3140 chan->sdu_len = get_unaligned_le16(skb->data);
3142 if (chan->sdu_len > chan->imtu)
3145 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3149 /* pull sdu_len bytes only after alloc, because of Local Busy
3150 * condition we have to be sure that this will be executed
3151 * only once, i.e., when alloc does not fail */
3154 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3156 set_bit(CONN_SAR_SDU, &chan->conn_state);
3157 chan->partial_sdu_len = skb->len;
3160 case L2CAP_SDU_CONTINUE:
/* CONTINUE without a preceding START is a violation. */
3161 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3167 chan->partial_sdu_len += skb->len;
/* Accumulated more than the advertised SDU length — drop. */
3168 if (chan->partial_sdu_len > chan->sdu_len)
3171 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
/* END case (label elided): validate total length and deliver. */
3176 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3182 chan->partial_sdu_len += skb->len;
3184 if (chan->partial_sdu_len > chan->imtu)
3187 if (chan->partial_sdu_len != chan->sdu_len)
3190 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
/* Deliver a clone so chan->sdu can be retried on receiver busy. */
3192 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3197 err = chan->ops->recv(chan->data, _skb);
3203 clear_bit(CONN_SAR_SDU, &chan->conn_state);
3205 kfree_skb(chan->sdu);
/* Error path (labels elided): free partial SDU and tear the channel down. */
3213 kfree_skb(chan->sdu);
3217 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Enter the ERTM Local Busy state: mark the channel busy, tell the
 * peer with an RNR S-frame, and stop the ack timer (no more acks
 * will be generated while busy). */
3222 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3226 BT_DBG("chan %p, Enter local busy", chan);
3228 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3230 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3231 control |= L2CAP_SUPER_RCV_NOT_READY;
3232 l2cap_send_sframe(chan, control);
/* Remember we owe the peer a "busy cleared" notification later. */
3234 set_bit(CONN_RNR_SENT, &chan->conn_state);
3236 __clear_ack_timer(chan);
/* Leave the ERTM Local Busy state.  If we previously sent an RNR,
 * send an RR with the Poll bit set and arm the monitor timer while
 * waiting for the peer's Final-bit response; then clear the busy
 * flags.  NOTE(review): some control flow between these lines is
 * elided in this chunk. */
3239 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3243 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3246 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
/* RR + Poll asks the peer for an immediate F-bit acknowledgment. */
3247 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3248 l2cap_send_sframe(chan, control);
3249 chan->retry_count = 1;
3251 __clear_retrans_timer(chan);
3252 __set_monitor_timer(chan);
3254 set_bit(CONN_WAIT_F, &chan->conn_state);
3257 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3258 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3260 BT_DBG("chan %p, Exit local busy", chan);
/* Public entry point for the socket layer to toggle receiver-busy
 * state; only meaningful in ERTM mode (no-op for other modes). */
3263 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3265 if (chan->mode == L2CAP_MODE_ERTM) {
3267 l2cap_ertm_enter_local_busy(chan);
3269 l2cap_ertm_exit_local_busy(chan);
/* Reassemble SDUs in Streaming mode.  Unlike ERTM, errors here do not
 * disconnect the channel: a bad sequence simply discards the partial
 * SDU and resynchronizes on the next START frame (frames may be lost
 * by design in streaming mode).
 * NOTE(review): several lines, including the END-case label and some
 * error paths, are elided in this chunk. */
3273 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3275 struct sk_buff *_skb;
3279 * TODO: We have to notify the userland if some data is lost with the
3283 switch (control & L2CAP_CTRL_SAR) {
3284 case L2CAP_SDU_UNSEGMENTED:
/* Unsegmented frame mid-SDU: drop the stale partial SDU, keep going. */
3285 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3286 kfree_skb(chan->sdu);
3290 err = chan->ops->recv(chan->data, skb);
3296 case L2CAP_SDU_START:
/* New START while mid-SDU: previous SDU is lost, restart. */
3297 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3298 kfree_skb(chan->sdu);
/* First two payload bytes carry the total SDU length. */
3302 chan->sdu_len = get_unaligned_le16(skb->data);
3305 if (chan->sdu_len > chan->imtu) {
3310 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3316 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3318 set_bit(CONN_SAR_SDU, &chan->conn_state);
3319 chan->partial_sdu_len = skb->len;
3323 case L2CAP_SDU_CONTINUE:
3324 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3327 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3329 chan->partial_sdu_len += skb->len;
/* Overrun of the advertised length: discard the partial SDU. */
3330 if (chan->partial_sdu_len > chan->sdu_len)
3331 kfree_skb(chan->sdu);
/* END case (label elided): finish and deliver if lengths match. */
3338 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3341 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3343 clear_bit(CONN_SAR_SDU, &chan->conn_state);
3344 chan->partial_sdu_len += skb->len;
3346 if (chan->partial_sdu_len > chan->imtu)
3349 if (chan->partial_sdu_len == chan->sdu_len) {
3350 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3351 err = chan->ops->recv(chan->data, _skb);
3358 kfree_skb(chan->sdu);
/* Drain the SREJ reorder queue: starting at @tx_seq, deliver queued
 * frames that are now in sequence, stopping at the first gap or when
 * the channel goes locally busy.  buffer_seq_srej tracks delivery
 * progress in modulo-64 sequence space. */
3366 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3368 struct sk_buff *skb;
3371 while ((skb = skb_peek(&chan->srej_q)) &&
3372 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Queue head is not the next expected frame — a gap remains. */
3375 if (bt_cb(skb)->tx_seq != tx_seq)
3378 skb = skb_dequeue(&chan->srej_q);
3379 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3380 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3383 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3387 chan->buffer_seq_srej =
3388 (chan->buffer_seq_srej + 1) % 64;
3389 tx_seq = (tx_seq + 1) % 64;
/* Re-send SREJ S-frames for every sequence number still outstanding
 * in srej_l after @tx_seq has been satisfied.  The visible loop
 * removes the satisfied entry and re-queues/re-sends the rest.
 * NOTE(review): list_del/kfree of the matched entry appears elided. */
3393 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3395 struct srej_list *l, *tmp;
3398 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3399 if (l->tx_seq == tx_seq) {
3404 control = L2CAP_SUPER_SELECT_REJECT;
3405 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3406 l2cap_send_sframe(chan, control);
/* Move the entry to the tail to preserve request order. */
3408 list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ S-frame for every sequence number between
 * expected_tx_seq and the received @tx_seq (exclusive), recording
 * each requested frame in srej_l so later retransmissions can be
 * matched up.  Finally advance expected_tx_seq past @tx_seq. */
3412 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3414 struct srej_list *new;
3417 while (tx_seq != chan->expected_tx_seq) {
3418 control = L2CAP_SUPER_SELECT_REJECT;
3419 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3420 l2cap_send_sframe(chan, control);
/* NOTE(review): kzalloc(GFP_ATOMIC) result is dereferenced without a
 * NULL check — an atomic allocation can fail; upstream later changed
 * this function to return -ENOMEM on allocation failure. */
3422 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3423 new->tx_seq = chan->expected_tx_seq;
3424 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3425 list_add_tail(&new->list, &chan->srej_l);
3427 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/* Core ERTM I-frame receive state machine.  Validates tx_seq against
 * the receive window, handles the SREJ_SENT recovery state (queueing
 * out-of-order frames and matching retransmissions), enters SREJ
 * recovery on a new gap, and for in-sequence frames reassembles the
 * SDU and schedules/sends acknowledgments.
 * NOTE(review): this chunk is elided — gotos, labels and several
 * braces are not visible; comments below describe only what the
 * visible lines establish. */
3430 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3432 u8 tx_seq = __get_txseq(rx_control);
3433 u8 req_seq = __get_reqseq(rx_control);
3434 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3435 int tx_seq_offset, expected_tx_seq_offset;
/* Ack after roughly 1/6 of the tx window has been received. */
3436 int num_to_ack = (chan->tx_win/6) + 1;
3439 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3440 tx_seq, rx_control);
/* F-bit while waiting for one ends the WAIT_F (poll) exchange. */
3442 if (L2CAP_CTRL_FINAL & rx_control &&
3443 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3444 __clear_monitor_timer(chan);
3445 if (chan->unacked_frames > 0)
3446 __set_retrans_timer(chan);
3447 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* The piggybacked ReqSeq acknowledges our transmitted frames. */
3450 chan->expected_ack_seq = req_seq;
3451 l2cap_drop_acked_frames(chan);
3453 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3454 if (tx_seq_offset < 0)
3455 tx_seq_offset += 64;
3457 /* invalid tx_seq */
3458 if (tx_seq_offset >= chan->tx_win) {
3459 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3463 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3466 if (tx_seq == chan->expected_tx_seq)
/* --- SREJ recovery already in progress --- */
3469 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3470 struct srej_list *first;
3472 first = list_first_entry(&chan->srej_l,
3473 struct srej_list, list);
/* This is the oldest frame we SREJ'd — accept and try to drain. */
3474 if (tx_seq == first->tx_seq) {
3475 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3476 l2cap_check_srej_gap(chan, tx_seq);
3478 list_del(&first->list);
/* All requested frames arrived: leave SREJ_SENT state. */
3481 if (list_empty(&chan->srej_l)) {
3482 chan->buffer_seq = chan->buffer_seq_srej;
3483 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3484 l2cap_send_ack(chan);
3485 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3488 struct srej_list *l;
3490 /* duplicated tx_seq */
3491 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3494 list_for_each_entry(l, &chan->srej_l, list) {
3495 if (l->tx_seq == tx_seq) {
/* A frame we already SREJ'd arrived out of order — ask again
 * for the ones still missing. */
3496 l2cap_resend_srejframe(chan, tx_seq);
3500 l2cap_send_srejframe(chan, tx_seq);
/* --- Not in SREJ state: a new gap was detected --- */
3503 expected_tx_seq_offset =
3504 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3505 if (expected_tx_seq_offset < 0)
3506 expected_tx_seq_offset += 64;
3508 /* duplicated tx_seq */
3509 if (tx_seq_offset < expected_tx_seq_offset)
3512 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3514 BT_DBG("chan %p, Enter SREJ", chan);
3516 INIT_LIST_HEAD(&chan->srej_l);
3517 chan->buffer_seq_srej = chan->buffer_seq;
3519 __skb_queue_head_init(&chan->srej_q);
3520 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
/* Next S-frame we send must carry the Poll bit. */
3522 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3524 l2cap_send_srejframe(chan, tx_seq);
3526 __clear_ack_timer(chan);
/* --- In-sequence frame (expected label elided) --- */
3531 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3533 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3534 bt_cb(skb)->tx_seq = tx_seq;
3535 bt_cb(skb)->sar = sar;
3536 __skb_queue_tail(&chan->srej_q, skb);
3540 err = l2cap_ertm_reassembly_sdu(chan, skb, rx_control);
3541 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3543 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3547 if (rx_control & L2CAP_CTRL_FINAL) {
3548 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3549 l2cap_retransmit_frames(chan);
3552 __set_ack_timer(chan);
/* Send an explicit ack every num_to_ack frames. */
3554 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3555 if (chan->num_acked == num_to_ack - 1)
3556 l2cap_send_ack(chan);
/* Handle a Receiver Ready (RR) S-frame: acknowledge frames up to
 * ReqSeq, then react to the Poll/Final bits — a Poll demands an
 * immediate F-bit reply, a Final clears poll recovery, and a plain
 * RR resumes transmission. */
3565 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3567 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3570 chan->expected_ack_seq = __get_reqseq(rx_control);
3571 l2cap_drop_acked_frames(chan);
3573 if (rx_control & L2CAP_CTRL_POLL) {
/* Our next response must carry the Final bit. */
3574 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3575 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3576 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3577 (chan->unacked_frames > 0))
3578 __set_retrans_timer(chan);
3580 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* In SREJ state, answer the poll with the tail SREJ frame. */
3581 l2cap_send_srejtail(chan);
3583 l2cap_send_i_or_rr_or_rnr(chan);
3586 } else if (rx_control & L2CAP_CTRL_FINAL) {
3587 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* F-bit without a pending REJ action triggers retransmission. */
3589 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3590 l2cap_retransmit_frames(chan);
3593 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3594 (chan->unacked_frames > 0))
3595 __set_retrans_timer(chan);
3597 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3598 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3599 l2cap_send_ack(chan);
3601 l2cap_ertm_send(chan);
/* Handle a Reject (REJ) S-frame: frames before ReqSeq are acked,
 * everything from ReqSeq on must be retransmitted.  If we are in a
 * WAIT_F poll exchange, remember the REJ so the eventual F-bit reply
 * does not retransmit a second time (CONN_REJ_ACT). */
3605 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3607 u8 tx_seq = __get_reqseq(rx_control);
3609 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3611 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3613 chan->expected_ack_seq = tx_seq;
3614 l2cap_drop_acked_frames(chan);
3616 if (rx_control & L2CAP_CTRL_FINAL) {
3617 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3618 l2cap_retransmit_frames(chan);
3620 l2cap_retransmit_frames(chan);
3622 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3623 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a Selective Reject (SREJ) S-frame: retransmit exactly the
 * requested frame.  The P/F bit variants decide whether to also ack
 * (P), guard against double retransmission after a poll (F via
 * CONN_SREJ_ACT + srej_save_reqseq), or just resend (neither). */
3626 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3628 u8 tx_seq = __get_reqseq(rx_control);
3630 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3632 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3634 if (rx_control & L2CAP_CTRL_POLL) {
/* SREJ with P also acknowledges frames before tx_seq. */
3635 chan->expected_ack_seq = tx_seq;
3636 l2cap_drop_acked_frames(chan);
3638 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3639 l2cap_retransmit_one_frame(chan, tx_seq);
3641 l2cap_ertm_send(chan);
3643 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3644 chan->srej_save_reqseq = tx_seq;
3645 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3647 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* Skip the resend if this F-bit answers the SREJ we already served. */
3648 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3649 chan->srej_save_reqseq == tx_seq)
3650 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3652 l2cap_retransmit_one_frame(chan, tx_seq);
3654 l2cap_retransmit_one_frame(chan, tx_seq);
3655 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3656 chan->srej_save_reqseq = tx_seq;
3657 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a Receiver Not Ready (RNR) S-frame: mark the peer busy, ack
 * frames up to ReqSeq, stop retransmitting, and if the peer polled us
 * answer with the appropriate F-bit frame (RR/RNR, or SREJ tail when
 * in SREJ recovery). */
3662 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3664 u8 tx_seq = __get_reqseq(rx_control);
3666 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3668 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3669 chan->expected_ack_seq = tx_seq;
3670 l2cap_drop_acked_frames(chan);
3672 if (rx_control & L2CAP_CTRL_POLL)
3673 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3675 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
/* Peer is busy: no point running the retransmission timer. */
3676 __clear_retrans_timer(chan);
3677 if (rx_control & L2CAP_CTRL_POLL)
3678 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3682 if (rx_control & L2CAP_CTRL_POLL)
3683 l2cap_send_srejtail(chan);
3685 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
/* Demultiplex a received S-frame to the RR/REJ/SREJ/RNR handler.
 * An F-bit during WAIT_F first terminates the poll exchange, mirroring
 * the I-frame path. */
3688 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3690 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3692 if (L2CAP_CTRL_FINAL & rx_control &&
3693 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3694 __clear_monitor_timer(chan);
3695 if (chan->unacked_frames > 0)
3696 __set_retrans_timer(chan);
3697 clear_bit(CONN_WAIT_F, &chan->conn_state);
3700 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3701 case L2CAP_SUPER_RCV_READY:
3702 l2cap_data_channel_rrframe(chan, rx_control);
3705 case L2CAP_SUPER_REJECT:
3706 l2cap_data_channel_rejframe(chan, rx_control);
3709 case L2CAP_SUPER_SELECT_REJECT:
3710 l2cap_data_channel_srejframe(chan, rx_control);
3713 case L2CAP_SUPER_RCV_NOT_READY:
3714 l2cap_data_channel_rnrframe(chan, rx_control);
/* Validate and dispatch one ERTM frame for a socket: FCS check,
 * length sanity against MPS, ReqSeq window validation, then routing
 * to the I-frame or S-frame handler.  Invalid frames tear the
 * channel down with ECONNRESET.
 * NOTE(review): skb_pull calls and some len bookkeeping between these
 * lines are elided in this chunk. */
3722 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3724 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3727 int len, next_tx_seq_offset, req_seq_offset;
3729 control = get_unaligned_le16(skb->data);
3734 * We can just drop the corrupted I-frame here.
3735 * Receiver will miss it and start proper recovery
3736 * procedures and ask retransmission.
3738 if (l2cap_check_fcs(chan, skb))
/* SAR-start I-frames carry a 2-byte SDU length — account for it. */
3741 if (__is_sar_start(control) && __is_iframe(control))
3744 if (chan->fcs == L2CAP_FCS_CRC16)
3747 if (len > chan->mps) {
3748 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* ReqSeq must fall between expected_ack_seq and next_tx_seq
 * (modulo-64): it cannot ack frames we never sent. */
3752 req_seq = __get_reqseq(control);
3753 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3754 if (req_seq_offset < 0)
3755 req_seq_offset += 64;
3757 next_tx_seq_offset =
3758 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3759 if (next_tx_seq_offset < 0)
3760 next_tx_seq_offset += 64;
3762 /* check for invalid req-seq */
3763 if (req_seq_offset > next_tx_seq_offset) {
3764 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3768 if (__is_iframe(control)) {
3770 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3774 l2cap_data_channel_iframe(chan, control, skb);
3778 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3782 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver a data frame to the channel identified by @cid, handling
 * each channel mode: Basic (direct delivery with MTU check), ERTM
 * (full state machine, deferred to the socket backlog when the sock
 * is owned by user context), and Streaming (FCS + sequence resync,
 * no retransmission).
 * NOTE(review): lock/unlock, drop labels and several skb_pull lines
 * are elided in this chunk. */
3792 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3794 struct l2cap_chan *chan;
3795 struct sock *sk = NULL;
3800 chan = l2cap_get_chan_by_scid(conn, cid);
3802 BT_DBG("unknown cid 0x%4.4x", cid);
3808 BT_DBG("chan %p, len %d", chan, skb->len);
3810 if (chan->state != BT_CONNECTED)
3813 switch (chan->mode) {
3814 case L2CAP_MODE_BASIC:
3815 /* If socket recv buffers overflows we drop data here
3816 * which is *bad* because L2CAP has to be reliable.
3817 * But we don't have any other choice. L2CAP doesn't
3818 * provide flow control mechanism. */
3820 if (chan->imtu < skb->len)
3823 if (!chan->ops->recv(chan->data, skb))
3827 case L2CAP_MODE_ERTM:
/* Process inline unless user context holds the socket, in which
 * case queue to the backlog for later processing. */
3828 if (!sock_owned_by_user(sk)) {
3829 l2cap_ertm_data_rcv(sk, skb);
3831 if (sk_add_backlog(sk, skb))
3837 case L2CAP_MODE_STREAMING:
3838 control = get_unaligned_le16(skb->data);
3842 if (l2cap_check_fcs(chan, skb))
3845 if (__is_sar_start(control))
3848 if (chan->fcs == L2CAP_FCS_CRC16)
/* Streaming never carries S-frames; oversized frames are dropped. */
3851 if (len > chan->mps || len < 0 || __is_sframe(control))
3854 tx_seq = __get_txseq(control);
/* Accept any tx_seq: lost frames are simply skipped over. */
3856 if (chan->expected_tx_seq == tx_seq)
3857 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3859 chan->expected_tx_seq = (tx_seq + 1) % 64;
3861 l2cap_streaming_reassembly_sdu(chan, skb, control);
3866 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (G-frame) packet: find a channel bound to
 * @psm on our source address and pass the data up if the channel is
 * bound/connected and the payload fits the incoming MTU.
 * NOTE(review): sk assignment and drop/done labels are elided here. */
3880 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3882 struct sock *sk = NULL;
3883 struct l2cap_chan *chan;
3885 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3893 BT_DBG("sk %p, len %d", sk, skb->len);
3895 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3898 if (chan->imtu < skb->len)
3901 if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT (LE fixed channel) packet: locate a channel bound to
 * @cid on our source address, then deliver subject to the same state
 * and MTU checks as the connectionless path.
 * NOTE(review): sk assignment and drop/done labels are elided here. */
3913 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3915 struct sock *sk = NULL;
3916 struct l2cap_chan *chan;
3918 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3926 BT_DBG("sk %p, len %d", sk, skb->len);
3928 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3931 if (chan->imtu < skb->len)
3934 if (!chan->ops->recv(chan->data, skb))
/* Top-level demultiplexer for a complete L2CAP frame: strip the basic
 * header, validate the advertised length, and route by CID —
 * signaling, connectionless, ATT/LE data, SMP, or a dynamic data
 * channel. */
3946 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3948 struct l2cap_hdr *lh = (void *) skb->data;
3952 skb_pull(skb, L2CAP_HDR_SIZE);
3953 cid = __le16_to_cpu(lh->cid);
3954 len = __le16_to_cpu(lh->len);
/* Header length must match the payload exactly for a complete frame. */
3956 if (len != skb->len) {
3961 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3964 case L2CAP_CID_LE_SIGNALING:
3965 case L2CAP_CID_SIGNALING:
3966 l2cap_sig_channel(conn, skb);
3969 case L2CAP_CID_CONN_LESS:
/* First two payload bytes of a G-frame carry the PSM. */
3970 psm = get_unaligned_le16(skb->data);
3972 l2cap_conless_channel(conn, psm, skb);
3975 case L2CAP_CID_LE_DATA:
3976 l2cap_att_channel(conn, cid, skb);
/* SMP fixed channel (case label elided): a failure kills the link. */
3980 if (smp_sig_channel(conn, skb))
3981 l2cap_conn_del(conn->hcon, EACCES);
3985 l2cap_data_channel(conn, cid, skb);
3990 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection.
 * Scans listening channels — an exact source-address match (lm1)
 * takes priority over wildcard BDADDR_ANY listeners (lm2) — and
 * returns the accumulated link-mode flags. */
3992 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3994 int exact = 0, lm1 = 0, lm2 = 0;
3995 struct l2cap_chan *c;
3997 if (type != ACL_LINK)
4000 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4002 /* Find listening sockets and check their link_mode */
4003 read_lock(&chan_list_lock);
4004 list_for_each_entry(c, &chan_list, global_l) {
4005 struct sock *sk = c->sk;
4007 if (c->state != BT_LISTEN)
/* Exact match on the adapter's own address wins over wildcard. */
4010 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4011 lm1 |= HCI_LM_ACCEPT;
4013 lm1 |= HCI_LM_MASTER;
4015 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4016 lm2 |= HCI_LM_ACCEPT;
4018 lm2 |= HCI_LM_MASTER;
4021 read_unlock(&chan_list_lock);
4023 return exact ? lm1 : lm2;
/* HCI callback: an ACL/LE connection attempt completed.  On success,
 * create the l2cap_conn and mark it ready; on failure, tear down any
 * existing state with the mapped errno. */
4026 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4028 struct l2cap_conn *conn;
4030 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4032 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4036 conn = l2cap_conn_add(hcon, status);
4038 l2cap_conn_ready(conn);
4040 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the HCI reason code to use when the remote is
 * disconnected, taken from the connection's stored disc_reason. */
4045 static int l2cap_disconn_ind(struct hci_conn *hcon)
4047 struct l2cap_conn *conn = hcon->l2cap_data;
4049 BT_DBG("hcon %p", hcon);
4051 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4054 return conn->disc_reason;
/* HCI callback: the link went down — destroy all L2CAP state for this
 * connection with the HCI reason mapped to an errno. */
4057 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4059 BT_DBG("hcon %p reason %d", hcon, reason);
4061 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4064 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption gives a MEDIUM-security channel a 5s grace timer
 * and closes a HIGH-security channel outright; regaining it cancels
 * the grace timer. */
4069 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4071 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4074 if (encrypt == 0x00) {
4075 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4076 __clear_chan_timer(chan);
/* 5 second grace period before giving up on re-encryption. */
4077 __set_chan_timer(chan, HZ * 5);
4078 } else if (chan->sec_level == BT_SECURITY_HIGH)
4079 l2cap_chan_close(chan, ECONNREFUSED);
4081 if (chan->sec_level == BT_SECURITY_MEDIUM)
4082 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption completed with @status.
 * Walks every channel on the connection and advances its state
 * machine — LE channels become ready and trigger SMP key
 * distribution; BR/EDR channels in BT_CONNECT send the pending
 * Connection Request, and those in BT_CONNECT2 answer the remote's
 * request (pending/success/security-block depending on status and
 * defer_setup).
 * NOTE(review): several bh_lock/unlock and continue lines are elided
 * in this chunk. */
4086 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4088 struct l2cap_conn *conn = hcon->l2cap_data;
4089 struct l2cap_chan *chan;
4094 BT_DBG("conn %p", conn);
4096 read_lock(&conn->chan_lock);
4098 list_for_each_entry(chan, &conn->chan_l, list) {
4099 struct sock *sk = chan->sk;
4103 BT_DBG("chan->scid %d", chan->scid);
/* LE data channel: successful encryption makes it ready. */
4105 if (chan->scid == L2CAP_CID_LE_DATA) {
4106 if (!status && encrypt) {
4107 chan->sec_level = hcon->sec_level;
4108 del_timer(&conn->security_timer);
4109 l2cap_chan_ready(sk);
4110 smp_distribute_keys(conn, 0);
/* A connect is already pending for this channel — skip it. */
4117 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4122 if (!status && (chan->state == BT_CONNECTED ||
4123 chan->state == BT_CONFIG)) {
4124 l2cap_check_encryption(chan, encrypt);
/* Outgoing connect was waiting on security: send the request now. */
4129 if (chan->state == BT_CONNECT) {
4131 struct l2cap_conn_req req;
4132 req.scid = cpu_to_le16(chan->scid);
4133 req.psm = chan->psm;
4135 chan->ident = l2cap_get_ident(conn);
4136 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4138 l2cap_send_cmd(conn, chan->ident,
4139 L2CAP_CONN_REQ, sizeof(req), &req);
/* Security failed: short timer to tear the channel down. */
4141 __clear_chan_timer(chan);
4142 __set_chan_timer(chan, HZ / 10);
4144 } else if (chan->state == BT_CONNECT2) {
4145 struct l2cap_conn_rsp rsp;
/* defer_setup keeps the response pending until userspace accepts. */
4149 if (bt_sk(sk)->defer_setup) {
4150 struct sock *parent = bt_sk(sk)->parent;
4151 res = L2CAP_CR_PEND;
4152 stat = L2CAP_CS_AUTHOR_PEND;
4154 parent->sk_data_ready(parent, 0);
4156 l2cap_state_change(chan, BT_CONFIG);
4157 res = L2CAP_CR_SUCCESS;
4158 stat = L2CAP_CS_NO_INFO;
4161 l2cap_state_change(chan, BT_DISCONN);
4162 __set_chan_timer(chan, HZ / 10);
4163 res = L2CAP_CR_SEC_BLOCK;
4164 stat = L2CAP_CS_NO_INFO;
4167 rsp.scid = cpu_to_le16(chan->dcid);
4168 rsp.dcid = cpu_to_le16(chan->scid);
4169 rsp.result = cpu_to_le16(res);
4170 rsp.status = cpu_to_le16(stat);
4171 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4178 read_unlock(&conn->chan_lock);
/* HCI callback: reassemble fragmented ACL data into complete L2CAP
 * frames.  A start fragment (ACL_CONT clear) reads the basic header
 * to learn the total length and allocates conn->rx_skb; continuation
 * fragments are appended until rx_len reaches zero, then the frame is
 * dispatched via l2cap_recv_frame().  Length inconsistencies mark the
 * connection unreliable (ECOMM).
 * NOTE(review): goto/drop labels and some braces are elided in this
 * chunk. */
4183 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4185 struct l2cap_conn *conn = hcon->l2cap_data;
4188 conn = l2cap_conn_add(hcon, 0);
4193 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4195 if (!(flags & ACL_CONT)) {
4196 struct l2cap_hdr *hdr;
4197 struct l2cap_chan *chan;
/* A start fragment while a reassembly is in progress means the
 * previous frame was truncated — discard it. */
4202 BT_ERR("Unexpected start frame (len %d)", skb->len);
4203 kfree_skb(conn->rx_skb);
4204 conn->rx_skb = NULL;
4206 l2cap_conn_unreliable(conn, ECOMM);
4209 /* Start fragment always begin with Basic L2CAP header */
4210 if (skb->len < L2CAP_HDR_SIZE) {
4211 BT_ERR("Frame is too short (len %d)", skb->len);
4212 l2cap_conn_unreliable(conn, ECOMM);
4216 hdr = (struct l2cap_hdr *) skb->data;
4217 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4218 cid = __le16_to_cpu(hdr->cid);
4220 if (len == skb->len) {
4221 /* Complete frame received */
4222 l2cap_recv_frame(conn, skb);
4226 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4228 if (skb->len > len) {
4229 BT_ERR("Frame is too long (len %d, expected len %d)",
4231 l2cap_conn_unreliable(conn, ECOMM);
/* Early MTU check against the destination channel, if known. */
4235 chan = l2cap_get_chan_by_scid(conn, cid);
4237 if (chan && chan->sk) {
4238 struct sock *sk = chan->sk;
4240 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4241 BT_ERR("Frame exceeding recv MTU (len %d, "
4245 l2cap_conn_unreliable(conn, ECOMM);
4251 /* Allocate skb for the complete frame (with header) */
4252 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4256 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4258 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4260 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4262 if (!conn->rx_len) {
4263 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4264 l2cap_conn_unreliable(conn, ECOMM);
4268 if (skb->len > conn->rx_len) {
4269 BT_ERR("Fragment is too long (len %d, expected %d)",
4270 skb->len, conn->rx_len);
4271 kfree_skb(conn->rx_skb);
4272 conn->rx_skb = NULL;
4274 l2cap_conn_unreliable(conn, ECOMM);
4278 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4280 conn->rx_len -= skb->len;
4282 if (!conn->rx_len) {
4283 /* Complete frame received */
4284 l2cap_recv_frame(conn, conn->rx_skb);
4285 conn->rx_skb = NULL;
/* seq_file callback backing /sys/kernel/debug/bluetooth/l2cap: dump
 * one line per registered channel (addresses, state, PSM, CIDs, MTUs,
 * security level, mode). */
4294 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4296 struct l2cap_chan *c;
4298 read_lock_bh(&chan_list_lock);
4300 list_for_each_entry(c, &chan_list, global_l) {
4301 struct sock *sk = c->sk;
4303 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4304 batostr(&bt_sk(sk)->src),
4305 batostr(&bt_sk(sk)->dst),
4306 c->state, __le16_to_cpu(c->psm),
4307 c->scid, c->dcid, c->imtu, c->omtu,
4308 c->sec_level, c->mode);
4311 read_unlock_bh(&chan_list_lock);
/* debugfs open hook: bind the seq_file single-shot show routine. */
4316 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4318 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (read-only seq_file). */
4321 static const struct file_operations l2cap_debugfs_fops = {
4322 .open = l2cap_debugfs_open,
4324 .llseek = seq_lseek,
4325 .release = single_release,
/* Dentry for the debugfs file; created in l2cap_init, removed in exit. */
4328 static struct dentry *l2cap_debugfs;
/* Registration record hooking L2CAP into the HCI layer's protocol
 * callback table. */
4330 static struct hci_proto l2cap_hci_proto = {
4332 .id = HCI_PROTO_L2CAP,
4333 .connect_ind = l2cap_connect_ind,
4334 .connect_cfm = l2cap_connect_cfm,
4335 .disconn_ind = l2cap_disconn_ind,
4336 .disconn_cfm = l2cap_disconn_cfm,
4337 .security_cfm = l2cap_security_cfm,
4338 .recv_acldata = l2cap_recv_acldata
/* Module init: register the socket family, then the HCI protocol,
 * then (best-effort) the debugfs file.  Socket registration is rolled
 * back if protocol registration fails. */
4341 int __init l2cap_init(void)
4345 err = l2cap_init_sockets();
4349 err = hci_register_proto(&l2cap_hci_proto);
4351 BT_ERR("L2CAP protocol registration failed");
/* Roll back the socket registration done above. */
4352 bt_sock_unregister(BTPROTO_L2CAP);
/* debugfs failure is non-fatal: only an error message is logged. */
4357 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4358 bt_debugfs, NULL, &l2cap_debugfs_fops);
4360 BT_ERR("Failed to create L2CAP debug file");
4366 l2cap_cleanup_sockets();
/* Module exit: undo l2cap_init in reverse order — debugfs file,
 * HCI protocol, then sockets. */
4370 void l2cap_exit(void)
4372 debugfs_remove(l2cap_debugfs);
4374 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4375 BT_ERR("L2CAP protocol unregistration failed");
4377 l2cap_cleanup_sockets();
/* Runtime knob (writable via sysfs) to disable ERTM negotiation. */
4380 module_param(disable_ertm, bool, 0644);
4381 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");