2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 static struct workqueue_struct *_busy_wq;
66 DEFINE_RWLOCK(chan_list_lock);
68 static void l2cap_busy_work(struct work_struct *work);
70 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
71 u8 code, u8 ident, u16 dlen, void *data);
72 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
74 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
75 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
76 struct l2cap_chan *chan, int err);
78 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
80 /* ---- L2CAP channels ---- */
/* Look up a channel on this connection by destination CID (DCID).
 * Double-underscore prefix suggests callers hold conn->chan_lock, mirroring
 * the locked SCID wrapper below — TODO confirm against full source.
 * NOTE(review): interior lines (match test, return) are missing from this extract. */
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
85 list_for_each_entry(c, &conn->chan_l, list) {
/* Look up a channel on this connection by source CID (SCID).
 * Unlocked variant: the wrapper below takes conn->chan_lock around this walk.
 * NOTE(review): interior lines are missing from this extract. */
93 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
97 list_for_each_entry(c, &conn->chan_l, list) {
104 /* Find channel with given SCID.
105 * Returns locked socket */
/* Locked wrapper: holds conn->chan_lock (read side) across the lookup.
 * NOTE(review): the lock-the-socket and return lines are missing here. */
106 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
108 struct l2cap_chan *c;
110 read_lock(&conn->chan_lock);
111 c = __l2cap_get_chan_by_scid(conn, cid);
114 read_unlock(&conn->chan_lock);
/* Look up a channel by the signalling-command identifier it last used.
 * Unlocked variant; the inline wrapper below adds conn->chan_lock. */
118 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
120 struct l2cap_chan *c;
122 list_for_each_entry(c, &conn->chan_l, list) {
123 if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
129 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
131 struct l2cap_chan *c;
133 read_lock(&conn->chan_lock);
134 c = __l2cap_get_chan_by_ident(conn, ident);
137 read_unlock(&conn->chan_lock);
/* Walk the global channel list for a channel bound to (psm, src address).
 * Matches on c->sport, so "psm" here is really the bound source port.
 * Caller presumably holds chan_list_lock — confirm; l2cap_add_psm below does. */
141 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
143 struct l2cap_chan *c;
145 list_for_each_entry(c, &chan_list, global_l) {
146 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a channel to a PSM under chan_list_lock.
 * If a specific psm is requested and already taken on this address, the
 * (missing) branch presumably fails with EADDRINUSE — TODO confirm.
 * Otherwise scans odd dynamic PSMs 0x1001..0x10ff (step 2 keeps the LSB set,
 * as the L2CAP spec requires for valid PSMs). */
155 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
159 write_lock_bh(&chan_list_lock);
161 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
174 for (p = 0x1001; p < 0x1100; p += 2)
175 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
176 chan->psm = cpu_to_le16(p);
177 chan->sport = cpu_to_le16(p);
184 write_unlock_bh(&chan_list_lock);
/* Assign a fixed SCID to the channel; the assignment line itself is
 * missing from this extract, only the locking remains visible. */
188 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
190 write_lock_bh(&chan_list_lock);
194 write_unlock_bh(&chan_list_lock);
/* Allocate the first free dynamic CID on this connection by linear scan
 * over [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). Returns 0 (presumably)
 * when the range is exhausted — the fallback return is not visible here. */
199 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
201 u16 cid = L2CAP_CID_DYN_START;
203 for (; cid < L2CAP_CID_DYN_END; cid++) {
204 if (!__l2cap_get_chan_by_scid(conn, cid))
/* (Re)arm the per-channel timer for `timeout` jiffies from now.
 * NOTE(review): the BT_DBG format says "chan %p" but passes chan->sk —
 * debug output prints the socket pointer under the label "chan". */
211 static void l2cap_chan_set_timer(struct l2cap_chan *chan, long timeout)
213 BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->sk->sk_state,
215 if (!mod_timer(&chan->chan_timer, jiffies + timeout))
/* Cancel the channel timer. If a pending timer was successfully deleted,
 * drop the socket reference — presumably the one taken when the timer was
 * armed (TODO confirm the matching sock_hold in the full source). */
219 void l2cap_chan_clear_timer(struct l2cap_chan *chan)
221 BT_DBG("chan %p state %d", chan, chan->sk->sk_state);
223 if (timer_pending(&chan->chan_timer) && del_timer(&chan->chan_timer))
224 __sock_put(chan->sk);
/* Channel timer expiry handler (runs in timer/softirq context).
 * If userspace owns the socket lock, retries shortly (HZ/5) instead of
 * racing. Otherwise picks a close reason from the socket state and closes
 * the channel. The default-reason branch (presumably ETIMEDOUT) and the
 * socket lock/unlock lines are missing from this extract. */
227 static void l2cap_chan_timeout(unsigned long arg)
229 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
230 struct sock *sk = chan->sk;
233 BT_DBG("chan %p state %d", chan, sk->sk_state);
237 if (sock_owned_by_user(sk)) {
238 /* sk is owned by user. Try again later */
239 l2cap_chan_set_timer(chan, HZ / 5);
245 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
246 reason = ECONNREFUSED;
247 else if (sk->sk_state == BT_CONNECT &&
248 chan->sec_level != BT_SECURITY_SDP)
249 reason = ECONNREFUSED;
253 __l2cap_chan_close(chan, reason);
/* Allocate a new channel bound to socket sk, link it onto the global
 * channel list, and initialise its timer. GFP_ATOMIC: may be called from
 * non-sleeping context. The NULL-check after kzalloc and the chan->sk
 * assignment are missing from this extract. */
261 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
263 struct l2cap_chan *chan;
265 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
271 write_lock_bh(&chan_list_lock);
272 list_add(&chan->global_l, &chan_list);
273 write_unlock_bh(&chan_list_lock);
275 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
/* Unlink the channel from the global list; the kfree(chan) presumably
 * follows on a line not visible in this extract. */
280 void l2cap_chan_destroy(struct l2cap_chan *chan)
282 write_lock_bh(&chan_list_lock);
283 list_del(&chan->global_l);
284 write_unlock_bh(&chan_list_lock);
/* Attach a channel to a connection and assign CIDs/MTU by channel type:
 *  - conn-oriented over LE: fixed LE data CID, LE default MTU;
 *  - conn-oriented over ACL: dynamically allocated SCID;
 *  - connectionless: fixed CID 0x0002;
 *  - otherwise (raw): signalling CID 0x0001.
 * disc_reason 0x13 = "remote user terminated connection" default.
 * Caller holds conn->chan_lock (see l2cap_chan_add wrapper below).
 * NOTE(review): chan->conn assignment and refcount lines are missing here. */
289 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
291 struct sock *sk = chan->sk;
293 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
294 chan->psm, chan->dcid);
296 conn->disc_reason = 0x13;
300 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
301 if (conn->hcon->type == LE_LINK) {
303 chan->omtu = L2CAP_LE_DEFAULT_MTU;
304 chan->scid = L2CAP_CID_LE_DATA;
305 chan->dcid = L2CAP_CID_LE_DATA;
307 /* Alloc CID for connection-oriented socket */
308 chan->scid = l2cap_alloc_cid(conn);
309 chan->omtu = L2CAP_DEFAULT_MTU;
311 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
312 /* Connectionless socket */
313 chan->scid = L2CAP_CID_CONN_LESS;
314 chan->dcid = L2CAP_CID_CONN_LESS;
315 chan->omtu = L2CAP_DEFAULT_MTU;
317 /* Raw socket can send/recv signalling messages only */
318 chan->scid = L2CAP_CID_SIGNALING;
319 chan->dcid = L2CAP_CID_SIGNALING;
320 chan->omtu = L2CAP_DEFAULT_MTU;
325 list_add(&chan->list, &conn->chan_l);
329 * Must be called on the locked socket. */
/* Detach a channel from its connection and tear down per-mode state:
 * unlinks from conn->chan_l, drops the hcon reference, marks the socket
 * closed/zapped and notifies parent (accept queue) or the socket itself,
 * then — unless configuration never completed — purges the tx queue and,
 * for ERTM, kills retrans/monitor/ack timers, purges srej/busy queues and
 * frees the srej list. Several lines (err assignment, list_del/kfree in the
 * srej loop, closing braces) are missing from this extract. */
330 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
332 struct sock *sk = chan->sk;
333 struct l2cap_conn *conn = chan->conn;
334 struct sock *parent = bt_sk(sk)->parent;
336 l2cap_chan_clear_timer(chan);
338 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
341 /* Delete from channel list */
342 write_lock_bh(&conn->chan_lock);
343 list_del(&chan->list);
344 write_unlock_bh(&conn->chan_lock);
348 hci_conn_put(conn->hcon);
351 sk->sk_state = BT_CLOSED;
352 sock_set_flag(sk, SOCK_ZAPPED);
358 bt_accept_unlink(sk);
359 parent->sk_data_ready(parent, 0);
361 sk->sk_state_change(sk);
363 if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE &&
364 chan->conf_state & L2CAP_CONF_INPUT_DONE))
367 skb_queue_purge(&chan->tx_q);
369 if (chan->mode == L2CAP_MODE_ERTM) {
370 struct srej_list *l, *tmp;
372 del_timer(&chan->retrans_timer);
373 del_timer(&chan->monitor_timer);
374 del_timer(&chan->ack_timer);
376 skb_queue_purge(&chan->srej_q);
377 skb_queue_purge(&chan->busy_q);
379 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
386 /* Must be called on unlocked socket. */
/* Cancel the channel timer and force-close with ECONNRESET; the socket
 * lock/unlock presumably bracket the close on the missing lines. */
387 static void l2cap_chan_close(struct sock *sk)
389 l2cap_chan_clear_timer(l2cap_pi(sk)->chan);
391 __l2cap_chan_close(l2cap_pi(sk)->chan, ECONNRESET);
/* Tear down a listening socket: close every not-yet-accepted child pulled
 * off the accept queue, then mark the parent closed and zapped. */
396 static void l2cap_chan_cleanup_listen(struct sock *parent)
400 BT_DBG("parent %p", parent);
402 /* Close not yet accepted channels */
403 while ((sk = bt_accept_dequeue(parent, NULL)))
404 l2cap_chan_close(sk);
406 parent->sk_state = BT_CLOSED;
407 sock_set_flag(parent, SOCK_ZAPPED);
/* State-driven channel close (switch on sk->sk_state; case labels are
 * missing from this extract, only the bodies remain):
 *  - listening: clean up the accept queue;
 *  - connected/config (conn-oriented ACL): arm the timer and send a
 *    Disconnect Request to the peer;
 *  - connect2 (conn-oriented ACL): reply to the pending Connect Request
 *    with SEC_BLOCK (if setup was deferred) or BAD_PSM, then delete;
 *  - other states: delete the channel, or just zap the socket. */
410 void __l2cap_chan_close(struct l2cap_chan *chan, int reason)
412 struct l2cap_conn *conn = chan->conn;
413 struct sock *sk = chan->sk;
415 BT_DBG("chan %p state %d socket %p", chan, sk->sk_state, sk->sk_socket);
417 switch (sk->sk_state) {
419 l2cap_chan_cleanup_listen(sk);
424 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
425 conn->hcon->type == ACL_LINK) {
426 l2cap_chan_set_timer(chan, sk->sk_sndtimeo);
427 l2cap_send_disconn_req(conn, chan, reason);
429 l2cap_chan_del(chan, reason);
433 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
434 conn->hcon->type == ACL_LINK) {
435 struct l2cap_conn_rsp rsp;
438 if (bt_sk(sk)->defer_setup)
439 result = L2CAP_CR_SEC_BLOCK;
441 result = L2CAP_CR_BAD_PSM;
443 rsp.scid = cpu_to_le16(chan->dcid);
444 rsp.dcid = cpu_to_le16(chan->scid);
445 rsp.result = cpu_to_le16(result);
446 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
447 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
451 l2cap_chan_del(chan, reason);
456 l2cap_chan_del(chan, reason);
460 sock_set_flag(sk, SOCK_ZAPPED);
/* Map channel type / PSM / security level to an HCI authentication type:
 *  - raw channels: dedicated bonding (MITM for HIGH);
 *  - PSM 0x0001 (SDP): never more than no-bonding, and LOW is downgraded
 *    to the special SDP security level;
 *  - everything else: general bonding (MITM for HIGH). */
465 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
467 if (chan->chan_type == L2CAP_CHAN_RAW) {
468 switch (chan->sec_level) {
469 case BT_SECURITY_HIGH:
470 return HCI_AT_DEDICATED_BONDING_MITM;
471 case BT_SECURITY_MEDIUM:
472 return HCI_AT_DEDICATED_BONDING;
474 return HCI_AT_NO_BONDING;
476 } else if (chan->psm == cpu_to_le16(0x0001)) {
477 if (chan->sec_level == BT_SECURITY_LOW)
478 chan->sec_level = BT_SECURITY_SDP;
480 if (chan->sec_level == BT_SECURITY_HIGH)
481 return HCI_AT_NO_BONDING_MITM;
483 return HCI_AT_NO_BONDING;
485 switch (chan->sec_level) {
486 case BT_SECURITY_HIGH:
487 return HCI_AT_GENERAL_BONDING_MITM;
488 case BT_SECURITY_MEDIUM:
489 return HCI_AT_GENERAL_BONDING;
491 return HCI_AT_NO_BONDING;
496 /* Service level security */
/* Enforce the channel's security level on the underlying HCI link;
 * returns hci_conn_security()'s verdict. */
497 static inline int l2cap_check_security(struct l2cap_chan *chan)
499 struct l2cap_conn *conn = chan->conn;
502 auth_type = l2cap_get_auth_type(chan);
504 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier under conn->lock.
 * Wraps within 1..128 (kernel range; 129+ reserved for userspace tools);
 * the wrap-to-1 assignment is on a line missing from this extract. */
507 u8 l2cap_get_ident(struct l2cap_conn *conn)
511 /* Get next available identificator.
512 * 1 - 128 are used by kernel.
513 * 129 - 199 are reserved.
514 * 200 - 254 are used by utilities like l2ping, etc.
517 spin_lock_bh(&conn->lock);
519 if (++conn->tx_ident > 128)
524 spin_unlock_bh(&conn->lock);
/* Build and transmit an L2CAP signalling command on the ACL link.
 * Uses the no-auto-flush ACL flag when the controller supports it; the
 * skb NULL-check and the default-flags assignment are not visible here. */
529 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
531 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
534 BT_DBG("code 0x%2.2x", code);
539 if (lmp_no_flush_capable(conn->hcon->hdev))
540 flags = ACL_START_NO_FLUSH;
544 hci_send_acl(conn->hcon, skb, flags);
/* Build and send an ERTM S-frame (supervisory frame) carrying `control`.
 * Adds the Final bit if a SEND_FBIT is pending and the Poll bit if a
 * SEND_PBIT is pending (both one-shot flags). hlen starts at header + 2
 * bytes of control; the "hlen += 2" for FCS is presumably on the missing
 * line after the first CRC16 test — TODO confirm. Only sends while the
 * socket is BT_CONNECTED. */
547 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
550 struct l2cap_hdr *lh;
551 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
552 struct l2cap_conn *conn = chan->conn;
553 struct sock *sk = (struct sock *)pi;
554 int count, hlen = L2CAP_HDR_SIZE + 2;
557 if (sk->sk_state != BT_CONNECTED)
560 if (chan->fcs == L2CAP_FCS_CRC16)
563 BT_DBG("chan %p, control 0x%2.2x", chan, control);
565 count = min_t(unsigned int, conn->mtu, hlen);
566 control |= L2CAP_CTRL_FRAME_TYPE;
568 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
569 control |= L2CAP_CTRL_FINAL;
570 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
573 if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
574 control |= L2CAP_CTRL_POLL;
575 chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
578 skb = bt_skb_alloc(count, GFP_ATOMIC);
582 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
583 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
584 lh->cid = cpu_to_le16(chan->dcid);
585 put_unaligned_le16(control, skb_put(skb, 2));
587 if (chan->fcs == L2CAP_FCS_CRC16) {
588 u16 fcs = crc16(0, (u8 *)lh, count - 2);
589 put_unaligned_le16(fcs, skb_put(skb, 2));
592 if (lmp_no_flush_capable(conn->hcon->hdev))
593 flags = ACL_START_NO_FLUSH;
597 hci_send_acl(chan->conn->hcon, skb, flags);
/* Send RR (Receiver Ready) or RNR (Receiver Not Ready) depending on the
 * local-busy state, stamping the current buffer_seq as ReqSeq.
 * Sets RNR_SENT so the busy-clear path knows an RNR went out. */
600 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
602 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
603 control |= L2CAP_SUPER_RCV_NOT_READY;
604 chan->conn_state |= L2CAP_CONN_RNR_SENT;
606 control |= L2CAP_SUPER_RCV_READY;
608 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
610 l2cap_send_sframe(chan, control);
/* True when no Connect Request is currently outstanding for this channel. */
613 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
615 return !(chan->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment. If the remote feature mask has already
 * been requested (and received), and security passes with no connect
 * pending, send a Connect Request. Otherwise first issue an Information
 * Request (feature mask) with the info timer armed; the Connect Request
 * is retried from l2cap_conn_start() once the response arrives. */
618 static void l2cap_do_start(struct l2cap_chan *chan)
620 struct l2cap_conn *conn = chan->conn;
622 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
623 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
626 if (l2cap_check_security(chan) &&
627 __l2cap_no_conn_pending(chan)) {
628 struct l2cap_conn_req req;
629 req.scid = cpu_to_le16(chan->scid);
632 chan->ident = l2cap_get_ident(conn);
633 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
635 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
639 struct l2cap_info_req req;
640 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
642 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
643 conn->info_ident = l2cap_get_ident(conn);
645 mod_timer(&conn->info_timer, jiffies +
646 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
648 l2cap_send_cmd(conn, conn->info_ident,
649 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether `mode` (ERTM/streaming) is supported by BOTH the local
 * feature mask and the remote `feat_mask`. The condition guarding the
 * local-mask OR-in (presumably an "enable_ertm"-style toggle) is on a
 * line missing from this extract. */
653 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
655 u32 local_feat_mask = l2cap_feat_mask;
657 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
660 case L2CAP_MODE_ERTM:
661 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
662 case L2CAP_MODE_STREAMING:
663 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect Request for the channel and move the socket to
 * BT_DISCONN. For ERTM channels, first stop all mode timers so no
 * retransmission fires mid-teardown. sk initialisation and the use of
 * `err` (presumably stored as sk->sk_err) are on missing lines. */
669 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
672 struct l2cap_disconn_req req;
679 if (chan->mode == L2CAP_MODE_ERTM) {
680 del_timer(&chan->retrans_timer);
681 del_timer(&chan->monitor_timer);
682 del_timer(&chan->ack_timer);
685 req.dcid = cpu_to_le16(chan->dcid);
686 req.scid = cpu_to_le16(chan->scid);
687 l2cap_send_cmd(conn, l2cap_get_ident(conn),
688 L2CAP_DISCONN_REQ, sizeof(req), &req);
690 sk->sk_state = BT_DISCONN;
694 /* ---- L2CAP connections ---- */
/* Drive every channel on the connection forward once the feature-mask
 * exchange completes: BT_CONNECT channels get a Connect Request (or are
 * closed if their mode is unsupported and STATE2_DEVICE is set);
 * BT_CONNECT2 channels get a Connect Response (success, authorization-
 * pending for deferred setup, or authentication-pending) possibly followed
 * by the first Configure Request.
 * NOTE(review): the list is entered under read_lock() but the close path
 * drops/retakes with the _bh variants — inconsistent lock flavors; verify
 * against the calling context in the full source. */
695 static void l2cap_conn_start(struct l2cap_conn *conn)
697 struct l2cap_chan *chan, *tmp;
699 BT_DBG("conn %p", conn);
701 read_lock(&conn->chan_lock);
703 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
704 struct sock *sk = chan->sk;
708 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
713 if (sk->sk_state == BT_CONNECT) {
714 struct l2cap_conn_req req;
716 if (!l2cap_check_security(chan) ||
717 !__l2cap_no_conn_pending(chan)) {
722 if (!l2cap_mode_supported(chan->mode,
724 && chan->conf_state &
725 L2CAP_CONF_STATE2_DEVICE) {
726 /* __l2cap_chan_close() calls list_del(chan)
727 * so release the lock */
728 read_unlock_bh(&conn->chan_lock);
729 __l2cap_chan_close(chan, ECONNRESET);
730 read_lock_bh(&conn->chan_lock);
735 req.scid = cpu_to_le16(chan->scid);
738 chan->ident = l2cap_get_ident(conn);
739 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
741 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
744 } else if (sk->sk_state == BT_CONNECT2) {
745 struct l2cap_conn_rsp rsp;
747 rsp.scid = cpu_to_le16(chan->dcid);
748 rsp.dcid = cpu_to_le16(chan->scid);
750 if (l2cap_check_security(chan)) {
751 if (bt_sk(sk)->defer_setup) {
752 struct sock *parent = bt_sk(sk)->parent;
753 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
754 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
755 parent->sk_data_ready(parent, 0);
758 sk->sk_state = BT_CONFIG;
759 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
760 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
763 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
764 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
767 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
770 if (chan->conf_state & L2CAP_CONF_REQ_SENT ||
771 rsp.result != L2CAP_CR_SUCCESS) {
776 chan->conf_state |= L2CAP_CONF_REQ_SENT;
777 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
778 l2cap_build_conf_req(chan, buf), buf);
779 chan->num_conf_req++;
785 read_unlock(&conn->chan_lock);
788 /* Find socket with cid and source bdaddr.
789 * Returns closest match, locked.
/* Global lookup by fixed CID + source address, optionally filtered by
 * socket state. Exact-address match returns immediately; a wildcard
 * (BDADDR_ANY) binding is remembered in c1 as the closest match and
 * presumably returned on the missing final line. */
791 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
793 struct l2cap_chan *c, *c1 = NULL;
795 read_lock(&chan_list_lock);
797 list_for_each_entry(c, &chan_list, global_l) {
798 struct sock *sk = c->sk;
800 if (state && sk->sk_state != state)
803 if (c->scid == cid) {
805 if (!bacmp(&bt_sk(sk)->src, src)) {
806 read_unlock(&chan_list_lock);
811 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
816 read_unlock(&chan_list_lock);
/* Incoming LE connection: if something is listening on the LE data CID,
 * spawn a child socket/channel, inherit settings from the parent, attach
 * it to the connection, and signal the listener's data-ready callback.
 * Error paths (alloc failures, backlog full) jump to cleanup labels on
 * lines missing from this extract. */
821 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
823 struct sock *parent, *sk;
824 struct l2cap_chan *chan, *pchan;
828 /* Check if we have socket listening on cid */
829 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
836 bh_lock_sock(parent);
838 /* Check for backlog size */
839 if (sk_acceptq_is_full(parent)) {
840 BT_DBG("backlog full %d", parent->sk_ack_backlog);
844 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
848 chan = l2cap_chan_create(sk);
854 l2cap_pi(sk)->chan = chan;
856 write_lock_bh(&conn->chan_lock);
858 hci_conn_hold(conn->hcon);
860 l2cap_sock_init(sk, parent);
862 bacpy(&bt_sk(sk)->src, conn->src);
863 bacpy(&bt_sk(sk)->dst, conn->dst);
865 bt_accept_enqueue(parent, sk);
867 __l2cap_chan_add(conn, chan);
869 l2cap_chan_set_timer(chan, sk->sk_sndtimeo);
871 sk->sk_state = BT_CONNECTED;
872 parent->sk_data_ready(parent, 0);
874 write_unlock_bh(&conn->chan_lock);
877 bh_unlock_sock(parent);
/* HCI link is up: for incoming LE links, accept pending listeners; then
 * for every channel, either mark it connected directly (LE or
 * non-connection-oriented) or, for an outgoing BT_CONNECT channel,
 * continue establishment via l2cap_do_start(). Per-socket bh_lock/unlock
 * lines are missing from this extract. */
880 static void l2cap_conn_ready(struct l2cap_conn *conn)
882 struct l2cap_chan *chan;
884 BT_DBG("conn %p", conn);
886 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
887 l2cap_le_conn_ready(conn);
889 read_lock(&conn->chan_lock);
891 list_for_each_entry(chan, &conn->chan_l, list) {
892 struct sock *sk = chan->sk;
896 if (conn->hcon->type == LE_LINK) {
897 l2cap_chan_clear_timer(chan);
898 sk->sk_state = BT_CONNECTED;
899 sk->sk_state_change(sk);
902 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
903 l2cap_chan_clear_timer(chan);
904 sk->sk_state = BT_CONNECTED;
905 sk->sk_state_change(sk);
906 } else if (sk->sk_state == BT_CONNECT)
907 l2cap_do_start(chan);
912 read_unlock(&conn->chan_lock);
915 /* Notify sockets that we cannot guaranty reliability anymore */
/* For each channel demanding reliability (force_reliable), report `err`
 * on its socket — the sk_err assignment is on a line missing here. */
916 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
918 struct l2cap_chan *chan;
920 BT_DBG("conn %p", conn);
922 read_lock(&conn->chan_lock);
924 list_for_each_entry(chan, &conn->chan_l, list) {
925 struct sock *sk = chan->sk;
927 if (chan->force_reliable)
931 read_unlock(&conn->chan_lock);
/* Information Request timed out: give up waiting for the remote feature
 * mask, mark the exchange done, and resume channel establishment. */
934 static void l2cap_info_timeout(unsigned long arg)
936 struct l2cap_conn *conn = (void *) arg;
938 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
939 conn->info_ident = 0;
941 l2cap_conn_start(conn);
/* Get-or-create the L2CAP connection object for an HCI link. Allocates
 * lazily (GFP_ATOMIC), picks the LE or ACL MTU, wires up addresses, and
 * initialises locks, the channel list, and (for non-LE links) the info
 * timer. The early "return existing conn" and NULL-check lines are
 * missing from this extract. disc_reason 0x13 = remote-user-terminated. */
944 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
946 struct l2cap_conn *conn = hcon->l2cap_data;
951 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
955 hcon->l2cap_data = conn;
958 BT_DBG("hcon %p conn %p", hcon, conn);
960 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
961 conn->mtu = hcon->hdev->le_mtu;
963 conn->mtu = hcon->hdev->acl_mtu;
965 conn->src = &hcon->hdev->bdaddr;
966 conn->dst = &hcon->dst;
970 spin_lock_init(&conn->lock);
971 rwlock_init(&conn->chan_lock);
973 INIT_LIST_HEAD(&conn->chan_l);
975 if (hcon->type != LE_LINK)
976 setup_timer(&conn->info_timer, l2cap_info_timeout,
977 (unsigned long) conn);
979 conn->disc_reason = 0x13;
/* Tear down the L2CAP connection: free any partial reassembly skb, delete
 * and kill every channel's socket with `err`, stop the info timer if an
 * info request was in flight, and detach from the hcon. The per-socket
 * bh_lock/unlock and the final kfree(conn) are on missing lines. */
984 static void l2cap_conn_del(struct hci_conn *hcon, int err)
986 struct l2cap_conn *conn = hcon->l2cap_data;
987 struct l2cap_chan *chan, *l;
993 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
995 kfree_skb(conn->rx_skb);
998 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1001 l2cap_chan_del(chan, err);
1003 l2cap_sock_kill(sk);
1006 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1007 del_timer_sync(&conn->info_timer);
1009 hcon->l2cap_data = NULL;
/* Locked wrapper: attach chan to conn under conn->chan_lock (write side). */
1013 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1015 write_lock_bh(&conn->chan_lock);
1016 __l2cap_chan_add(conn, chan);
1017 write_unlock_bh(&conn->chan_lock);
1020 /* ---- Socket interface ---- */
1022 /* Find socket with psm and source bdaddr.
1023 * Returns closest match.
/* Global lookup by PSM + source address, optionally filtered by socket
 * state. Exact-address match wins; a wildcard (BDADDR_ANY) binding is
 * kept in c1 and presumably returned on the missing final line. */
1025 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1027 struct l2cap_chan *c, *c1 = NULL;
1029 read_lock(&chan_list_lock);
1031 list_for_each_entry(c, &chan_list, global_l) {
1032 struct sock *sk = c->sk;
1034 if (state && sk->sk_state != state)
1037 if (c->psm == psm) {
1039 if (!bacmp(&bt_sk(sk)->src, src)) {
1040 read_unlock(&chan_list_lock);
1045 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1050 read_unlock(&chan_list_lock);
/* Initiate an outgoing channel connection: route to an HCI device for the
 * destination, open an LE link (if dcid is the LE data CID) or an ACL
 * link at the channel's security level, attach the channel to the
 * connection, and arm the channel timer. If the link is already up,
 * non-connection-oriented channels go straight to BT_CONNECTED (subject
 * to the security check) and others proceed via l2cap_do_start().
 * Error unwinding (PTR_ERR checks, `done:` label) is partly on lines
 * missing from this extract. */
1055 int l2cap_chan_connect(struct l2cap_chan *chan)
1057 struct sock *sk = chan->sk;
1058 bdaddr_t *src = &bt_sk(sk)->src;
1059 bdaddr_t *dst = &bt_sk(sk)->dst;
1060 struct l2cap_conn *conn;
1061 struct hci_conn *hcon;
1062 struct hci_dev *hdev;
1066 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1069 hdev = hci_get_route(dst, src);
1071 return -EHOSTUNREACH;
1073 hci_dev_lock_bh(hdev);
1075 auth_type = l2cap_get_auth_type(chan);
1077 if (chan->dcid == L2CAP_CID_LE_DATA)
1078 hcon = hci_connect(hdev, LE_LINK, dst,
1079 chan->sec_level, auth_type);
1081 hcon = hci_connect(hdev, ACL_LINK, dst,
1082 chan->sec_level, auth_type);
1085 err = PTR_ERR(hcon);
1089 conn = l2cap_conn_add(hcon, 0);
1096 /* Update source addr of the socket */
1097 bacpy(src, conn->src);
1099 l2cap_chan_add(conn, chan);
1101 sk->sk_state = BT_CONNECT;
1102 l2cap_chan_set_timer(chan, sk->sk_sndtimeo);
1104 if (hcon->state == BT_CONNECTED) {
1105 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1106 l2cap_chan_clear_timer(chan);
1107 if (l2cap_check_security(chan))
1108 sk->sk_state = BT_CONNECTED;
1110 l2cap_do_start(chan);
1116 hci_dev_unlock_bh(hdev);
/* Block (interruptibly) until every outstanding ERTM frame is acked or
 * the channel detaches. Standard wait-queue loop: aborts on a pending
 * signal or socket error; the timeo initialisation, release/lock of the
 * socket around schedule_timeout(), and the timeout check are on lines
 * missing from this extract. */
1121 int __l2cap_wait_ack(struct sock *sk)
1123 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1124 DECLARE_WAITQUEUE(wait, current);
1128 add_wait_queue(sk_sleep(sk), &wait);
1129 while ((chan->unacked_frames > 0 && chan->conn)) {
1130 set_current_state(TASK_INTERRUPTIBLE);
1135 if (signal_pending(current)) {
1136 err = sock_intr_errno(timeo);
1141 timeo = schedule_timeout(timeo);
1144 err = sock_error(sk);
1148 set_current_state(TASK_RUNNING);
1149 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer expiry: if the peer exceeded remote_max_tx retries,
 * disconnect; otherwise bump the retry count, re-arm the monitor timer,
 * and poll the peer with an RR/RNR carrying the Poll bit. */
1153 static void l2cap_monitor_timeout(unsigned long arg)
1155 struct l2cap_chan *chan = (void *) arg;
1156 struct sock *sk = chan->sk;
1158 BT_DBG("chan %p", chan);
1161 if (chan->retry_count >= chan->remote_max_tx) {
1162 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1167 chan->retry_count++;
1168 __mod_monitor_timer();
1170 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer expiry: start the monitor phase (retry count
 * = 1, monitor timer armed, WAIT_F set) and poll the peer. */
1174 static void l2cap_retrans_timeout(unsigned long arg)
1176 struct l2cap_chan *chan = (void *) arg;
1177 struct sock *sk = chan->sk;
1179 BT_DBG("chan %p", chan);
1182 chan->retry_count = 1;
1183 __mod_monitor_timer();
1185 chan->conn_state |= L2CAP_CONN_WAIT_F;
1187 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Pop acknowledged frames off the head of the tx queue until reaching
 * expected_ack_seq (the break at that match is on a missing line), freeing
 * each skb and decrementing unacked_frames. Once nothing is outstanding,
 * stop the retransmission timer. */
1191 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1193 struct sk_buff *skb;
1195 while ((skb = skb_peek(&chan->tx_q)) &&
1196 chan->unacked_frames) {
1197 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1200 skb = skb_dequeue(&chan->tx_q);
1203 chan->unacked_frames--;
1206 if (!chan->unacked_frames)
1207 del_timer(&chan->retrans_timer);
/* Hand a fully-built L2CAP frame to HCI. Non-flushable channels on
 * flush-capable controllers use the no-auto-flush ACL start flag; the
 * default-flags assignment is on a line missing here. */
1210 void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1212 struct hci_conn *hcon = chan->conn->hcon;
1215 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1217 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1218 flags = ACL_START_NO_FLUSH;
1222 hci_send_acl(hcon, skb, flags);
/* Streaming mode: drain the tx queue, stamping each frame's TxSeq into
 * the control field (and recomputing the trailing CRC16 FCS over all but
 * the last 2 bytes when enabled). Sequence numbers wrap modulo 64
 * (6-bit ERTM/streaming sequence space). No retransmission state kept. */
1225 void l2cap_streaming_send(struct l2cap_chan *chan)
1227 struct sk_buff *skb;
1230 while ((skb = skb_dequeue(&chan->tx_q))) {
1231 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1232 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1233 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1235 if (chan->fcs == L2CAP_FCS_CRC16) {
1236 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1237 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1240 l2cap_do_send(chan, skb);
1242 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* Retransmit the queued I-frame whose stored TxSeq equals tx_seq.
 * Walks the tx queue to find it; gives up (ECONNABORTED disconnect) once
 * bt_cb(skb)->retries hits remote_max_tx. The clone is re-stamped with
 * the current ReqSeq, the requested TxSeq, an optional Final bit, and a
 * fresh FCS, then sent. The clone's NULL-check is on a missing line. */
1246 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1248 struct sk_buff *skb, *tx_skb;
1251 skb = skb_peek(&chan->tx_q);
1256 if (bt_cb(skb)->tx_seq == tx_seq)
1259 if (skb_queue_is_last(&chan->tx_q, skb))
1262 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1264 if (chan->remote_max_tx &&
1265 bt_cb(skb)->retries == chan->remote_max_tx) {
1266 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1270 tx_skb = skb_clone(skb, GFP_ATOMIC);
1271 bt_cb(skb)->retries++;
1272 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1273 control &= L2CAP_CTRL_SAR;
1275 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1276 control |= L2CAP_CTRL_FINAL;
1277 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1280 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1281 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1283 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1285 if (chan->fcs == L2CAP_FCS_CRC16) {
1286 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1287 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1290 l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send frames from tx_send_head while the transmit
 * window has room. Each frame is cloned, stamped with the current ReqSeq,
 * next TxSeq (mod 64) and optional Final bit, FCS'd, and sent; the
 * retransmission timer is re-armed per frame and unacked_frames counts
 * first transmissions. Disconnects when a frame exhausts remote_max_tx.
 * NOTE(review): the FCS lines compute/write via skb->data while every
 * other field uses tx_skb->data. Since skb_clone() shares the data
 * buffer the effect is the same, but this is inconsistent with
 * l2cap_retransmit_one_frame() above — recommend tx_skb->data in both
 * places (upstream later made exactly this change). */
1293 int l2cap_ertm_send(struct l2cap_chan *chan)
1295 struct sk_buff *skb, *tx_skb;
1296 struct sock *sk = chan->sk;
1300 if (sk->sk_state != BT_CONNECTED)
1303 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1305 if (chan->remote_max_tx &&
1306 bt_cb(skb)->retries == chan->remote_max_tx) {
1307 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1311 tx_skb = skb_clone(skb, GFP_ATOMIC);
1313 bt_cb(skb)->retries++;
1315 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1316 control &= L2CAP_CTRL_SAR;
1318 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1319 control |= L2CAP_CTRL_FINAL;
1320 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1322 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1323 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1324 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1327 if (chan->fcs == L2CAP_FCS_CRC16) {
1328 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1329 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1332 l2cap_do_send(chan, tx_skb);
1334 __mod_retrans_timer();
1336 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1337 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1339 if (bt_cb(skb)->retries == 1)
1340 chan->unacked_frames++;
1342 chan->frames_sent++;
1344 if (skb_queue_is_last(&chan->tx_q, skb))
1345 chan->tx_send_head = NULL;
1347 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the head of the tx queue and the sequence
 * counter to the last acked frame, then resend everything via
 * l2cap_ertm_send(). Returns its frame count/result. */
1355 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1359 if (!skb_queue_empty(&chan->tx_q))
1360 chan->tx_send_head = chan->tx_q.next;
1362 chan->next_tx_seq = chan->expected_ack_seq;
1363 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR if locally busy; otherwise try
 * to piggyback the ack on pending I-frames (l2cap_ertm_send > 0 means
 * data carried the ack — the early return is on a missing line), else
 * fall back to an explicit RR S-frame. */
1367 static void l2cap_send_ack(struct l2cap_chan *chan)
1371 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1373 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1374 control |= L2CAP_SUPER_RCV_NOT_READY;
1375 chan->conn_state |= L2CAP_CONN_RNR_SENT;
1376 l2cap_send_sframe(chan, control);
1380 if (l2cap_ertm_send(chan) > 0)
1383 control |= L2CAP_SUPER_RCV_READY;
1384 l2cap_send_sframe(chan, control);
/* Send a SREJ S-frame with the Final bit for the LAST entry on the
 * srej list (the most recently recorded missing TxSeq). */
1387 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1389 struct srej_list *tail;
1392 control = L2CAP_SUPER_SELECT_REJECT;
1393 control |= L2CAP_CTRL_FINAL;
1395 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1396 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1398 l2cap_send_sframe(chan, control);
/* Copy user iovec data into skb: first `count` bytes into skb itself,
 * the remainder into a chain of continuation fragments on frag_list,
 * each capped at conn->mtu. Returns -EFAULT on copy failure (the error
 * returns and the loop header over `len` are on missing lines). */
1401 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1403 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1404 struct sk_buff **frag;
1407 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1413 /* Continuation fragments (no L2CAP header) */
1414 frag = &skb_shinfo(skb)->frag_list;
1416 count = min_t(unsigned int, conn->mtu, len);
1418 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1421 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1427 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM,
 * then the user payload via l2cap_skbuff_fromiovec(). hlen's extra 2
 * bytes are the PSM field. Returns the skb or an ERR_PTR. */
1433 struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1435 struct sock *sk = chan->sk;
1436 struct l2cap_conn *conn = chan->conn;
1437 struct sk_buff *skb;
1438 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1439 struct l2cap_hdr *lh;
1441 BT_DBG("sk %p len %d", sk, (int)len);
1443 count = min_t(unsigned int, (conn->mtu - hlen), len);
1444 skb = bt_skb_send_alloc(sk, count + hlen,
1445 msg->msg_flags & MSG_DONTWAIT, &err);
1447 return ERR_PTR(err);
1449 /* Create L2CAP header */
1450 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1451 lh->cid = cpu_to_le16(chan->dcid);
1452 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1453 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1455 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1456 if (unlikely(err < 0)) {
1458 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload. Same shape as the connectionless variant minus the PSM
 * field. Returns the skb or an ERR_PTR. */
1463 struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1465 struct sock *sk = chan->sk;
1466 struct l2cap_conn *conn = chan->conn;
1467 struct sk_buff *skb;
1468 int err, count, hlen = L2CAP_HDR_SIZE;
1469 struct l2cap_hdr *lh;
1471 BT_DBG("sk %p len %d", sk, (int)len);
1473 count = min_t(unsigned int, (conn->mtu - hlen), len);
1474 skb = bt_skb_send_alloc(sk, count + hlen,
1475 msg->msg_flags & MSG_DONTWAIT, &err);
1477 return ERR_PTR(err);
1479 /* Create L2CAP header */
1480 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1481 lh->cid = cpu_to_le16(chan->dcid);
1482 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1484 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1485 if (unlikely(err < 0)) {
1487 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: header + 16-bit control word,
 * optional 16-bit SDU length (first segment of a segmented SDU; the
 * `sdulen` test and the "hlen += 2" lines are missing here), payload,
 * and a zero placeholder for the CRC16 FCS (filled in at send time).
 * Fails with -ENOTCONN when not connected; retries counter starts at 0. */
1492 struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1494 struct sock *sk = chan->sk;
1495 struct l2cap_conn *conn = chan->conn;
1496 struct sk_buff *skb;
1497 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1498 struct l2cap_hdr *lh;
1500 BT_DBG("sk %p len %d", sk, (int)len);
1503 return ERR_PTR(-ENOTCONN);
1508 if (chan->fcs == L2CAP_FCS_CRC16)
1511 count = min_t(unsigned int, (conn->mtu - hlen), len);
1512 skb = bt_skb_send_alloc(sk, count + hlen,
1513 msg->msg_flags & MSG_DONTWAIT, &err);
1515 return ERR_PTR(err);
1517 /* Create L2CAP header */
1518 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1519 lh->cid = cpu_to_le16(chan->dcid);
1520 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1521 put_unaligned_le16(control, skb_put(skb, 2));
1523 put_unaligned_le16(sdulen, skb_put(skb, 2));
1525 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1526 if (unlikely(err < 0)) {
1528 return ERR_PTR(err);
1531 if (chan->fcs == L2CAP_FCS_CRC16)
1532 put_unaligned_le16(0, skb_put(skb, 2));
1534 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START frame (carrying
 * the total SDU length), zero or more CONTINUE frames, and an END frame,
 * built on a local queue and spliced onto tx_q only on full success so a
 * mid-sequence failure leaves tx_q untouched. The loop header over the
 * remaining `len` and the final size/return lines are missing here. */
1538 int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1540 struct sk_buff *skb;
1541 struct sk_buff_head sar_queue;
1545 skb_queue_head_init(&sar_queue);
1546 control = L2CAP_SDU_START;
1547 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1549 return PTR_ERR(skb);
1551 __skb_queue_tail(&sar_queue, skb);
1552 len -= chan->remote_mps;
1553 size += chan->remote_mps;
1558 if (len > chan->remote_mps) {
1559 control = L2CAP_SDU_CONTINUE;
1560 buflen = chan->remote_mps;
1562 control = L2CAP_SDU_END;
1566 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1568 skb_queue_purge(&sar_queue);
1569 return PTR_ERR(skb);
1572 __skb_queue_tail(&sar_queue, skb);
1576 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1577 if (chan->tx_send_head == NULL)
1578 chan->tx_send_head = sar_queue.next;
1583 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1585 struct sk_buff *skb;
1589 /* Connectionless channel */
1590 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1591 skb = l2cap_create_connless_pdu(chan, msg, len);
1593 return PTR_ERR(skb);
1595 l2cap_do_send(chan, skb);
1599 switch (chan->mode) {
1600 case L2CAP_MODE_BASIC:
1601 /* Check outgoing MTU */
1602 if (len > chan->omtu)
1605 /* Create a basic PDU */
1606 skb = l2cap_create_basic_pdu(chan, msg, len);
1608 return PTR_ERR(skb);
1610 l2cap_do_send(chan, skb);
1614 case L2CAP_MODE_ERTM:
1615 case L2CAP_MODE_STREAMING:
1616 /* Entire SDU fits into one PDU */
1617 if (len <= chan->remote_mps) {
1618 control = L2CAP_SDU_UNSEGMENTED;
1619 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1622 return PTR_ERR(skb);
1624 __skb_queue_tail(&chan->tx_q, skb);
1626 if (chan->tx_send_head == NULL)
1627 chan->tx_send_head = skb;
1630 /* Segment SDU into multiples PDUs */
1631 err = l2cap_sar_segment_sdu(chan, msg, len);
1636 if (chan->mode == L2CAP_MODE_STREAMING) {
1637 l2cap_streaming_send(chan);
1642 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
1643 (chan->conn_state & L2CAP_CONN_WAIT_F)) {
1648 err = l2cap_ertm_send(chan);
1655 BT_DBG("bad state %1.1x", chan->mode);
1662 static void l2cap_chan_ready(struct sock *sk)
1664 struct sock *parent = bt_sk(sk)->parent;
1665 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1667 BT_DBG("sk %p, parent %p", sk, parent);
1669 chan->conf_state = 0;
1670 l2cap_chan_clear_timer(chan);
1673 /* Outgoing channel.
1674 * Wake up socket sleeping on connect.
1676 sk->sk_state = BT_CONNECTED;
1677 sk->sk_state_change(sk);
1679 /* Incoming channel.
1680 * Wake up socket sleeping on accept.
1682 parent->sk_data_ready(parent, 0);
1686 /* Copy frame to all raw sockets on that connection */
1687 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1689 struct sk_buff *nskb;
1690 struct l2cap_chan *chan;
1692 BT_DBG("conn %p", conn);
1694 read_lock(&conn->chan_lock);
1695 list_for_each_entry(chan, &conn->chan_l, list) {
1696 struct sock *sk = chan->sk;
1697 if (chan->chan_type != L2CAP_CHAN_RAW)
1700 /* Don't send frame to the socket it came from */
1703 nskb = skb_clone(skb, GFP_ATOMIC);
1707 if (sock_queue_rcv_skb(sk, nskb))
1710 read_unlock(&conn->chan_lock);
1713 /* ---- L2CAP signalling commands ---- */
1714 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1715 u8 code, u8 ident, u16 dlen, void *data)
1717 struct sk_buff *skb, **frag;
1718 struct l2cap_cmd_hdr *cmd;
1719 struct l2cap_hdr *lh;
1722 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1723 conn, code, ident, dlen);
1725 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1726 count = min_t(unsigned int, conn->mtu, len);
1728 skb = bt_skb_alloc(count, GFP_ATOMIC);
1732 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1733 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1735 if (conn->hcon->type == LE_LINK)
1736 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1738 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1740 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1743 cmd->len = cpu_to_le16(dlen);
1746 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1747 memcpy(skb_put(skb, count), data, count);
1753 /* Continuation fragments (no L2CAP header) */
1754 frag = &skb_shinfo(skb)->frag_list;
1756 count = min_t(unsigned int, conn->mtu, len);
1758 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1762 memcpy(skb_put(*frag, count), data, count);
1767 frag = &(*frag)->next;
1777 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1779 struct l2cap_conf_opt *opt = *ptr;
1782 len = L2CAP_CONF_OPT_SIZE + opt->len;
1790 *val = *((u8 *) opt->val);
1794 *val = get_unaligned_le16(opt->val);
1798 *val = get_unaligned_le32(opt->val);
1802 *val = (unsigned long) opt->val;
1806 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1810 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1812 struct l2cap_conf_opt *opt = *ptr;
1814 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1821 *((u8 *) opt->val) = val;
1825 put_unaligned_le16(val, opt->val);
1829 put_unaligned_le32(val, opt->val);
1833 memcpy(opt->val, (void *) val, len);
1837 *ptr += L2CAP_CONF_OPT_SIZE + len;
1840 static void l2cap_ack_timeout(unsigned long arg)
1842 struct l2cap_chan *chan = (void *) arg;
1844 bh_lock_sock(chan->sk);
1845 l2cap_send_ack(chan);
1846 bh_unlock_sock(chan->sk);
1849 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1851 struct sock *sk = chan->sk;
1853 chan->expected_ack_seq = 0;
1854 chan->unacked_frames = 0;
1855 chan->buffer_seq = 0;
1856 chan->num_acked = 0;
1857 chan->frames_sent = 0;
1859 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1860 (unsigned long) chan);
1861 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1862 (unsigned long) chan);
1863 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1865 skb_queue_head_init(&chan->srej_q);
1866 skb_queue_head_init(&chan->busy_q);
1868 INIT_LIST_HEAD(&chan->srej_l);
1870 INIT_WORK(&chan->busy_work, l2cap_busy_work);
1872 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1875 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1878 case L2CAP_MODE_STREAMING:
1879 case L2CAP_MODE_ERTM:
1880 if (l2cap_mode_supported(mode, remote_feat_mask))
1884 return L2CAP_MODE_BASIC;
1888 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1890 struct l2cap_conf_req *req = data;
1891 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1892 void *ptr = req->data;
1894 BT_DBG("chan %p", chan);
1896 if (chan->num_conf_req || chan->num_conf_rsp)
1899 switch (chan->mode) {
1900 case L2CAP_MODE_STREAMING:
1901 case L2CAP_MODE_ERTM:
1902 if (chan->conf_state & L2CAP_CONF_STATE2_DEVICE)
1907 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
1912 if (chan->imtu != L2CAP_DEFAULT_MTU)
1913 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1915 switch (chan->mode) {
1916 case L2CAP_MODE_BASIC:
1917 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1918 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1921 rfc.mode = L2CAP_MODE_BASIC;
1923 rfc.max_transmit = 0;
1924 rfc.retrans_timeout = 0;
1925 rfc.monitor_timeout = 0;
1926 rfc.max_pdu_size = 0;
1928 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1929 (unsigned long) &rfc);
1932 case L2CAP_MODE_ERTM:
1933 rfc.mode = L2CAP_MODE_ERTM;
1934 rfc.txwin_size = chan->tx_win;
1935 rfc.max_transmit = chan->max_tx;
1936 rfc.retrans_timeout = 0;
1937 rfc.monitor_timeout = 0;
1938 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1939 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1940 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1942 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1943 (unsigned long) &rfc);
1945 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1948 if (chan->fcs == L2CAP_FCS_NONE ||
1949 chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1950 chan->fcs = L2CAP_FCS_NONE;
1951 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1955 case L2CAP_MODE_STREAMING:
1956 rfc.mode = L2CAP_MODE_STREAMING;
1958 rfc.max_transmit = 0;
1959 rfc.retrans_timeout = 0;
1960 rfc.monitor_timeout = 0;
1961 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1962 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1963 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1965 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1966 (unsigned long) &rfc);
1968 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1971 if (chan->fcs == L2CAP_FCS_NONE ||
1972 chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1973 chan->fcs = L2CAP_FCS_NONE;
1974 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1979 req->dcid = cpu_to_le16(chan->dcid);
1980 req->flags = cpu_to_le16(0);
1985 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1987 struct l2cap_conf_rsp *rsp = data;
1988 void *ptr = rsp->data;
1989 void *req = chan->conf_req;
1990 int len = chan->conf_len;
1991 int type, hint, olen;
1993 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1994 u16 mtu = L2CAP_DEFAULT_MTU;
1995 u16 result = L2CAP_CONF_SUCCESS;
1997 BT_DBG("chan %p", chan);
1999 while (len >= L2CAP_CONF_OPT_SIZE) {
2000 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2002 hint = type & L2CAP_CONF_HINT;
2003 type &= L2CAP_CONF_MASK;
2006 case L2CAP_CONF_MTU:
2010 case L2CAP_CONF_FLUSH_TO:
2011 chan->flush_to = val;
2014 case L2CAP_CONF_QOS:
2017 case L2CAP_CONF_RFC:
2018 if (olen == sizeof(rfc))
2019 memcpy(&rfc, (void *) val, olen);
2022 case L2CAP_CONF_FCS:
2023 if (val == L2CAP_FCS_NONE)
2024 chan->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2032 result = L2CAP_CONF_UNKNOWN;
2033 *((u8 *) ptr++) = type;
2038 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2041 switch (chan->mode) {
2042 case L2CAP_MODE_STREAMING:
2043 case L2CAP_MODE_ERTM:
2044 if (!(chan->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2045 chan->mode = l2cap_select_mode(rfc.mode,
2046 chan->conn->feat_mask);
2050 if (chan->mode != rfc.mode)
2051 return -ECONNREFUSED;
2057 if (chan->mode != rfc.mode) {
2058 result = L2CAP_CONF_UNACCEPT;
2059 rfc.mode = chan->mode;
2061 if (chan->num_conf_rsp == 1)
2062 return -ECONNREFUSED;
2064 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2065 sizeof(rfc), (unsigned long) &rfc);
2069 if (result == L2CAP_CONF_SUCCESS) {
2070 /* Configure output options and let the other side know
2071 * which ones we don't like. */
2073 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2074 result = L2CAP_CONF_UNACCEPT;
2077 chan->conf_state |= L2CAP_CONF_MTU_DONE;
2079 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2082 case L2CAP_MODE_BASIC:
2083 chan->fcs = L2CAP_FCS_NONE;
2084 chan->conf_state |= L2CAP_CONF_MODE_DONE;
2087 case L2CAP_MODE_ERTM:
2088 chan->remote_tx_win = rfc.txwin_size;
2089 chan->remote_max_tx = rfc.max_transmit;
2091 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2092 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2094 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2096 rfc.retrans_timeout =
2097 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2098 rfc.monitor_timeout =
2099 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2101 chan->conf_state |= L2CAP_CONF_MODE_DONE;
2103 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2104 sizeof(rfc), (unsigned long) &rfc);
2108 case L2CAP_MODE_STREAMING:
2109 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2110 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2112 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2114 chan->conf_state |= L2CAP_CONF_MODE_DONE;
2116 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2117 sizeof(rfc), (unsigned long) &rfc);
2122 result = L2CAP_CONF_UNACCEPT;
2124 memset(&rfc, 0, sizeof(rfc));
2125 rfc.mode = chan->mode;
2128 if (result == L2CAP_CONF_SUCCESS)
2129 chan->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2131 rsp->scid = cpu_to_le16(chan->dcid);
2132 rsp->result = cpu_to_le16(result);
2133 rsp->flags = cpu_to_le16(0x0000);
2138 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2140 struct l2cap_conf_req *req = data;
2141 void *ptr = req->data;
2144 struct l2cap_conf_rfc rfc;
2146 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2148 while (len >= L2CAP_CONF_OPT_SIZE) {
2149 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2152 case L2CAP_CONF_MTU:
2153 if (val < L2CAP_DEFAULT_MIN_MTU) {
2154 *result = L2CAP_CONF_UNACCEPT;
2155 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2158 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2161 case L2CAP_CONF_FLUSH_TO:
2162 chan->flush_to = val;
2163 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2167 case L2CAP_CONF_RFC:
2168 if (olen == sizeof(rfc))
2169 memcpy(&rfc, (void *)val, olen);
2171 if ((chan->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2172 rfc.mode != chan->mode)
2173 return -ECONNREFUSED;
2177 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2178 sizeof(rfc), (unsigned long) &rfc);
2183 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2184 return -ECONNREFUSED;
2186 chan->mode = rfc.mode;
2188 if (*result == L2CAP_CONF_SUCCESS) {
2190 case L2CAP_MODE_ERTM:
2191 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2192 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2193 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2195 case L2CAP_MODE_STREAMING:
2196 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2200 req->dcid = cpu_to_le16(chan->dcid);
2201 req->flags = cpu_to_le16(0x0000);
2206 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2208 struct l2cap_conf_rsp *rsp = data;
2209 void *ptr = rsp->data;
2211 BT_DBG("chan %p", chan);
2213 rsp->scid = cpu_to_le16(chan->dcid);
2214 rsp->result = cpu_to_le16(result);
2215 rsp->flags = cpu_to_le16(flags);
2220 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2222 struct l2cap_conn_rsp rsp;
2223 struct l2cap_conn *conn = chan->conn;
2226 rsp.scid = cpu_to_le16(chan->dcid);
2227 rsp.dcid = cpu_to_le16(chan->scid);
2228 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2229 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2230 l2cap_send_cmd(conn, chan->ident,
2231 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2233 if (chan->conf_state & L2CAP_CONF_REQ_SENT)
2236 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2237 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2238 l2cap_build_conf_req(chan, buf), buf);
2239 chan->num_conf_req++;
2242 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2246 struct l2cap_conf_rfc rfc;
2248 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2250 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2253 while (len >= L2CAP_CONF_OPT_SIZE) {
2254 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2257 case L2CAP_CONF_RFC:
2258 if (olen == sizeof(rfc))
2259 memcpy(&rfc, (void *)val, olen);
2266 case L2CAP_MODE_ERTM:
2267 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2268 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2269 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2271 case L2CAP_MODE_STREAMING:
2272 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2276 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2278 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2280 if (rej->reason != 0x0000)
2283 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2284 cmd->ident == conn->info_ident) {
2285 del_timer(&conn->info_timer);
2287 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2288 conn->info_ident = 0;
2290 l2cap_conn_start(conn);
2296 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2298 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2299 struct l2cap_conn_rsp rsp;
2300 struct l2cap_chan *chan = NULL, *pchan;
2301 struct sock *parent, *sk = NULL;
2302 int result, status = L2CAP_CS_NO_INFO;
2304 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2305 __le16 psm = req->psm;
2307 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2309 /* Check if we have socket listening on psm */
2310 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2312 result = L2CAP_CR_BAD_PSM;
2318 bh_lock_sock(parent);
2320 /* Check if the ACL is secure enough (if not SDP) */
2321 if (psm != cpu_to_le16(0x0001) &&
2322 !hci_conn_check_link_mode(conn->hcon)) {
2323 conn->disc_reason = 0x05;
2324 result = L2CAP_CR_SEC_BLOCK;
2328 result = L2CAP_CR_NO_MEM;
2330 /* Check for backlog size */
2331 if (sk_acceptq_is_full(parent)) {
2332 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2336 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2340 chan = l2cap_chan_create(sk);
2342 l2cap_sock_kill(sk);
2346 l2cap_pi(sk)->chan = chan;
2348 write_lock_bh(&conn->chan_lock);
2350 /* Check if we already have channel with that dcid */
2351 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2352 write_unlock_bh(&conn->chan_lock);
2353 sock_set_flag(sk, SOCK_ZAPPED);
2354 l2cap_sock_kill(sk);
2358 hci_conn_hold(conn->hcon);
2360 l2cap_sock_init(sk, parent);
2361 bacpy(&bt_sk(sk)->src, conn->src);
2362 bacpy(&bt_sk(sk)->dst, conn->dst);
2366 bt_accept_enqueue(parent, sk);
2368 __l2cap_chan_add(conn, chan);
2372 l2cap_chan_set_timer(chan, sk->sk_sndtimeo);
2374 chan->ident = cmd->ident;
2376 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2377 if (l2cap_check_security(chan)) {
2378 if (bt_sk(sk)->defer_setup) {
2379 sk->sk_state = BT_CONNECT2;
2380 result = L2CAP_CR_PEND;
2381 status = L2CAP_CS_AUTHOR_PEND;
2382 parent->sk_data_ready(parent, 0);
2384 sk->sk_state = BT_CONFIG;
2385 result = L2CAP_CR_SUCCESS;
2386 status = L2CAP_CS_NO_INFO;
2389 sk->sk_state = BT_CONNECT2;
2390 result = L2CAP_CR_PEND;
2391 status = L2CAP_CS_AUTHEN_PEND;
2394 sk->sk_state = BT_CONNECT2;
2395 result = L2CAP_CR_PEND;
2396 status = L2CAP_CS_NO_INFO;
2399 write_unlock_bh(&conn->chan_lock);
2402 bh_unlock_sock(parent);
2405 rsp.scid = cpu_to_le16(scid);
2406 rsp.dcid = cpu_to_le16(dcid);
2407 rsp.result = cpu_to_le16(result);
2408 rsp.status = cpu_to_le16(status);
2409 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2411 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2412 struct l2cap_info_req info;
2413 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2415 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2416 conn->info_ident = l2cap_get_ident(conn);
2418 mod_timer(&conn->info_timer, jiffies +
2419 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2421 l2cap_send_cmd(conn, conn->info_ident,
2422 L2CAP_INFO_REQ, sizeof(info), &info);
2425 if (chan && !(chan->conf_state & L2CAP_CONF_REQ_SENT) &&
2426 result == L2CAP_CR_SUCCESS) {
2428 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2429 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2430 l2cap_build_conf_req(chan, buf), buf);
2431 chan->num_conf_req++;
2437 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2439 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2440 u16 scid, dcid, result, status;
2441 struct l2cap_chan *chan;
2445 scid = __le16_to_cpu(rsp->scid);
2446 dcid = __le16_to_cpu(rsp->dcid);
2447 result = __le16_to_cpu(rsp->result);
2448 status = __le16_to_cpu(rsp->status);
2450 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2453 chan = l2cap_get_chan_by_scid(conn, scid);
2457 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2465 case L2CAP_CR_SUCCESS:
2466 sk->sk_state = BT_CONFIG;
2469 chan->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2471 if (chan->conf_state & L2CAP_CONF_REQ_SENT)
2474 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2476 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2477 l2cap_build_conf_req(chan, req), req);
2478 chan->num_conf_req++;
2482 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
2486 /* don't delete l2cap channel if sk is owned by user */
2487 if (sock_owned_by_user(sk)) {
2488 sk->sk_state = BT_DISCONN;
2489 l2cap_chan_clear_timer(chan);
2490 l2cap_chan_set_timer(chan, HZ / 5);
2494 l2cap_chan_del(chan, ECONNREFUSED);
2502 static inline void set_default_fcs(struct l2cap_chan *chan)
2504 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2506 /* FCS is enabled only in ERTM or streaming mode, if one or both
2509 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2510 chan->fcs = L2CAP_FCS_NONE;
2511 else if (!(pi->chan->conf_state & L2CAP_CONF_NO_FCS_RECV))
2512 chan->fcs = L2CAP_FCS_CRC16;
2515 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2517 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2520 struct l2cap_chan *chan;
2524 dcid = __le16_to_cpu(req->dcid);
2525 flags = __le16_to_cpu(req->flags);
2527 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2529 chan = l2cap_get_chan_by_scid(conn, dcid);
2535 if (sk->sk_state != BT_CONFIG) {
2536 struct l2cap_cmd_rej rej;
2538 rej.reason = cpu_to_le16(0x0002);
2539 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2544 /* Reject if config buffer is too small. */
2545 len = cmd_len - sizeof(*req);
2546 if (chan->conf_len + len > sizeof(chan->conf_req)) {
2547 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2548 l2cap_build_conf_rsp(chan, rsp,
2549 L2CAP_CONF_REJECT, flags), rsp);
2554 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2555 chan->conf_len += len;
2557 if (flags & 0x0001) {
2558 /* Incomplete config. Send empty response. */
2559 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2560 l2cap_build_conf_rsp(chan, rsp,
2561 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2565 /* Complete config. */
2566 len = l2cap_parse_conf_req(chan, rsp);
2568 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2572 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2573 chan->num_conf_rsp++;
2575 /* Reset config buffer. */
2578 if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE))
2581 if (chan->conf_state & L2CAP_CONF_INPUT_DONE) {
2582 set_default_fcs(chan);
2584 sk->sk_state = BT_CONNECTED;
2586 chan->next_tx_seq = 0;
2587 chan->expected_tx_seq = 0;
2588 skb_queue_head_init(&chan->tx_q);
2589 if (chan->mode == L2CAP_MODE_ERTM)
2590 l2cap_ertm_init(chan);
2592 l2cap_chan_ready(sk);
2596 if (!(chan->conf_state & L2CAP_CONF_REQ_SENT)) {
2598 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2599 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2600 l2cap_build_conf_req(chan, buf), buf);
2601 chan->num_conf_req++;
2609 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2611 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2612 u16 scid, flags, result;
2613 struct l2cap_chan *chan;
2615 int len = cmd->len - sizeof(*rsp);
2617 scid = __le16_to_cpu(rsp->scid);
2618 flags = __le16_to_cpu(rsp->flags);
2619 result = __le16_to_cpu(rsp->result);
2621 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2622 scid, flags, result);
2624 chan = l2cap_get_chan_by_scid(conn, scid);
2631 case L2CAP_CONF_SUCCESS:
2632 l2cap_conf_rfc_get(chan, rsp->data, len);
2635 case L2CAP_CONF_UNACCEPT:
2636 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2639 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2640 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2644 /* throw out any old stored conf requests */
2645 result = L2CAP_CONF_SUCCESS;
2646 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2649 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2653 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2654 L2CAP_CONF_REQ, len, req);
2655 chan->num_conf_req++;
2656 if (result != L2CAP_CONF_SUCCESS)
2662 sk->sk_err = ECONNRESET;
2663 l2cap_chan_set_timer(chan, HZ * 5);
2664 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2671 chan->conf_state |= L2CAP_CONF_INPUT_DONE;
2673 if (chan->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2674 set_default_fcs(chan);
2676 sk->sk_state = BT_CONNECTED;
2677 chan->next_tx_seq = 0;
2678 chan->expected_tx_seq = 0;
2679 skb_queue_head_init(&chan->tx_q);
2680 if (chan->mode == L2CAP_MODE_ERTM)
2681 l2cap_ertm_init(chan);
2683 l2cap_chan_ready(sk);
2691 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2693 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2694 struct l2cap_disconn_rsp rsp;
2696 struct l2cap_chan *chan;
2699 scid = __le16_to_cpu(req->scid);
2700 dcid = __le16_to_cpu(req->dcid);
2702 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2704 chan = l2cap_get_chan_by_scid(conn, dcid);
2710 rsp.dcid = cpu_to_le16(chan->scid);
2711 rsp.scid = cpu_to_le16(chan->dcid);
2712 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2714 sk->sk_shutdown = SHUTDOWN_MASK;
2716 /* don't delete l2cap channel if sk is owned by user */
2717 if (sock_owned_by_user(sk)) {
2718 sk->sk_state = BT_DISCONN;
2719 l2cap_chan_clear_timer(chan);
2720 l2cap_chan_set_timer(chan, HZ / 5);
2725 l2cap_chan_del(chan, ECONNRESET);
2728 l2cap_sock_kill(sk);
2732 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2734 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2736 struct l2cap_chan *chan;
2739 scid = __le16_to_cpu(rsp->scid);
2740 dcid = __le16_to_cpu(rsp->dcid);
2742 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2744 chan = l2cap_get_chan_by_scid(conn, scid);
2750 /* don't delete l2cap channel if sk is owned by user */
2751 if (sock_owned_by_user(sk)) {
2752 sk->sk_state = BT_DISCONN;
2753 l2cap_chan_clear_timer(chan);
2754 l2cap_chan_set_timer(chan, HZ / 5);
2759 l2cap_chan_del(chan, 0);
2762 l2cap_sock_kill(sk);
2766 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2768 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2771 type = __le16_to_cpu(req->type);
2773 BT_DBG("type 0x%4.4x", type);
2775 if (type == L2CAP_IT_FEAT_MASK) {
2777 u32 feat_mask = l2cap_feat_mask;
2778 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2779 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2780 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2782 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2784 put_unaligned_le32(feat_mask, rsp->data);
2785 l2cap_send_cmd(conn, cmd->ident,
2786 L2CAP_INFO_RSP, sizeof(buf), buf);
2787 } else if (type == L2CAP_IT_FIXED_CHAN) {
2789 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2790 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2791 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2792 memcpy(buf + 4, l2cap_fixed_chan, 8);
2793 l2cap_send_cmd(conn, cmd->ident,
2794 L2CAP_INFO_RSP, sizeof(buf), buf);
2796 struct l2cap_info_rsp rsp;
2797 rsp.type = cpu_to_le16(type);
2798 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2799 l2cap_send_cmd(conn, cmd->ident,
2800 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2806 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2808 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2811 type = __le16_to_cpu(rsp->type);
2812 result = __le16_to_cpu(rsp->result);
2814 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2816 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2817 if (cmd->ident != conn->info_ident ||
2818 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2821 del_timer(&conn->info_timer);
2823 if (result != L2CAP_IR_SUCCESS) {
2824 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2825 conn->info_ident = 0;
2827 l2cap_conn_start(conn);
2832 if (type == L2CAP_IT_FEAT_MASK) {
2833 conn->feat_mask = get_unaligned_le32(rsp->data);
2835 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2836 struct l2cap_info_req req;
2837 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2839 conn->info_ident = l2cap_get_ident(conn);
2841 l2cap_send_cmd(conn, conn->info_ident,
2842 L2CAP_INFO_REQ, sizeof(req), &req);
2844 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2845 conn->info_ident = 0;
2847 l2cap_conn_start(conn);
2849 } else if (type == L2CAP_IT_FIXED_CHAN) {
2850 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2851 conn->info_ident = 0;
2853 l2cap_conn_start(conn);
2859 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2864 if (min > max || min < 6 || max > 3200)
2867 if (to_multiplier < 10 || to_multiplier > 3200)
2870 if (max >= to_multiplier * 8)
2873 max_latency = (to_multiplier * 8 / max) - 1;
2874 if (latency > 499 || latency > max_latency)
2880 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2881 struct l2cap_cmd_hdr *cmd, u8 *data)
2883 struct hci_conn *hcon = conn->hcon;
2884 struct l2cap_conn_param_update_req *req;
2885 struct l2cap_conn_param_update_rsp rsp;
2886 u16 min, max, latency, to_multiplier, cmd_len;
2889 if (!(hcon->link_mode & HCI_LM_MASTER))
2892 cmd_len = __le16_to_cpu(cmd->len);
2893 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2896 req = (struct l2cap_conn_param_update_req *) data;
2897 min = __le16_to_cpu(req->min);
2898 max = __le16_to_cpu(req->max);
2899 latency = __le16_to_cpu(req->latency);
2900 to_multiplier = __le16_to_cpu(req->to_multiplier);
2902 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2903 min, max, latency, to_multiplier);
2905 memset(&rsp, 0, sizeof(rsp));
2907 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2909 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2911 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2913 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
2917 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
2922 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2923 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2927 switch (cmd->code) {
2928 case L2CAP_COMMAND_REJ:
2929 l2cap_command_rej(conn, cmd, data);
2932 case L2CAP_CONN_REQ:
2933 err = l2cap_connect_req(conn, cmd, data);
2936 case L2CAP_CONN_RSP:
2937 err = l2cap_connect_rsp(conn, cmd, data);
2940 case L2CAP_CONF_REQ:
2941 err = l2cap_config_req(conn, cmd, cmd_len, data);
2944 case L2CAP_CONF_RSP:
2945 err = l2cap_config_rsp(conn, cmd, data);
2948 case L2CAP_DISCONN_REQ:
2949 err = l2cap_disconnect_req(conn, cmd, data);
2952 case L2CAP_DISCONN_RSP:
2953 err = l2cap_disconnect_rsp(conn, cmd, data);
2956 case L2CAP_ECHO_REQ:
2957 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2960 case L2CAP_ECHO_RSP:
2963 case L2CAP_INFO_REQ:
2964 err = l2cap_information_req(conn, cmd, data);
2967 case L2CAP_INFO_RSP:
2968 err = l2cap_information_rsp(conn, cmd, data);
2972 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2980 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2981 struct l2cap_cmd_hdr *cmd, u8 *data)
2983 switch (cmd->code) {
2984 case L2CAP_COMMAND_REJ:
2987 case L2CAP_CONN_PARAM_UPDATE_REQ:
2988 return l2cap_conn_param_update_req(conn, cmd, data);
2990 case L2CAP_CONN_PARAM_UPDATE_RSP:
2994 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2999 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3000 struct sk_buff *skb)
3002 u8 *data = skb->data;
3004 struct l2cap_cmd_hdr cmd;
3007 l2cap_raw_recv(conn, skb);
3009 while (len >= L2CAP_CMD_HDR_SIZE) {
3011 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3012 data += L2CAP_CMD_HDR_SIZE;
3013 len -= L2CAP_CMD_HDR_SIZE;
3015 cmd_len = le16_to_cpu(cmd.len);
3017 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3019 if (cmd_len > len || !cmd.ident) {
3020 BT_DBG("corrupted command");
3024 if (conn->hcon->type == LE_LINK)
3025 err = l2cap_le_sig_cmd(conn, &cmd, data);
3027 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3030 struct l2cap_cmd_rej rej;
3032 BT_ERR("Wrong link type (%d)", err);
3034 /* FIXME: Map err to a valid reason */
3035 rej.reason = cpu_to_le16(0);
3036 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the 2-byte CRC16 FCS trailing an ERTM/streaming frame.
 * The CRC covers the Basic L2CAP header (which sits just before
 * skb->data, hence "skb->data - hdr_size") plus the payload. */
3046 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3048 u16 our_fcs, rcv_fcs;
3049 int hdr_size = L2CAP_HDR_SIZE + 2;
3051 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off the payload first; the two bytes are still in the
 * buffer at skb->data + skb->len, so they can be read after the trim. */
3052 skb_trim(skb, skb->len - 2);
3053 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3054 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3056 if (our_fcs != rcv_fcs)
/* Answer a peer poll: send pending I-frames if possible, otherwise an
 * RNR (local busy) or RR supervisory frame so the peer always gets an
 * acknowledgement carrying our current buffer_seq. */
3062 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3066 chan->frames_sent = 0;
3068 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
/* Local busy: tell the peer to stop sending (RNR) and remember we did. */
3070 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3071 control |= L2CAP_SUPER_RCV_NOT_READY;
3072 l2cap_send_sframe(chan, control);
3073 chan->conn_state |= L2CAP_CONN_RNR_SENT;
3076 if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
3077 l2cap_retransmit_frames(chan);
3079 l2cap_ertm_send(chan);
/* Nothing was (re)transmitted and we are not busy: ack with a plain RR. */
3081 if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3082 chan->frames_sent == 0) {
3083 control |= L2CAP_SUPER_RCV_READY;
3084 l2cap_send_sframe(chan, control);
/* Insert an out-of-order I-frame into the SREJ reorder queue, kept
 * sorted by tx_seq distance from buffer_seq (modulo-64 arithmetic).
 * Returns non-zero via the duplicate path when tx_seq is already queued. */
3088 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
3090 struct sk_buff *next_skb;
3091 int tx_seq_offset, next_tx_seq_offset;
3093 bt_cb(skb)->tx_seq = tx_seq;
3094 bt_cb(skb)->sar = sar;
/* Empty queue: just append. */
3096 next_skb = skb_peek(&chan->srej_q);
3098 __skb_queue_tail(&chan->srej_q, skb);
/* Distance of the new frame from buffer_seq, wrapped into 0..63. */
3102 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3103 if (tx_seq_offset < 0)
3104 tx_seq_offset += 64;
/* Same tx_seq already queued: duplicate, do not insert twice. */
3107 if (bt_cb(next_skb)->tx_seq == tx_seq)
3110 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3111 chan->buffer_seq) % 64;
3112 if (next_tx_seq_offset < 0)
3113 next_tx_seq_offset += 64;
/* Found the first queued frame that is "later": insert before it. */
3115 if (next_tx_seq_offset > tx_seq_offset) {
3116 __skb_queue_before(&chan->srej_q, next_skb, skb);
3120 if (skb_queue_is_last(&chan->srej_q, next_skb))
3123 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
/* Later than everything queued: append at the tail. */
3125 __skb_queue_tail(&chan->srej_q, skb);
/* Reassemble an ERTM SDU from its SAR fragments and hand the complete
 * SDU to the socket.  State is tracked via L2CAP_CONN_SAR_SDU (a start
 * fragment is pending) and L2CAP_CONN_SAR_RETRY (queueing to the socket
 * failed once and must be retried without re-accumulating the payload). */
3130 static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3132 struct sk_buff *_skb;
3135 switch (control & L2CAP_CTRL_SAR) {
/* Unsegmented: must not arrive while a segmented SDU is in progress. */
3136 case L2CAP_SDU_UNSEGMENTED:
3137 if (chan->conn_state & L2CAP_CONN_SAR_SDU)
3140 return sock_queue_rcv_skb(chan->sk, skb);
/* Start fragment: first two bytes carry the total SDU length. */
3142 case L2CAP_SDU_START:
3143 if (chan->conn_state & L2CAP_CONN_SAR_SDU)
3146 chan->sdu_len = get_unaligned_le16(skb->data);
/* Reject SDUs larger than our advertised MTU. */
3148 if (chan->sdu_len > chan->imtu)
3151 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3155 /* pull sdu_len bytes only after alloc, because of Local Busy
3156 * condition we have to be sure that this will be executed
3157 * only once, i.e., when alloc does not fail */
3160 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3162 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3163 chan->partial_sdu_len = skb->len;
/* Continuation fragment: only valid while a start is pending. */
3166 case L2CAP_SDU_CONTINUE:
3167 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3173 chan->partial_sdu_len += skb->len;
3174 if (chan->partial_sdu_len > chan->sdu_len)
3177 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
/* End fragment: complete the SDU and deliver it. */
3182 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
/* Skip re-accumulation when retrying a previously failed delivery. */
3188 if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
3189 chan->partial_sdu_len += skb->len;
3191 if (chan->partial_sdu_len > chan->imtu)
/* Total received must equal the length announced in the start frame. */
3194 if (chan->partial_sdu_len != chan->sdu_len)
3197 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3200 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
/* Clone failed: mark for retry so the payload is not copied again. */
3202 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
3206 err = sock_queue_rcv_skb(chan->sk, _skb);
/* Socket queue full: keep the SDU and retry later. */
3209 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
3213 chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3214 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3216 kfree_skb(chan->sdu);
/* Error path: drop the partial SDU and reset the link. */
3224 kfree_skb(chan->sdu);
3228 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Try to drain the local-busy backlog (busy_q) into the socket.  Stops
 * (re-queueing the frame at the head) as soon as delivery fails; if the
 * whole backlog drains, clears the local-busy state and, when an RNR
 * was sent, polls the peer (RR + P-bit) to resume its transmissions. */
3233 static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
3235 struct sk_buff *skb;
3239 while ((skb = skb_dequeue(&chan->busy_q))) {
3240 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3241 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
/* Delivery failed again: put the frame back and report still-busy. */
3243 skb_queue_head(&chan->busy_q, skb);
3247 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
/* If no RNR was ever sent there is nothing to undo on the peer side. */
3250 if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
/* Poll the peer so it restarts transmission after our busy period. */
3253 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3254 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3255 l2cap_send_sframe(chan, control);
3256 chan->retry_count = 1;
3258 del_timer(&chan->retrans_timer);
3259 __mod_monitor_timer();
3261 chan->conn_state |= L2CAP_CONN_WAIT_F;
3264 chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3265 chan->conn_state &= ~L2CAP_CONN_RNR_SENT;
3267 BT_DBG("chan %p, Exit local busy", chan);
/* Workqueue handler for the local-busy condition: repeatedly sleeps and
 * retries pushing the busy_q backlog to the socket until it drains, a
 * signal/socket error occurs, or L2CAP_LOCAL_BUSY_TRIES is exceeded
 * (in which case the channel is disconnected with EBUSY). */
3272 static void l2cap_busy_work(struct work_struct *work)
3274 DECLARE_WAITQUEUE(wait, current);
3275 struct l2cap_chan *chan =
3276 container_of(work, struct l2cap_chan, busy_work);
3277 struct sock *sk = chan->sk;
3278 int n_tries = 0, timeo = HZ/5, err;
3279 struct sk_buff *skb;
3283 add_wait_queue(sk_sleep(sk), &wait);
3284 while ((skb = skb_peek(&chan->busy_q))) {
3285 set_current_state(TASK_INTERRUPTIBLE);
/* Give up after too many attempts; reset the channel. */
3287 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3289 l2cap_send_disconn_req(chan->conn, chan, EBUSY);
3296 if (signal_pending(current)) {
3297 err = sock_intr_errno(timeo);
/* Sleep for the remaining timeout before the next retry. */
3302 timeo = schedule_timeout(timeo);
3305 err = sock_error(sk);
/* Backlog fully drained: done. */
3309 if (l2cap_try_push_rx_skb(chan) == 0)
3313 set_current_state(TASK_RUNNING);
3314 remove_wait_queue(sk_sleep(sk), &wait);
/* Deliver an in-sequence I-frame to the socket.  While already in the
 * local-busy state, frames are appended to busy_q and drained by
 * l2cap_try_push_rx_skb().  If the socket refuses the frame, enter the
 * busy state: queue the frame, send RNR, and kick the busy work item. */
3319 static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3323 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3324 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3325 __skb_queue_tail(&chan->busy_q, skb);
3326 return l2cap_try_push_rx_skb(chan);
3331 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
/* Delivered: advance the receive window. */
3333 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3337 /* Busy Condition */
3338 BT_DBG("chan %p, Enter local busy", chan);
3340 chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3341 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3342 __skb_queue_tail(&chan->busy_q, skb);
/* Tell the peer to stop sending until the backlog clears. */
3344 sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3345 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3346 l2cap_send_sframe(chan, sctrl);
3348 chan->conn_state |= L2CAP_CONN_RNR_SENT;
3350 del_timer(&chan->ack_timer);
/* Retry delivery asynchronously from the busy workqueue. */
3352 queue_work(_busy_wq, &chan->busy_work);
/* Reassemble an SDU in streaming mode.  Unlike ERTM there is no retry:
 * on any inconsistency the partial SDU is simply discarded, since
 * streaming mode provides no reliability guarantees. */
3357 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3359 struct sk_buff *_skb;
3363 * TODO: We have to notify the userland if some data is lost with the
3367 switch (control & L2CAP_CTRL_SAR) {
/* Unsegmented frame aborts any in-progress segmented SDU. */
3368 case L2CAP_SDU_UNSEGMENTED:
3369 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3370 kfree_skb(chan->sdu);
3374 err = sock_queue_rcv_skb(chan->sk, skb);
/* Start fragment: first two bytes are the total SDU length. */
3380 case L2CAP_SDU_START:
3381 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3382 kfree_skb(chan->sdu);
3386 chan->sdu_len = get_unaligned_le16(skb->data);
3389 if (chan->sdu_len > chan->imtu) {
3394 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3400 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3402 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3403 chan->partial_sdu_len = skb->len;
3407 case L2CAP_SDU_CONTINUE:
3408 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3411 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
/* Overflow of the announced length: drop the partial SDU. */
3413 chan->partial_sdu_len += skb->len;
3414 if (chan->partial_sdu_len > chan->sdu_len)
3415 kfree_skb(chan->sdu);
/* End fragment: deliver only if the accumulated length matches. */
3422 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3425 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3427 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3428 chan->partial_sdu_len += skb->len;
3430 if (chan->partial_sdu_len > chan->imtu)
3433 if (chan->partial_sdu_len == chan->sdu_len) {
/* NOTE(review): skb_clone(GFP_ATOMIC) may return NULL and the result is
 * passed straight to sock_queue_rcv_skb — verify NULL handling upstream. */
3434 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3435 err = sock_queue_rcv_skb(chan->sk, _skb);
3442 kfree_skb(chan->sdu);
/* After a selectively-rejected frame arrives, deliver any consecutive
 * frames buffered in srej_q starting at tx_seq, advancing the SREJ
 * buffer sequence (modulo 64) for each one delivered. */
3450 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3452 struct sk_buff *skb;
3455 while ((skb = skb_peek(&chan->srej_q))) {
/* Stop at the first gap: the queue is sorted by sequence number. */
3456 if (bt_cb(skb)->tx_seq != tx_seq)
3459 skb = skb_dequeue(&chan->srej_q);
3460 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3461 l2cap_ertm_reassembly_sdu(chan, skb, control);
3462 chan->buffer_seq_srej =
3463 (chan->buffer_seq_srej + 1) % 64;
3464 tx_seq = (tx_seq + 1) % 64;
/* Re-send the SREJ S-frames for every sequence number still pending in
 * srej_l; the entry matching tx_seq (now received) is removed, the rest
 * are re-requested and moved to the tail to preserve ordering. */
3468 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3470 struct srej_list *l, *tmp;
3473 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* This sequence just arrived: drop its entry instead of re-requesting. */
3474 if (l->tx_seq == tx_seq) {
3479 control = L2CAP_SUPER_SELECT_REJECT;
3480 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3481 l2cap_send_sframe(chan, control);
3483 list_add_tail(&l->list, &chan->srej_l);
3487 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3489 struct srej_list *new;
3492 while (tx_seq != chan->expected_tx_seq) {
3493 control = L2CAP_SUPER_SELECT_REJECT;
3494 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3495 l2cap_send_sframe(chan, control);
3497 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3498 new->tx_seq = chan->expected_tx_seq;
3499 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3500 list_add_tail(&new->list, &chan->srej_l);
3502 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/* Process a received ERTM I-frame: acknowledge the peer's req_seq,
 * validate tx_seq against the receive window, handle out-of-sequence
 * frames via the SREJ machinery, and deliver in-sequence frames. */
3505 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3507 u8 tx_seq = __get_txseq(rx_control);
3508 u8 req_seq = __get_reqseq(rx_control);
3509 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3510 int tx_seq_offset, expected_tx_seq_offset;
3511 int num_to_ack = (chan->tx_win/6) + 1;
3514 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3515 tx_seq, rx_control);
/* F-bit answers our poll: stop the monitor timer and leave WAIT_F. */
3517 if (L2CAP_CTRL_FINAL & rx_control &&
3518 chan->conn_state & L2CAP_CONN_WAIT_F) {
3519 del_timer(&chan->monitor_timer);
3520 if (chan->unacked_frames > 0)
3521 __mod_retrans_timer();
3522 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3525 chan->expected_ack_seq = req_seq;
3526 l2cap_drop_acked_frames(chan);
3528 if (tx_seq == chan->expected_tx_seq)
3531 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3532 if (tx_seq_offset < 0)
3533 tx_seq_offset += 64;
3535 /* invalid tx_seq */
3536 if (tx_seq_offset >= chan->tx_win) {
3537 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* FIX: conn_state is a bitmask; testing with "==" only matched when
 * LOCAL_BUSY was the sole flag set, so busy frames were not dropped
 * whenever any other flag (e.g. RNR_SENT) was also set.  Use "&". */
3541 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY)
3544 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3545 struct srej_list *first;
3547 first = list_first_entry(&chan->srej_l,
3548 struct srej_list, list);
/* The oldest selectively-rejected frame arrived: deliver and close gap. */
3549 if (tx_seq == first->tx_seq) {
3550 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3551 l2cap_check_srej_gap(chan, tx_seq);
3553 list_del(&first->list);
3556 if (list_empty(&chan->srej_l)) {
3557 chan->buffer_seq = chan->buffer_seq_srej;
3558 chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3559 l2cap_send_ack(chan);
3560 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3563 struct srej_list *l;
3565 /* duplicated tx_seq */
3566 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3569 list_for_each_entry(l, &chan->srej_l, list) {
3570 if (l->tx_seq == tx_seq) {
3571 l2cap_resend_srejframe(chan, tx_seq);
3575 l2cap_send_srejframe(chan, tx_seq);
3578 expected_tx_seq_offset =
3579 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3580 if (expected_tx_seq_offset < 0)
3581 expected_tx_seq_offset += 64;
3583 /* duplicated tx_seq */
3584 if (tx_seq_offset < expected_tx_seq_offset)
/* First out-of-sequence frame: enter the SREJ recovery state. */
3587 chan->conn_state |= L2CAP_CONN_SREJ_SENT;
3589 BT_DBG("chan %p, Enter SREJ", chan);
3591 INIT_LIST_HEAD(&chan->srej_l);
3592 chan->buffer_seq_srej = chan->buffer_seq;
3594 __skb_queue_head_init(&chan->srej_q);
3595 __skb_queue_head_init(&chan->busy_q);
3596 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3598 chan->conn_state |= L2CAP_CONN_SEND_PBIT;
3600 l2cap_send_srejframe(chan, tx_seq);
3602 del_timer(&chan->ack_timer);
/* In-sequence frame: advance the window and push to the socket. */
3607 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3609 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3610 bt_cb(skb)->tx_seq = tx_seq;
3611 bt_cb(skb)->sar = sar;
3612 __skb_queue_tail(&chan->srej_q, skb);
3616 err = l2cap_push_rx_skb(chan, skb, rx_control);
3620 if (rx_control & L2CAP_CTRL_FINAL) {
3621 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3622 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3624 l2cap_retransmit_frames(chan);
/* Piggy-back an ack roughly every tx_win/6 frames. */
3629 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3630 if (chan->num_acked == num_to_ack - 1)
3631 l2cap_send_ack(chan);
/* Handle a Receiver Ready (RR) supervisory frame: the peer acknowledges
 * frames up to req_seq and may poll (P-bit) or finalize (F-bit). */
3640 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3642 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3645 chan->expected_ack_seq = __get_reqseq(rx_control);
3646 l2cap_drop_acked_frames(chan);
/* Poll: must answer with an F-bit frame. */
3648 if (rx_control & L2CAP_CTRL_POLL) {
3649 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3650 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3651 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3652 (chan->unacked_frames > 0))
3653 __mod_retrans_timer();
3655 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3656 l2cap_send_srejtail(chan);
3658 l2cap_send_i_or_rr_or_rnr(chan);
/* Final: reply to our earlier poll; may trigger retransmission. */
3661 } else if (rx_control & L2CAP_CTRL_FINAL) {
3662 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3664 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3665 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3667 l2cap_retransmit_frames(chan);
/* Plain RR: peer is no longer busy; resume sending. */
3670 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3671 (chan->unacked_frames > 0))
3672 __mod_retrans_timer();
3674 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3675 if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
3676 l2cap_send_ack(chan);
3678 l2cap_ertm_send(chan);
/* Handle a Reject (REJ) supervisory frame: the peer asks for a
 * go-back-N retransmission starting at req_seq. */
3682 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3684 u8 tx_seq = __get_reqseq(rx_control);
3686 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3688 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3690 chan->expected_ack_seq = tx_seq;
3691 l2cap_drop_acked_frames(chan);
/* With the F-bit, retransmit only if this REJ was not already acted on. */
3693 if (rx_control & L2CAP_CTRL_FINAL) {
3694 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3695 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3697 l2cap_retransmit_frames(chan);
3699 l2cap_retransmit_frames(chan);
/* Remember we serviced a REJ while waiting for an F-bit reply. */
3701 if (chan->conn_state & L2CAP_CONN_WAIT_F)
3702 chan->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective Reject (SREJ) supervisory frame: retransmit the
 * single frame identified by req_seq.  P/F bit handling mirrors the
 * REJ case but tracks the saved sequence in srej_save_reqseq. */
3705 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3707 u8 tx_seq = __get_reqseq(rx_control);
3709 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3711 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* SREJ with P-bit: ack frames before tx_seq, resend it with the F-bit. */
3713 if (rx_control & L2CAP_CTRL_POLL) {
3714 chan->expected_ack_seq = tx_seq;
3715 l2cap_drop_acked_frames(chan);
3717 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3718 l2cap_retransmit_one_frame(chan, tx_seq);
3720 l2cap_ertm_send(chan);
3722 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3723 chan->srej_save_reqseq = tx_seq;
3724 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
/* SREJ with F-bit: skip the resend if we already acted on this one. */
3726 } else if (rx_control & L2CAP_CTRL_FINAL) {
3727 if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
3728 chan->srej_save_reqseq == tx_seq)
3729 chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3731 l2cap_retransmit_one_frame(chan, tx_seq);
3733 l2cap_retransmit_one_frame(chan, tx_seq);
3734 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3735 chan->srej_save_reqseq = tx_seq;
3736 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver Not Ready (RNR) supervisory frame: mark the peer
 * busy, ack up to req_seq, and stop the retransmission timer (unless we
 * are in SREJ recovery, where the pending SREJs must still be served). */
3741 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3743 u8 tx_seq = __get_reqseq(rx_control);
3745 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3747 chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3748 chan->expected_ack_seq = tx_seq;
3749 l2cap_drop_acked_frames(chan);
3751 if (rx_control & L2CAP_CTRL_POLL)
3752 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3754 if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
3755 del_timer(&chan->retrans_timer);
3756 if (rx_control & L2CAP_CTRL_POLL)
3757 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL)
/* In SREJ recovery: keep asking for the missing frames. */
3761 if (rx_control & L2CAP_CTRL_POLL)
3762 l2cap_send_srejtail(chan);
3764 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
/* Dispatch a received supervisory (S) frame to the RR/REJ/SREJ/RNR
 * handler selected by the supervise bits of the control field. */
3767 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3769 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
/* F-bit answers our poll: stop the monitor timer and leave WAIT_F. */
3771 if (L2CAP_CTRL_FINAL & rx_control &&
3772 chan->conn_state & L2CAP_CONN_WAIT_F) {
3773 del_timer(&chan->monitor_timer);
3774 if (chan->unacked_frames > 0)
3775 __mod_retrans_timer();
3776 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3779 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3780 case L2CAP_SUPER_RCV_READY:
3781 l2cap_data_channel_rrframe(chan, rx_control);
3784 case L2CAP_SUPER_REJECT:
3785 l2cap_data_channel_rejframe(chan, rx_control);
3788 case L2CAP_SUPER_SELECT_REJECT:
3789 l2cap_data_channel_srejframe(chan, rx_control);
3792 case L2CAP_SUPER_RCV_NOT_READY:
3793 l2cap_data_channel_rnrframe(chan, rx_control);
/* Entry point for an ERTM frame: validate FCS, length and req_seq, then
 * route the frame to the I-frame or S-frame handler. */
3801 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3803 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3806 int len, next_tx_seq_offset, req_seq_offset;
3808 control = get_unaligned_le16(skb->data);
3813 * We can just drop the corrupted I-frame here.
3814 * Receiver will miss it and start proper recovery
3815 * procedures and ask retransmission.
3817 if (l2cap_check_fcs(chan, skb))
/* A SAR start fragment carries an extra 2-byte SDU length field. */
3820 if (__is_sar_start(control) && __is_iframe(control))
3823 if (chan->fcs == L2CAP_FCS_CRC16)
/* Payload larger than the negotiated MPS is a protocol violation. */
3826 if (len > chan->mps) {
3827 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* req_seq must lie within [expected_ack_seq, next_tx_seq] (mod 64). */
3831 req_seq = __get_reqseq(control);
3832 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3833 if (req_seq_offset < 0)
3834 req_seq_offset += 64;
3836 next_tx_seq_offset =
3837 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3838 if (next_tx_seq_offset < 0)
3839 next_tx_seq_offset += 64;
3841 /* check for invalid req-seq */
3842 if (req_seq_offset > next_tx_seq_offset) {
3843 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3847 if (__is_iframe(control)) {
3849 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3853 l2cap_data_channel_iframe(chan, control, skb);
3857 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3861 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver a data frame to the connection-oriented channel identified by
 * its source CID, according to the channel's operating mode. */
3871 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3873 struct l2cap_chan *chan;
3874 struct sock *sk = NULL;
3879 chan = l2cap_get_chan_by_scid(conn, cid);
3881 BT_DBG("unknown cid 0x%4.4x", cid);
3887 BT_DBG("chan %p, len %d", chan, skb->len);
3889 if (sk->sk_state != BT_CONNECTED)
3892 switch (chan->mode) {
3893 case L2CAP_MODE_BASIC:
3894 /* If socket recv buffers overflows we drop data here
3895 * which is *bad* because L2CAP has to be reliable.
3896 * But we don't have any other choice. L2CAP doesn't
3897 * provide flow control mechanism. */
3899 if (chan->imtu < skb->len)
3902 if (!sock_queue_rcv_skb(sk, skb))
/* ERTM: process directly, or defer to the socket backlog if the
 * socket is owned by a user context. */
3906 case L2CAP_MODE_ERTM:
3907 if (!sock_owned_by_user(sk)) {
3908 l2cap_ertm_data_rcv(sk, skb);
3910 if (sk_add_backlog(sk, skb))
/* Streaming: best-effort delivery; gaps simply advance expected_tx_seq. */
3916 case L2CAP_MODE_STREAMING:
3917 control = get_unaligned_le16(skb->data);
3921 if (l2cap_check_fcs(chan, skb))
3924 if (__is_sar_start(control))
3927 if (chan->fcs == L2CAP_FCS_CRC16)
3930 if (len > chan->mps || len < 0 || __is_sframe(control))
3933 tx_seq = __get_txseq(control);
3935 if (chan->expected_tx_seq == tx_seq)
3936 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3938 chan->expected_tx_seq = (tx_seq + 1) % 64;
3940 l2cap_streaming_reassembly_sdu(chan, skb, control);
3945 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (CID 0x0002) frame to the socket listening
 * on the given PSM on our local address, subject to state and MTU. */
3959 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3961 struct sock *sk = NULL;
3962 struct l2cap_chan *chan;
3964 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3972 BT_DBG("sk %p, len %d", sk, skb->len);
3974 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3977 if (l2cap_pi(sk)->chan->imtu < skb->len)
3980 if (!sock_queue_rcv_skb(sk, skb))
/* Deliver an LE ATT fixed-channel frame to the socket bound to that CID
 * on our local address, subject to state and MTU checks. */
3992 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3994 struct sock *sk = NULL;
3995 struct l2cap_chan *chan;
3997 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4005 BT_DBG("sk %p, len %d", sk, skb->len);
4007 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4010 if (l2cap_pi(sk)->chan->imtu < skb->len)
4013 if (!sock_queue_rcv_skb(sk, skb))
/* Demultiplex a complete L2CAP frame by destination CID: signalling,
 * connectionless, LE ATT, or a connection-oriented data channel. */
4025 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4027 struct l2cap_hdr *lh = (void *) skb->data;
4031 skb_pull(skb, L2CAP_HDR_SIZE);
4032 cid = __le16_to_cpu(lh->cid);
4033 len = __le16_to_cpu(lh->len);
/* Header length must match the payload actually received. */
4035 if (len != skb->len) {
4040 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4043 case L2CAP_CID_LE_SIGNALING:
4044 case L2CAP_CID_SIGNALING:
4045 l2cap_sig_channel(conn, skb);
/* Connectionless traffic carries the PSM in the first two bytes. */
4048 case L2CAP_CID_CONN_LESS:
4049 psm = get_unaligned_le16(skb->data);
4051 l2cap_conless_channel(conn, psm, skb);
4054 case L2CAP_CID_LE_DATA:
4055 l2cap_att_channel(conn, cid, skb);
4059 l2cap_data_channel(conn, cid, skb);
4064 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback for an incoming ACL connection request: scan listening
 * channels and return the link-mode flags (accept/master) to apply.
 * A socket bound to the exact adapter address takes precedence (lm1)
 * over wildcard BDADDR_ANY listeners (lm2). */
4066 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4068 int exact = 0, lm1 = 0, lm2 = 0;
4069 struct l2cap_chan *c;
/* L2CAP only runs over ACL links. */
4071 if (type != ACL_LINK)
4074 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4076 /* Find listening sockets and check their link_mode */
4077 read_lock(&chan_list_lock);
4078 list_for_each_entry(c, &chan_list, global_l) {
4079 struct sock *sk = c->sk;
4081 if (sk->sk_state != BT_LISTEN)
4084 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4085 lm1 |= HCI_LM_ACCEPT;
4087 lm1 |= HCI_LM_MASTER;
4089 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4090 lm2 |= HCI_LM_ACCEPT;
4092 lm2 |= HCI_LM_MASTER;
4095 read_unlock(&chan_list_lock);
4097 return exact ? lm1 : lm2;
/* HCI callback when a connection attempt completes: on success attach
 * an l2cap_conn and mark it ready, otherwise tear down any state. */
4100 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4102 struct l2cap_conn *conn;
4104 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4106 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4110 conn = l2cap_conn_add(hcon, status);
4112 l2cap_conn_ready(conn);
/* Failure: propagate the HCI status as an errno to the channels. */
4114 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback asking which HCI reason to use when disconnecting this
 * link; returns the reason cached on the l2cap_conn. */
4119 static int l2cap_disconn_ind(struct hci_conn *hcon)
4121 struct l2cap_conn *conn = hcon->l2cap_data;
4123 BT_DBG("hcon %p", hcon);
4125 if (hcon->type != ACL_LINK || !conn)
4128 return conn->disc_reason;
/* HCI callback when the link is gone: tear down the l2cap_conn and all
 * of its channels, translating the HCI reason to an errno. */
4131 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4133 BT_DBG("hcon %p reason %d", hcon, reason);
4135 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4138 l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption on a MEDIUM-security channel starts a 5s grace
 * timer, on a HIGH-security channel the channel is closed outright;
 * regaining encryption cancels the grace timer. */
4143 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4145 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4148 if (encrypt == 0x00) {
4149 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4150 l2cap_chan_clear_timer(chan);
4151 l2cap_chan_set_timer(chan, HZ * 5);
4152 } else if (chan->sec_level == BT_SECURITY_HIGH)
4153 __l2cap_chan_close(chan, ECONNREFUSED);
4155 if (chan->sec_level == BT_SECURITY_MEDIUM)
4156 l2cap_chan_clear_timer(chan);
/* HCI callback after an authentication/encryption procedure finishes.
 * For every channel on the connection: update encryption state for
 * established channels, and complete pending outgoing (BT_CONNECT) or
 * incoming (BT_CONNECT2) connection procedures based on the status. */
4160 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4162 struct l2cap_conn *conn = hcon->l2cap_data;
4163 struct l2cap_chan *chan;
4168 BT_DBG("conn %p", conn);
4170 read_lock(&conn->chan_lock);
4172 list_for_each_entry(chan, &conn->chan_l, list) {
4173 struct sock *sk = chan->sk;
/* Channel already has a connect request in flight; skip it here. */
4177 if (chan->conf_state & L2CAP_CONF_CONNECT_PEND) {
4182 if (!status && (sk->sk_state == BT_CONNECTED ||
4183 sk->sk_state == BT_CONFIG)) {
4184 l2cap_check_encryption(chan, encrypt);
/* Security now satisfied: send the deferred Connection Request. */
4189 if (sk->sk_state == BT_CONNECT) {
4191 struct l2cap_conn_req req;
4192 req.scid = cpu_to_le16(chan->scid);
4193 req.psm = chan->psm;
4195 chan->ident = l2cap_get_ident(conn);
4196 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
4198 l2cap_send_cmd(conn, chan->ident,
4199 L2CAP_CONN_REQ, sizeof(req), &req);
4201 l2cap_chan_clear_timer(chan);
4202 l2cap_chan_set_timer(chan, HZ / 10);
/* Incoming connection waiting on security: accept or block it. */
4204 } else if (sk->sk_state == BT_CONNECT2) {
4205 struct l2cap_conn_rsp rsp;
4209 sk->sk_state = BT_CONFIG;
4210 result = L2CAP_CR_SUCCESS;
4212 sk->sk_state = BT_DISCONN;
4213 l2cap_chan_set_timer(chan, HZ / 10);
4214 result = L2CAP_CR_SEC_BLOCK;
4217 rsp.scid = cpu_to_le16(chan->dcid);
4218 rsp.dcid = cpu_to_le16(chan->scid);
4219 rsp.result = cpu_to_le16(result);
4220 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4221 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4228 read_unlock(&conn->chan_lock);
/* HCI callback for incoming ACL data.  Reassembles L2CAP frames that
 * are fragmented across ACL packets: a start fragment (no ACL_CONT)
 * allocates conn->rx_skb sized from the L2CAP header, continuation
 * fragments are appended until rx_len reaches zero, then the complete
 * frame is handed to l2cap_recv_frame(). */
4233 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4235 struct l2cap_conn *conn = hcon->l2cap_data;
4238 conn = l2cap_conn_add(hcon, 0);
4243 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4245 if (!(flags & ACL_CONT)) {
4246 struct l2cap_hdr *hdr;
4247 struct l2cap_chan *chan;
/* A start fragment while a reassembly is pending: drop the old one. */
4252 BT_ERR("Unexpected start frame (len %d)", skb->len);
4253 kfree_skb(conn->rx_skb);
4254 conn->rx_skb = NULL;
4256 l2cap_conn_unreliable(conn, ECOMM);
4259 /* Start fragment always begin with Basic L2CAP header */
4260 if (skb->len < L2CAP_HDR_SIZE) {
4261 BT_ERR("Frame is too short (len %d)", skb->len);
4262 l2cap_conn_unreliable(conn, ECOMM);
4266 hdr = (struct l2cap_hdr *) skb->data;
4267 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4268 cid = __le16_to_cpu(hdr->cid);
4270 if (len == skb->len) {
4271 /* Complete frame received */
4272 l2cap_recv_frame(conn, skb);
4276 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4278 if (skb->len > len) {
4279 BT_ERR("Frame is too long (len %d, expected len %d)",
4281 l2cap_conn_unreliable(conn, ECOMM);
/* Reject early if the full frame would exceed the channel's MTU. */
4285 chan = l2cap_get_chan_by_scid(conn, cid);
4287 if (chan && chan->sk) {
4288 struct sock *sk = chan->sk;
4290 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4291 BT_ERR("Frame exceeding recv MTU (len %d, "
4295 l2cap_conn_unreliable(conn, ECOMM);
4301 /* Allocate skb for the complete frame (with header) */
4302 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4306 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4308 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4310 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4312 if (!conn->rx_len) {
4313 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4314 l2cap_conn_unreliable(conn, ECOMM);
4318 if (skb->len > conn->rx_len) {
4319 BT_ERR("Fragment is too long (len %d, expected %d)",
4320 skb->len, conn->rx_len);
4321 kfree_skb(conn->rx_skb);
4322 conn->rx_skb = NULL;
4324 l2cap_conn_unreliable(conn, ECOMM);
4328 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4330 conn->rx_len -= skb->len;
4332 if (!conn->rx_len) {
4333 /* Complete frame received */
4334 l2cap_recv_frame(conn, conn->rx_skb);
4335 conn->rx_skb = NULL;
/* debugfs seq_file show: one line per registered channel with its
 * addresses, state, PSM, CIDs, MTUs, security level and mode. */
4344 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4346 struct l2cap_chan *c;
4348 read_lock_bh(&chan_list_lock);
4350 list_for_each_entry(c, &chan_list, global_l) {
4351 struct sock *sk = c->sk;
4353 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4354 batostr(&bt_sk(sk)->src),
4355 batostr(&bt_sk(sk)->dst),
4356 sk->sk_state, __le16_to_cpu(c->psm),
4357 c->scid, c->dcid, c->imtu, c->omtu,
4358 c->sec_level, c->mode);
4361 read_unlock_bh(&chan_list_lock);
/* debugfs open: standard single_open wrapper around the show routine. */
4366 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4368 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the /sys/kernel/debug/bluetooth/l2cap entry. */
4371 static const struct file_operations l2cap_debugfs_fops = {
4372 .open = l2cap_debugfs_open,
4374 .llseek = seq_lseek,
4375 .release = single_release,
/* Handle of the debugfs file, kept for removal in l2cap_exit(). */
4378 static struct dentry *l2cap_debugfs;
/* Registration descriptor hooking L2CAP into the HCI core: connection
 * lifecycle, security and ACL data callbacks. */
4380 static struct hci_proto l2cap_hci_proto = {
4382 .id = HCI_PROTO_L2CAP,
4383 .connect_ind = l2cap_connect_ind,
4384 .connect_cfm = l2cap_connect_cfm,
4385 .disconn_ind = l2cap_disconn_ind,
4386 .disconn_cfm = l2cap_disconn_cfm,
4387 .security_cfm = l2cap_security_cfm,
4388 .recv_acldata = l2cap_recv_acldata
/* Module init: register sockets, create the local-busy workqueue,
 * register with the HCI core and expose the debugfs entry.  Earlier
 * steps are unwound on failure of a later one. */
4391 int __init l2cap_init(void)
4395 err = l2cap_init_sockets();
4399 _busy_wq = create_singlethread_workqueue("l2cap");
4405 err = hci_register_proto(&l2cap_hci_proto);
4407 BT_ERR("L2CAP protocol registration failed");
4408 bt_sock_unregister(BTPROTO_L2CAP);
/* debugfs failure is non-fatal; only log it. */
4413 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4414 bt_debugfs, NULL, &l2cap_debugfs_fops);
4416 BT_ERR("Failed to create L2CAP debug file");
4422 destroy_workqueue(_busy_wq);
4423 l2cap_cleanup_sockets();
/* Module exit: undo l2cap_init() in reverse order — debugfs file,
 * workqueue (flushed first so no busy work is in flight), HCI proto,
 * then sockets. */
4427 void l2cap_exit(void)
4429 debugfs_remove(l2cap_debugfs);
4431 flush_workqueue(_busy_wq);
4432 destroy_workqueue(_busy_wq);
4434 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4435 BT_ERR("L2CAP protocol unregistration failed");
4437 l2cap_cleanup_sockets();
/* Module parameter to globally disable ERTM (fall back to basic mode). */
4440 module_param(disable_ertm, bool, 0644);
4441 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");