2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
/* File-scope state and forward declarations for the L2CAP core.
 * NOTE(review): this extraction is truncated — lines are missing throughout
 * (closing braces, returns, etc.); code below is kept verbatim. */
/* Locally supported feature mask advertised in information responses. */
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel bitmap; 0x02 presumably marks the signalling channel — TODO confirm. */
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
/* Workqueue used by the ERTM local-busy handling (see l2cap_busy_work). */
63 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, guarded by its rwlock. */
65 struct bt_sock_list l2cap_sk_list = {
66 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for helpers defined later in this file. */
69 static void l2cap_busy_work(struct work_struct *work);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
74 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
76 /* ---- L2CAP channels ---- */
/* Linear scan of a connection's channel list for the channel whose
 * destination CID (dcid) matches. Lockless: callers appear to hold
 * the list lock — TODO confirm. NOTE(review): body truncated here. */
77 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
80 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
81 if (l2cap_pi(s)->dcid == cid)
/* Same scan as above but matching on the source CID (scid).
 * NOTE(review): body truncated here. */
87 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
90 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
91 if (l2cap_pi(s)->scid == cid)
97 /* Find channel with given SCID.
98 * Returns locked socket */
/* Locking wrapper around __l2cap_get_chan_by_scid(): takes the list read
 * lock for the lookup (the bh_lock_sock on a hit is presumably in the
 * truncated lines — TODO confirm). */
99 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
103 s = __l2cap_get_chan_by_scid(l, cid);
106 read_unlock(&l->lock);
/* Scan the channel list for the channel that owns a pending signalling
 * command identifier. NOTE(review): body truncated here. */
110 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
113 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
114 if (l2cap_pi(s)->ident == ident)
/* Locking wrapper around __l2cap_get_chan_by_ident(); mirrors
 * l2cap_get_chan_by_scid() above. */
120 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
124 s = __l2cap_get_chan_by_ident(l, ident);
127 read_unlock(&l->lock);
/* Allocate a free dynamic source CID by scanning [DYN_START, DYN_END)
 * for one not already in use on this connection. Returns 0 on exhaustion,
 * presumably — the return lines are truncated; TODO confirm. */
131 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
133 u16 cid = L2CAP_CID_DYN_START;
135 for (; cid < L2CAP_CID_DYN_END; cid++) {
136 if (!__l2cap_get_chan_by_scid(l, cid))
/* Insert sk at the head of the doubly linked channel list (prev_c/next_c
 * embedded in l2cap_pinfo). Caller presumably holds the list write lock
 * — TODO confirm. */
143 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
148 l2cap_pi(l->head)->prev_c = sk;
150 l2cap_pi(sk)->next_c = l->head;
151 l2cap_pi(sk)->prev_c = NULL;
/* Remove sk from the channel list under the list write lock (bh-safe),
 * stitching its neighbours together. Head-update lines are truncated. */
155 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
157 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
159 write_lock_bh(&l->lock);
164 l2cap_pi(next)->prev_c = prev;
166 l2cap_pi(prev)->next_c = next;
167 write_unlock_bh(&l->lock);
/* Attach a socket to a connection: assign CIDs/MTU by socket type and
 * link it into the connection's channel list.
 * - SEQPACKET/STREAM on LE: fixed LE data CID both ways, LE default MTU.
 * - SEQPACKET/STREAM on ACL: dynamic SCID allocated here.
 * - DGRAM: connectionless CID; otherwise (raw) the signalling CID.
 * disc_reason 0x13 = "remote user terminated" default — TODO confirm. */
172 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
174 struct l2cap_chan_list *l = &conn->chan_list;
176 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
177 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
179 conn->disc_reason = 0x13;
181 l2cap_pi(sk)->conn = conn;
183 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
184 if (conn->hcon->type == LE_LINK) {
186 l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
187 l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
188 l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
190 /* Alloc CID for connection-oriented socket */
191 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
192 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
194 } else if (sk->sk_type == SOCK_DGRAM) {
195 /* Connectionless socket */
196 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
197 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
198 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
200 /* Raw socket can send/recv signalling messages only */
201 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
202 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
203 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
206 __l2cap_chan_link(l, sk);
/* Tear down a channel: unlink from the connection, mark the socket
 * closed/zapped, notify parent or state-change waiters, purge queues,
 * and for ERTM stop all timers and free the SREJ bookkeeping. */
210 * Must be called on the locked socket. */
211 void l2cap_chan_del(struct sock *sk, int err)
213 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
214 struct sock *parent = bt_sk(sk)->parent;
216 l2cap_sock_clear_timer(sk);
218 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
221 /* Unlink from channel list */
222 l2cap_chan_unlink(&conn->chan_list, sk);
223 l2cap_pi(sk)->conn = NULL;
/* Drop the hci_conn reference taken when the channel was added. */
224 hci_conn_put(conn->hcon);
227 sk->sk_state = BT_CLOSED;
228 sock_set_flag(sk, SOCK_ZAPPED);
/* Pending accept: detach from the parent and wake it up. */
234 bt_accept_unlink(sk);
235 parent->sk_data_ready(parent, 0);
237 sk->sk_state_change(sk);
239 skb_queue_purge(TX_QUEUE(sk));
241 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
242 struct srej_list *l, *tmp;
/* Stop all ERTM timers before freeing per-channel ERTM state. */
244 del_timer(&l2cap_pi(sk)->retrans_timer);
245 del_timer(&l2cap_pi(sk)->monitor_timer);
246 del_timer(&l2cap_pi(sk)->ack_timer);
248 skb_queue_purge(SREJ_QUEUE(sk));
249 skb_queue_purge(BUSY_QUEUE(sk));
/* Free queued SREJ entries (kfree presumably in truncated lines). */
251 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
/* Map socket type, PSM and requested security level to an HCI
 * authentication requirement:
 * - raw sockets: dedicated bonding (used for pairing utilities);
 * - PSM 0x0001 (SDP): no bonding, and LOW is demoted to the SDP level;
 * - everything else: general bonding. */
258 static inline u8 l2cap_get_auth_type(struct sock *sk)
260 if (sk->sk_type == SOCK_RAW) {
261 switch (l2cap_pi(sk)->sec_level) {
262 case BT_SECURITY_HIGH:
263 return HCI_AT_DEDICATED_BONDING_MITM;
264 case BT_SECURITY_MEDIUM:
265 return HCI_AT_DEDICATED_BONDING;
267 return HCI_AT_NO_BONDING;
269 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
270 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
271 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
273 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
274 return HCI_AT_NO_BONDING_MITM;
276 return HCI_AT_NO_BONDING;
278 switch (l2cap_pi(sk)->sec_level) {
279 case BT_SECURITY_HIGH:
280 return HCI_AT_GENERAL_BONDING_MITM;
281 case BT_SECURITY_MEDIUM:
282 return HCI_AT_GENERAL_BONDING;
284 return HCI_AT_NO_BONDING;
289 /* Service level security */
/* Ask the HCI layer to enforce this channel's security level on the
 * underlying link; nonzero presumably means "already satisfied" —
 * TODO confirm hci_conn_security() semantics. */
290 static inline int l2cap_check_security(struct sock *sk)
292 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
295 auth_type = l2cap_get_auth_type(sk);
297 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel-reserved 1..128 range under conn->lock. */
301 u8 l2cap_get_ident(struct l2cap_conn *conn)
305 /* Get next available identificator.
306 * 1 - 128 are used by kernel.
307 * 129 - 199 are reserved.
308 * 200 - 254 are used by utilities like l2ping, etc.
311 spin_lock_bh(&conn->lock);
/* Wrap back to 1 on overflow (reset presumably in truncated line). */
313 if (++conn->tx_ident > 128)
318 spin_unlock_bh(&conn->lock);
/* Build a signalling command frame and transmit it on the ACL link,
 * requesting no-flush queuing when the controller supports it. */
323 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
325 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
328 BT_DBG("code 0x%2.2x", code);
333 if (lmp_no_flush_capable(conn->hcon->hdev))
334 flags = ACL_START_NO_FLUSH;
338 hci_send_acl(conn->hcon, skb, flags);
/* Build and send an ERTM supervisory (S-)frame carrying the given
 * control field. Consumes the pending F-bit/P-bit flags from
 * conn_state, appends a CRC16 FCS when negotiated, and sends on the
 * channel's DCID. Only valid while connected. */
341 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
344 struct l2cap_hdr *lh;
345 struct l2cap_conn *conn = pi->conn;
346 struct sock *sk = (struct sock *)pi;
347 int count, hlen = L2CAP_HDR_SIZE + 2;
350 if (sk->sk_state != BT_CONNECTED)
/* hlen += 2 for the FCS presumably in the truncated line below. */
353 if (pi->fcs == L2CAP_FCS_CRC16)
356 BT_DBG("pi %p, control 0x%2.2x", pi, control);
358 count = min_t(unsigned int, conn->mtu, hlen);
359 control |= L2CAP_CTRL_FRAME_TYPE;
/* A queued final bit is sent once, then cleared. */
361 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
362 control |= L2CAP_CTRL_FINAL;
363 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Likewise the poll bit. */
366 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
367 control |= L2CAP_CTRL_POLL;
368 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
371 skb = bt_skb_alloc(count, GFP_ATOMIC);
375 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
376 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
377 lh->cid = cpu_to_le16(pi->dcid);
378 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers the whole frame except the FCS field itself. */
380 if (pi->fcs == L2CAP_FCS_CRC16) {
381 u16 fcs = crc16(0, (u8 *)lh, count - 2);
382 put_unaligned_le16(fcs, skb_put(skb, 2));
385 if (lmp_no_flush_capable(conn->hcon->hdev))
386 flags = ACL_START_NO_FLUSH;
390 hci_send_acl(pi->conn->hcon, skb, flags);
/* Send RNR if we are locally busy (recording that an RNR went out),
 * otherwise RR; always stamped with the current buffer_seq as ReqSeq. */
393 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
395 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
396 control |= L2CAP_SUPER_RCV_NOT_READY;
397 pi->conn_state |= L2CAP_CONN_RNR_SENT;
399 control |= L2CAP_SUPER_RCV_READY;
401 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
403 l2cap_send_sframe(pi, control);
/* True when no connect request is outstanding for this channel. */
406 static inline int __l2cap_no_conn_pending(struct sock *sk)
408 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment. If the peer's feature mask is already
 * known (or being fetched), send a Connection Request once security is
 * satisfied; otherwise first issue an Information Request for the
 * feature mask and arm the info timer. */
411 static void l2cap_do_start(struct sock *sk)
413 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
415 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature-mask exchange still in flight: wait for it to finish. */
416 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
419 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
420 struct l2cap_conn_req req;
421 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
422 req.psm = l2cap_pi(sk)->psm;
424 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
425 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
427 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
428 L2CAP_CONN_REQ, sizeof(req), &req);
431 struct l2cap_info_req req;
432 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
434 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
435 conn->info_ident = l2cap_get_ident(conn);
/* Bound the wait for the info response. */
437 mod_timer(&conn->info_timer, jiffies +
438 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
440 l2cap_send_cmd(conn, conn->info_ident,
441 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether the given channel mode is supported both locally and by
 * the remote feature mask. The condition guarding the local-mask |=
 * (presumably the enable_ertm module parameter) is truncated. */
445 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
447 u32 local_feat_mask = l2cap_feat_mask;
449 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
452 case L2CAP_MODE_ERTM:
453 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
454 case L2CAP_MODE_STREAMING:
455 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Initiate disconnection: drop queued TX data, stop ERTM timers, send a
 * Disconnect Request with a fresh ident and move to BT_DISCONN.
 * (sk_err assignment presumably in the truncated tail — TODO confirm.) */
461 void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
463 struct l2cap_disconn_req req;
468 skb_queue_purge(TX_QUEUE(sk));
470 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
471 del_timer(&l2cap_pi(sk)->retrans_timer);
472 del_timer(&l2cap_pi(sk)->monitor_timer);
473 del_timer(&l2cap_pi(sk)->ack_timer);
476 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
477 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
478 l2cap_send_cmd(conn, l2cap_get_ident(conn),
479 L2CAP_DISCONN_REQ, sizeof(req), &req);
481 sk->sk_state = BT_DISCONN;
485 /* ---- L2CAP connections ---- */
/* Walk all channels on a connection once the feature exchange finished:
 * - BT_CONNECT channels: send Connection Request if security allows;
 *   channels whose mode the link cannot support are collected on a
 *   local del-list and closed after the list lock is dropped.
 * - BT_CONNECT2 channels: answer the pending Connection Request
 *   (success / pending-authorization / pending-authentication) and,
 *   on success, start configuration. */
486 static void l2cap_conn_start(struct l2cap_conn *conn)
488 struct l2cap_chan_list *l = &conn->chan_list;
489 struct sock_del_list del, *tmp1, *tmp2;
492 BT_DBG("conn %p", conn);
494 INIT_LIST_HEAD(&del.list);
498 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented sockets participate in connect/config. */
501 if (sk->sk_type != SOCK_SEQPACKET &&
502 sk->sk_type != SOCK_STREAM) {
507 if (sk->sk_state == BT_CONNECT) {
508 struct l2cap_conn_req req;
510 if (!l2cap_check_security(sk) ||
511 !__l2cap_no_conn_pending(sk)) {
/* Mode unsupported on this link and socket is mode-pinned:
 * defer closing until after we drop the read lock. */
516 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
518 && l2cap_pi(sk)->conf_state &
519 L2CAP_CONF_STATE2_DEVICE {
520 tmp1 = kzalloc(sizeof(struct sock_del_list),
523 list_add_tail(&tmp1->list, &del.list);
528 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
529 req.psm = l2cap_pi(sk)->psm;
531 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
532 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
534 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
535 L2CAP_CONN_REQ, sizeof(req), &req);
537 } else if (sk->sk_state == BT_CONNECT2) {
538 struct l2cap_conn_rsp rsp;
/* Note the swap: our scid is the peer's dcid and vice versa. */
540 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
541 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
543 if (l2cap_check_security(sk)) {
544 if (bt_sk(sk)->defer_setup) {
545 struct sock *parent = bt_sk(sk)->parent;
546 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
547 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
548 parent->sk_data_ready(parent, 0);
551 sk->sk_state = BT_CONFIG;
552 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
553 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
556 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
557 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
560 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
561 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Skip config if one was already sent or we didn't succeed. */
563 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
564 rsp.result != L2CAP_CR_SUCCESS) {
569 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
570 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
571 l2cap_build_conf_req(sk, buf), buf);
572 l2cap_pi(sk)->num_conf_req++;
578 read_unlock(&l->lock);
/* Close the deferred sockets now that the list lock is released. */
580 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
581 bh_lock_sock(tmp1->sk);
582 __l2cap_sock_close(tmp1->sk, ECONNRESET);
583 bh_unlock_sock(tmp1->sk);
584 list_del(&tmp1->list);
589 /* Find socket with cid and source bdaddr.
590 * Returns closest match, locked.
/* Prefers an exact source-address match; falls back to a socket bound
 * to BDADDR_ANY (tracked in sk1). Return presumably mirrors
 * l2cap_get_sock_by_psm() below — TODO confirm. */
592 static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
594 struct sock *s, *sk = NULL, *sk1 = NULL;
595 struct hlist_node *node;
597 read_lock(&l2cap_sk_list.lock);
599 sk_for_each(sk, node, &l2cap_sk_list.head) {
600 if (state && sk->sk_state != state)
603 if (l2cap_pi(sk)->scid == cid) {
/* Exact source match wins immediately. */
605 if (!bacmp(&bt_sk(sk)->src, src))
/* Wildcard-bound socket kept as fallback. */
609 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
616 read_unlock(&l2cap_sk_list.lock);
/* Handle an incoming LE connection: find the listener on the LE data
 * CID, allocate and initialise a child socket, enqueue it for accept,
 * add it to the connection and mark it connected. */
621 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
623 struct l2cap_chan_list *list = &conn->chan_list;
624 struct sock *parent, *uninitialized_var(sk);
628 /* Check if we have socket listening on cid */
629 parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
634 /* Check for backlog size */
635 if (sk_acceptq_is_full(parent)) {
636 BT_DBG("backlog full %d", parent->sk_ack_backlog)
640 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
644 write_lock_bh(&list->lock);
/* Hold the ACL while the child channel exists (released in chan_del). */
646 hci_conn_hold(conn->hcon);
648 l2cap_sock_init(sk, parent);
649 bacpy(&bt_sk(sk)->src, conn->src);
650 bacpy(&bt_sk(sk)->dst, conn->dst);
652 bt_accept_enqueue(parent, sk);
654 __l2cap_chan_add(conn, sk);
656 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
658 sk->sk_state = BT_CONNECTED;
659 parent->sk_data_ready(parent, 0);
661 write_unlock_bh(&list->lock);
664 bh_unlock_sock(parent);
/* Link is up: for incoming LE links spawn the child socket, then walk
 * all channels — LE and connectionless/raw channels become connected
 * immediately, while BT_CONNECT channels proceed via l2cap_do_start()
 * (call presumably in the truncated line after the else-if). */
667 static void l2cap_conn_ready(struct l2cap_conn *conn)
669 struct l2cap_chan_list *l = &conn->chan_list;
672 BT_DBG("conn %p", conn);
674 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
675 l2cap_le_conn_ready(conn);
679 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
682 if (conn->hcon->type == LE_LINK) {
683 l2cap_sock_clear_timer(sk);
684 sk->sk_state = BT_CONNECTED;
685 sk->sk_state_change(sk);
688 if (sk->sk_type != SOCK_SEQPACKET &&
689 sk->sk_type != SOCK_STREAM) {
690 l2cap_sock_clear_timer(sk);
691 sk->sk_state = BT_CONNECTED;
692 sk->sk_state_change(sk);
693 } else if (sk->sk_state == BT_CONNECT)
699 read_unlock(&l->lock);
702 /* Notify sockets that we cannot guaranty reliability anymore */
/* Report err on every channel that asked for reliable delivery
 * (sk_err assignment presumably in the truncated line). */
703 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
705 struct l2cap_chan_list *l = &conn->chan_list;
708 BT_DBG("conn %p", conn);
712 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
713 if (l2cap_pi(sk)->force_reliable)
717 read_unlock(&l->lock);
/* Info-request timer expiry: give up on the feature-mask exchange,
 * mark it done and proceed with pending channel setup anyway. */
720 static void l2cap_info_timeout(unsigned long arg)
722 struct l2cap_conn *conn = (void *) arg;
724 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
725 conn->info_ident = 0;
727 l2cap_conn_start(conn);
/* Get-or-create the L2CAP connection object attached to an hci_conn.
 * Picks the LE or ACL MTU as appropriate and, for non-LE links, arms
 * the info timer used by the feature-mask exchange. */
730 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
732 struct l2cap_conn *conn = hcon->l2cap_data;
737 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
741 hcon->l2cap_data = conn;
744 BT_DBG("hcon %p conn %p", hcon, conn);
746 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
747 conn->mtu = hcon->hdev->le_mtu;
749 conn->mtu = hcon->hdev->acl_mtu;
751 conn->src = &hcon->hdev->bdaddr;
752 conn->dst = &hcon->dst;
756 spin_lock_init(&conn->lock);
757 rwlock_init(&conn->chan_list.lock);
759 if (hcon->type != LE_LINK)
760 setup_timer(&conn->info_timer, l2cap_info_timeout,
761 (unsigned long) conn);
/* Default disconnect reason: remote user terminated — TODO confirm. */
763 conn->disc_reason = 0x13;
/* Destroy the connection object: free any partially reassembled frame,
 * delete every remaining channel with err, stop the info timer if it
 * was armed, and detach from the hci_conn (kfree presumably in the
 * truncated tail). */
768 static void l2cap_conn_del(struct hci_conn *hcon, int err)
770 struct l2cap_conn *conn = hcon->l2cap_data;
776 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
778 kfree_skb(conn->rx_skb);
781 while ((sk = conn->chan_list.head)) {
783 l2cap_chan_del(sk, err);
788 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
789 del_timer_sync(&conn->info_timer);
791 hcon->l2cap_data = NULL;
/* Locked wrapper for __l2cap_chan_add(): serialises channel insertion
 * against list walkers via the chan_list write lock (bh-safe). */
795 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
797 struct l2cap_chan_list *l = &conn->chan_list;
798 write_lock_bh(&l->lock);
799 __l2cap_chan_add(conn, sk);
800 write_unlock_bh(&l->lock);
803 /* ---- Socket interface ---- */
805 /* Find socket with psm and source bdaddr.
806 * Returns closest match.
/* Exact source-address match preferred; a BDADDR_ANY-bound socket is
 * kept as fallback in sk1. `node` non-NULL means the loop broke early
 * on an exact match. */
808 static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
810 struct sock *sk = NULL, *sk1 = NULL;
811 struct hlist_node *node;
813 read_lock(&l2cap_sk_list.lock);
815 sk_for_each(sk, node, &l2cap_sk_list.head) {
816 if (state && sk->sk_state != state)
819 if (l2cap_pi(sk)->psm == psm) {
821 if (!bacmp(&bt_sk(sk)->src, src))
825 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
830 read_unlock(&l2cap_sk_list.lock);
832 return node ? sk : sk1;
/* Outgoing connect: route to a local adapter, create the LE or ACL
 * baseband link with the required auth type, attach the socket to the
 * resulting l2cap_conn, and either wait for the link (BT_CONNECT) or —
 * if the link is already up — proceed immediately. */
835 int l2cap_do_connect(struct sock *sk)
837 bdaddr_t *src = &bt_sk(sk)->src;
838 bdaddr_t *dst = &bt_sk(sk)->dst;
839 struct l2cap_conn *conn;
840 struct hci_conn *hcon;
841 struct hci_dev *hdev;
845 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
/* No adapter can reach dst. */
848 hdev = hci_get_route(dst, src);
850 return -EHOSTUNREACH;
852 hci_dev_lock_bh(hdev);
854 auth_type = l2cap_get_auth_type(sk);
/* LE data channel selects an LE link; everything else uses ACL. */
856 if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
857 hcon = hci_connect(hdev, LE_LINK, dst,
858 l2cap_pi(sk)->sec_level, auth_type);
860 hcon = hci_connect(hdev, ACL_LINK, dst,
861 l2cap_pi(sk)->sec_level, auth_type);
868 conn = l2cap_conn_add(hcon, 0);
875 /* Update source addr of the socket */
876 bacpy(src, conn->src);
878 l2cap_chan_add(conn, sk);
880 sk->sk_state = BT_CONNECT;
881 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
883 if (hcon->state == BT_CONNECTED) {
/* Raw/dgram sockets need no L2CAP-level handshake. */
884 if (sk->sk_type != SOCK_SEQPACKET &&
885 sk->sk_type != SOCK_STREAM) {
886 l2cap_sock_clear_timer(sk);
887 if (l2cap_check_security(sk))
888 sk->sk_state = BT_CONNECTED;
896 hci_dev_unlock_bh(hdev);
/* Sleep (interruptibly) until every outstanding ERTM frame has been
 * acknowledged, the connection goes away, a signal arrives, the timeout
 * elapses, or a socket error is raised. */
901 int __l2cap_wait_ack(struct sock *sk)
903 DECLARE_WAITQUEUE(wait, current);
907 add_wait_queue(sk_sleep(sk), &wait);
908 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
909 set_current_state(TASK_INTERRUPTIBLE);
914 if (signal_pending(current)) {
915 err = sock_intr_errno(timeo);
920 timeo = schedule_timeout(timeo);
923 err = sock_error(sk);
927 set_current_state(TASK_RUNNING);
928 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: the peer stopped responding to polls. Give up and
 * disconnect once retry_count reaches the remote's max-transmit limit;
 * otherwise poll again with another RR/RNR carrying the P bit. */
932 static void l2cap_monitor_timeout(unsigned long arg)
934 struct sock *sk = (void *) arg;
939 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
940 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
945 l2cap_pi(sk)->retry_count++;
946 __mod_monitor_timer();
948 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* ERTM retransmission timer: an I-frame went unacknowledged. Start the
 * monitor/poll cycle: reset retry_count, enter WAIT_F and poll the peer. */
952 static void l2cap_retrans_timeout(unsigned long arg)
954 struct sock *sk = (void *) arg;
959 l2cap_pi(sk)->retry_count = 1;
960 __mod_monitor_timer();
962 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
964 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Pop acknowledged I-frames off the TX queue up to expected_ack_seq
 * (stopping at the matching tx_seq), decrementing unacked_frames; stop
 * the retransmission timer once everything is acked. */
968 static void l2cap_drop_acked_frames(struct sock *sk)
972 while ((skb = skb_peek(TX_QUEUE(sk))) &&
973 l2cap_pi(sk)->unacked_frames) {
974 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
977 skb = skb_dequeue(TX_QUEUE(sk));
980 l2cap_pi(sk)->unacked_frames--;
983 if (!l2cap_pi(sk)->unacked_frames)
984 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand one frame to the HCI layer on this channel's ACL link, using
 * no-flush queuing unless the socket opted into flushable frames. */
987 void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
989 struct l2cap_pinfo *pi = l2cap_pi(sk);
990 struct hci_conn *hcon = pi->conn->hcon;
993 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
995 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
996 flags = ACL_START_NO_FLUSH;
1000 hci_send_acl(hcon, skb, flags);
/* Streaming mode transmit: drain the TX queue, stamping each frame's
 * control field with the next TxSeq (mod 64) and recomputing the CRC16
 * FCS in place when negotiated. No retransmission state is kept. */
1003 void l2cap_streaming_send(struct sock *sk)
1005 struct sk_buff *skb;
1006 struct l2cap_pinfo *pi = l2cap_pi(sk);
1009 while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
1010 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1011 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1012 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1014 if (pi->fcs == L2CAP_FCS_CRC16) {
1015 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1016 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1019 l2cap_do_send(sk, skb);
1021 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
/* Retransmit the single I-frame with the given tx_seq (SREJ recovery):
 * locate it in the TX queue, enforce the remote max-transmit limit,
 * clone it, refresh F bit / ReqSeq / TxSeq and FCS, and resend. */
1025 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1027 struct l2cap_pinfo *pi = l2cap_pi(sk);
1028 struct sk_buff *skb, *tx_skb;
1031 skb = skb_peek(TX_QUEUE(sk));
1036 if (bt_cb(skb)->tx_seq == tx_seq)
1039 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1042 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
/* Too many attempts for this frame: abort the channel. */
1044 if (pi->remote_max_tx &&
1045 bt_cb(skb)->retries == pi->remote_max_tx) {
1046 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
/* Clone so the original stays queued for possible further resends. */
1050 tx_skb = skb_clone(skb, GFP_ATOMIC);
1051 bt_cb(skb)->retries++;
1052 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1054 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1055 control |= L2CAP_CTRL_FINAL;
1056 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1059 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1060 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1062 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1064 if (pi->fcs == L2CAP_FCS_CRC16) {
1065 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1066 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1069 l2cap_do_send(sk, tx_skb);
/* ERTM transmit path: send frames from sk_send_head while the TX window
 * has room, cloning each skb, stamping control/FCS, arming the
 * retransmission timer and advancing sequence/window bookkeeping. */
1072 int l2cap_ertm_send(struct sock *sk)
1074 struct sk_buff *skb, *tx_skb;
1075 struct l2cap_pinfo *pi = l2cap_pi(sk);
1079 if (sk->sk_state != BT_CONNECTED)
1082 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1084 if (pi->remote_max_tx &&
1085 bt_cb(skb)->retries == pi->remote_max_tx) {
1086 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1090 tx_skb = skb_clone(skb, GFP_ATOMIC);
1092 bt_cb(skb)->retries++;
1094 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
/* Keep only the SAR bits; seq/req fields are recomputed below. */
1095 control &= L2CAP_CTRL_SAR;
1097 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1098 control |= L2CAP_CTRL_FINAL;
1099 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1101 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1102 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1103 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS computed/written via skb->data rather than
 * tx_skb->data — works only because skb_clone shares the data
 * buffer; upstream later switched both to tx_skb. Verify. */
1106 if (pi->fcs == L2CAP_FCS_CRC16) {
1107 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1108 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1111 l2cap_do_send(sk, tx_skb);
1113 __mod_retrans_timer();
1115 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1116 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1118 pi->unacked_frames++;
/* Advance sk_send_head; NULL once the queue tail was sent. */
1121 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1122 sk->sk_send_head = NULL;
1124 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* REJ recovery: rewind sk_send_head to the start of the TX queue and
 * next_tx_seq to the last acknowledged sequence, then resend via
 * l2cap_ertm_send(). */
1132 static int l2cap_retransmit_frames(struct sock *sk)
1134 struct l2cap_pinfo *pi = l2cap_pi(sk);
1137 if (!skb_queue_empty(TX_QUEUE(sk)))
1138 sk->sk_send_head = TX_QUEUE(sk)->next;
1140 pi->next_tx_seq = pi->expected_ack_seq;
1141 ret = l2cap_ertm_send(sk);
/* Acknowledge received frames: send RNR if locally busy; otherwise try
 * to piggyback the ack on pending I-frames (l2cap_ertm_send > 0 means
 * something went out), falling back to an explicit RR. */
1145 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1147 struct sock *sk = (struct sock *)pi;
1150 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1152 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1153 control |= L2CAP_SUPER_RCV_NOT_READY;
1154 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1155 l2cap_send_sframe(pi, control);
1159 if (l2cap_ertm_send(sk) > 0)
1162 control |= L2CAP_SUPER_RCV_READY;
1163 l2cap_send_sframe(pi, control);
/* Send a final SREJ S-frame whose ReqSeq is the tx_seq of the last
 * entry on the channel's SREJ list. */
1166 static void l2cap_send_srejtail(struct sock *sk)
1168 struct srej_list *tail;
1171 control = L2CAP_SUPER_SELECT_REJECT;
1172 control |= L2CAP_CTRL_FINAL;
1174 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1175 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1177 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy user data from msg into skb: `count` bytes into the head, then
 * the remaining `len` into MTU-sized fragment skbs chained on
 * frag_list. Error returns (-EFAULT/alloc failure) presumably in the
 * truncated lines — TODO confirm. */
1180 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1182 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1183 struct sk_buff **frag;
1186 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1192 /* Continuation fragments (no L2CAP header) */
1193 frag = &skb_shinfo(skb)->frag_list;
1195 count = min_t(unsigned int, conn->mtu, len);
1197 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1200 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1206 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header plus 2-byte PSM,
 * then the user payload via l2cap_skbuff_fromiovec(). Returns the skb
 * or an ERR_PTR. */
1212 struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1214 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1215 struct sk_buff *skb;
1216 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1217 struct l2cap_hdr *lh;
1219 BT_DBG("sk %p len %d", sk, (int)len);
/* First skb carries at most one MTU including the header. */
1221 count = min_t(unsigned int, (conn->mtu - hlen), len);
1222 skb = bt_skb_send_alloc(sk, count + hlen,
1223 msg->msg_flags & MSG_DONTWAIT, &err);
1225 return ERR_PTR(err);
1227 /* Create L2CAP header */
1228 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1229 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1230 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1231 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1233 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
/* Copy failure: free the partial skb (kfree_skb in truncated line). */
1234 if (unlikely(err < 0)) {
1236 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: bare L2CAP header followed by the
 * user payload. Returns the skb or an ERR_PTR. */
1241 struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1243 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1244 struct sk_buff *skb;
1245 int err, count, hlen = L2CAP_HDR_SIZE;
1246 struct l2cap_hdr *lh;
1248 BT_DBG("sk %p len %d", sk, (int)len);
1250 count = min_t(unsigned int, (conn->mtu - hlen), len);
1251 skb = bt_skb_send_alloc(sk, count + hlen,
1252 msg->msg_flags & MSG_DONTWAIT, &err);
1254 return ERR_PTR(err);
1256 /* Create L2CAP header */
1257 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1258 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1259 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1261 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1262 if (unlikely(err < 0)) {
1264 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: header, 16-bit control field,
 * optional 16-bit SDU length (for SAR start frames, when sdulen is
 * nonzero — hlen adjustment presumably in the truncated lines), user
 * payload, and a zero FCS placeholder filled in at transmit time. */
1269 struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1271 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1272 struct sk_buff *skb;
1273 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1274 struct l2cap_hdr *lh;
1276 BT_DBG("sk %p len %d", sk, (int)len);
1279 return ERR_PTR(-ENOTCONN);
/* hlen += 2 for FCS presumably in the truncated line below. */
1284 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1287 count = min_t(unsigned int, (conn->mtu - hlen), len);
1288 skb = bt_skb_send_alloc(sk, count + hlen,
1289 msg->msg_flags & MSG_DONTWAIT, &err);
1291 return ERR_PTR(err);
1293 /* Create L2CAP header */
1294 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1295 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1296 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1297 put_unaligned_le16(control, skb_put(skb, 2));
1299 put_unaligned_le16(sdulen, skb_put(skb, 2));
1301 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1302 if (unlikely(err < 0)) {
1304 return ERR_PTR(err);
/* FCS placeholder; the real value is computed when the frame is sent. */
1307 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1308 put_unaligned_le16(0, skb_put(skb, 2));
1310 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START frame
 * (carrying the total SDU length) followed by CONTINUE frames and a
 * final END frame, building them on a local queue and splicing onto the
 * TX queue only when segmentation fully succeeds. */
1314 int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1316 struct l2cap_pinfo *pi = l2cap_pi(sk);
1317 struct sk_buff *skb;
1318 struct sk_buff_head sar_queue;
1322 skb_queue_head_init(&sar_queue);
1323 control = L2CAP_SDU_START;
/* START frame: payload capped at remote MPS, sdulen = total length. */
1324 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1326 return PTR_ERR(skb);
1328 __skb_queue_tail(&sar_queue, skb);
1329 len -= pi->remote_mps;
1330 size += pi->remote_mps;
1335 if (len > pi->remote_mps) {
1336 control = L2CAP_SDU_CONTINUE;
1337 buflen = pi->remote_mps;
1339 control = L2CAP_SDU_END;
1343 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* Mid-SDU failure: discard everything built so far. */
1345 skb_queue_purge(&sar_queue);
1346 return PTR_ERR(skb);
1349 __skb_queue_tail(&sar_queue, skb);
1353 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1354 if (sk->sk_send_head == NULL)
1355 sk->sk_send_head = sar_queue.next;
/* Configuration complete: clear config state and timers, then wake the
 * connecting socket (outgoing) or the listening parent (incoming). */
1360 static void l2cap_chan_ready(struct sock *sk)
1362 struct sock *parent = bt_sk(sk)->parent;
1364 BT_DBG("sk %p, parent %p", sk, parent);
1366 l2cap_pi(sk)->conf_state = 0;
1367 l2cap_sock_clear_timer(sk);
1370 /* Outgoing channel.
1371 * Wake up socket sleeping on connect.
1373 sk->sk_state = BT_CONNECTED;
1374 sk->sk_state_change(sk);
1376 /* Incoming channel.
1377 * Wake up socket sleeping on accept.
1379 parent->sk_data_ready(parent, 0);
1383 /* Copy frame to all raw sockets on that connection */
/* Clone the incoming frame to every SOCK_RAW channel (except the
 * originator, per the comment — the actual skip condition is in the
 * truncated lines), dropping clones the receive queue rejects. */
1384 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1386 struct l2cap_chan_list *l = &conn->chan_list;
1387 struct sk_buff *nskb;
1390 BT_DBG("conn %p", conn);
1392 read_lock(&l->lock);
1393 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1394 if (sk->sk_type != SOCK_RAW)
1397 /* Don't send frame to the socket it came from */
1400 nskb = skb_clone(skb, GFP_ATOMIC);
/* Receive queue full/over quota: free the clone (in truncated line). */
1404 if (sock_queue_rcv_skb(sk, nskb))
1407 read_unlock(&l->lock);
1410 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling-command skb: L2CAP header (on the LE
 * or BR/EDR signalling CID as appropriate), command header, then the
 * payload — fragmented into frag_list skbs when it exceeds the MTU. */
1411 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1412 u8 code, u8 ident, u16 dlen, void *data)
1414 struct sk_buff *skb, **frag;
1415 struct l2cap_cmd_hdr *cmd;
1416 struct l2cap_hdr *lh;
1419 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1420 conn, code, ident, dlen);
1422 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1423 count = min_t(unsigned int, conn->mtu, len);
1425 skb = bt_skb_alloc(count, GFP_ATOMIC);
1429 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1430 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1432 if (conn->hcon->type == LE_LINK)
1433 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1435 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1437 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1440 cmd->len = cpu_to_le16(dlen);
/* First chunk of payload goes in the head skb. */
1443 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1444 memcpy(skb_put(skb, count), data, count);
1450 /* Continuation fragments (no L2CAP header) */
1451 frag = &skb_shinfo(skb)->frag_list;
1453 count = min_t(unsigned int, conn->mtu, len);
1455 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1459 memcpy(skb_put(*frag, count), data, count);
1464 frag = &(*frag)->next;
/* Parse one configuration option at *ptr: report its type and length
 * and decode the value by size (1/2/4 bytes, else a pointer to the raw
 * bytes). Presumably advances *ptr and returns the consumed length —
 * the switch labels and return are in the truncated lines. */
1474 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1476 struct l2cap_conf_opt *opt = *ptr;
1479 len = L2CAP_CONF_OPT_SIZE + opt->len;
1487 *val = *((u8 *) opt->val);
1491 *val = get_unaligned_le16(opt->val);
1495 *val = get_unaligned_le32(opt->val);
1499 *val = (unsigned long) opt->val;
1503 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr, encoding the value by size
 * (1/2/4 bytes, else memcpy of `len` bytes from the pointer in `val`),
 * and advance *ptr past the option. */
1507 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1509 struct l2cap_conf_opt *opt = *ptr;
1511 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1518 *((u8 *) opt->val) = val;
1522 put_unaligned_le16(val, opt->val);
1526 put_unaligned_le32(val, opt->val);
1530 memcpy(opt->val, (void *) val, len);
1534 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Ack timer expiry: send a pending acknowledgement for received frames. */
1537 static void l2cap_ack_timeout(unsigned long arg)
1539 struct sock *sk = (void *) arg;
1542 l2cap_send_ack(l2cap_pi(sk));
/*
 * Initialise per-socket ERTM state once a channel enters Enhanced
 * Retransmission Mode: zero the sequence/ack counters, arm the three
 * ERTM timers, set up the SREJ/busy queues, the local-busy work item,
 * and route backlogged packets through the ERTM receive path.
 */
1546 static inline void l2cap_ertm_init(struct sock *sk)
1548 l2cap_pi(sk)->expected_ack_seq = 0;
1549 l2cap_pi(sk)->unacked_frames = 0;
1550 l2cap_pi(sk)->buffer_seq = 0;
1551 l2cap_pi(sk)->num_acked = 0;
1552 l2cap_pi(sk)->frames_sent = 0;
/* Timers take the socket pointer as their opaque argument. */
1554 setup_timer(&l2cap_pi(sk)->retrans_timer,
1555 l2cap_retrans_timeout, (unsigned long) sk);
1556 setup_timer(&l2cap_pi(sk)->monitor_timer,
1557 l2cap_monitor_timeout, (unsigned long) sk);
1558 setup_timer(&l2cap_pi(sk)->ack_timer,
1559 l2cap_ack_timeout, (unsigned long) sk);
/* __skb_queue_head_init: lockless init — callers provide locking. */
1561 __skb_queue_head_init(SREJ_QUEUE(sk));
1562 __skb_queue_head_init(BUSY_QUEUE(sk));
1564 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
/* Backlogged skbs must go through the ERTM state machine too. */
1566 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/*
 * Pick the channel mode to use: keep ERTM/streaming only when the
 * remote's feature mask supports it, otherwise fall back to basic mode.
 * NOTE(review): the switch header and the supported-mode return are on
 * elided lines.
 */
1569 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1572 case L2CAP_MODE_STREAMING:
1573 case L2CAP_MODE_ERTM:
1574 if (l2cap_mode_supported(mode, remote_feat_mask))
/* Fallback when the requested mode is not mutually supported. */
1578 return L2CAP_MODE_BASIC;
/*
 * Build an outgoing Configure Request into `data`: on the first request
 * (no prior req/rsp exchanged) possibly downgrade the mode via
 * l2cap_select_mode(), then append MTU, RFC and (for ERTM/streaming)
 * FCS options as needed. Returns the request length (elided line).
 *
 * NOTE(review): switch headers, `done:` labels, break statements and
 * the final return are not visible in this elided listing.
 */
1582 int l2cap_build_conf_req(struct sock *sk, void *data)
1584 struct l2cap_pinfo *pi = l2cap_pi(sk);
1585 struct l2cap_conf_req *req = data;
1586 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
1587 void *ptr = req->data;
1589 BT_DBG("sk %p", sk);
/* Only negotiate the mode on the very first configure round. */
1591 if (pi->num_conf_req || pi->num_conf_rsp)
1595 case L2CAP_MODE_STREAMING:
1596 case L2CAP_MODE_ERTM:
/* STATE2_DEVICE: mode was pinned by the user — don't renegotiate. */
1597 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
1602 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
/* Only send an MTU option when it differs from the spec default. */
1607 if (pi->imtu != L2CAP_DEFAULT_MTU)
1608 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1611 case L2CAP_MODE_BASIC:
/* If the remote knows neither ERTM nor streaming, the RFC option
 * would be meaningless — skip it entirely. */
1612 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1613 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
1616 rfc.mode = L2CAP_MODE_BASIC;
1618 rfc.max_transmit = 0;
1619 rfc.retrans_timeout = 0;
1620 rfc.monitor_timeout = 0;
1621 rfc.max_pdu_size = 0;
1623 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1624 (unsigned long) &rfc);
1627 case L2CAP_MODE_ERTM:
1628 rfc.mode = L2CAP_MODE_ERTM;
1629 rfc.txwin_size = pi->tx_win;
1630 rfc.max_transmit = pi->max_tx;
/* Timeouts are chosen by the responder, so request 0 here. */
1631 rfc.retrans_timeout = 0;
1632 rfc.monitor_timeout = 0;
/* Cap the PDU size so an I-frame (incl. ~10 bytes of overhead)
 * still fits in the HCI connection MTU. */
1633 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1634 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
1635 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1637 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1638 (unsigned long) &rfc);
1640 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
/* Offer to disable the FCS if we or the peer asked for none. */
1643 if (pi->fcs == L2CAP_FCS_NONE ||
1644 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1645 pi->fcs = L2CAP_FCS_NONE;
1646 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
1650 case L2CAP_MODE_STREAMING:
1651 rfc.mode = L2CAP_MODE_STREAMING;
1653 rfc.max_transmit = 0;
1654 rfc.retrans_timeout = 0;
1655 rfc.monitor_timeout = 0;
1656 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1657 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
1658 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1660 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1661 (unsigned long) &rfc);
1663 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
1666 if (pi->fcs == L2CAP_FCS_NONE ||
1667 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1668 pi->fcs = L2CAP_FCS_NONE;
1669 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
/* Fill in the fixed request header fields. */
1674 req->dcid = cpu_to_le16(pi->dcid);
1675 req->flags = cpu_to_le16(0);
/*
 * Parse the peer's accumulated Configure Request (pi->conf_req) and
 * build our Configure Response into `data`: walk the options, record
 * MTU/RFC/FCS, reject unknown non-hint options, resolve the channel
 * mode, then emit the response options. Returns the response length
 * (elided line); -ECONNREFUSED when modes cannot be reconciled.
 *
 * NOTE(review): several declarations, switch headers, breaks, `done:`
 * label and the final return are missing from this elided listing.
 */
1680 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1682 struct l2cap_pinfo *pi = l2cap_pi(sk);
1683 struct l2cap_conf_rsp *rsp = data;
1684 void *ptr = rsp->data;
1685 void *req = pi->conf_req;
1686 int len = pi->conf_len;
1687 int type, hint, olen;
1689 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1690 u16 mtu = L2CAP_DEFAULT_MTU;
1691 u16 result = L2CAP_CONF_SUCCESS;
1693 BT_DBG("sk %p", sk);
/* First pass: consume every option the peer sent. */
1695 while (len >= L2CAP_CONF_OPT_SIZE) {
1696 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored silently; others must be understood. */
1698 hint = type & L2CAP_CONF_HINT;
1699 type &= L2CAP_CONF_MASK;
1702 case L2CAP_CONF_MTU:
1706 case L2CAP_CONF_FLUSH_TO:
1710 case L2CAP_CONF_QOS:
1713 case L2CAP_CONF_RFC:
1714 if (olen == sizeof(rfc))
1715 memcpy(&rfc, (void *) val, olen);
1718 case L2CAP_CONF_FCS:
/* Remember that the peer is happy without a frame check sequence. */
1719 if (val == L2CAP_FCS_NONE)
1720 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown, non-hint option: reply CONF_UNKNOWN listing its type. */
1728 result = L2CAP_CONF_UNKNOWN;
1729 *((u8 *) ptr++) = type;
/* Mode resolution happens only on the first configure round. */
1734 if (pi->num_conf_rsp || pi->num_conf_req > 1)
1738 case L2CAP_MODE_STREAMING:
1739 case L2CAP_MODE_ERTM:
1740 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1741 pi->mode = l2cap_select_mode(rfc.mode,
1742 pi->conn->feat_mask);
/* Mode pinned by user and peer disagrees: refuse the connection. */
1746 if (pi->mode != rfc.mode)
1747 return -ECONNREFUSED;
1753 if (pi->mode != rfc.mode) {
1754 result = L2CAP_CONF_UNACCEPT;
1755 rfc.mode = pi->mode;
/* Second disagreement in a row — give up rather than loop. */
1757 if (pi->num_conf_rsp == 1)
1758 return -ECONNREFUSED;
1760 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1761 sizeof(rfc), (unsigned long) &rfc);
1765 if (result == L2CAP_CONF_SUCCESS) {
1766 /* Configure output options and let the other side know
1767 * which ones we don't like. */
/* Spec minimum MTU: anything lower is unacceptable. */
1769 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1770 result = L2CAP_CONF_UNACCEPT;
1773 pi->conf_state |= L2CAP_CONF_MTU_DONE;
1775 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1778 case L2CAP_MODE_BASIC:
/* Basic mode never uses an FCS. */
1779 pi->fcs = L2CAP_FCS_NONE;
1780 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1783 case L2CAP_MODE_ERTM:
1784 pi->remote_tx_win = rfc.txwin_size;
1785 pi->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's PDU size to what fits in the HCI MTU. */
1787 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1788 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1790 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* As responder we fill in the retransmission/monitor timeouts.
 * NOTE(review): le16_to_cpu on host constants looks inverted —
 * cpu_to_le16 would be expected here; confirm against upstream. */
1792 rfc.retrans_timeout =
1793 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1794 rfc.monitor_timeout =
1795 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1797 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1799 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1800 sizeof(rfc), (unsigned long) &rfc);
1804 case L2CAP_MODE_STREAMING:
1805 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1806 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1808 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1810 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1812 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1813 sizeof(rfc), (unsigned long) &rfc);
/* Unsupported mode requested: answer unaccept with our mode. */
1818 result = L2CAP_CONF_UNACCEPT;
1820 memset(&rfc, 0, sizeof(rfc));
1821 rfc.mode = pi->mode;
1824 if (result == L2CAP_CONF_SUCCESS)
1825 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1827 rsp->scid = cpu_to_le16(pi->dcid);
1828 rsp->result = cpu_to_le16(result);
1829 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse a (typically unaccepted) Configure Response from the peer and
 * build the adjusted Configure Request we will retry with into `data`.
 * Adopts the peer's RFC timeouts/MPS on success; refuses when the peer
 * pushes a mode we cannot accept. Returns the new request length
 * (elided line) or -ECONNREFUSED.
 *
 * NOTE(review): declarations for type/olen/val, switch headers, breaks
 * and the final return are missing from this elided listing.
 */
1834 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
1836 struct l2cap_pinfo *pi = l2cap_pi(sk);
1837 struct l2cap_conf_req *req = data;
1838 void *ptr = req->data;
1841 struct l2cap_conf_rfc rfc;
1843 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
1845 while (len >= L2CAP_CONF_OPT_SIZE) {
1846 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1849 case L2CAP_CONF_MTU:
/* Peer offered an illegally small MTU: mark unaccept and fall
 * back to the spec minimum; re-propose our imtu either way. */
1850 if (val < L2CAP_DEFAULT_MIN_MTU) {
1851 *result = L2CAP_CONF_UNACCEPT;
1852 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
1855 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1858 case L2CAP_CONF_FLUSH_TO:
1860 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1864 case L2CAP_CONF_RFC:
1865 if (olen == sizeof(rfc))
1866 memcpy(&rfc, (void *)val, olen);
/* User pinned the mode; peer insists on another: refuse. */
1868 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1869 rfc.mode != pi->mode)
1870 return -ECONNREFUSED;
/* Echo the (possibly adjusted) RFC option back in our retry. */
1874 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1875 sizeof(rfc), (unsigned long) &rfc);
/* Basic mode cannot be upgraded by the peer's response. */
1880 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
1881 return -ECONNREFUSED;
1883 pi->mode = rfc.mode;
/* On success adopt the negotiated ERTM/streaming parameters. */
1885 if (*result == L2CAP_CONF_SUCCESS) {
1887 case L2CAP_MODE_ERTM:
1888 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1889 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1890 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1892 case L2CAP_MODE_STREAMING:
1893 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1897 req->dcid = cpu_to_le16(pi->dcid);
1898 req->flags = cpu_to_le16(0x0000);
/*
 * Fill in a minimal Configure Response header (scid/result/flags) with
 * no options. The length return statement is on an elided line.
 */
1903 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1905 struct l2cap_conf_rsp *rsp = data;
1906 void *ptr = rsp->data;
1908 BT_DBG("sk %p", sk);
/* Our response identifies the channel by the peer's CID (our dcid). */
1910 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1911 rsp->result = cpu_to_le16(result);
1912 rsp->flags = cpu_to_le16(flags);
/*
 * Extract the RFC option from a successful Configure Response and
 * record the negotiated ERTM/streaming parameters on the socket.
 * No-op for basic-mode channels.
 *
 * NOTE(review): declarations, switch headers and `done:` label are on
 * elided lines; rfc may be read uninitialised if no RFC option is
 * present — confirm against the upstream source.
 */
1917 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
1919 struct l2cap_pinfo *pi = l2cap_pi(sk);
1922 struct l2cap_conf_rfc rfc;
1924 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
/* Only ERTM and streaming channels carry RFC parameters. */
1926 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
1929 while (len >= L2CAP_CONF_OPT_SIZE) {
1930 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1933 case L2CAP_CONF_RFC:
1934 if (olen == sizeof(rfc))
1935 memcpy(&rfc, (void *)val, olen);
1942 case L2CAP_MODE_ERTM:
1943 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1944 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1945 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1947 case L2CAP_MODE_STREAMING:
1948 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle an incoming Command Reject. If it answers our outstanding
 * Information Request (matching ident), treat feature discovery as
 * done and kick off pending channel establishment.
 */
1952 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1954 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 = "command not understood"; other reasons are ignored here. */
1956 if (rej->reason != 0x0000)
1959 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1960 cmd->ident == conn->info_ident) {
1961 del_timer(&conn->info_timer);
1963 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1964 conn->info_ident = 0;
/* Feature mask resolved (by rejection) — start queued channels. */
1966 l2cap_conn_start(conn);
/*
 * Handle an incoming Connection Request: find a listening socket for
 * the PSM, check security and backlog, allocate and register the child
 * channel, then send a Connection Response (success/pending/refused).
 * May also trigger an Information Request and, on immediate success,
 * the first Configure Request.
 *
 * NOTE(review): goto labels (sendresp/response), some error paths and
 * the `char buf[128]` declaration are on elided lines in this listing.
 */
1972 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1974 struct l2cap_chan_list *list = &conn->chan_list;
1975 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1976 struct l2cap_conn_rsp rsp;
1977 struct sock *parent, *sk = NULL;
1978 int result, status = L2CAP_CS_NO_INFO;
1980 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1981 __le16 psm = req->psm;
1983 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1985 /* Check if we have socket listening on psm */
1986 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1988 result = L2CAP_CR_BAD_PSM;
1992 bh_lock_sock(parent);
1994 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
1995 if (psm != cpu_to_le16(0x0001) &&
1996 !hci_conn_check_link_mode(conn->hcon)) {
1997 conn->disc_reason = 0x05;
1998 result = L2CAP_CR_SEC_BLOCK;
2002 result = L2CAP_CR_NO_MEM;
2004 /* Check for backlog size */
2005 if (sk_acceptq_is_full(parent)) {
2006 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2010 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2014 write_lock_bh(&list->lock);
2016 /* Check if we already have channel with that dcid */
2017 if (__l2cap_get_chan_by_dcid(list, scid)) {
2018 write_unlock_bh(&list->lock);
2019 sock_set_flag(sk, SOCK_ZAPPED);
2020 l2cap_sock_kill(sk);
/* Keep the ACL alive while the new channel exists. */
2024 hci_conn_hold(conn->hcon);
2026 l2cap_sock_init(sk, parent);
2027 bacpy(&bt_sk(sk)->src, conn->src);
2028 bacpy(&bt_sk(sk)->dst, conn->dst);
2029 l2cap_pi(sk)->psm = psm;
/* Our dcid is the peer's source CID. */
2030 l2cap_pi(sk)->dcid = scid;
2032 bt_accept_enqueue(parent, sk);
2034 __l2cap_chan_add(conn, sk);
2035 dcid = l2cap_pi(sk)->scid;
2037 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2039 l2cap_pi(sk)->ident = cmd->ident;
/* Decide the response: only proceed once feature discovery is done
 * and security requirements are satisfied. */
2041 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2042 if (l2cap_check_security(sk)) {
2043 if (bt_sk(sk)->defer_setup) {
2044 sk->sk_state = BT_CONNECT2;
2045 result = L2CAP_CR_PEND;
2046 status = L2CAP_CS_AUTHOR_PEND;
/* Wake the listener so userspace can accept/authorise. */
2047 parent->sk_data_ready(parent, 0);
2049 sk->sk_state = BT_CONFIG;
2050 result = L2CAP_CR_SUCCESS;
2051 status = L2CAP_CS_NO_INFO;
2054 sk->sk_state = BT_CONNECT2;
2055 result = L2CAP_CR_PEND;
2056 status = L2CAP_CS_AUTHEN_PEND;
2059 sk->sk_state = BT_CONNECT2;
2060 result = L2CAP_CR_PEND;
2061 status = L2CAP_CS_NO_INFO;
2064 write_unlock_bh(&list->lock);
2067 bh_unlock_sock(parent);
2070 rsp.scid = cpu_to_le16(scid);
2071 rsp.dcid = cpu_to_le16(dcid);
2072 rsp.result = cpu_to_le16(result);
2073 rsp.status = cpu_to_le16(status);
2074 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending with no info yet: go fetch the peer's feature mask now. */
2076 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2077 struct l2cap_info_req info;
2078 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2080 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2081 conn->info_ident = l2cap_get_ident(conn);
2083 mod_timer(&conn->info_timer, jiffies +
2084 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2086 l2cap_send_cmd(conn, conn->info_ident,
2087 L2CAP_INFO_REQ, sizeof(info), &info);
/* Immediate success: start configuration right away. */
2090 if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
2091 result == L2CAP_CR_SUCCESS) {
2093 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2094 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2095 l2cap_build_conf_req(sk, buf), buf);
2096 l2cap_pi(sk)->num_conf_req++;
/*
 * Handle a Connection Response for a channel we initiated: on success
 * move to BT_CONFIG and send our first Configure Request; on pending,
 * just flag CONNECT_PEND; otherwise tear the channel down.
 *
 * NOTE(review): the sock/req declarations, lock/unlock calls, case
 * labels for PEND/default and the return are on elided lines.
 */
2102 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2104 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2105 u16 scid, dcid, result, status;
2109 scid = __le16_to_cpu(rsp->scid);
2110 dcid = __le16_to_cpu(rsp->dcid);
2111 result = __le16_to_cpu(rsp->result);
2112 status = __le16_to_cpu(rsp->status);
2114 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* scid may be 0 in error responses — fall back to matching by ident. */
2117 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2121 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2127 case L2CAP_CR_SUCCESS:
2128 sk->sk_state = BT_CONFIG;
2129 l2cap_pi(sk)->ident = 0;
2130 l2cap_pi(sk)->dcid = dcid;
2131 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
/* Don't send a duplicate Configure Request. */
2133 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
2136 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2138 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2139 l2cap_build_conf_req(sk, req), req);
2140 l2cap_pi(sk)->num_conf_req++;
2144 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2148 /* don't delete l2cap channel if sk is owned by user */
2149 if (sock_owned_by_user(sk)) {
2150 sk->sk_state = BT_DISCONN;
2151 l2cap_sock_clear_timer(sk);
/* Short timer: retry the teardown once the user releases the sock. */
2152 l2cap_sock_set_timer(sk, HZ / 5);
2156 l2cap_chan_del(sk, ECONNREFUSED);
2164 static inline void set_default_fcs(struct l2cap_pinfo *pi)
2166 /* FCS is enabled only in ERTM or streaming mode, if one or both
2169  * sides request it (comment truncated in this elided listing). */
/* Non-ERTM/streaming channels never carry an FCS. */
2169 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2170 pi->fcs = L2CAP_FCS_NONE;
/* Otherwise default to CRC16 unless the peer opted out of FCS. */
2171 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
2172 pi->fcs = L2CAP_FCS_CRC16;
/*
 * Handle an incoming Configure Request: accumulate (possibly
 * multi-fragment) option data in pi->conf_req, and once the final
 * fragment arrives parse it, send our Configure Response, and — when
 * both directions are done — bring the channel up (ERTM init + ready).
 *
 * NOTE(review): declarations (sk, rsp buffer, len, dcid, flags), lock
 * handling, goto labels and the return are on elided lines.
 */
2175 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2177 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2183 dcid = __le16_to_cpu(req->dcid);
2184 flags = __le16_to_cpu(req->flags);
2186 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2188 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
/* Config is only legal in BT_CONFIG; otherwise reject (invalid CID). */
2192 if (sk->sk_state != BT_CONFIG) {
2193 struct l2cap_cmd_rej rej;
2195 rej.reason = cpu_to_le16(0x0002);
2196 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2201 /* Reject if config buffer is too small. */
2202 len = cmd_len - sizeof(*req);
2203 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2204 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2205 l2cap_build_conf_rsp(sk, rsp,
2206 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment's options to the accumulation buffer. */
2211 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2212 l2cap_pi(sk)->conf_len += len;
/* Continuation flag set: more fragments follow, ack with empty rsp. */
2214 if (flags & 0x0001) {
2215 /* Incomplete config. Send empty response. */
2216 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2217 l2cap_build_conf_rsp(sk, rsp,
2218 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2222 /* Complete config. */
2223 len = l2cap_parse_conf_req(sk, rsp);
/* Parse failure (mode conflict): disconnect the channel. */
2225 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2229 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2230 l2cap_pi(sk)->num_conf_rsp++;
2232 /* Reset config buffer. */
2233 l2cap_pi(sk)->conf_len = 0;
2235 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: channel becomes operational. */
2238 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2239 set_default_fcs(l2cap_pi(sk));
2241 sk->sk_state = BT_CONNECTED;
2243 l2cap_pi(sk)->next_tx_seq = 0;
2244 l2cap_pi(sk)->expected_tx_seq = 0;
2245 __skb_queue_head_init(TX_QUEUE(sk));
2246 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2247 l2cap_ertm_init(sk);
2249 l2cap_chan_ready(sk);
/* We haven't configured our direction yet: send our request now. */
2253 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2255 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2256 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2257 l2cap_build_conf_req(sk, buf), buf);
2258 l2cap_pi(sk)->num_conf_req++;
/*
 * Handle an incoming Configure Response: on success record the RFC
 * parameters; on unaccept, retry with an adjusted request (bounded by
 * L2CAP_CONF_MAX_CONF_RSP); otherwise disconnect. When both directions
 * finish, bring the channel up.
 *
 * NOTE(review): the sock declaration, req buffer, labels (done), some
 * switch/default lines and the return are on elided lines.
 */
2266 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2268 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2269 u16 scid, flags, result;
2271 int len = cmd->len - sizeof(*rsp);
2273 scid = __le16_to_cpu(rsp->scid);
2274 flags = __le16_to_cpu(rsp->flags);
2275 result = __le16_to_cpu(rsp->result);
2277 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2278 scid, flags, result);
2280 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2285 case L2CAP_CONF_SUCCESS:
2286 l2cap_conf_rfc_get(sk, rsp->data, len);
2289 case L2CAP_CONF_UNACCEPT:
/* Bounded renegotiation: give up after MAX_CONF_RSP attempts. */
2290 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard against overflowing the local request buffer. */
2293 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2294 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2298 /* throw out any old stored conf requests */
2299 result = L2CAP_CONF_SUCCESS;
2300 len = l2cap_parse_conf_rsp(sk, rsp->data,
/* Parse refused the peer's terms: disconnect. */
2303 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2307 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2308 L2CAP_CONF_REQ, len, req);
2309 l2cap_pi(sk)->num_conf_req++;
2310 if (result != L2CAP_CONF_SUCCESS)
/* Any other result code: give the peer time, then disconnect. */
2316 sk->sk_err = ECONNRESET;
2317 l2cap_sock_set_timer(sk, HZ * 5);
2318 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2325 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
/* Both directions configured: channel becomes operational. */
2327 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2328 set_default_fcs(l2cap_pi(sk));
2330 sk->sk_state = BT_CONNECTED;
2331 l2cap_pi(sk)->next_tx_seq = 0;
2332 l2cap_pi(sk)->expected_tx_seq = 0;
2333 __skb_queue_head_init(TX_QUEUE(sk));
2334 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2335 l2cap_ertm_init(sk);
2337 l2cap_chan_ready(sk);
/*
 * Handle an incoming Disconnection Request: echo back a Disconnection
 * Response, shut the socket down and remove the channel — deferring
 * the removal with a short timer if userspace currently owns the sock.
 *
 * NOTE(review): sock/scid/dcid declarations, lock calls and the return
 * are on elided lines.
 */
2345 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2347 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2348 struct l2cap_disconn_rsp rsp;
2352 scid = __le16_to_cpu(req->scid);
2353 dcid = __le16_to_cpu(req->dcid);
2355 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The request's dcid is OUR source CID — look the channel up by it. */
2357 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2361 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2362 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2363 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2365 sk->sk_shutdown = SHUTDOWN_MASK;
2367 /* don't delete l2cap channel if sk is owned by user */
2368 if (sock_owned_by_user(sk)) {
2369 sk->sk_state = BT_DISCONN;
2370 l2cap_sock_clear_timer(sk);
/* Retry the teardown shortly, once the user releases the sock. */
2371 l2cap_sock_set_timer(sk, HZ / 5);
2376 l2cap_chan_del(sk, ECONNRESET);
2379 l2cap_sock_kill(sk);
/*
 * Handle a Disconnection Response to our own request: remove the
 * channel (err 0 — clean close), deferring with a short timer when
 * userspace owns the socket.
 *
 * NOTE(review): sock/scid/dcid declarations, lock calls and the return
 * are on elided lines.
 */
2383 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2385 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2389 scid = __le16_to_cpu(rsp->scid);
2390 dcid = __le16_to_cpu(rsp->dcid);
2392 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2394 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2398 /* don't delete l2cap channel if sk is owned by user */
2399 if (sock_owned_by_user(sk)) {
2400 sk->sk_state = BT_DISCONN;
2401 l2cap_sock_clear_timer(sk);
2402 l2cap_sock_set_timer(sk, HZ / 5);
/* Clean close: no error propagated to the socket. */
2407 l2cap_chan_del(sk, 0);
2410 l2cap_sock_kill(sk);
/*
 * Handle an incoming Information Request and answer it: feature mask
 * (our static mask plus ERTM/streaming support), fixed-channel map,
 * or NOTSUPP for anything else.
 *
 * NOTE(review): the `u8 buf[...]` declarations, feature-mask trailing
 * terms (line 2431) and the return are on elided lines.
 */
2414 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2416 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2419 type = __le16_to_cpu(req->type);
2421 BT_DBG("type 0x%4.4x", type);
2423 if (type == L2CAP_IT_FEAT_MASK) {
2425 u32 feat_mask = l2cap_feat_mask;
2426 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2427 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2428 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM and streaming in addition to the static mask
 * (expression continues on an elided line). */
2430 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2432 put_unaligned_le32(feat_mask, rsp->data);
2433 l2cap_send_cmd(conn, cmd->ident,
2434 L2CAP_INFO_RSP, sizeof(buf), buf);
2435 } else if (type == L2CAP_IT_FIXED_CHAN) {
2437 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2438 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2439 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Fixed channel bitmap lives after the 4-byte response header. */
2440 memcpy(buf + 4, l2cap_fixed_chan, 8);
2441 l2cap_send_cmd(conn, cmd->ident,
2442 L2CAP_INFO_RSP, sizeof(buf), buf);
/* Unknown info type: reply "not supported". */
2444 struct l2cap_info_rsp rsp;
2445 rsp.type = cpu_to_le16(type);
2446 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2447 l2cap_send_cmd(conn, cmd->ident,
2448 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an Information Response to our discovery: record the feature
 * mask, optionally chase the fixed-channel map, then mark discovery
 * done and start any channels queued behind it.
 *
 * NOTE(review): type/result declarations, some braces and the return
 * are on elided lines.
 */
2454 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2456 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2459 type = __le16_to_cpu(rsp->type);
2460 result = __le16_to_cpu(rsp->result);
2462 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2464 del_timer(&conn->info_timer);
/* Peer refused: treat discovery as finished and move on. */
2466 if (result != L2CAP_IR_SUCCESS) {
2467 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2468 conn->info_ident = 0;
2470 l2cap_conn_start(conn);
2475 if (type == L2CAP_IT_FEAT_MASK) {
2476 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: fetch the channel map next. */
2478 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2479 struct l2cap_info_req req;
2480 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2482 conn->info_ident = l2cap_get_ident(conn);
2484 l2cap_send_cmd(conn, conn->info_ident,
2485 L2CAP_INFO_REQ, sizeof(req), &req);
2487 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2488 conn->info_ident = 0;
2490 l2cap_conn_start(conn);
2492 } else if (type == L2CAP_IT_FIXED_CHAN) {
2493 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2494 conn->info_ident = 0;
2496 l2cap_conn_start(conn);
/*
 * Validate LE connection-parameter-update values against the ranges in
 * the Bluetooth spec (interval 6..3200, supervision timeout 10..3200,
 * timeout must exceed the effective interval, latency capped). Error
 * returns are on elided lines.
 */
2502 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2507 if (min > max || min < 6 || max > 3200)
2510 if (to_multiplier < 10 || to_multiplier > 3200)
/* Supervision timeout must be longer than the max connection interval
 * (units differ: to_multiplier is 10ms, interval is 1.25ms => x8). */
2513 if (max >= to_multiplier * 8)
2516 max_latency = (to_multiplier * 8 / max) - 1;
2517 if (latency > 499 || latency > max_latency)
/*
 * Handle an LE Connection Parameter Update Request (valid only when we
 * are master): validate the parameters, send accept/reject, and on
 * accept push the new parameters to the controller.
 *
 * NOTE(review): early-return bodies, the accepted-branch `if (!err)`
 * line and the final return are on elided lines.
 */
2523 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2524 struct l2cap_cmd_hdr *cmd, u8 *data)
2526 struct hci_conn *hcon = conn->hcon;
2527 struct l2cap_conn_param_update_req *req;
2528 struct l2cap_conn_param_update_rsp rsp;
2529 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may apply a parameter update. */
2532 if (!(hcon->link_mode & HCI_LM_MASTER))
/* Reject malformed requests by exact length check. */
2535 cmd_len = __le16_to_cpu(cmd->len);
2536 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2539 req = (struct l2cap_conn_param_update_req *) data;
2540 min = __le16_to_cpu(req->min);
2541 max = __le16_to_cpu(req->max);
2542 latency = __le16_to_cpu(req->latency);
2543 to_multiplier = __le16_to_cpu(req->to_multiplier);
2545 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2546 min, max, latency, to_multiplier);
2548 memset(&rsp, 0, sizeof(rsp));
2550 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2552 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2554 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2556 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* On acceptance, actually program the new link parameters. */
2560 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/*
 * Dispatch one BR/EDR signalling command to its handler by opcode.
 * Echo requests are answered inline; unknown opcodes are logged and
 * (on elided lines) rejected. Returns the handler's error code.
 */
2565 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2566 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2570 switch (cmd->code) {
2571 case L2CAP_COMMAND_REJ:
2572 l2cap_command_rej(conn, cmd, data);
2575 case L2CAP_CONN_REQ:
2576 err = l2cap_connect_req(conn, cmd, data);
2579 case L2CAP_CONN_RSP:
2580 err = l2cap_connect_rsp(conn, cmd, data);
2583 case L2CAP_CONF_REQ:
2584 err = l2cap_config_req(conn, cmd, cmd_len, data);
2587 case L2CAP_CONF_RSP:
2588 err = l2cap_config_rsp(conn, cmd, data);
2591 case L2CAP_DISCONN_REQ:
2592 err = l2cap_disconnect_req(conn, cmd, data);
2595 case L2CAP_DISCONN_RSP:
2596 err = l2cap_disconnect_rsp(conn, cmd, data);
2599 case L2CAP_ECHO_REQ:
/* Echo: bounce the payload straight back with the same ident. */
2600 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2603 case L2CAP_ECHO_RSP:
2606 case L2CAP_INFO_REQ:
2607 err = l2cap_information_req(conn, cmd, data);
2610 case L2CAP_INFO_RSP:
2611 err = l2cap_information_rsp(conn, cmd, data);
2615 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/*
 * Dispatch one LE signalling command. Only the parameter-update request
 * is acted on; reject/rsp opcodes fall through (returns on elided
 * lines), and unknown opcodes are logged as errors.
 */
2623 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2624 struct l2cap_cmd_hdr *cmd, u8 *data)
2626 switch (cmd->code) {
2627 case L2CAP_COMMAND_REJ:
2630 case L2CAP_CONN_PARAM_UPDATE_REQ:
2631 return l2cap_conn_param_update_req(conn, cmd, data);
2633 case L2CAP_CONN_PARAM_UPDATE_RSP:
2637 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/*
 * Process an skb received on the signalling CID: hand a copy to raw
 * sockets, then iterate the packed commands, validating each header
 * before dispatching to the LE or BR/EDR handler. Handler errors
 * trigger a Command Reject back to the peer.
 *
 * NOTE(review): the len/cmd_len/err declarations, loop-advance lines
 * (data += cmd_len etc.) and kfree_skb are on elided lines.
 */
2642 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2643 struct sk_buff *skb)
2645 u8 *data = skb->data;
2647 struct l2cap_cmd_hdr cmd;
/* Raw sockets get to observe all signalling traffic. */
2650 l2cap_raw_recv(conn, skb);
2652 while (len >= L2CAP_CMD_HDR_SIZE) {
2654 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2655 data += L2CAP_CMD_HDR_SIZE;
2656 len -= L2CAP_CMD_HDR_SIZE;
2658 cmd_len = le16_to_cpu(cmd.len);
2660 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* ident 0 is illegal; a length past the buffer means corruption. */
2662 if (cmd_len > len || !cmd.ident) {
2663 BT_DBG("corrupted command");
2667 if (conn->hcon->type == LE_LINK)
2668 err = l2cap_le_sig_cmd(conn, &cmd, data);
2670 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
2673 struct l2cap_cmd_rej rej;
2675 BT_ERR("Wrong link type (%d)", err);
2677 /* FIXME: Map err to a valid reason */
2678 rej.reason = cpu_to_le16(0);
2679 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/*
 * Verify the CRC16 frame check sequence on a received ERTM/streaming
 * frame: strip the trailing 2 FCS bytes and compare against a CRC
 * computed over the L2CAP header (+2 control bytes) and payload.
 * Success/failure return values are on elided lines.
 */
2689 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
2691 u16 our_fcs, rcv_fcs;
/* The CRC covers the L2CAP header plus the 2-byte control field. */
2692 int hdr_size = L2CAP_HDR_SIZE + 2;
2694 if (pi->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off the tail, then read it from just past the end. */
2695 skb_trim(skb, skb->len - 2);
2696 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2697 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
2699 if (our_fcs != rcv_fcs)
/*
 * After a poll/final exchange, resume transmission: send RNR if we are
 * locally busy, otherwise retransmit/send pending I-frames, and fall
 * back to an RR if nothing was sent so the peer still gets an ack.
 *
 * NOTE(review): the `u16 control = ...` initialisation (2709) and
 * F-bit handling are on elided lines.
 */
2705 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
2707 struct l2cap_pinfo *pi = l2cap_pi(sk);
2710 pi->frames_sent = 0;
/* Every S/I-frame acks up to our current buffer_seq. */
2712 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2714 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
2715 control |= L2CAP_SUPER_RCV_NOT_READY;
2716 l2cap_send_sframe(pi, control);
2717 pi->conn_state |= L2CAP_CONN_RNR_SENT;
2720 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
2721 l2cap_retransmit_frames(sk);
2723 l2cap_ertm_send(sk);
/* Nothing went out and we're not busy: ack explicitly with RR. */
2725 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
2726 pi->frames_sent == 0) {
2727 control |= L2CAP_SUPER_RCV_READY;
2728 l2cap_send_sframe(pi, control);
/*
 * Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * ordered by tx_seq distance from buffer_seq (modulo-64 sequence
 * space). Duplicate tx_seqs are detected (handling on elided lines).
 * Return statements are also elided.
 */
2732 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
2734 struct sk_buff *next_skb;
2735 struct l2cap_pinfo *pi = l2cap_pi(sk);
2736 int tx_seq_offset, next_tx_seq_offset;
2738 bt_cb(skb)->tx_seq = tx_seq;
2739 bt_cb(skb)->sar = sar;
2741 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue: just append. */
2743 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Offsets are distances from buffer_seq in the mod-64 window. */
2747 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
2748 if (tx_seq_offset < 0)
2749 tx_seq_offset += 64;
/* Duplicate frame already queued. */
2752 if (bt_cb(next_skb)->tx_seq == tx_seq)
2755 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
2756 pi->buffer_seq) % 64;
2757 if (next_tx_seq_offset < 0)
2758 next_tx_seq_offset += 64;
/* Found the first queued frame beyond ours: insert before it. */
2760 if (next_tx_seq_offset > tx_seq_offset) {
2761 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
2765 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
2768 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
/* Larger than everything queued: append at the tail. */
2770 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/*
 * Reassemble an ERTM SDU from its SAR fragments. Unsegmented frames go
 * straight to the socket queue; START allocates pi->sdu sized from the
 * embedded SDU length; CONTINUE/END append with length checks; END
 * clones the finished SDU and delivers it, with SAR_RETRY handling a
 * full receive queue. On protocol violation the channel is
 * disconnected.
 *
 * NOTE(review): many guard-failure `goto`s, the END case label, breaks
 * and returns are on elided lines; comments follow the visible flow.
 */
2775 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
2777 struct l2cap_pinfo *pi = l2cap_pi(sk);
2778 struct sk_buff *_skb;
2781 switch (control & L2CAP_CTRL_SAR) {
2782 case L2CAP_SDU_UNSEGMENTED:
/* Unsegmented frame in the middle of a segmented SDU: violation. */
2783 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
2786 err = sock_queue_rcv_skb(sk, skb);
2792 case L2CAP_SDU_START:
2793 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
/* First two payload bytes carry the total SDU length. */
2796 pi->sdu_len = get_unaligned_le16(skb->data);
2798 if (pi->sdu_len > pi->imtu)
2801 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
2805 /* pull sdu_len bytes only after alloc, because of Local Busy
2806  * condition we have to be sure that this will be executed
2807  * only once, i.e., when alloc does not fail */
2810 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2812 pi->conn_state |= L2CAP_CONN_SAR_SDU;
2813 pi->partial_sdu_len = skb->len;
2816 case L2CAP_SDU_CONTINUE:
/* CONTINUE without a preceding START: violation. */
2817 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
2823 pi->partial_sdu_len += skb->len;
2824 if (pi->partial_sdu_len > pi->sdu_len)
2827 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* END fragment (case label elided): finish and deliver the SDU. */
2832 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
/* SAR_RETRY means the copy was already done on a prior attempt
 * that failed to queue — don't append the data twice. */
2838 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
2839 pi->partial_sdu_len += skb->len;
2841 if (pi->partial_sdu_len > pi->imtu)
2844 if (pi->partial_sdu_len != pi->sdu_len)
2847 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2850 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
2852 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
2856 err = sock_queue_rcv_skb(sk, _skb);
/* Queue full: remember to retry delivery later. */
2859 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
2863 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
2864 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
/* Protocol violation path (label elided): kill the channel. */
2878 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/*
 * Drain the local-busy queue into the ERTM reassembly path. If a frame
 * still cannot be delivered, requeue it and stay busy; once the queue
 * empties, exit local-busy: either poll the peer (if we had sent RNR)
 * or simply clear the busy flags. Return values are on elided lines.
 */
2883 static int l2cap_try_push_rx_skb(struct sock *sk)
2885 struct l2cap_pinfo *pi = l2cap_pi(sk);
2886 struct sk_buff *skb;
2890 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
2891 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
2892 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
/* Still can't deliver: put it back at the head and stay busy. */
2894 skb_queue_head(BUSY_QUEUE(sk), skb);
2898 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
2901 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
/* We told the peer RNR earlier: poll (P-bit) to restart its sender
 * and wait for the F-bit response under the monitor timer. */
2904 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2905 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
2906 l2cap_send_sframe(pi, control);
2907 l2cap_pi(sk)->retry_count = 1;
2909 del_timer(&pi->retrans_timer);
2910 __mod_monitor_timer();
2912 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
2915 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
2916 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
2918 BT_DBG("sk %p, Exit local busy", sk);
/*
 * Workqueue handler for the local-busy condition: repeatedly try to
 * push queued rx frames, sleeping in between, until the busy queue
 * drains, an error/signal occurs, or the retry budget is exhausted
 * (which disconnects the channel).
 *
 * NOTE(review): lock_sock/release_sock and several break/error lines
 * are elided from this listing.
 */
2923 static void l2cap_busy_work(struct work_struct *work)
2925 DECLARE_WAITQUEUE(wait, current);
/* The socket is the first member of l2cap_pinfo, hence this cast. */
2926 struct l2cap_pinfo *pi =
2927 container_of(work, struct l2cap_pinfo, busy_work);
2928 struct sock *sk = (struct sock *)pi;
2929 int n_tries = 0, timeo = HZ/5, err;
2930 struct sk_buff *skb;
2934 add_wait_queue(sk_sleep(sk), &wait);
2935 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
2936 set_current_state(TASK_INTERRUPTIBLE);
/* Too many retries: give up and tear the channel down. */
2938 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
2940 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
2947 if (signal_pending(current)) {
2948 err = sock_intr_errno(timeo);
2953 timeo = schedule_timeout(timeo);
2956 err = sock_error(sk);
/* Queue fully drained: leave the loop. */
2960 if (l2cap_try_push_rx_skb(sk) == 0)
2964 set_current_state(TASK_RUNNING);
2965 remove_wait_queue(sk_sleep(sk), &wait);
/*
 * Deliver one received ERTM frame. While already local-busy, frames
 * are appended to the busy queue. Otherwise attempt reassembly; on
 * failure enter the local-busy state: queue the frame, send RNR, stop
 * the ack timer and schedule the busy worker. Return lines are elided.
 */
2970 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
2972 struct l2cap_pinfo *pi = l2cap_pi(sk);
2975 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
2976 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
2977 __skb_queue_tail(BUSY_QUEUE(sk), skb);
2978 return l2cap_try_push_rx_skb(sk);
2983 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
2985 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
2989 /* Busy Condition */
2990 BT_DBG("sk %p, Enter local busy", sk);
2992 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
2993 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
2994 __skb_queue_tail(BUSY_QUEUE(sk), skb);
/* Tell the peer to stop sending (Receiver Not Ready). */
2996 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2997 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
2998 l2cap_send_sframe(pi, sctrl);
3000 pi->conn_state |= L2CAP_CONN_RNR_SENT;
/* RNR already acknowledges; no separate delayed ack needed. */
3002 del_timer(&pi->ack_timer);
3004 queue_work(_busy_wq, &pi->busy_work);
/*
 * Reassemble a streaming-mode SDU from SAR fragments. Unlike ERTM,
 * errors here drop the partial SDU rather than disconnecting — frames
 * may legitimately be lost in streaming mode.
 *
 * NOTE(review): several error/free paths, the END case label, breaks
 * and returns are on elided lines.
 */
3009 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3011 struct l2cap_pinfo *pi = l2cap_pi(sk);
3012 struct sk_buff *_skb;
3016 * TODO: We have to notify the userland if some data is lost with the
3020 switch (control & L2CAP_CTRL_SAR) {
3021 case L2CAP_SDU_UNSEGMENTED:
/* Unsegmented frame aborts any in-progress segmented SDU. */
3022 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3027 err = sock_queue_rcv_skb(sk, skb);
3033 case L2CAP_SDU_START:
3034 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* First two payload bytes carry the total SDU length. */
3039 pi->sdu_len = get_unaligned_le16(skb->data);
3042 if (pi->sdu_len > pi->imtu) {
3047 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3053 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3055 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3056 pi->partial_sdu_len = skb->len;
3060 case L2CAP_SDU_CONTINUE:
3061 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3064 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3066 pi->partial_sdu_len += skb->len;
3067 if (pi->partial_sdu_len > pi->sdu_len)
/* END fragment (case label elided): finish and deliver the SDU. */
3075 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3078 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3080 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3081 pi->partial_sdu_len += skb->len;
3083 if (pi->partial_sdu_len > pi->imtu)
/* Deliver only when the reassembled length matches the declared
 * SDU length; otherwise the SDU is silently dropped. */
3086 if (pi->partial_sdu_len == pi->sdu_len) {
3087 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3088 err = sock_queue_rcv_skb(sk, _skb);
3103 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3105 struct sk_buff *skb;
3108 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3109 if (bt_cb(skb)->tx_seq != tx_seq)
3112 skb = skb_dequeue(SREJ_QUEUE(sk));
3113 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3114 l2cap_ertm_reassembly_sdu(sk, skb, control);
3115 l2cap_pi(sk)->buffer_seq_srej =
3116 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3117 tx_seq = (tx_seq + 1) % 64;
3121 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3123 struct l2cap_pinfo *pi = l2cap_pi(sk);
3124 struct srej_list *l, *tmp;
3127 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3128 if (l->tx_seq == tx_seq) {
3133 control = L2CAP_SUPER_SELECT_REJECT;
3134 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3135 l2cap_send_sframe(pi, control);
3137 list_add_tail(&l->list, SREJ_LIST(sk));
3141 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3143 struct l2cap_pinfo *pi = l2cap_pi(sk);
3144 struct srej_list *new;
3147 while (tx_seq != pi->expected_tx_seq) {
3148 control = L2CAP_SUPER_SELECT_REJECT;
3149 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3150 l2cap_send_sframe(pi, control);
3152 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3153 new->tx_seq = pi->expected_tx_seq;
3154 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3155 list_add_tail(&new->list, SREJ_LIST(sk));
3157 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* Core ERTM receive state machine for I-frames.
 * Validates tx_seq against the receive window, acknowledges via the
 * piggybacked req_seq, handles the SREJ (selective-reject) recovery
 * states, and finally pushes in-sequence frames to the user.  Expected
 * frames are delivered; out-of-window frames disconnect the channel;
 * sequence gaps enter SREJ_SENT and buffer frames on SREJ_QUEUE.
 * NOTE(review): sampled excerpt - goto labels, returns and several
 * closing braces between the numbered lines are not visible here.
 */
3160 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3162 struct l2cap_pinfo *pi = l2cap_pi(sk);
3163 u8 tx_seq = __get_txseq(rx_control);
3164 u8 req_seq = __get_reqseq(rx_control);
3165 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3166 int tx_seq_offset, expected_tx_seq_offset;
/* ack threshold: roughly 1/6th of the tx window, at least 1 */
3167 int num_to_ack = (pi->tx_win/6) + 1;
3170 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
/* F-bit answers our poll: stop the monitor timer, leave WAIT_F */
3173 if (L2CAP_CTRL_FINAL & rx_control &&
3174 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3175 del_timer(&pi->monitor_timer);
3176 if (pi->unacked_frames > 0)
3177 __mod_retrans_timer();
3178 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3181 pi->expected_ack_seq = req_seq;
3182 l2cap_drop_acked_frames(sk);
3184 if (tx_seq == pi->expected_tx_seq)
3187 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3188 if (tx_seq_offset < 0)
3189 tx_seq_offset += 64;
3191 /* invalid tx_seq */
3192 if (tx_seq_offset >= pi->tx_win) {
3193 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* NOTE(review): conn_state is used as a bitmask everywhere else
 * ('&'); this equality test only matches when LOCAL_BUSY is the
 * *only* flag set.  Looks like it should be
 * (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) - confirm against
 * upstream history before relying on this branch. */
3197 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3200 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3201 struct srej_list *first;
3203 first = list_first_entry(SREJ_LIST(sk),
3204 struct srej_list, list);
/* this is the oldest frame we SREJed: deliver it plus any
 * consecutive buffered successors */
3205 if (tx_seq == first->tx_seq) {
3206 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3207 l2cap_check_srej_gap(sk, tx_seq);
3209 list_del(&first->list);
/* all SREJed frames recovered: leave SREJ_SENT state */
3212 if (list_empty(SREJ_LIST(sk))) {
3213 pi->buffer_seq = pi->buffer_seq_srej;
3214 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3216 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3219 struct srej_list *l;
3221 /* duplicated tx_seq */
3222 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3225 list_for_each_entry(l, SREJ_LIST(sk), list) {
3226 if (l->tx_seq == tx_seq) {
3227 l2cap_resend_srejframe(sk, tx_seq);
3231 l2cap_send_srejframe(sk, tx_seq);
3234 expected_tx_seq_offset =
3235 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3236 if (expected_tx_seq_offset < 0)
3237 expected_tx_seq_offset += 64;
3239 /* duplicated tx_seq */
3240 if (tx_seq_offset < expected_tx_seq_offset)
/* first gap detected: enter SREJ recovery */
3243 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3245 BT_DBG("sk %p, Enter SREJ", sk);
3247 INIT_LIST_HEAD(SREJ_LIST(sk));
3248 pi->buffer_seq_srej = pi->buffer_seq;
3250 __skb_queue_head_init(SREJ_QUEUE(sk));
3251 __skb_queue_head_init(BUSY_QUEUE(sk));
3252 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3254 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3256 l2cap_send_srejframe(sk, tx_seq);
3258 del_timer(&pi->ack_timer);
/* in-sequence frame: advance and deliver (or buffer in SREJ state) */
3263 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3265 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3266 bt_cb(skb)->tx_seq = tx_seq;
3267 bt_cb(skb)->sar = sar;
3268 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3272 err = l2cap_push_rx_skb(sk, skb, rx_control);
3276 if (rx_control & L2CAP_CTRL_FINAL) {
3277 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3278 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3280 l2cap_retransmit_frames(sk);
/* batch acknowledgements to avoid an RR per frame */
3285 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3286 if (pi->num_acked == num_to_ack - 1)
/* Handle a Receiver-Ready (RR) S-frame.
 * Acks outstanding frames via req_seq, then: a P-bit poll is answered
 * with an F-bit response (srejtail or RR/RNR); an F-bit final either
 * triggers retransmission (REJ recovery) or resumes normal sending.
 * NOTE(review): sampled excerpt - returns/braces between the numbered
 * lines are not visible here.
 */
3296 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3298 struct l2cap_pinfo *pi = l2cap_pi(sk);
3300 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
3303 pi->expected_ack_seq = __get_reqseq(rx_control);
3304 l2cap_drop_acked_frames(sk);
/* peer polls us (P-bit): we must answer with the F-bit set */
3306 if (rx_control & L2CAP_CTRL_POLL) {
3307 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3308 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3309 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3310 (pi->unacked_frames > 0))
3311 __mod_retrans_timer();
3313 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3314 l2cap_send_srejtail(sk);
3316 l2cap_send_i_or_rr_or_rnr(sk);
/* peer answers our poll (F-bit) */
3319 } else if (rx_control & L2CAP_CTRL_FINAL) {
3320 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3322 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3323 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3325 l2cap_retransmit_frames(sk);
/* plain RR: remote no longer busy, resume transmission */
3328 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3329 (pi->unacked_frames > 0))
3330 __mod_retrans_timer();
3332 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3333 if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
3336 l2cap_ertm_send(sk);
/* Handle a Reject (REJ) S-frame: the peer asks for retransmission
 * starting at req_seq.  Acked frames are dropped first; retransmission
 * is triggered unless a final REJ repeats one already acted upon
 * (REJ_ACT guards against double retransmission while waiting for F).
 * NOTE(review): sampled excerpt - an else branch/braces between the
 * numbered lines are not visible here.
 */
3340 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3342 struct l2cap_pinfo *pi = l2cap_pi(sk);
3343 u8 tx_seq = __get_reqseq(rx_control);
3345 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
3347 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3349 pi->expected_ack_seq = tx_seq;
3350 l2cap_drop_acked_frames(sk);
3352 if (rx_control & L2CAP_CTRL_FINAL) {
/* only retransmit once per REJ while a poll answer is pending */
3353 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3354 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3356 l2cap_retransmit_frames(sk);
3358 l2cap_retransmit_frames(sk);
3360 if (pi->conn_state & L2CAP_CONN_WAIT_F)
3361 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective-Reject (SREJ) S-frame: retransmit exactly the
 * frame with sequence number req_seq.  A P-bit SREJ also acks frames
 * and demands an F-bit answer; SREJ_ACT/srej_save_reqseq guard against
 * retransmitting the same frame twice while WAIT_F is pending.
 * NOTE(review): sampled excerpt - else branches/braces between the
 * numbered lines are not visible here.
 */
3364 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3366 struct l2cap_pinfo *pi = l2cap_pi(sk);
3367 u8 tx_seq = __get_reqseq(rx_control);
3369 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
3371 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3373 if (rx_control & L2CAP_CTRL_POLL) {
3374 pi->expected_ack_seq = tx_seq;
3375 l2cap_drop_acked_frames(sk);
/* answer the poll with F-bit on the retransmitted frame */
3377 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3378 l2cap_retransmit_one_frame(sk, tx_seq);
3380 l2cap_ertm_send(sk);
3382 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3383 pi->srej_save_reqseq = tx_seq;
3384 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3386 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* final SREJ for a frame we already resent: just clear the flag */
3387 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3388 pi->srej_save_reqseq == tx_seq)
3389 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3391 l2cap_retransmit_one_frame(sk, tx_seq);
3393 l2cap_retransmit_one_frame(sk, tx_seq);
3394 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3395 pi->srej_save_reqseq = tx_seq;
3396 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver-Not-Ready (RNR) S-frame: the peer is busy.
 * Marks REMOTE_BUSY, acks outstanding frames, and answers a P-bit poll
 * with an F-bit response.  Outside SREJ recovery the retransmission
 * timer is stopped (nothing can be delivered while the peer is busy).
 * NOTE(review): sampled excerpt - returns/braces between the numbered
 * lines are not visible here.
 */
3401 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
3403 struct l2cap_pinfo *pi = l2cap_pi(sk);
3404 u8 tx_seq = __get_reqseq(rx_control);
3406 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
3408 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3409 pi->expected_ack_seq = tx_seq;
3410 l2cap_drop_acked_frames(sk);
3412 if (rx_control & L2CAP_CTRL_POLL)
3413 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3415 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
/* peer is busy: no use retransmitting until it recovers */
3416 del_timer(&pi->retrans_timer);
3417 if (rx_control & L2CAP_CTRL_POLL)
3418 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
/* in SREJ recovery a poll is answered with the SREJ tail */
3422 if (rx_control & L2CAP_CTRL_POLL)
3423 l2cap_send_srejtail(sk);
3425 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* Dispatch a received S-frame to the RR/REJ/SREJ/RNR handler.
 * An F-bit while in WAIT_F first clears the poll state (stop monitor
 * timer, possibly restart the retransmission timer).
 * NOTE(review): sampled excerpt - break statements/braces/return
 * between the numbered lines are not visible here.
 */
3428 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3430 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* F-bit answers our outstanding poll */
3432 if (L2CAP_CTRL_FINAL & rx_control &&
3433 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3434 del_timer(&l2cap_pi(sk)->monitor_timer);
3435 if (l2cap_pi(sk)->unacked_frames > 0)
3436 __mod_retrans_timer();
3437 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
3440 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3441 case L2CAP_SUPER_RCV_READY:
3442 l2cap_data_channel_rrframe(sk, rx_control);
3445 case L2CAP_SUPER_REJECT:
3446 l2cap_data_channel_rejframe(sk, rx_control);
3449 case L2CAP_SUPER_SELECT_REJECT:
3450 l2cap_data_channel_srejframe(sk, rx_control);
3453 case L2CAP_SUPER_RCV_NOT_READY:
3454 l2cap_data_channel_rnrframe(sk, rx_control);
/* Validate and dispatch one received ERTM frame (I- or S-frame).
 * Checks FCS, payload length against MPS, and the req_seq window
 * (req_seq must lie between expected_ack_seq and next_tx_seq in the
 * modulo-64 space); violations disconnect the channel.  I-frames with
 * correct length go to the I-frame handler, S-frames (which must carry
 * no payload beyond the control/FCS fields) to the S-frame handler.
 * NOTE(review): sampled excerpt - skb_pull, len computation and the
 * goto/drop paths between the numbered lines are not visible here.
 */
3462 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3464 struct l2cap_pinfo *pi = l2cap_pi(sk);
3467 int len, next_tx_seq_offset, req_seq_offset;
3469 control = get_unaligned_le16(skb->data);
3474 * We can just drop the corrupted I-frame here.
3475 * Receiver will miss it and start proper recovery
3476 * procedures and ask retransmission.
3478 if (l2cap_check_fcs(pi, skb))
/* SAR start frames carry a 2-byte SDU-length header */
3481 if (__is_sar_start(control) && __is_iframe(control))
3484 if (pi->fcs == L2CAP_FCS_CRC16)
3487 if (len > pi->mps) {
3488 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3492 req_seq = __get_reqseq(control);
3493 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
3494 if (req_seq_offset < 0)
3495 req_seq_offset += 64;
3497 next_tx_seq_offset =
3498 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
3499 if (next_tx_seq_offset < 0)
3500 next_tx_seq_offset += 64;
3502 /* check for invalid req-seq */
3503 if (req_seq_offset > next_tx_seq_offset) {
3504 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3508 if (__is_iframe(control)) {
3510 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3514 l2cap_data_channel_iframe(sk, control, skb);
/* S-frame with residual payload is a protocol violation */
3518 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3522 l2cap_data_channel_sframe(sk, control, skb);
/* Deliver an incoming frame to the channel identified by @cid,
 * dispatching per channel mode: BASIC queues directly (subject to
 * imtu), ERTM runs the full reliable state machine (via the socket
 * backlog when the socket is owned by user context), STREAMING does
 * FCS/length checks and best-effort reassembly.
 * NOTE(review): sampled excerpt - lock/unlock, drop/done labels and
 * kfree_skb between the numbered lines are not visible here.
 */
3532 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3535 struct l2cap_pinfo *pi;
3540 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3542 BT_DBG("unknown cid 0x%4.4x", cid);
3548 BT_DBG("sk %p, len %d", sk, skb->len);
3550 if (sk->sk_state != BT_CONNECTED)
3554 case L2CAP_MODE_BASIC:
3555 /* If socket recv buffers overflows we drop data here
3556 * which is *bad* because L2CAP has to be reliable.
3557 * But we don't have any other choice. L2CAP doesn't
3558 * provide flow control mechanism. */
3560 if (pi->imtu < skb->len)
3563 if (!sock_queue_rcv_skb(sk, skb))
3567 case L2CAP_MODE_ERTM:
/* process directly in softirq unless user context owns the sock */
3568 if (!sock_owned_by_user(sk)) {
3569 l2cap_ertm_data_rcv(sk, skb);
3571 if (sk_add_backlog(sk, skb))
3577 case L2CAP_MODE_STREAMING:
3578 control = get_unaligned_le16(skb->data);
3582 if (l2cap_check_fcs(pi, skb))
3585 if (__is_sar_start(control))
3588 if (pi->fcs == L2CAP_FCS_CRC16)
/* streaming carries only I-frames within MPS */
3591 if (len > pi->mps || len < 0 || __is_sframe(control))
3594 tx_seq = __get_txseq(control);
/* tolerate loss: resync expected_tx_seq on any gap */
3596 if (pi->expected_tx_seq == tx_seq)
3597 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3599 pi->expected_tx_seq = (tx_seq + 1) % 64;
3601 l2cap_streaming_reassembly_sdu(sk, skb, control);
3606 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Deliver a connectionless (G-frame) packet to the socket bound to
 * @psm, subject to state (BOUND/CONNECTED) and imtu checks.
 * NOTE(review): sampled excerpt - the NULL-sk check, drop/done labels
 * and kfree_skb between the numbered lines are not visible here.
 */
3620 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3624 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3630 BT_DBG("sk %p, len %d", sk, skb->len);
3632 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3635 if (l2cap_pi(sk)->imtu < skb->len)
3638 if (!sock_queue_rcv_skb(sk, skb))
/* Demultiplex one complete L2CAP frame by channel id: signalling
 * channels go to the signalling handler, the connectionless CID to the
 * PSM-based handler, everything else to the per-channel data path.
 * Frames whose header length disagrees with the skb are dropped
 * (drop path elided in this excerpt).
 */
3650 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3652 struct l2cap_hdr *lh = (void *) skb->data;
3656 skb_pull(skb, L2CAP_HDR_SIZE);
3657 cid = __le16_to_cpu(lh->cid);
3658 len = __le16_to_cpu(lh->len);
/* header/payload length mismatch: frame is unusable */
3660 if (len != skb->len) {
3665 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3668 case L2CAP_CID_LE_SIGNALING:
3669 case L2CAP_CID_SIGNALING:
3670 l2cap_sig_channel(conn, skb);
3673 case L2CAP_CID_CONN_LESS:
/* connectionless payload starts with the destination PSM */
3674 psm = get_unaligned_le16(skb->data);
3676 l2cap_conless_channel(conn, psm, skb);
3680 l2cap_data_channel(conn, cid, skb);
3685 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection from @bdaddr is being set
 * up.  Scan listening L2CAP sockets and build the accept/role-switch
 * link-mode mask; an exact local-address match (lm1) takes precedence
 * over wildcard listeners (lm2).
 * NOTE(review): sampled excerpt - the line setting 'exact' (presumably
 * exact++ on an address match) is not visible here; confirm upstream.
 */
3687 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3689 int exact = 0, lm1 = 0, lm2 = 0;
3690 register struct sock *sk;
3691 struct hlist_node *node;
/* only ACL links carry L2CAP traffic for this check */
3693 if (type != ACL_LINK)
3696 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3698 /* Find listening sockets and check their link_mode */
3699 read_lock(&l2cap_sk_list.lock);
3700 sk_for_each(sk, node, &l2cap_sk_list.head) {
3701 if (sk->sk_state != BT_LISTEN)
3704 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3705 lm1 |= HCI_LM_ACCEPT;
3706 if (l2cap_pi(sk)->role_switch)
3707 lm1 |= HCI_LM_MASTER;
3709 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3710 lm2 |= HCI_LM_ACCEPT;
3711 if (l2cap_pi(sk)->role_switch)
3712 lm2 |= HCI_LM_MASTER;
3715 read_unlock(&l2cap_sk_list.lock);
3717 return exact ? lm1 : lm2;
3720 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3722 struct l2cap_conn *conn;
3724 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3726 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3730 conn = l2cap_conn_add(hcon, status);
3732 l2cap_conn_ready(conn);
3734 l2cap_conn_del(hcon, bt_err(status));
3739 static int l2cap_disconn_ind(struct hci_conn *hcon)
3741 struct l2cap_conn *conn = hcon->l2cap_data;
3743 BT_DBG("hcon %p", hcon);
3745 if (hcon->type != ACL_LINK || !conn)
3748 return conn->disc_reason;
3751 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3753 BT_DBG("hcon %p reason %d", hcon, reason);
3755 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3758 l2cap_conn_del(hcon, bt_err(reason));
3763 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3765 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3768 if (encrypt == 0x00) {
3769 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3770 l2cap_sock_clear_timer(sk);
3771 l2cap_sock_set_timer(sk, HZ * 5);
3772 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3773 __l2cap_sock_close(sk, ECONNREFUSED);
3775 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3776 l2cap_sock_clear_timer(sk);
/* HCI callback: authentication/encryption completed with @status.
 * Walk every channel on the connection: established channels get their
 * encryption state re-checked; channels waiting in BT_CONNECT now send
 * their Connection Request; channels in BT_CONNECT2 answer the pending
 * Connection Response with success or security-block depending on
 * @status.
 * NOTE(review): sampled excerpt - bh_lock/unlock_sock, continue paths
 * and the status test guarding the CONNECT2 branch are not visible
 * between the numbered lines.
 */
3780 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3782 struct l2cap_chan_list *l;
3783 struct l2cap_conn *conn = hcon->l2cap_data;
3789 l = &conn->chan_list;
3791 BT_DBG("conn %p", conn);
3793 read_lock(&l->lock);
3795 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* skip channels whose connect is still pending elsewhere */
3798 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3803 if (!status && (sk->sk_state == BT_CONNECTED ||
3804 sk->sk_state == BT_CONFIG)) {
3805 l2cap_check_encryption(sk, encrypt);
/* security done: fire the deferred Connection Request */
3810 if (sk->sk_state == BT_CONNECT) {
3812 struct l2cap_conn_req req;
3813 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3814 req.psm = l2cap_pi(sk)->psm;
3816 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
3817 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3819 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3820 L2CAP_CONN_REQ, sizeof(req), &req);
3822 l2cap_sock_clear_timer(sk);
3823 l2cap_sock_set_timer(sk, HZ / 10);
/* incoming connect held for security: answer it now */
3825 } else if (sk->sk_state == BT_CONNECT2) {
3826 struct l2cap_conn_rsp rsp;
3830 sk->sk_state = BT_CONFIG;
3831 result = L2CAP_CR_SUCCESS;
3833 sk->sk_state = BT_DISCONN;
3834 l2cap_sock_set_timer(sk, HZ / 10);
3835 result = L2CAP_CR_SEC_BLOCK;
3838 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3839 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3840 rsp.result = cpu_to_le16(result);
3841 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3842 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3843 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3849 read_unlock(&l->lock);
/* HCI callback: reassemble ACL fragments into complete L2CAP frames.
 * A start fragment (!ACL_CONT) must carry at least the basic L2CAP
 * header; if it already holds the whole frame it is processed in
 * place, otherwise conn->rx_skb accumulates fragments until
 * conn->rx_len reaches zero.  Length violations (too long, unexpected
 * start/continuation, frame above the channel imtu) mark the
 * connection unreliable with ECOMM.
 * NOTE(review): sampled excerpt - goto drop/done labels, kfree_skb of
 * the fragment, bh_unlock_sock and the allocation-failure path between
 * the numbered lines are not visible here.
 */
3854 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3856 struct l2cap_conn *conn = hcon->l2cap_data;
3859 conn = l2cap_conn_add(hcon, 0);
3864 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3866 if (!(flags & ACL_CONT)) {
3867 struct l2cap_hdr *hdr;
/* a new start while a reassembly is pending means we lost data */
3873 BT_ERR("Unexpected start frame (len %d)", skb->len);
3874 kfree_skb(conn->rx_skb);
3875 conn->rx_skb = NULL;
3877 l2cap_conn_unreliable(conn, ECOMM);
3880 /* Start fragment always begin with Basic L2CAP header */
3881 if (skb->len < L2CAP_HDR_SIZE) {
3882 BT_ERR("Frame is too short (len %d)", skb->len);
3883 l2cap_conn_unreliable(conn, ECOMM);
3887 hdr = (struct l2cap_hdr *) skb->data;
3888 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3889 cid = __le16_to_cpu(hdr->cid);
3891 if (len == skb->len) {
3892 /* Complete frame received */
3893 l2cap_recv_frame(conn, skb);
3897 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3899 if (skb->len > len) {
3900 BT_ERR("Frame is too long (len %d, expected len %d)",
3902 l2cap_conn_unreliable(conn, ECOMM);
/* reject frames that would exceed the target channel's MTU */
3906 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3908 if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
3909 BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
3910 len, l2cap_pi(sk)->imtu);
3912 l2cap_conn_unreliable(conn, ECOMM);
3919 /* Allocate skb for the complete frame (with header) */
3920 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
3924 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3926 conn->rx_len = len - skb->len;
3928 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* continuation with nothing pending: protocol desync */
3930 if (!conn->rx_len) {
3931 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
3932 l2cap_conn_unreliable(conn, ECOMM);
3936 if (skb->len > conn->rx_len) {
3937 BT_ERR("Fragment is too long (len %d, expected %d)",
3938 skb->len, conn->rx_len);
3939 kfree_skb(conn->rx_skb);
3940 conn->rx_skb = NULL;
3942 l2cap_conn_unreliable(conn, ECOMM);
3946 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3948 conn->rx_len -= skb->len;
3950 if (!conn->rx_len) {
3951 /* Complete frame received */
3952 l2cap_recv_frame(conn, conn->rx_skb);
3953 conn->rx_skb = NULL;
/* seq_file show routine: dump one line per L2CAP socket (addresses,
 * state, PSM, CIDs, MTUs, security level, ...) for the debugfs file.
 * NOTE(review): sampled excerpt - the scid/dcid printf arguments and
 * the return statement between the numbered lines are not visible.
 */
3962 static int l2cap_debugfs_show(struct seq_file *f, void *p)
3965 struct hlist_node *node;
3967 read_lock_bh(&l2cap_sk_list.lock);
3969 sk_for_each(sk, node, &l2cap_sk_list.head) {
3970 struct l2cap_pinfo *pi = l2cap_pi(sk);
3972 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
3973 batostr(&bt_sk(sk)->src),
3974 batostr(&bt_sk(sk)->dst),
3975 sk->sk_state, __le16_to_cpu(pi->psm),
3977 pi->imtu, pi->omtu, pi->sec_level,
3981 read_unlock_bh(&l2cap_sk_list.lock);
3986 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
3988 return single_open(file, l2cap_debugfs_show, inode->i_private);
3991 static const struct file_operations l2cap_debugfs_fops = {
3992 .open = l2cap_debugfs_open,
3994 .llseek = seq_lseek,
3995 .release = single_release,
/* debugfs dentry created in l2cap_init(), removed in l2cap_exit(). */
3998 static struct dentry *l2cap_debugfs;
/* Registration record hooking L2CAP into the HCI core: connection
 * accept/confirm, disconnect, security and ACL data callbacks.
 * NOTE(review): sampled excerpt - the .name initializer (upstream uses
 * .name = "L2CAP") is not visible between the numbered lines.
 */
4000 static struct hci_proto l2cap_hci_proto = {
4002 .id = HCI_PROTO_L2CAP,
4003 .connect_ind = l2cap_connect_ind,
4004 .connect_cfm = l2cap_connect_cfm,
4005 .disconn_ind = l2cap_disconn_ind,
4006 .disconn_cfm = l2cap_disconn_cfm,
4007 .security_cfm = l2cap_security_cfm,
4008 .recv_acldata = l2cap_recv_acldata
/* Module initialisation: register the socket family, create the
 * single-threaded busy work queue, register with the HCI core and
 * create the debugfs entry (debugfs failure is non-fatal).
 * NOTE(review): sampled excerpt - the error-checking lines, goto error
 * label placement and return statements between the numbered lines are
 * not visible here.
 */
4011 int __init l2cap_init(void)
4015 err = l2cap_init_sockets();
/* dedicated workqueue servicing l2cap_busy_work items */
4019 _busy_wq = create_singlethread_workqueue("l2cap");
4025 err = hci_register_proto(&l2cap_hci_proto);
4027 BT_ERR("L2CAP protocol registration failed");
4028 bt_sock_unregister(BTPROTO_L2CAP);
4033 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4034 bt_debugfs, NULL, &l2cap_debugfs_fops);
4036 BT_ERR("Failed to create L2CAP debug file");
/* error path: undo partial initialisation */
4042 destroy_workqueue(_busy_wq);
4043 l2cap_cleanup_sockets();
4047 void l2cap_exit(void)
4049 debugfs_remove(l2cap_debugfs);
4051 flush_workqueue(_busy_wq);
4052 destroy_workqueue(_busy_wq);
4054 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4055 BT_ERR("L2CAP protocol unregistration failed");
4057 l2cap_cleanup_sockets();
/* Load-time knob to disable Enhanced Retransmission Mode entirely. */
4060 module_param(disable_ertm, bool, 0644);
4061 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");