2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core and sockets. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
58 #define VERSION "2.15"
60 static int disable_ertm = 0;
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
65 static const struct proto_ops l2cap_sock_ops;
67 static struct workqueue_struct *_busy_wq;
69 static struct bt_sock_list l2cap_sk_list = {
70 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
73 static void l2cap_busy_work(struct work_struct *work);
75 static void __l2cap_sock_close(struct sock *sk, int reason);
76 static void l2cap_sock_close(struct sock *sk);
77 static void l2cap_sock_kill(struct sock *sk);
79 static int l2cap_build_conf_req(struct sock *sk, void *data);
80 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
81 u8 code, u8 ident, u16 dlen, void *data);
83 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
85 /* ---- L2CAP timers ---- */
/* sk->sk_timer callback: map the socket state at expiry to an errno and
 * close the channel.  NOTE(review): the default initialisation of 'reason'
 * and the socket locking lines are not visible in this view — confirm. */
86 static void l2cap_sock_timeout(unsigned long arg)
88 	struct sock *sk = (struct sock *) arg;
91 	BT_DBG("sock %p state %d", sk, sk->sk_state);
/* Timing out while connected/configuring, or while connecting above the
 * SDP security level, is reported as a refused connection. */
95 	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
96 		reason = ECONNREFUSED;
97 	else if (sk->sk_state == BT_CONNECT &&
98 			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
99 		reason = ECONNREFUSED;
103 	__l2cap_sock_close(sk, reason);
111 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
113 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
114 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timer, if any. */
117 static void l2cap_sock_clear_timer(struct sock *sk)
119 	BT_DBG("sock %p state %d", sk, sk->sk_state);
120 	sk_stop_timer(sk, &sk->sk_timer);
123 /* ---- L2CAP channels ---- */
/* Walk the per-connection channel list and return the channel whose
 * destination CID matches.  Caller must hold the chan_list lock. */
124 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
127 	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
128 		if (l2cap_pi(s)->dcid == cid)
/* As above but matches on the source CID.  Caller holds the list lock. */
134 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
137 	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
138 		if (l2cap_pi(s)->scid == cid)
144 /* Find channel with given SCID.
145  * Returns the locked socket (bh_lock_sock on the hit — lock line not
146  * visible in this view); takes the list read lock around the lookup. */
146 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
150 	s = __l2cap_get_chan_by_scid(l, cid);
153 	read_unlock(&l->lock);
/* Find the channel that owns a given signalling-command identifier.
 * Caller holds the list lock. */
157 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
160 	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
161 		if (l2cap_pi(s)->ident == ident)
/* Locked variant of __l2cap_get_chan_by_ident(): takes the list read
 * lock around the lookup. */
167 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
171 	s = __l2cap_get_chan_by_ident(l, ident);
174 	read_unlock(&l->lock);
/* Allocate the first free dynamic CID in [L2CAP_CID_DYN_START,
 * L2CAP_CID_DYN_END) by linear scan of the channel list. */
178 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
180 	u16 cid = L2CAP_CID_DYN_START;
182 	for (; cid < L2CAP_CID_DYN_END; cid++) {
183 		if (!__l2cap_get_chan_by_scid(l, cid))
/* Push 'sk' onto the head of the doubly linked channel list.
 * Caller must hold the list write lock. */
190 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
195 		l2cap_pi(l->head)->prev_c = sk;
197 	l2cap_pi(sk)->next_c = l->head;
198 	l2cap_pi(sk)->prev_c = NULL;
/* Remove 'sk' from the channel list, fixing up neighbour links under
 * the list write lock (bh-safe variant). */
202 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
204 	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
206 	write_lock_bh(&l->lock);
211 		l2cap_pi(next)->prev_c = prev;
213 		l2cap_pi(prev)->next_c = next;
214 	write_unlock_bh(&l->lock);
/* Attach a socket to a connection: assign CIDs by socket type, link it
 * into the channel list and, for incoming channels, queue it on the
 * listening parent.  Caller holds the chan_list write lock. */
219 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
221 	struct l2cap_chan_list *l = &conn->chan_list;
223 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
224 			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: HCI "remote user terminated connection" — default disc reason. */
226 	conn->disc_reason = 0x13;
228 	l2cap_pi(sk)->conn = conn;
230 	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
231 		/* Alloc CID for connection-oriented socket */
232 		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
233 	} else if (sk->sk_type == SOCK_DGRAM) {
234 		/* Connectionless socket */
235 		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
236 		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
237 		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
239 		/* Raw socket can send/recv signalling messages only */
240 		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
241 		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
242 		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
245 	__l2cap_chan_link(l, sk);
248 		bt_accept_enqueue(parent, sk);
/* Delete channel.
252  * Must be called on the locked socket. */
/* Tear a channel down: unlink from the connection, drop the hcon ref,
 * mark the socket closed/zapped, notify the parent (if any) and purge
 * every queue/timer the ERTM state machine may still hold. */
253 static void l2cap_chan_del(struct sock *sk, int err)
255 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
256 	struct sock *parent = bt_sk(sk)->parent;
258 	l2cap_sock_clear_timer(sk);
260 	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
263 	/* Unlink from channel list */
264 	l2cap_chan_unlink(&conn->chan_list, sk);
265 	l2cap_pi(sk)->conn = NULL;
266 	hci_conn_put(conn->hcon);
269 	sk->sk_state = BT_CLOSED;
270 	sock_set_flag(sk, SOCK_ZAPPED);
276 		bt_accept_unlink(sk);
/* Wake the listening parent so accept() can reap this socket. */
277 		parent->sk_data_ready(parent, 0);
279 		sk->sk_state_change(sk);
281 	skb_queue_purge(TX_QUEUE(sk));
283 	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
284 		struct srej_list *l, *tmp;
286 		del_timer(&l2cap_pi(sk)->retrans_timer);
287 		del_timer(&l2cap_pi(sk)->monitor_timer);
288 		del_timer(&l2cap_pi(sk)->ack_timer);
290 		skb_queue_purge(SREJ_QUEUE(sk));
291 		skb_queue_purge(BUSY_QUEUE(sk));
/* Free any outstanding SREJ bookkeeping entries. */
293 		list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
300 /* Service level security */
/* Derive the HCI authentication requirement from the channel's PSM and
 * security level, then ask the HCI layer to enforce it.  PSM 0x0001
 * (SDP) never bonds and is demoted to BT_SECURITY_SDP when LOW. */
301 static inline int l2cap_check_security(struct sock *sk)
303 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
306 	if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
307 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
308 			auth_type = HCI_AT_NO_BONDING_MITM;
310 			auth_type = HCI_AT_NO_BONDING;
312 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
313 			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
315 		switch (l2cap_pi(sk)->sec_level) {
316 		case BT_SECURITY_HIGH:
317 			auth_type = HCI_AT_GENERAL_BONDING_MITM;
319 		case BT_SECURITY_MEDIUM:
320 			auth_type = HCI_AT_GENERAL_BONDING;
323 			auth_type = HCI_AT_NO_BONDING;
328 	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel-reserved range under conn->lock. */
332 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
336 	/* Get next available identifier.
337 	 * 1 - 128 are used by kernel.
338 	 * 129 - 199 are reserved.
339 	 * 200 - 254 are used by utilities like l2ping, etc.
342 	spin_lock_bh(&conn->lock);
344 	if (++conn->tx_ident > 128)
349 	spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command skb and push it out over ACL.
 * NOTE(review): the NULL check on 'skb' is not visible here — confirm
 * the allocation-failure path before relying on this. */
354 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
356 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
358 	BT_DBG("code 0x%2.2x", code);
363 	hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM S-frame: L2CAP header + 16-bit control
 * word (+ optional CRC16 FCS).  Consumes pending F/P bits from
 * conn_state.  No-op unless the socket is BT_CONNECTED. */
366 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
369 	struct l2cap_hdr *lh;
370 	struct l2cap_conn *conn = pi->conn;
371 	struct sock *sk = (struct sock *)pi;
372 	int count, hlen = L2CAP_HDR_SIZE + 2;
374 	if (sk->sk_state != BT_CONNECTED)
/* FCS adds 2 trailing bytes to the frame (hlen bump hidden in view). */
377 	if (pi->fcs == L2CAP_FCS_CRC16)
380 	BT_DBG("pi %p, control 0x%2.2x", pi, control);
382 	count = min_t(unsigned int, conn->mtu, hlen);
383 	control |= L2CAP_CTRL_FRAME_TYPE;
/* Piggy-back a pending Final bit, then clear it. */
385 	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
386 		control |= L2CAP_CTRL_FINAL;
387 		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Likewise for a pending Poll bit. */
390 	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
391 		control |= L2CAP_CTRL_POLL;
392 		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
395 	skb = bt_skb_alloc(count, GFP_ATOMIC);
399 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
400 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
401 	lh->cid = cpu_to_le16(pi->dcid);
402 	put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything before the 2 FCS bytes themselves. */
404 	if (pi->fcs == L2CAP_FCS_CRC16) {
405 		u16 fcs = crc16(0, (u8 *)lh, count - 2);
406 		put_unaligned_le16(fcs, skb_put(skb, 2));
409 	hci_send_acl(pi->conn->hcon, skb, 0);
/* Send a Receiver Ready, or Receiver Not Ready when we are locally
 * busy, acknowledging up to buffer_seq. */
412 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
414 	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
415 		control |= L2CAP_SUPER_RCV_NOT_READY;
/* Remember an RNR went out so a follow-up RR can clear it later. */
416 		pi->conn_state |= L2CAP_CONN_RNR_SENT;
418 		control |= L2CAP_SUPER_RCV_READY;
420 	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
422 	l2cap_send_sframe(pi, control);
425 static inline int __l2cap_no_conn_pending(struct sock *sk)
427 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment: if the peer's feature mask is already
 * known, send a Connect Request (once security passes); otherwise send
 * an Information Request for the feature mask first. */
430 static void l2cap_do_start(struct sock *sk)
432 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
434 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight — wait for it to finish. */
435 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
438 		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
439 			struct l2cap_conn_req req;
440 			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
441 			req.psm = l2cap_pi(sk)->psm;
443 			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
444 			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
446 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
447 					L2CAP_CONN_REQ, sizeof(req), &req);
450 		struct l2cap_info_req req;
451 		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
453 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
454 		conn->info_ident = l2cap_get_ident(conn);
/* Bound the feature exchange so a silent peer cannot stall us. */
456 		mod_timer(&conn->info_timer, jiffies +
457 					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
459 		l2cap_send_cmd(conn, conn->info_ident,
460 					L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether both peers support the requested channel mode.
 * NOTE(review): the guard before the |= (presumably !disable_ertm) is
 * not visible in this view — confirm. */
464 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
466 	u32 local_feat_mask = l2cap_feat_mask;
468 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
471 	case L2CAP_MODE_ERTM:
472 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
473 	case L2CAP_MODE_STREAMING:
474 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Abort a channel: flush pending TX, stop the ERTM timers, issue a
 * Disconnect Request and move the socket to BT_DISCONN. */
480 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
482 	struct l2cap_disconn_req req;
487 	skb_queue_purge(TX_QUEUE(sk));
489 	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
490 		del_timer(&l2cap_pi(sk)->retrans_timer);
491 		del_timer(&l2cap_pi(sk)->monitor_timer);
492 		del_timer(&l2cap_pi(sk)->ack_timer);
495 	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
496 	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
497 	l2cap_send_cmd(conn, l2cap_get_ident(conn),
498 			L2CAP_DISCONN_REQ, sizeof(req), &req);
500 	sk->sk_state = BT_DISCONN;
504 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel on 'conn' forward one step:
 * BT_CONNECT channels get a Connect Request (or are queued for close if
 * their mode is unsupported); BT_CONNECT2 channels get a Connect
 * Response and, on success, the first Configure Request.  Channels to
 * be closed are collected in 'del' and torn down after the list read
 * lock is dropped, since closing needs the socket lock. */
505 static void l2cap_conn_start(struct l2cap_conn *conn)
507 	struct l2cap_chan_list *l = &conn->chan_list;
508 	struct sock_del_list del, *tmp1, *tmp2;
511 	BT_DBG("conn %p", conn);
513 	INIT_LIST_HEAD(&del.list);
517 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented sockets participate in this state machine. */
520 		if (sk->sk_type != SOCK_SEQPACKET &&
521 				sk->sk_type != SOCK_STREAM) {
526 		if (sk->sk_state == BT_CONNECT) {
527 			struct l2cap_conn_req req;
529 			if (!l2cap_check_security(sk) ||
530 					!__l2cap_no_conn_pending(sk)) {
/* Unsupported mandatory mode on a STATE2 device: defer the close. */
535 			if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
537 					&& l2cap_pi(sk)->conf_state &
538 					L2CAP_CONF_STATE2_DEVICE) {
539 				tmp1 = kzalloc(sizeof(struct sock_del_list),
542 				list_add_tail(&tmp1->list, &del.list);
547 			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
548 			req.psm = l2cap_pi(sk)->psm;
550 			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
551 			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
553 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
554 				L2CAP_CONN_REQ, sizeof(req), &req);
556 		} else if (sk->sk_state == BT_CONNECT2) {
557 			struct l2cap_conn_rsp rsp;
559 			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
560 			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
562 			if (l2cap_check_security(sk)) {
/* Deferred setup: report "pending / authorization" and let userspace
 * decide via the parent's data_ready wakeup. */
563 				if (bt_sk(sk)->defer_setup) {
564 					struct sock *parent = bt_sk(sk)->parent;
565 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
566 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
567 					parent->sk_data_ready(parent, 0);
570 					sk->sk_state = BT_CONFIG;
571 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
572 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
575 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
576 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
579 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
580 				L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Skip config if one was already sent or the response wasn't success. */
582 			if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
583 					rsp.result != L2CAP_CR_SUCCESS) {
588 			l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
589 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
590 						l2cap_build_conf_req(sk, buf), buf);
591 			l2cap_pi(sk)->num_conf_req++;
597 	read_unlock(&l->lock);
/* Now close the channels we deferred above, with proper socket locking. */
599 	list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
600 		bh_lock_sock(tmp1->sk);
601 		__l2cap_sock_close(tmp1->sk, ECONNRESET);
602 		bh_unlock_sock(tmp1->sk);
603 		list_del(&tmp1->list);
/* Called once the underlying ACL link is up: mark non-connection-
 * oriented sockets connected immediately, and start channel setup for
 * sockets already in BT_CONNECT. */
608 static void l2cap_conn_ready(struct l2cap_conn *conn)
610 	struct l2cap_chan_list *l = &conn->chan_list;
613 	BT_DBG("conn %p", conn);
617 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
620 		if (sk->sk_type != SOCK_SEQPACKET &&
621 				sk->sk_type != SOCK_STREAM) {
622 			l2cap_sock_clear_timer(sk);
623 			sk->sk_state = BT_CONNECTED;
624 			sk->sk_state_change(sk);
625 		} else if (sk->sk_state == BT_CONNECT)
631 	read_unlock(&l->lock);
634 /* Notify sockets that we cannot guarantee reliability anymore */
635 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
637 	struct l2cap_chan_list *l = &conn->chan_list;
640 	BT_DBG("conn %p", conn);
/* Only channels that demanded reliability get the error reported. */
644 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
645 		if (l2cap_pi(sk)->force_reliable)
649 	read_unlock(&l->lock);
/* Information-request timer: the peer never answered our feature-mask
 * query, so mark the exchange done and proceed with channel setup. */
652 static void l2cap_info_timeout(unsigned long arg)
654 	struct l2cap_conn *conn = (void *) arg;
656 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
657 	conn->info_ident = 0;
659 	l2cap_conn_start(conn);
/* Get or create the L2CAP connection object bound to an HCI connection.
 * Returns the existing conn if one is already attached to 'hcon'. */
662 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
664 	struct l2cap_conn *conn = hcon->l2cap_data;
669 	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
673 	hcon->l2cap_data = conn;
676 	BT_DBG("hcon %p conn %p", hcon, conn);
678 	conn->mtu = hcon->hdev->acl_mtu;
679 	conn->src = &hcon->hdev->bdaddr;
680 	conn->dst = &hcon->dst;
684 	spin_lock_init(&conn->lock);
685 	rwlock_init(&conn->chan_list.lock);
687 	setup_timer(&conn->info_timer, l2cap_info_timeout,
688 						(unsigned long) conn);
/* 0x13: HCI "remote user terminated connection" — default disc reason. */
690 	conn->disc_reason = 0x13;
/* Destroy the L2CAP connection attached to 'hcon': drop any partially
 * reassembled skb, close every remaining channel with 'err', stop the
 * info timer and detach from the HCI connection. */
695 static void l2cap_conn_del(struct hci_conn *hcon, int err)
697 	struct l2cap_conn *conn = hcon->l2cap_data;
703 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
705 	kfree_skb(conn->rx_skb);
/* l2cap_chan_del() unlinks the head each pass, so this drains the list. */
708 	while ((sk = conn->chan_list.head)) {
710 		l2cap_chan_del(sk, err);
715 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
716 		del_timer_sync(&conn->info_timer);
718 	hcon->l2cap_data = NULL;
722 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
724 struct l2cap_chan_list *l = &conn->chan_list;
725 write_lock_bh(&l->lock);
726 __l2cap_chan_add(conn, sk, parent);
727 write_unlock_bh(&l->lock);
730 /* ---- Socket interface ---- */
/* Exact-match lookup: socket bound to (source port 'psm', 'src' bdaddr).
 * Caller must hold l2cap_sk_list.lock. */
731 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
734 	struct hlist_node *node;
735 	sk_for_each(sk, node, &l2cap_sk_list.head)
736 		if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
743 /* Find socket with psm and source bdaddr.
744  * Returns closest match: an exact source-address hit wins, otherwise a
744  * BDADDR_ANY wildcard listener (kept in sk1) is returned. */
746 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
748 	struct sock *sk = NULL, *sk1 = NULL;
749 	struct hlist_node *node;
751 	sk_for_each(sk, node, &l2cap_sk_list.head) {
752 		if (state && sk->sk_state != state)
755 		if (l2cap_pi(sk)->psm == psm) {
757 			if (!bacmp(&bt_sk(sk)->src, src))
761 			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* 'node' non-NULL means the loop broke on an exact match. */
765 	return node ? sk : sk1;
768 /* Find socket with given address (psm, src).
769  * Returns locked socket (bh_lock_sock on the hit; lock line not visible
769  * in this view).  Takes the global socket-list read lock. */
770 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
773 	read_lock(&l2cap_sk_list.lock);
774 	s = __l2cap_get_sock_by_psm(state, psm, src);
777 	read_unlock(&l2cap_sk_list.lock);
/* sk_destruct hook: free any skbs still sitting on the RX/TX queues. */
781 static void l2cap_sock_destruct(struct sock *sk)
785 	skb_queue_purge(&sk->sk_receive_queue);
786 	skb_queue_purge(&sk->sk_write_queue);
/* Shut down a listening socket: close every not-yet-accepted child,
 * then mark the parent closed and zapped. */
789 static void l2cap_sock_cleanup_listen(struct sock *parent)
793 	BT_DBG("parent %p", parent);
795 	/* Close not yet accepted channels */
796 	while ((sk = bt_accept_dequeue(parent, NULL)))
797 		l2cap_sock_close(sk);
799 	parent->sk_state = BT_CLOSED;
800 	sock_set_flag(parent, SOCK_ZAPPED);
803 /* Kill socket (only if zapped and orphan)
804  * Must be called on unlocked socket.
804  * NOTE(review): the final sock_put() releasing the last reference is
804  * not visible in this view — confirm. */
806 static void l2cap_sock_kill(struct sock *sk)
808 	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
811 	BT_DBG("sk %p state %d", sk, sk->sk_state);
813 	/* Kill poor orphan */
814 	bt_sock_unlink(&l2cap_sk_list, sk);
815 	sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close.  Listening sockets sweep their accept queue;
 * connected/config channels send a Disconnect Request; BT_CONNECT2
 * channels answer the pending Connect Request with a reject before the
 * channel is deleted.  Caller holds the socket lock. */
819 static void __l2cap_sock_close(struct sock *sk, int reason)
821 	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
823 	switch (sk->sk_state) {
825 		l2cap_sock_cleanup_listen(sk);
830 		if (sk->sk_type == SOCK_SEQPACKET ||
831 				sk->sk_type == SOCK_STREAM) {
832 			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* Give the peer sk_sndtimeo to answer the Disconnect Request. */
834 			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
835 			l2cap_send_disconn_req(conn, sk, reason);
837 			l2cap_chan_del(sk, reason);
841 		if (sk->sk_type == SOCK_SEQPACKET ||
842 				sk->sk_type == SOCK_STREAM) {
843 			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
844 			struct l2cap_conn_rsp rsp;
/* Closing during deferred setup means we blocked it on security. */
847 			if (bt_sk(sk)->defer_setup)
848 				result = L2CAP_CR_SEC_BLOCK;
850 				result = L2CAP_CR_BAD_PSM;
852 			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
853 			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
854 			rsp.result = cpu_to_le16(result);
855 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
856 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
857 					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
859 		l2cap_chan_del(sk, reason);
864 		l2cap_chan_del(sk, reason);
868 		sock_set_flag(sk, SOCK_ZAPPED);
873 /* Must be called on unlocked socket. */
/* Clears the timer, then closes with ECONNRESET.  NOTE(review): the
 * lock_sock/release_sock pair and final l2cap_sock_kill() are not
 * visible in this view — confirm. */
874 static void l2cap_sock_close(struct sock *sk)
876 	l2cap_sock_clear_timer(sk);
878 	__l2cap_sock_close(sk, ECONNRESET);
/* Initialise a fresh L2CAP socket.  A child accepted from 'parent'
 * inherits its type and all channel options; a brand-new socket gets
 * the documented defaults (ERTM for SOCK_STREAM unless disabled). */
883 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
885 	struct l2cap_pinfo *pi = l2cap_pi(sk);
890 		sk->sk_type = parent->sk_type;
891 		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
893 		pi->imtu = l2cap_pi(parent)->imtu;
894 		pi->omtu = l2cap_pi(parent)->omtu;
895 		pi->conf_state = l2cap_pi(parent)->conf_state;
896 		pi->mode = l2cap_pi(parent)->mode;
897 		pi->fcs = l2cap_pi(parent)->fcs;
898 		pi->max_tx = l2cap_pi(parent)->max_tx;
899 		pi->tx_win = l2cap_pi(parent)->tx_win;
900 		pi->sec_level = l2cap_pi(parent)->sec_level;
901 		pi->role_switch = l2cap_pi(parent)->role_switch;
902 		pi->force_reliable = l2cap_pi(parent)->force_reliable;
904 		pi->imtu = L2CAP_DEFAULT_MTU;
/* Stream sockets default to ERTM and pin the mode (STATE2_DEVICE). */
906 		if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
907 			pi->mode = L2CAP_MODE_ERTM;
908 			pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
910 			pi->mode = L2CAP_MODE_BASIC;
912 		pi->max_tx = L2CAP_DEFAULT_MAX_TX;
913 		pi->fcs = L2CAP_FCS_CRC16;
914 		pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
915 		pi->sec_level = BT_SECURITY_LOW;
917 		pi->force_reliable = 0;
920 	/* Default config options */
922 	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
923 	skb_queue_head_init(TX_QUEUE(sk));
924 	skb_queue_head_init(SREJ_QUEUE(sk));
925 	skb_queue_head_init(BUSY_QUEUE(sk));
926 	INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor: sizes sk allocations to hold struct l2cap_pinfo. */
929 static struct proto l2cap_proto = {
931 	.owner		= THIS_MODULE,
932 	.obj_size	= sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialise an L2CAP socket: destructor, send
 * timeout, timer and registration on the global socket list. */
935 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
939 	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
943 	sock_init_data(sock, sk);
944 	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
946 	sk->sk_destruct = l2cap_sock_destruct;
947 	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
949 	sock_reset_flag(sk, SOCK_ZAPPED);
951 	sk->sk_protocol = proto;
952 	sk->sk_state = BT_OPEN;
954 	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
956 	bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate the socket type, require CAP_NET_RAW for
 * userspace raw sockets, then allocate and initialise the sock. */
960 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
965 	BT_DBG("sock %p", sock);
967 	sock->state = SS_UNCONNECTED;
969 	if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
970 			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
971 		return -ESOCKTNOSUPPORT;
973 	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
976 	sock->ops = &l2cap_sock_ops;
978 	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
982 	l2cap_sock_init(sk, NULL);
/* bind(2): copy in the (possibly short) sockaddr_l2, validate the PSM
 * (odd, upper-byte LSB clear; well-known PSMs need
 * CAP_NET_BIND_SERVICE), reject duplicate (psm, bdaddr) bindings, then
 * record source address and ports under the socket-list write lock. */
986 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
988 	struct sock *sk = sock->sk;
989 	struct sockaddr_l2 la;
994 	if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Tolerate short sockaddrs: copy only what the caller supplied. */
997 	memset(&la, 0, sizeof(la));
998 	len = min_t(unsigned int, sizeof(la), alen);
999 	memcpy(&la, addr, len);
1006 	if (sk->sk_state != BT_OPEN) {
1012 		__u16 psm = __le16_to_cpu(la.l2_psm);
1014 		/* PSM must be odd and lsb of upper byte must be 0 */
1015 		if ((psm & 0x0101) != 0x0001) {
1020 		/* Restrict usage of well-known PSMs */
1021 		if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
1027 	write_lock_bh(&l2cap_sk_list.lock);
1029 	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
1032 		/* Save source address */
1033 		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
1034 		l2cap_pi(sk)->psm   = la.l2_psm;
1035 		l2cap_pi(sk)->sport = la.l2_psm;
1036 		sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) need no service-level security. */
1038 		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
1039 					__le16_to_cpu(la.l2_psm) == 0x0003)
1040 			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1043 	write_unlock_bh(&l2cap_sk_list.lock);
/* Establish the outgoing connection: pick an HCI route, derive the
 * authentication type from socket type / PSM / security level, create
 * the ACL link and attach this channel to it.  If the ACL is already
 * up, non-connection-oriented sockets become connected immediately. */
1050 static int l2cap_do_connect(struct sock *sk)
1052 	bdaddr_t *src = &bt_sk(sk)->src;
1053 	bdaddr_t *dst = &bt_sk(sk)->dst;
1054 	struct l2cap_conn *conn;
1055 	struct hci_conn *hcon;
1056 	struct hci_dev *hdev;
1060 	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1063 	hdev = hci_get_route(dst, src);
1065 		return -EHOSTUNREACH;
1067 	hci_dev_lock_bh(hdev);
/* Raw sockets bond dedicated; SDP (0x0001) never bonds; everything
 * else maps its security level to general bonding. */
1071 	if (sk->sk_type == SOCK_RAW) {
1072 		switch (l2cap_pi(sk)->sec_level) {
1073 		case BT_SECURITY_HIGH:
1074 			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1076 		case BT_SECURITY_MEDIUM:
1077 			auth_type = HCI_AT_DEDICATED_BONDING;
1080 			auth_type = HCI_AT_NO_BONDING;
1083 	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
1084 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
1085 			auth_type = HCI_AT_NO_BONDING_MITM;
1087 			auth_type = HCI_AT_NO_BONDING;
1089 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
1090 			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1092 		switch (l2cap_pi(sk)->sec_level) {
1093 		case BT_SECURITY_HIGH:
1094 			auth_type = HCI_AT_GENERAL_BONDING_MITM;
1096 		case BT_SECURITY_MEDIUM:
1097 			auth_type = HCI_AT_GENERAL_BONDING;
1100 			auth_type = HCI_AT_NO_BONDING;
1105 	hcon = hci_connect(hdev, ACL_LINK, dst,
1106 					l2cap_pi(sk)->sec_level, auth_type);
1110 	conn = l2cap_conn_add(hcon, 0);
1118 	/* Update source addr of the socket */
1119 	bacpy(src, conn->src);
1121 	l2cap_chan_add(conn, sk, NULL);
1123 	sk->sk_state = BT_CONNECT;
1124 	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1126 	if (hcon->state == BT_CONNECTED) {
1127 		if (sk->sk_type != SOCK_SEQPACKET &&
1128 				sk->sk_type != SOCK_STREAM) {
1129 			l2cap_sock_clear_timer(sk);
1130 			sk->sk_state = BT_CONNECTED;
1136 	hci_dev_unlock_bh(hdev);
/* connect(2): validate the address and channel mode, reject states
 * other than a fresh/bound socket, record (dst, psm), kick off
 * l2cap_do_connect() and optionally wait for BT_CONNECTED. */
1141 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1143 	struct sock *sk = sock->sk;
1144 	struct sockaddr_l2 la;
1147 	BT_DBG("sk %p", sk);
1149 	if (!addr || alen < sizeof(addr->sa_family) ||
1150 			addr->sa_family != AF_BLUETOOTH)
1153 	memset(&la, 0, sizeof(la));
1154 	len = min_t(unsigned int, sizeof(la), alen);
1155 	memcpy(&la, addr, len);
1162 	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
/* Only modes supported on connection-oriented sockets are allowed. */
1168 	switch (l2cap_pi(sk)->mode) {
1169 	case L2CAP_MODE_BASIC:
1171 	case L2CAP_MODE_ERTM:
1172 	case L2CAP_MODE_STREAMING:
1181 	switch (sk->sk_state) {
1185 		/* Already connecting */
1189 		/* Already connected */
1203 	/* PSM must be odd and lsb of upper byte must be 0 */
1204 	if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
1205 			sk->sk_type != SOCK_RAW) {
1210 	/* Set destination address and psm */
1211 	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1212 	l2cap_pi(sk)->psm = la.l2_psm;
1214 	err = l2cap_do_connect(sk);
1219 	err = bt_sock_wait_state(sk, BT_CONNECTED,
1220 			sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET/STREAM sockets in a supported mode
 * may listen.  An unbound PSM is auto-assigned from the dynamic range
 * (odd values 0x1001..0x10ff) under the socket-list write lock. */
1226 static int l2cap_sock_listen(struct socket *sock, int backlog)
1228 	struct sock *sk = sock->sk;
1231 	BT_DBG("sk %p backlog %d", sk, backlog);
1235 	if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1236 			|| sk->sk_state != BT_BOUND) {
1241 	switch (l2cap_pi(sk)->mode) {
1242 	case L2CAP_MODE_BASIC:
1244 	case L2CAP_MODE_ERTM:
1245 	case L2CAP_MODE_STREAMING:
1254 	if (!l2cap_pi(sk)->psm) {
1255 		bdaddr_t *src = &bt_sk(sk)->src;
1260 		write_lock_bh(&l2cap_sk_list.lock);
/* Scan odd PSMs in the dynamic range for a free (psm, src) slot. */
1262 		for (psm = 0x1001; psm < 0x1100; psm += 2)
1263 			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1264 				l2cap_pi(sk)->psm   = cpu_to_le16(psm);
1265 				l2cap_pi(sk)->sport = cpu_to_le16(psm);
1270 		write_unlock_bh(&l2cap_sk_list.lock);
1276 	sk->sk_max_ack_backlog = backlog;
1277 	sk->sk_ack_backlog = 0;
1278 	sk->sk_state = BT_LISTEN;
/* accept(2): sleep (wake-one, interruptible) on the listening socket
 * until a child appears on the accept queue, honouring the receive
 * timeout and O_NONBLOCK, then hand the child to 'newsock'. */
1285 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1287 	DECLARE_WAITQUEUE(wait, current);
1288 	struct sock *sk = sock->sk, *nsk;
/* Nested lock class: parent is locked while children get locked too. */
1292 	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1294 	if (sk->sk_state != BT_LISTEN) {
1299 	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1301 	BT_DBG("sk %p timeo %ld", sk, timeo);
1303 	/* Wait for an incoming connection. (wake-one). */
1304 	add_wait_queue_exclusive(sk_sleep(sk), &wait);
1305 	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1306 		set_current_state(TASK_INTERRUPTIBLE);
/* Drop the lock while sleeping; re-validate state after waking. */
1313 		timeo = schedule_timeout(timeo);
1314 		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1316 		if (sk->sk_state != BT_LISTEN) {
1321 		if (signal_pending(current)) {
1322 			err = sock_intr_errno(timeo);
1326 	set_current_state(TASK_RUNNING);
1327 	remove_wait_queue(sk_sleep(sk), &wait);
1332 	newsock->state = SS_CONNECTED;
1334 	BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with either the peer
 * (psm, dst bdaddr, dcid) or the local (sport, src bdaddr, scid). */
1341 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1343 	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1344 	struct sock *sk = sock->sk;
1346 	BT_DBG("sock %p, sk %p", sock, sk);
1348 	addr->sa_family = AF_BLUETOOTH;
1349 	*len = sizeof(struct sockaddr_l2);
1352 		la->l2_psm = l2cap_pi(sk)->psm;
1353 		bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1354 		la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1356 		la->l2_psm = l2cap_pi(sk)->sport;
1357 		bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1358 		la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* Block (interruptibly) until every outstanding ERTM I-frame has been
 * acknowledged or the channel detaches; used on shutdown to drain the
 * unacked window.  Returns 0, a signal errno, or the socket error. */
1364 static int __l2cap_wait_ack(struct sock *sk)
1366 	DECLARE_WAITQUEUE(wait, current);
1370 	add_wait_queue(sk_sleep(sk), &wait);
1371 	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1372 		set_current_state(TASK_INTERRUPTIBLE);
1377 		if (signal_pending(current)) {
1378 			err = sock_intr_errno(timeo);
1383 		timeo = schedule_timeout(timeo);
1386 		err = sock_error(sk);
1390 	set_current_state(TASK_RUNNING);
1391 	remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: the peer failed to answer a poll.  Give up and
 * disconnect after remote_max_tx retries, otherwise re-poll with an
 * RR/RNR carrying the P bit and re-arm the monitor. */
1395 static void l2cap_monitor_timeout(unsigned long arg)
1397 	struct sock *sk = (void *) arg;
1399 	BT_DBG("sk %p", sk);
1402 	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1403 		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
1408 	l2cap_pi(sk)->retry_count++;
1409 	__mod_monitor_timer();
1411 	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* ERTM retransmission timer: an I-frame went unacknowledged.  Switch
 * to the monitor timer, enter the WAIT_F state and poll the peer. */
1415 static void l2cap_retrans_timeout(unsigned long arg)
1417 	struct sock *sk = (void *) arg;
1419 	BT_DBG("sk %p", sk);
1422 	l2cap_pi(sk)->retry_count = 1;
1423 	__mod_monitor_timer();
1425 	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1427 	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Drop acknowledged frames from the head of the TX queue, up to (but
 * not including) expected_ack_seq; stop the retransmission timer once
 * nothing remains unacked. */
1431 static void l2cap_drop_acked_frames(struct sock *sk)
1433 	struct sk_buff *skb;
1435 	while ((skb = skb_peek(TX_QUEUE(sk))) &&
1436 			l2cap_pi(sk)->unacked_frames) {
1437 		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1440 		skb = skb_dequeue(TX_QUEUE(sk));
1443 		l2cap_pi(sk)->unacked_frames--;
1446 	if (!l2cap_pi(sk)->unacked_frames)
1447 		del_timer(&l2cap_pi(sk)->retrans_timer);
1450 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1452 struct l2cap_pinfo *pi = l2cap_pi(sk);
1454 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1456 hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode TX: drain the queue, stamping each frame with the
 * next TxSeq (modulo-64) and, when enabled, a trailing CRC16 FCS.
 * No retransmission state is kept in this mode. */
1459 static void l2cap_streaming_send(struct sock *sk)
1461 	struct sk_buff *skb;
1462 	struct l2cap_pinfo *pi = l2cap_pi(sk);
1465 	while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
1466 		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1467 		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1468 		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1470 		if (pi->fcs == L2CAP_FCS_CRC16) {
1471 			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1472 			put_unaligned_le16(fcs, skb->data + skb->len - 2);
1475 		l2cap_do_send(sk, skb);
/* TxSeq is a 6-bit sequence number -> wraps at 64. */
1477 		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
/* Retransmit the single I-frame with the given TxSeq (SREJ recovery):
 * locate it in the TX queue, clone it, refresh the control field
 * (F bit + current ReqSeq) and FCS, and send the clone. */
1481 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1483 	struct l2cap_pinfo *pi = l2cap_pi(sk);
1484 	struct sk_buff *skb, *tx_skb;
1487 	skb = skb_peek(TX_QUEUE(sk));
1492 		if (bt_cb(skb)->tx_seq == tx_seq)
1495 		if (skb_queue_is_last(TX_QUEUE(sk), skb))
1498 	} while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
/* Too many retransmissions of this frame: abort the channel. */
1500 	if (pi->remote_max_tx &&
1501 			bt_cb(skb)->retries == pi->remote_max_tx) {
1502 		l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1506 	tx_skb = skb_clone(skb, GFP_ATOMIC);
1507 	bt_cb(skb)->retries++;
1508 	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1510 	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1511 		control |= L2CAP_CTRL_FINAL;
1512 		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1515 	control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1516 			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1518 	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute the FCS since the control bytes just changed. */
1520 	if (pi->fcs == L2CAP_FCS_CRC16) {
1521 		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1522 		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1525 	l2cap_do_send(sk, tx_skb);
/* ERTM TX engine: send queued I-frames from sk_send_head while the
 * transmit window has room.  Each frame is cloned (the original stays
 * queued for retransmission), stamped with ReqSeq/TxSeq and an
 * optional FCS, and accounted in unacked_frames. */
1528 static int l2cap_ertm_send(struct sock *sk)
1530 	struct sk_buff *skb, *tx_skb;
1531 	struct l2cap_pinfo *pi = l2cap_pi(sk);
1535 	if (sk->sk_state != BT_CONNECTED)
1538 	while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
/* Frame exhausted its retry budget: tear the channel down. */
1540 		if (pi->remote_max_tx &&
1541 				bt_cb(skb)->retries == pi->remote_max_tx) {
1542 			l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1546 		tx_skb = skb_clone(skb, GFP_ATOMIC);
1548 		bt_cb(skb)->retries++;
1550 		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
/* Keep only the SAR bits; sequence/ack fields are rebuilt below. */
1551 		control &= L2CAP_CTRL_SAR;
1553 		if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1554 			control |= L2CAP_CTRL_FINAL;
1555 			pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1557 		control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1558 				| (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1559 		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS is computed from / written to skb->data, not
 * tx_skb->data.  skb_clone() shares the data buffer so the result is
 * the same, but this is inconsistent with l2cap_retransmit_one_frame()
 * above — consider using tx_skb->data for clarity. */
1562 		if (pi->fcs == L2CAP_FCS_CRC16) {
1563 			fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1564 			put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1567 		l2cap_do_send(sk, tx_skb);
1569 		__mod_retrans_timer();
1571 		bt_cb(skb)->tx_seq = pi->next_tx_seq;
1572 		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1574 		pi->unacked_frames++;
/* Advance sk_send_head; the frame itself stays queued until acked. */
1577 		if (skb_queue_is_last(TX_QUEUE(sk), skb))
1578 			sk->sk_send_head = NULL;
1580 			sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Restart transmission from the last acknowledged frame (REJ recovery):
 * rewind sk_send_head to the head of the TX queue, reset next_tx_seq to
 * the expected ack sequence and push everything out again. */
1588 static int l2cap_retransmit_frames(struct sock *sk)
1590 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* Rewind the send pointer to the oldest unacked frame still queued. */
1593 if (!skb_queue_empty(TX_QUEUE(sk)))
1594 sk->sk_send_head = TX_QUEUE(sk)->next;
1596 pi->next_tx_seq = pi->expected_ack_seq;
1597 ret = l2cap_ertm_send(sk);
/* Acknowledge received I-frames.  If locally busy, send RNR; otherwise try
 * to piggy-back the ack on pending I-frames via l2cap_ertm_send(), and only
 * fall back to an explicit RR S-frame when nothing was sent. */
1601 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1603 struct sock *sk = (struct sock *)pi;
1606 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
/* Local receive buffer full: tell the peer Receiver-Not-Ready. */
1608 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1609 control |= L2CAP_SUPER_RCV_NOT_READY;
1610 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1611 l2cap_send_sframe(pi, control);
/* An outgoing I-frame already carries the ReqSeq ack — nothing to do. */
1615 if (l2cap_ertm_send(sk) > 0)
1618 control |= L2CAP_SUPER_RCV_READY;
1619 l2cap_send_sframe(pi, control);
/* Send a SREJ S-frame with the F-bit set for the last (tail) entry of the
 * pending selective-reject list. */
1622 static void l2cap_send_srejtail(struct sock *sk)
1624 struct srej_list *tail;
1627 control = L2CAP_SUPER_SELECT_REJECT;
1628 control |= L2CAP_CTRL_FINAL;
/* ReqSeq = tx_seq of the newest missing frame (list tail). */
1630 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1631 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1633 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy 'len' bytes of user data from msg into skb; the first 'count' bytes
 * go into the head skb, the remainder into newly allocated continuation
 * fragments chained on skb's frag_list (each at most conn->mtu bytes).
 * Returns 0 on success or a negative errno (allocation / copy failure). */
1636 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1638 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1639 struct sk_buff **frag;
1642 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1648 /* Continuation fragments (no L2CAP header) */
1649 frag = &skb_shinfo(skb)->frag_list;
/* Each fragment is capped by the ACL MTU of the underlying connection. */
1651 count = min_t(unsigned int, conn->mtu, len);
1653 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1656 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1662 frag = &(*frag)->next;
/* Build a connectionless (SOCK_DGRAM) PDU: basic L2CAP header followed by
 * the 2-byte PSM, then the user payload (fragmented via frag_list if it
 * exceeds the ACL MTU).  Returns the skb or an ERR_PTR. */
1668 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1670 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1671 struct sk_buff *skb;
/* hlen = L2CAP header + 2 bytes for the PSM field. */
1672 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1673 struct l2cap_hdr *lh;
1675 BT_DBG("sk %p len %d", sk, (int)len);
1677 count = min_t(unsigned int, (conn->mtu - hlen), len);
1678 skb = bt_skb_send_alloc(sk, count + hlen,
1679 msg->msg_flags & MSG_DONTWAIT, &err);
1681 return ERR_PTR(-ENOMEM);
1683 /* Create L2CAP header */
1684 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1685 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
/* Header 'len' counts the PSM field but not the L2CAP header itself. */
1686 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1687 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1689 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1690 if (unlikely(err < 0)) {
1692 return ERR_PTR(err);
/* Build a basic-mode PDU: plain L2CAP header plus user payload, fragmented
 * through frag_list when larger than the ACL MTU.  Returns the skb or an
 * ERR_PTR on allocation/copy failure. */
1697 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1699 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1700 struct sk_buff *skb;
1701 int err, count, hlen = L2CAP_HDR_SIZE;
1702 struct l2cap_hdr *lh;
1704 BT_DBG("sk %p len %d", sk, (int)len);
1706 count = min_t(unsigned int, (conn->mtu - hlen), len);
1707 skb = bt_skb_send_alloc(sk, count + hlen,
1708 msg->msg_flags & MSG_DONTWAIT, &err);
1710 return ERR_PTR(-ENOMEM);
1712 /* Create L2CAP header */
1713 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1714 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1715 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1717 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1718 if (unlikely(err < 0)) {
1720 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 16-bit control field,
 * optional 2-byte SDU length (for SAR start frames, when sdulen != 0), the
 * payload, and a 2-byte FCS placeholder when CRC16 is enabled (the real FCS
 * is filled in at transmit time).  Returns the skb or an ERR_PTR. */
1725 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1727 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1728 struct sk_buff *skb;
/* hlen = L2CAP header + 2-byte control; grown below for SAR/FCS. */
1729 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1730 struct l2cap_hdr *lh;
1732 BT_DBG("sk %p len %d", sk, (int)len);
1735 return ERR_PTR(-ENOTCONN);
1740 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1743 count = min_t(unsigned int, (conn->mtu - hlen), len);
1744 skb = bt_skb_send_alloc(sk, count + hlen,
1745 msg->msg_flags & MSG_DONTWAIT, &err);
1747 return ERR_PTR(-ENOMEM);
1749 /* Create L2CAP header */
1750 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1751 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1752 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1753 put_unaligned_le16(control, skb_put(skb, 2));
/* SDU length field is only present on SAR "start" frames. */
1755 put_unaligned_le16(sdulen, skb_put(skb, 2));
1757 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1758 if (unlikely(err < 0)) {
1760 return ERR_PTR(err);
/* Reserve room for the FCS; the value is computed per transmission. */
1763 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1764 put_unaligned_le16(0, skb_put(skb, 2));
1766 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the peer's MPS into a START frame followed by
 * CONTINUE frames and a final END frame.  Frames are staged on a local
 * queue so a mid-stream allocation failure can purge everything; on success
 * the whole queue is spliced onto the socket TX queue atomically. */
1770 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1772 struct l2cap_pinfo *pi = l2cap_pi(sk);
1773 struct sk_buff *skb;
1774 struct sk_buff_head sar_queue;
1778 skb_queue_head_init(&sar_queue);
/* START frame carries the total SDU length (passed as sdulen). */
1779 control = L2CAP_SDU_START;
1780 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1782 return PTR_ERR(skb);
1784 __skb_queue_tail(&sar_queue, skb);
1785 len -= pi->remote_mps;
1786 size += pi->remote_mps;
/* Middle frames are full-MPS CONTINUEs; the leftover becomes END. */
1791 if (len > pi->remote_mps) {
1792 control = L2CAP_SDU_CONTINUE;
1793 buflen = pi->remote_mps;
1795 control = L2CAP_SDU_END;
1799 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* Any failure discards the partially built segmentation. */
1801 skb_queue_purge(&sar_queue);
1802 return PTR_ERR(skb);
1805 __skb_queue_tail(&sar_queue, skb);
1809 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1810 if (sk->sk_send_head == NULL)
1811 sk->sk_send_head = sar_queue.next;
1816 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1818 struct sock *sk = sock->sk;
1819 struct l2cap_pinfo *pi = l2cap_pi(sk);
1820 struct sk_buff *skb;
1824 BT_DBG("sock %p, sk %p", sock, sk);
1826 err = sock_error(sk);
1830 if (msg->msg_flags & MSG_OOB)
1835 if (sk->sk_state != BT_CONNECTED) {
1840 /* Connectionless channel */
1841 if (sk->sk_type == SOCK_DGRAM) {
1842 skb = l2cap_create_connless_pdu(sk, msg, len);
1846 l2cap_do_send(sk, skb);
1853 case L2CAP_MODE_BASIC:
1854 /* Check outgoing MTU */
1855 if (len > pi->omtu) {
1860 /* Create a basic PDU */
1861 skb = l2cap_create_basic_pdu(sk, msg, len);
1867 l2cap_do_send(sk, skb);
1871 case L2CAP_MODE_ERTM:
1872 case L2CAP_MODE_STREAMING:
1873 /* Entire SDU fits into one PDU */
1874 if (len <= pi->remote_mps) {
1875 control = L2CAP_SDU_UNSEGMENTED;
1876 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1881 __skb_queue_tail(TX_QUEUE(sk), skb);
1883 if (sk->sk_send_head == NULL)
1884 sk->sk_send_head = skb;
1887 /* Segment SDU into multiples PDUs */
1888 err = l2cap_sar_segment_sdu(sk, msg, len);
1893 if (pi->mode == L2CAP_MODE_STREAMING) {
1894 l2cap_streaming_send(sk);
1896 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1897 pi->conn_state && L2CAP_CONN_WAIT_F) {
1901 err = l2cap_ertm_send(sk);
1909 BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg() entry point.  With deferred setup, the first read on a
 * BT_CONNECT2 socket is what accepts the pending connection: send the
 * success connect response and, if not already done, the first config
 * request.  Everything else is delegated to the generic bt_sock_recvmsg. */
1918 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1920 struct sock *sk = sock->sk;
1924 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1925 struct l2cap_conn_rsp rsp;
1926 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1929 sk->sk_state = BT_CONFIG;
/* Our dcid is the peer's source CID and vice versa. */
1931 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1932 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1933 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1934 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1935 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1936 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only send the initial config request once. */
1938 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
1943 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1944 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1945 l2cap_build_conf_req(sk, buf), buf);
1946 l2cap_pi(sk)->num_conf_req++;
1954 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS, L2CAP_LM, ...).
 * Current values are loaded into 'opts' first so a short copy_from_user
 * leaves unspecified fields at their existing settings. */
1957 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1959 struct sock *sk = sock->sk;
1960 struct l2cap_options opts;
1964 BT_DBG("sk %p", sk);
/* Channel options cannot be changed once connected. */
1970 if (sk->sk_state == BT_CONNECTED) {
1975 opts.imtu = l2cap_pi(sk)->imtu;
1976 opts.omtu = l2cap_pi(sk)->omtu;
1977 opts.flush_to = l2cap_pi(sk)->flush_to;
1978 opts.mode = l2cap_pi(sk)->mode;
1979 opts.fcs = l2cap_pi(sk)->fcs;
1980 opts.max_tx = l2cap_pi(sk)->max_tx;
1981 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1983 len = min_t(unsigned int, sizeof(opts), optlen);
1984 if (copy_from_user((char *) &opts, optval, len)) {
/* TX window is bounded by the spec-defined default maximum. */
1989 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
1994 l2cap_pi(sk)->mode = opts.mode;
1995 switch (l2cap_pi(sk)->mode) {
1996 case L2CAP_MODE_BASIC:
1997 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
1999 case L2CAP_MODE_ERTM:
2000 case L2CAP_MODE_STREAMING:
2009 l2cap_pi(sk)->imtu = opts.imtu;
2010 l2cap_pi(sk)->omtu = opts.omtu;
2011 l2cap_pi(sk)->fcs = opts.fcs;
2012 l2cap_pi(sk)->max_tx = opts.max_tx;
2013 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
/* L2CAP_LM: map link-mode flags onto the BT security level. */
2017 if (get_user(opt, (u32 __user *) optval)) {
2022 if (opt & L2CAP_LM_AUTH)
2023 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
2024 if (opt & L2CAP_LM_ENCRYPT)
2025 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
2026 if (opt & L2CAP_LM_SECURE)
2027 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
2029 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
2030 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* SOL_BLUETOOTH setsockopt handler (BT_SECURITY, BT_DEFER_SETUP); legacy
 * SOL_L2CAP requests are forwarded to the old handler. */
2042 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
2044 struct sock *sk = sock->sk;
2045 struct bt_security sec;
2049 BT_DBG("sk %p", sk);
2051 if (level == SOL_L2CAP)
2052 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
2054 if (level != SOL_BLUETOOTH)
2055 return -ENOPROTOOPT;
/* BT_SECURITY applies only to seqpacket/stream/raw L2CAP sockets. */
2061 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2062 && sk->sk_type != SOCK_RAW) {
2067 sec.level = BT_SECURITY_LOW;
2069 len = min_t(unsigned int, sizeof(sec), optlen);
2070 if (copy_from_user((char *) &sec, optval, len)) {
2075 if (sec.level < BT_SECURITY_LOW ||
2076 sec.level > BT_SECURITY_HIGH) {
2081 l2cap_pi(sk)->sec_level = sec.level;
/* Deferred setup may only be toggled before the socket is connected. */
2084 case BT_DEFER_SETUP:
2085 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2090 if (get_user(opt, (u32 __user *) optval)) {
2095 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt handler (L2CAP_OPTIONS, L2CAP_LM,
 * L2CAP_CONNINFO).  Copies at most the user-supplied length. */
2107 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2109 struct sock *sk = sock->sk;
2110 struct l2cap_options opts;
2111 struct l2cap_conninfo cinfo;
2115 BT_DBG("sk %p", sk);
2117 if (get_user(len, optlen))
2124 opts.imtu = l2cap_pi(sk)->imtu;
2125 opts.omtu = l2cap_pi(sk)->omtu;
2126 opts.flush_to = l2cap_pi(sk)->flush_to;
2127 opts.mode = l2cap_pi(sk)->mode;
2128 opts.fcs = l2cap_pi(sk)->fcs;
2129 opts.max_tx = l2cap_pi(sk)->max_tx;
2130 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2132 len = min_t(unsigned int, len, sizeof(opts));
2133 if (copy_to_user(optval, (char *) &opts, len))
/* L2CAP_LM: translate the security level back into link-mode flags. */
2139 switch (l2cap_pi(sk)->sec_level) {
2140 case BT_SECURITY_LOW:
2141 opt = L2CAP_LM_AUTH;
2143 case BT_SECURITY_MEDIUM:
2144 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2146 case BT_SECURITY_HIGH:
2147 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2155 if (l2cap_pi(sk)->role_switch)
2156 opt |= L2CAP_LM_MASTER;
2158 if (l2cap_pi(sk)->force_reliable)
2159 opt |= L2CAP_LM_RELIABLE;
2161 if (put_user(opt, (u32 __user *) optval))
/* CONNINFO is valid once connected, or on a deferred-setup socket
 * still in BT_CONNECT2. */
2165 case L2CAP_CONNINFO:
2166 if (sk->sk_state != BT_CONNECTED &&
2167 !(sk->sk_state == BT_CONNECT2 &&
2168 bt_sk(sk)->defer_setup)) {
2173 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2174 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2176 len = min_t(unsigned int, len, sizeof(cinfo));
2177 if (copy_to_user(optval, (char *) &cinfo, len))
/* SOL_BLUETOOTH getsockopt handler (BT_SECURITY, BT_DEFER_SETUP); legacy
 * SOL_L2CAP requests are forwarded to the old handler. */
2191 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2193 struct sock *sk = sock->sk;
2194 struct bt_security sec;
2197 BT_DBG("sk %p", sk);
2199 if (level == SOL_L2CAP)
2200 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2202 if (level != SOL_BLUETOOTH)
2203 return -ENOPROTOOPT;
2205 if (get_user(len, optlen))
/* BT_SECURITY applies only to seqpacket/stream/raw L2CAP sockets. */
2212 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2213 && sk->sk_type != SOCK_RAW) {
2218 sec.level = l2cap_pi(sk)->sec_level;
2220 len = min_t(unsigned int, len, sizeof(sec));
2221 if (copy_to_user(optval, (char *) &sec, len))
2226 case BT_DEFER_SETUP:
2227 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2232 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown() handler: in ERTM mode wait for outstanding frames to be
 * acked first, then close the channel; honour SO_LINGER by waiting for
 * BT_CLOSED up to the configured linger time. */
2246 static int l2cap_sock_shutdown(struct socket *sock, int how)
2248 struct sock *sk = sock->sk;
2251 BT_DBG("sock %p, sk %p", sock, sk);
2257 if (!sk->sk_shutdown) {
/* Drain the ERTM TX queue before tearing the channel down. */
2258 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2259 err = __l2cap_wait_ack(sk);
2261 sk->sk_shutdown = SHUTDOWN_MASK;
2262 l2cap_sock_clear_timer(sk);
2263 __l2cap_sock_close(sk, 0);
2265 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2266 err = bt_sock_wait_state(sk, BT_CLOSED,
2270 if (!err && sk->sk_err)
/* release() handler: full shutdown (both directions) then kill the sock. */
2277 static int l2cap_sock_release(struct socket *sock)
2279 struct sock *sk = sock->sk;
2282 BT_DBG("sock %p, sk %p", sock, sk);
2287 err = l2cap_sock_shutdown(sock, 2);
2290 l2cap_sock_kill(sk);
/* Configuration finished on both sides: mark the channel connected and
 * wake whoever is waiting — the connecting socket itself (outgoing) or
 * the listening parent (incoming accept). */
2294 static void l2cap_chan_ready(struct sock *sk)
2296 struct sock *parent = bt_sk(sk)->parent;
2298 BT_DBG("sk %p, parent %p", sk, parent);
2300 l2cap_pi(sk)->conf_state = 0;
2301 l2cap_sock_clear_timer(sk);
2304 /* Outgoing channel.
2305 * Wake up socket sleeping on connect.
2307 sk->sk_state = BT_CONNECTED;
2308 sk->sk_state_change(sk);
2310 /* Incoming channel.
2311 * Wake up socket sleeping on accept.
2313 parent->sk_data_ready(parent, 0);
2317 /* Copy frame to all raw sockets on that connection */
2318 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2320 struct l2cap_chan_list *l = &conn->chan_list;
2321 struct sk_buff *nskb;
2324 BT_DBG("conn %p", conn);
2326 read_lock(&l->lock);
/* Walk every channel on this ACL connection, snooping raw sockets. */
2327 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2328 if (sk->sk_type != SOCK_RAW)
2331 /* Don't send frame to the socket it came from */
/* Clone per recipient; the original skb continues normal delivery. */
2334 nskb = skb_clone(skb, GFP_ATOMIC);
2338 if (sock_queue_rcv_skb(sk, nskb))
2341 read_unlock(&l->lock);
2344 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling-channel PDU: L2CAP header (CID 0x0001),
 * command header (code/ident/len) and the command payload, fragmented
 * through frag_list when it exceeds the ACL MTU.  Returns NULL on
 * allocation failure. */
2345 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2346 u8 code, u8 ident, u16 dlen, void *data)
2348 struct sk_buff *skb, **frag;
2349 struct l2cap_cmd_hdr *cmd;
2350 struct l2cap_hdr *lh;
2353 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2354 conn, code, ident, dlen);
2356 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2357 count = min_t(unsigned int, conn->mtu, len);
2359 skb = bt_skb_alloc(count, GFP_ATOMIC);
2363 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2364 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2365 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2367 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2370 cmd->len = cpu_to_le16(dlen);
/* First fragment: whatever payload fits after the two headers. */
2373 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2374 memcpy(skb_put(skb, count), data, count);
2380 /* Continuation fragments (no L2CAP header) */
2381 frag = &skb_shinfo(skb)->frag_list;
2383 count = min_t(unsigned int, conn->mtu, len);
2385 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2389 memcpy(skb_put(*frag, count), data, count);
2394 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: return its type/length and its
 * value widened to unsigned long (1/2/4-byte options by value, larger
 * options as a pointer into the buffer).  Advances *ptr and returns the
 * number of bytes consumed. */
2404 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2406 struct l2cap_conf_opt *opt = *ptr;
2409 len = L2CAP_CONF_OPT_SIZE + opt->len;
2417 *val = *((u8 *) opt->val);
2421 *val = __le16_to_cpu(*((__le16 *) opt->val));
2425 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Variable-size option: hand back a pointer instead of a value. */
2429 *val = (unsigned long) opt->val;
2433 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr, converting
 * scalar values to little-endian on the wire; larger values are copied
 * verbatim from the pointer passed in 'val'.  Advances *ptr. */
2437 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2439 struct l2cap_conf_opt *opt = *ptr;
2441 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2448 *((u8 *) opt->val) = val;
2452 *((__le16 *) opt->val) = cpu_to_le16(val);
2456 *((__le32 *) opt->val) = cpu_to_le32(val);
/* len > 4: 'val' is really a pointer to the option payload. */
2460 memcpy(opt->val, (void *) val, len);
2464 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Ack timer callback: a received I-frame was not acknowledged by
 * piggy-backing in time, so send an explicit acknowledgment. */
2467 static void l2cap_ack_timeout(unsigned long arg)
2469 struct sock *sk = (void *) arg;
2472 l2cap_send_ack(l2cap_pi(sk));
/* Initialise per-channel ERTM state: sequence counters, the retransmit /
 * monitor / ack timers, the SREJ and busy queues, the local-busy work item
 * and the ERTM backlog receive handler. */
2476 static inline void l2cap_ertm_init(struct sock *sk)
2478 l2cap_pi(sk)->expected_ack_seq = 0;
2479 l2cap_pi(sk)->unacked_frames = 0;
2480 l2cap_pi(sk)->buffer_seq = 0;
2481 l2cap_pi(sk)->num_acked = 0;
2482 l2cap_pi(sk)->frames_sent = 0;
2484 setup_timer(&l2cap_pi(sk)->retrans_timer,
2485 l2cap_retrans_timeout, (unsigned long) sk);
2486 setup_timer(&l2cap_pi(sk)->monitor_timer,
2487 l2cap_monitor_timeout, (unsigned long) sk);
2488 setup_timer(&l2cap_pi(sk)->ack_timer,
2489 l2cap_ack_timeout, (unsigned long) sk);
2491 __skb_queue_head_init(SREJ_QUEUE(sk));
2492 __skb_queue_head_init(BUSY_QUEUE(sk));
2494 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
/* ERTM frames arriving while the socket is owned go through the
 * dedicated backlog handler. */
2496 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
2499 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2502 case L2CAP_MODE_STREAMING:
2503 case L2CAP_MODE_ERTM:
2504 if (l2cap_mode_supported(mode, remote_feat_mask))
2508 return L2CAP_MODE_BASIC;
/* Build our outgoing configuration request into 'data': MTU option in
 * basic mode, RFC option (with tx window / max transmit / MPS) in ERTM and
 * streaming mode, plus an FCS option when the peer supports disabling it.
 * Returns the length of the request (consumed by l2cap_send_cmd). */
2512 static int l2cap_build_conf_req(struct sock *sk, void *data)
2514 struct l2cap_pinfo *pi = l2cap_pi(sk);
2515 struct l2cap_conf_req *req = data;
2516 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2517 void *ptr = req->data;
2519 BT_DBG("sk %p", sk);
/* Mode (re)selection only happens on the very first config exchange. */
2521 if (pi->num_conf_req || pi->num_conf_rsp)
2525 case L2CAP_MODE_STREAMING:
2526 case L2CAP_MODE_ERTM:
2527 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
2532 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2538 case L2CAP_MODE_BASIC:
2539 if (pi->imtu != L2CAP_DEFAULT_MTU)
2540 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
/* If the peer knows neither ERTM nor streaming, the RFC option
 * would be meaningless — skip it entirely. */
2542 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2543 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
2546 rfc.mode = L2CAP_MODE_BASIC;
2548 rfc.max_transmit = 0;
2549 rfc.retrans_timeout = 0;
2550 rfc.monitor_timeout = 0;
2551 rfc.max_pdu_size = 0;
2553 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2554 (unsigned long) &rfc);
2557 case L2CAP_MODE_ERTM:
2558 rfc.mode = L2CAP_MODE_ERTM;
2559 rfc.txwin_size = pi->tx_win;
2560 rfc.max_transmit = pi->max_tx;
/* Timeouts are decided by the responder; request zero. */
2561 rfc.retrans_timeout = 0;
2562 rfc.monitor_timeout = 0;
2563 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Cap the MPS so frame + control + FCS fits the ACL MTU. */
2564 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2565 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2567 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2568 (unsigned long) &rfc);
2570 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2573 if (pi->fcs == L2CAP_FCS_NONE ||
2574 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2575 pi->fcs = L2CAP_FCS_NONE;
2576 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2580 case L2CAP_MODE_STREAMING:
2581 rfc.mode = L2CAP_MODE_STREAMING;
2583 rfc.max_transmit = 0;
2584 rfc.retrans_timeout = 0;
2585 rfc.monitor_timeout = 0;
2586 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2587 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2588 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2590 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2591 (unsigned long) &rfc);
2593 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2596 if (pi->fcs == L2CAP_FCS_NONE ||
2597 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2598 pi->fcs = L2CAP_FCS_NONE;
2599 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2604 /* FIXME: Need actual value of the flush timeout */
2605 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2606 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2608 req->dcid = cpu_to_le16(pi->dcid);
2609 req->flags = cpu_to_le16(0);
2614 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2616 struct l2cap_pinfo *pi = l2cap_pi(sk);
2617 struct l2cap_conf_rsp *rsp = data;
2618 void *ptr = rsp->data;
2619 void *req = pi->conf_req;
2620 int len = pi->conf_len;
2621 int type, hint, olen;
2623 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2624 u16 mtu = L2CAP_DEFAULT_MTU;
2625 u16 result = L2CAP_CONF_SUCCESS;
2627 BT_DBG("sk %p", sk);
2629 while (len >= L2CAP_CONF_OPT_SIZE) {
2630 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2632 hint = type & L2CAP_CONF_HINT;
2633 type &= L2CAP_CONF_MASK;
2636 case L2CAP_CONF_MTU:
2640 case L2CAP_CONF_FLUSH_TO:
2644 case L2CAP_CONF_QOS:
2647 case L2CAP_CONF_RFC:
2648 if (olen == sizeof(rfc))
2649 memcpy(&rfc, (void *) val, olen);
2652 case L2CAP_CONF_FCS:
2653 if (val == L2CAP_FCS_NONE)
2654 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2662 result = L2CAP_CONF_UNKNOWN;
2663 *((u8 *) ptr++) = type;
2668 if (pi->num_conf_rsp || pi->num_conf_req > 1)
2672 case L2CAP_MODE_STREAMING:
2673 case L2CAP_MODE_ERTM:
2674 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2675 pi->mode = l2cap_select_mode(rfc.mode,
2676 pi->conn->feat_mask);
2680 if (pi->mode != rfc.mode)
2681 return -ECONNREFUSED;
2687 if (pi->mode != rfc.mode) {
2688 result = L2CAP_CONF_UNACCEPT;
2689 rfc.mode = pi->mode;
2691 if (pi->num_conf_rsp == 1)
2692 return -ECONNREFUSED;
2694 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2695 sizeof(rfc), (unsigned long) &rfc);
2699 if (result == L2CAP_CONF_SUCCESS) {
2700 /* Configure output options and let the other side know
2701 * which ones we don't like. */
2703 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2704 result = L2CAP_CONF_UNACCEPT;
2707 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2709 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2712 case L2CAP_MODE_BASIC:
2713 pi->fcs = L2CAP_FCS_NONE;
2714 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2717 case L2CAP_MODE_ERTM:
2718 pi->remote_tx_win = rfc.txwin_size;
2719 pi->remote_max_tx = rfc.max_transmit;
2721 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2722 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2724 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2726 rfc.retrans_timeout =
2727 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2728 rfc.monitor_timeout =
2729 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2731 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2733 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2734 sizeof(rfc), (unsigned long) &rfc);
2738 case L2CAP_MODE_STREAMING:
2739 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2740 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2742 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2744 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2746 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2747 sizeof(rfc), (unsigned long) &rfc);
2752 result = L2CAP_CONF_UNACCEPT;
2754 memset(&rfc, 0, sizeof(rfc));
2755 rfc.mode = pi->mode;
2758 if (result == L2CAP_CONF_SUCCESS)
2759 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2761 rsp->scid = cpu_to_le16(pi->dcid);
2762 rsp->result = cpu_to_le16(result);
2763 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's configuration response and build our follow-up request
 * into 'data', adopting adjusted values (MTU, flush timeout, RFC timeouts
 * and MPS) and rejecting a mode change when our mode was mandated.
 * Returns the new request length or -ECONNREFUSED. */
2768 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2770 struct l2cap_pinfo *pi = l2cap_pi(sk);
2771 struct l2cap_conf_req *req = data;
2772 void *ptr = req->data;
2775 struct l2cap_conf_rfc rfc;
2777 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2779 while (len >= L2CAP_CONF_OPT_SIZE) {
2780 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2783 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum — push back. */
2784 if (val < L2CAP_DEFAULT_MIN_MTU) {
2785 *result = L2CAP_CONF_UNACCEPT;
2786 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
2789 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2792 case L2CAP_CONF_FLUSH_TO:
2794 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2798 case L2CAP_CONF_RFC:
2799 if (olen == sizeof(rfc))
2800 memcpy(&rfc, (void *)val, olen);
/* A locally mandated mode cannot be renegotiated by the peer. */
2802 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2803 rfc.mode != pi->mode)
2804 return -ECONNREFUSED;
2808 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2809 sizeof(rfc), (unsigned long) &rfc);
2814 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2815 return -ECONNREFUSED;
2817 pi->mode = rfc.mode;
/* On success, latch the negotiated ERTM/streaming parameters. */
2819 if (*result == L2CAP_CONF_SUCCESS) {
2821 case L2CAP_MODE_ERTM:
2822 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2823 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2824 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2826 case L2CAP_MODE_STREAMING:
2827 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2831 req->dcid = cpu_to_le16(pi->dcid);
2832 req->flags = cpu_to_le16(0x0000);
/* Fill a minimal configuration response header (scid/result/flags) into
 * 'data'; used for empty, reject and continuation responses. */
2837 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2839 struct l2cap_conf_rsp *rsp = data;
2840 void *ptr = rsp->data;
2842 BT_DBG("sk %p", sk);
2844 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2845 rsp->result = cpu_to_le16(result);
2846 rsp->flags = cpu_to_le16(flags);
/* Extract the RFC option out of a successful configuration response and
 * record the negotiated timeouts / MPS.  No-op in basic mode. */
2851 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2853 struct l2cap_pinfo *pi = l2cap_pi(sk);
2856 struct l2cap_conf_rfc rfc;
2858 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2860 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2863 while (len >= L2CAP_CONF_OPT_SIZE) {
2864 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2867 case L2CAP_CONF_RFC:
2868 if (olen == sizeof(rfc))
2869 memcpy(&rfc, (void *)val, olen);
2876 case L2CAP_MODE_ERTM:
2877 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2878 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2879 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2881 case L2CAP_MODE_STREAMING:
2882 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject on the signalling channel.  If it rejects our
 * outstanding information request, treat the feature-mask exchange as done
 * and proceed with channel setup. */
2886 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2888 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 = "command not understood"; other reasons are ignored here. */
2890 if (rej->reason != 0x0000)
2893 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2894 cmd->ident == conn->info_ident) {
2895 del_timer(&conn->info_timer);
2897 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2898 conn->info_ident = 0;
2900 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listener for the PSM,
 * check link security and backlog, allocate and register a child socket,
 * then answer with success / pending / reject.  If the feature-mask
 * exchange hasn't happened yet the response is "pending" and an info
 * request is fired first. */
2906 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2908 struct l2cap_chan_list *list = &conn->chan_list;
2909 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2910 struct l2cap_conn_rsp rsp;
2911 struct sock *parent, *uninitialized_var(sk);
2912 int result, status = L2CAP_CS_NO_INFO;
2914 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2915 __le16 psm = req->psm;
2917 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2919 /* Check if we have socket listening on psm */
2920 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2922 result = L2CAP_CR_BAD_PSM;
2926 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
2927 if (psm != cpu_to_le16(0x0001) &&
2928 !hci_conn_check_link_mode(conn->hcon)) {
2929 conn->disc_reason = 0x05;
2930 result = L2CAP_CR_SEC_BLOCK;
2934 result = L2CAP_CR_NO_MEM;
2936 /* Check for backlog size */
2937 if (sk_acceptq_is_full(parent)) {
2938 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2942 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2946 write_lock_bh(&list->lock);
2948 /* Check if we already have channel with that dcid */
2949 if (__l2cap_get_chan_by_dcid(list, scid)) {
2950 write_unlock_bh(&list->lock);
2951 sock_set_flag(sk, SOCK_ZAPPED);
2952 l2cap_sock_kill(sk);
2956 hci_conn_hold(conn->hcon);
2958 l2cap_sock_init(sk, parent);
2959 bacpy(&bt_sk(sk)->src, conn->src);
2960 bacpy(&bt_sk(sk)->dst, conn->dst);
2961 l2cap_pi(sk)->psm = psm;
2962 l2cap_pi(sk)->dcid = scid;
2964 __l2cap_chan_add(conn, sk, parent);
2965 dcid = l2cap_pi(sk)->scid;
2967 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2969 l2cap_pi(sk)->ident = cmd->ident;
/* Only proceed to config once the feature mask is known. */
2971 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2972 if (l2cap_check_security(sk)) {
2973 if (bt_sk(sk)->defer_setup) {
2974 sk->sk_state = BT_CONNECT2;
2975 result = L2CAP_CR_PEND;
2976 status = L2CAP_CS_AUTHOR_PEND;
2977 parent->sk_data_ready(parent, 0);
2979 sk->sk_state = BT_CONFIG;
2980 result = L2CAP_CR_SUCCESS;
2981 status = L2CAP_CS_NO_INFO;
2984 sk->sk_state = BT_CONNECT2;
2985 result = L2CAP_CR_PEND;
2986 status = L2CAP_CS_AUTHEN_PEND;
2989 sk->sk_state = BT_CONNECT2;
2990 result = L2CAP_CR_PEND;
2991 status = L2CAP_CS_NO_INFO;
2994 write_unlock_bh(&list->lock);
2997 bh_unlock_sock(parent);
3000 rsp.scid = cpu_to_le16(scid);
3001 rsp.dcid = cpu_to_le16(dcid);
3002 rsp.result = cpu_to_le16(result);
3003 rsp.status = cpu_to_le16(status);
3004 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Feature mask still unknown: kick off the info transaction now. */
3006 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3007 struct l2cap_info_req info;
3008 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3010 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3011 conn->info_ident = l2cap_get_ident(conn);
3013 mod_timer(&conn->info_timer, jiffies +
3014 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
3016 l2cap_send_cmd(conn, conn->info_ident,
3017 L2CAP_INFO_REQ, sizeof(info), &info);
3020 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
3021 result == L2CAP_CR_SUCCESS) {
3023 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3024 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3025 l2cap_build_conf_req(sk, buf), buf);
3026 l2cap_pi(sk)->num_conf_req++;
/* Handle a Connection Response for one of our outgoing requests: on
 * success move to BT_CONFIG and send the first config request; on
 * "pending" just remember the state; anything else tears the channel
 * down with ECONNREFUSED. */
3032 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3034 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3035 u16 scid, dcid, result, status;
3039 scid = __le16_to_cpu(rsp->scid);
3040 dcid = __le16_to_cpu(rsp->dcid);
3041 result = __le16_to_cpu(rsp->result);
3042 status = __le16_to_cpu(rsp->status);
3044 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* scid of 0 means the peer rejected before assigning one; fall back
 * to matching on the command ident. */
3047 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3051 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
3057 case L2CAP_CR_SUCCESS:
3058 sk->sk_state = BT_CONFIG;
3059 l2cap_pi(sk)->ident = 0;
3060 l2cap_pi(sk)->dcid = dcid;
3061 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
3063 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
3066 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3068 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3069 l2cap_build_conf_req(sk, req), req);
3070 l2cap_pi(sk)->num_conf_req++;
3074 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3078 l2cap_chan_del(sk, ECONNREFUSED);
3086 static inline void set_default_fcs(struct l2cap_pinfo *pi)
3088 /* FCS is enabled only in ERTM or streaming mode, if one or both
3091 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
3092 pi->fcs = L2CAP_FCS_NONE;
3093 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
3094 pi->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configuration Request.  Options may arrive split over
 * several requests (continuation flag 0x0001), so they are accumulated in
 * pi->conf_req until complete, then parsed and answered; once both input
 * and output configuration are done the channel becomes connected. */
3097 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3099 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3105 dcid = __le16_to_cpu(req->dcid);
3106 flags = __le16_to_cpu(req->flags);
3108 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3110 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3114 if (sk->sk_state == BT_DISCONN)
3117 /* Reject if config buffer is too small. */
3118 len = cmd_len - sizeof(*req);
3119 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
3120 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3121 l2cap_build_conf_rsp(sk, rsp,
3122 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment of the option list. */
3127 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
3128 l2cap_pi(sk)->conf_len += len;
3130 if (flags & 0x0001) {
3131 /* Incomplete config. Send empty response. */
3132 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3133 l2cap_build_conf_rsp(sk, rsp,
3134 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3138 /* Complete config. */
3139 len = l2cap_parse_conf_req(sk, rsp);
3141 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3145 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3146 l2cap_pi(sk)->num_conf_rsp++;
3148 /* Reset config buffer. */
3149 l2cap_pi(sk)->conf_len = 0;
3151 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: finalise FCS and go connected. */
3154 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
3155 set_default_fcs(l2cap_pi(sk));
3157 sk->sk_state = BT_CONNECTED;
3159 l2cap_pi(sk)->next_tx_seq = 0;
3160 l2cap_pi(sk)->expected_tx_seq = 0;
3161 __skb_queue_head_init(TX_QUEUE(sk));
3162 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3163 l2cap_ertm_init(sk);
3165 l2cap_chan_ready(sk);
/* Peer configured us first: send our own config request now. */
3169 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3171 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3172 l2cap_build_conf_req(sk, buf), buf);
3173 l2cap_pi(sk)->num_conf_req++;
/* Handle a Configure Response from the peer.  SUCCESS records the peer's
 * RFC option; UNACCEPT renegotiates (bounded by L2CAP_CONF_MAX_CONF_RSP);
 * any other result aborts the channel with a Disconnect Request.  When the
 * output direction is already configured, the channel goes BT_CONNECTED.
 */
3181 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3183 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3184 u16 scid, flags, result;
/* NOTE(review): cmd->len is a __le16 used here without le16_to_cpu();
 * looks endian-unsafe on big-endian hosts - verify against header. */
3186 int len = cmd->len - sizeof(*rsp);
3188 scid = __le16_to_cpu(rsp->scid);
3189 flags = __le16_to_cpu(rsp->flags);
3190 result = __le16_to_cpu(rsp->result);
3192 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3193 scid, flags, result);
3195 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3200 case L2CAP_CONF_SUCCESS:
3201 l2cap_conf_rfc_get(sk, rsp->data, len);
/* Peer rejected some options: rebuild and resend a Configure Request,
 * but give up after too many rounds. */
3204 case L2CAP_CONF_UNACCEPT:
3205 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3208 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3209 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3213 /* throw out any old stored conf requests */
3214 result = L2CAP_CONF_SUCCESS;
3215 len = l2cap_parse_conf_rsp(sk, rsp->data,
/* Could not build an acceptable counter-proposal: abort. */
3218 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3222 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3223 L2CAP_CONF_REQ, len, req);
3224 l2cap_pi(sk)->num_conf_req++;
3225 if (result != L2CAP_CONF_SUCCESS)
/* Any other result (e.g. REJECT): fail the channel. */
3231 sk->sk_err = ECONNRESET;
3232 l2cap_sock_set_timer(sk, HZ * 5);
3233 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3240 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
/* Output already done too: channel is fully configured. */
3242 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3243 set_default_fcs(l2cap_pi(sk));
3245 sk->sk_state = BT_CONNECTED;
3246 l2cap_pi(sk)->next_tx_seq = 0;
3247 l2cap_pi(sk)->expected_tx_seq = 0;
3248 __skb_queue_head_init(TX_QUEUE(sk));
3249 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3250 l2cap_ertm_init(sk);
3252 l2cap_chan_ready(sk);
/* Handle a Disconnect Request: echo back a Disconnect Response with our
 * scid/dcid swapped into the peer's perspective, shut the socket down and
 * remove the channel from the connection.
 */
3260 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3262 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3263 struct l2cap_disconn_rsp rsp;
3267 scid = __le16_to_cpu(req->scid);
3268 dcid = __le16_to_cpu(req->dcid);
3270 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3272 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
/* Response carries the IDs from our side's point of view. */
3276 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3277 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3278 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3280 sk->sk_shutdown = SHUTDOWN_MASK;
3282 l2cap_chan_del(sk, ECONNRESET);
/* Free the socket if it is already dead (no user reference left). */
3285 l2cap_sock_kill(sk);
/* Handle a Disconnect Response to our own Disconnect Request: drop the
 * channel (no error - this was a clean, locally initiated disconnect).
 */
3289 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3291 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3295 scid = __le16_to_cpu(rsp->scid);
3296 dcid = __le16_to_cpu(rsp->dcid);
3298 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3300 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
/* err == 0: orderly teardown requested by us. */
3304 l2cap_chan_del(sk, 0);
3307 l2cap_sock_kill(sk);
/* Answer an Information Request: report our feature mask (with ERTM and
 * streaming advertised unless disable_ertm), our fixed-channel map, or
 * NOTSUPP for unknown info types.
 */
3311 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3313 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3316 type = __le16_to_cpu(req->type);
3318 BT_DBG("type 0x%4.4x", type);
3320 if (type == L2CAP_IT_FEAT_MASK) {
3322 u32 feat_mask = l2cap_feat_mask;
3323 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3324 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3325 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming support on top of the base mask. */
3327 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3329 put_unaligned_le32(feat_mask, rsp->data);
3330 l2cap_send_cmd(conn, cmd->ident,
3331 L2CAP_INFO_RSP, sizeof(buf), buf);
3332 } else if (type == L2CAP_IT_FIXED_CHAN) {
3334 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3335 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3336 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel bitmap follows the 4-byte rsp header. */
3337 memcpy(buf + 4, l2cap_fixed_chan, 8);
3338 l2cap_send_cmd(conn, cmd->ident,
3339 L2CAP_INFO_RSP, sizeof(buf), buf);
/* Unknown info type: respond "not supported". */
3341 struct l2cap_info_rsp rsp;
3342 rsp.type = cpu_to_le16(type);
3343 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3344 l2cap_send_cmd(conn, cmd->ident,
3345 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an Information Response during connection bring-up.  Records the
 * peer's feature mask, optionally chains a fixed-channel query, and once
 * discovery is finished marks it done and kicks pending channels via
 * l2cap_conn_start().
 */
3351 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3353 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3356 type = __le16_to_cpu(rsp->type);
3357 result = __le16_to_cpu(rsp->result);
3359 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* A response arrived, so stop the info-request timeout. */
3361 del_timer(&conn->info_timer);
/* Peer could not answer: treat discovery as finished anyway. */
3363 if (result != L2CAP_IR_SUCCESS) {
3364 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3365 conn->info_ident = 0;
3367 l2cap_conn_start(conn);
3372 if (type == L2CAP_IT_FEAT_MASK) {
3373 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: go ask for its channel map next. */
3375 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3376 struct l2cap_info_req req;
3377 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3379 conn->info_ident = l2cap_get_ident(conn);
3381 l2cap_send_cmd(conn, conn->info_ident,
3382 L2CAP_INFO_REQ, sizeof(req), &req);
3384 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3385 conn->info_ident = 0;
3387 l2cap_conn_start(conn);
3389 } else if (type == L2CAP_IT_FIXED_CHAN) {
3390 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3391 conn->info_ident = 0;
3393 l2cap_conn_start(conn);
/* Demultiplex the L2CAP signalling channel (CID 0x0001): iterate over the
 * packed command headers in the skb and dispatch each to its handler.
 * Malformed or failing commands are answered with a Command Reject.
 */
3399 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3401 u8 *data = skb->data;
3403 struct l2cap_cmd_hdr cmd;
/* Give raw (SOCK_RAW) listeners a copy of the signalling traffic. */
3406 l2cap_raw_recv(conn, skb);
3408 while (len >= L2CAP_CMD_HDR_SIZE) {
3410 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3411 data += L2CAP_CMD_HDR_SIZE;
3412 len -= L2CAP_CMD_HDR_SIZE;
3414 cmd_len = le16_to_cpu(cmd.len);
3416 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command must fit in the remaining payload and ident 0 is invalid. */
3418 if (cmd_len > len || !cmd.ident) {
3419 BT_DBG("corrupted command");
3424 case L2CAP_COMMAND_REJ:
3425 l2cap_command_rej(conn, &cmd, data);
3428 case L2CAP_CONN_REQ:
3429 err = l2cap_connect_req(conn, &cmd, data);
3432 case L2CAP_CONN_RSP:
3433 err = l2cap_connect_rsp(conn, &cmd, data);
3436 case L2CAP_CONF_REQ:
3437 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3440 case L2CAP_CONF_RSP:
3441 err = l2cap_config_rsp(conn, &cmd, data);
3444 case L2CAP_DISCONN_REQ:
3445 err = l2cap_disconnect_req(conn, &cmd, data);
3448 case L2CAP_DISCONN_RSP:
3449 err = l2cap_disconnect_rsp(conn, &cmd, data);
/* Echo request: simply mirror the payload back. */
3452 case L2CAP_ECHO_REQ:
3453 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3456 case L2CAP_ECHO_RSP:
3459 case L2CAP_INFO_REQ:
3460 err = l2cap_information_req(conn, &cmd, data);
3463 case L2CAP_INFO_RSP:
3464 err = l2cap_information_rsp(conn, &cmd, data);
3468 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* Handler failed: tell the peer we rejected its command. */
3474 struct l2cap_cmd_rej rej;
3475 BT_DBG("error %d", err);
3477 /* FIXME: Map err to a valid reason */
3478 rej.reason = cpu_to_le16(0);
3479 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the 2-byte CRC16 FCS at the tail of an ERTM/streaming frame.
 * The skb is trimmed first, then the trimmed-off FCS bytes (still present
 * in the buffer just past skb->len) are compared against a CRC computed
 * over the L2CAP header (which precedes skb->data) plus the payload.
 * No-op when FCS is disabled for this channel.
 */
3489 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3491 u16 our_fcs, rcv_fcs;
/* Basic L2CAP header + 2-byte control field are covered by the CRC. */
3492 int hdr_size = L2CAP_HDR_SIZE + 2;
3494 if (pi->fcs == L2CAP_FCS_CRC16) {
3495 skb_trim(skb, skb->len - 2);
3496 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3497 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3499 if (our_fcs != rcv_fcs)
/* Respond to a poll (P-bit) from the peer: send RNR if we are locally
 * busy, otherwise flush pending I-frames, and fall back to an RR if no
 * I-frame ended up carrying the acknowledgement.
 */
3505 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3507 struct l2cap_pinfo *pi = l2cap_pi(sk);
3510 pi->frames_sent = 0;
/* Piggy-back our current receive sequence as the ReqSeq field. */
3512 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3514 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3515 control |= L2CAP_SUPER_RCV_NOT_READY;
3516 l2cap_send_sframe(pi, control);
3517 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3520 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3521 l2cap_retransmit_frames(sk);
3523 l2cap_ertm_send(sk);
/* Nothing was transmitted that could ack the poll: send a plain RR. */
3525 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3526 pi->frames_sent == 0) {
3527 control |= L2CAP_SUPER_RCV_READY;
3528 l2cap_send_sframe(pi, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq modulo 64 (the ERTM sequence space) relative
 * to buffer_seq.  Duplicates (same tx_seq) are not inserted.
 */
3532 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3534 struct sk_buff *next_skb;
3535 struct l2cap_pinfo *pi = l2cap_pi(sk);
3536 int tx_seq_offset, next_tx_seq_offset;
3538 bt_cb(skb)->tx_seq = tx_seq;
3539 bt_cb(skb)->sar = sar;
3541 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue: trivially append. */
3543 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Distance of this frame from the receive window base (mod 64). */
3547 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3548 if (tx_seq_offset < 0)
3549 tx_seq_offset += 64;
/* Already queued: drop as a duplicate. */
3552 if (bt_cb(next_skb)->tx_seq == tx_seq)
3555 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3556 pi->buffer_seq) % 64;
3557 if (next_tx_seq_offset < 0)
3558 next_tx_seq_offset += 64;
/* Found the first queued frame with a larger offset: insert before it. */
3560 if (next_tx_seq_offset > tx_seq_offset) {
3561 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3565 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3568 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3570 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble ERTM I-frames into SDUs according to their SAR bits and
 * deliver complete SDUs to the socket receive queue.  SAR_RETRY handles
 * the case where delivery of a completed SDU failed (e.g. receive buffer
 * full) and must be retried without re-copying the last fragment.
 * A length violation falls through to a disconnect at the end.
 */
3575 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3577 struct l2cap_pinfo *pi = l2cap_pi(sk);
3578 struct sk_buff *_skb;
3581 switch (control & L2CAP_CTRL_SAR) {
/* Whole SDU in one frame: must not arrive mid-reassembly. */
3582 case L2CAP_SDU_UNSEGMENTED:
3583 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3586 err = sock_queue_rcv_skb(sk, skb);
/* First fragment: leading 2 bytes carry the total SDU length. */
3592 case L2CAP_SDU_START:
3593 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3596 pi->sdu_len = get_unaligned_le16(skb->data);
3598 if (pi->sdu_len > pi->imtu)
3601 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3605 /* pull sdu_len bytes only after alloc, because of Local Busy
3606 * condition we have to be sure that this will be executed
3607 * only once, i.e., when alloc does not fail */
3610 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3612 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3613 pi->partial_sdu_len = skb->len;
/* Middle fragment: append, guarding against length overrun. */
3616 case L2CAP_SDU_CONTINUE:
3617 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3623 pi->partial_sdu_len += skb->len;
3624 if (pi->partial_sdu_len > pi->sdu_len)
3627 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* Final fragment (L2CAP_SDU_END): complete and deliver the SDU. */
3632 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
/* Skip accounting/copy on a retry - it was already done once. */
3638 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3639 pi->partial_sdu_len += skb->len;
3641 if (pi->partial_sdu_len > pi->imtu)
3644 if (pi->partial_sdu_len != pi->sdu_len)
3647 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3650 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
/* Clone failed: remember to retry delivery later. */
3652 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3656 err = sock_queue_rcv_skb(sk, _skb);
/* Queueing failed: keep the SDU and retry later. */
3659 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3663 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3664 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
/* Protocol violation path: reset the connection. */
3678 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* Try to drain the local-busy backlog (BUSY_QUEUE) into the socket.  On
 * success, exit the local-busy state: if we previously sent RNR, poll the
 * peer with an RR+P and arm the monitor timer; otherwise just clear the
 * busy flags.  Returns non-zero while frames remain queued.
 */
3683 static int l2cap_try_push_rx_skb(struct sock *sk)
3685 struct l2cap_pinfo *pi = l2cap_pi(sk);
3686 struct sk_buff *skb;
3690 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3691 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3692 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
/* Still can't deliver: put the frame back and stay busy. */
3694 skb_queue_head(BUSY_QUEUE(sk), skb);
3698 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3701 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
/* We told the peer "not ready" earlier; poll it to resume. */
3704 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3705 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3706 l2cap_send_sframe(pi, control);
3707 l2cap_pi(sk)->retry_count = 1;
3709 del_timer(&pi->retrans_timer);
3710 __mod_monitor_timer();
3712 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3715 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3716 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3718 BT_DBG("sk %p, Exit local busy", sk);
/* Workqueue handler for the ERTM local-busy state: repeatedly retry
 * pushing the busy backlog to the socket, sleeping between attempts.
 * Gives up (and disconnects with EBUSY) after L2CAP_LOCAL_BUSY_TRIES, or
 * stops early on a pending signal or socket error.
 */
3723 static void l2cap_busy_work(struct work_struct *work)
3725 DECLARE_WAITQUEUE(wait, current);
3726 struct l2cap_pinfo *pi =
3727 container_of(work, struct l2cap_pinfo, busy_work);
/* struct sock is the first member of the pinfo, so this cast is valid. */
3728 struct sock *sk = (struct sock *)pi;
3729 int n_tries = 0, timeo = HZ/5, err;
3730 struct sk_buff *skb;
3734 add_wait_queue(sk_sleep(sk), &wait);
3735 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3736 set_current_state(TASK_INTERRUPTIBLE);
/* Too many failed attempts: give up on the channel. */
3738 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3740 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
3747 if (signal_pending(current)) {
3748 err = sock_intr_errno(timeo);
3753 timeo = schedule_timeout(timeo);
3756 err = sock_error(sk);
/* Backlog fully drained: local-busy is over. */
3760 if (l2cap_try_push_rx_skb(sk) == 0)
3764 set_current_state(TASK_RUNNING);
3765 remove_wait_queue(sk_sleep(sk), &wait);
/* Deliver a received ERTM I-frame, entering the local-busy state when the
 * socket cannot take more data: the frame is queued on BUSY_QUEUE, an RNR
 * S-frame is sent, and the busy workqueue takes over retrying delivery.
 */
3770 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3772 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* Already busy: just append to the backlog and try to drain it. */
3775 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3776 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3777 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3778 return l2cap_try_push_rx_skb(sk);
3783 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3785 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3789 /* Busy Condition */
3790 BT_DBG("sk %p, Enter local busy", sk);
3792 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3793 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3794 __skb_queue_tail(BUSY_QUEUE(sk), skb);
/* Tell the peer to stop sending (Receiver Not Ready). */
3796 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3797 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3798 l2cap_send_sframe(pi, sctrl);
3800 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3802 del_timer(&pi->ack_timer);
/* Defer further delivery attempts to the busy workqueue. */
3804 queue_work(_busy_wq, &pi->busy_work);
/* Reassemble streaming-mode frames into SDUs.  Unlike ERTM there is no
 * retransmission: a SAR violation or oversized SDU simply discards the
 * partial SDU (data loss is allowed in streaming mode).
 */
3809 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3811 struct l2cap_pinfo *pi = l2cap_pi(sk);
3812 struct sk_buff *_skb;
3816 * TODO: We have to notify the userland if some data is lost with the
3820 switch (control & L2CAP_CTRL_SAR) {
/* Whole SDU in one frame; abandon any half-built SDU first. */
3821 case L2CAP_SDU_UNSEGMENTED:
3822 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3827 err = sock_queue_rcv_skb(sk, skb);
/* First fragment: leading 2 bytes carry the total SDU length. */
3833 case L2CAP_SDU_START:
3834 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3839 pi->sdu_len = get_unaligned_le16(skb->data);
3842 if (pi->sdu_len > pi->imtu) {
3847 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3853 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3855 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3856 pi->partial_sdu_len = skb->len;
3860 case L2CAP_SDU_CONTINUE:
3861 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3864 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3866 pi->partial_sdu_len += skb->len;
3867 if (pi->partial_sdu_len > pi->sdu_len)
/* Final fragment (L2CAP_SDU_END): deliver if length matches exactly. */
3875 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3878 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3880 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3881 pi->partial_sdu_len += skb->len;
3883 if (pi->partial_sdu_len > pi->imtu)
3886 if (pi->partial_sdu_len == pi->sdu_len) {
/* NOTE(review): skb_clone() may return NULL under GFP_ATOMIC and the
 * result is passed straight to sock_queue_rcv_skb() - verify a NULL
 * check exists on the elided lines. */
3887 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3888 err = sock_queue_rcv_skb(sk, _skb);
/* After a missing frame arrives, deliver the run of consecutive frames
 * now available at the head of the SREJ queue, advancing buffer_seq_srej
 * and tx_seq (mod 64) until the next gap.
 */
3903 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3905 struct sk_buff *skb;
3908 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* Head of the queue is not the next expected frame: stop at the gap. */
3909 if (bt_cb(skb)->tx_seq != tx_seq)
3912 skb = skb_dequeue(SREJ_QUEUE(sk));
3913 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3914 l2cap_ertm_reassembly_sdu(sk, skb, control);
3915 l2cap_pi(sk)->buffer_seq_srej =
3916 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3917 tx_seq = (tx_seq + 1) % 64;
/* A frame we SREJ'd has arrived: walk the outstanding-SREJ list, drop the
 * satisfied entry, and re-issue SREJ S-frames for the ones still missing
 * (moving each re-requested entry to the list tail).
 */
3921 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3923 struct l2cap_pinfo *pi = l2cap_pi(sk);
3924 struct srej_list *l, *tmp;
3927 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
/* This SREJ is now satisfied; remove it (free on elided lines). */
3928 if (l->tx_seq == tx_seq) {
3933 control = L2CAP_SUPER_SELECT_REJECT;
3934 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3935 l2cap_send_sframe(pi, control);
3937 list_add_tail(&l->list, SREJ_LIST(sk));
/* Send an SREJ S-frame for every sequence number between the expected
 * tx_seq and the one actually received, recording each in SREJ_LIST so
 * the retransmissions can be matched up later.
 */
3941 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3943 struct l2cap_pinfo *pi = l2cap_pi(sk);
3944 struct srej_list *new;
3947 while (tx_seq != pi->expected_tx_seq) {
3948 control = L2CAP_SUPER_SELECT_REJECT;
3949 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3950 l2cap_send_sframe(pi, control);
/* NOTE(review): kzalloc(GFP_ATOMIC) can fail and the result is
 * dereferenced on the next line without a NULL check - known issue
 * in this vintage of the code; verify before relying on this path. */
3952 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3953 new->tx_seq = pi->expected_tx_seq;
3954 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3955 list_add_tail(&new->list, SREJ_LIST(sk));
/* Skip past the frame that actually arrived. */
3957 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* Core ERTM receive path for I-frames.  Handles the F-bit (ends WAIT_F),
 * acks via ReqSeq, in-sequence delivery, out-of-sequence buffering with
 * SREJ-based selective retransmission requests, duplicate detection, and
 * periodic acknowledgement every num_to_ack frames.
 */
3960 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3962 struct l2cap_pinfo *pi = l2cap_pi(sk);
3963 u8 tx_seq = __get_txseq(rx_control);
3964 u8 req_seq = __get_reqseq(rx_control);
3965 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3966 int tx_seq_offset, expected_tx_seq_offset;
/* Ack after roughly a sixth of the transmit window. */
3967 int num_to_ack = (pi->tx_win/6) + 1;
3970 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
/* F-bit answers our earlier poll: stop the monitor timer. */
3973 if (L2CAP_CTRL_FINAL & rx_control &&
3974 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3975 del_timer(&pi->monitor_timer);
3976 if (pi->unacked_frames > 0)
3977 __mod_retrans_timer();
3978 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
/* ReqSeq acknowledges our transmitted frames. */
3981 pi->expected_ack_seq = req_seq;
3982 l2cap_drop_acked_frames(sk);
3984 if (tx_seq == pi->expected_tx_seq)
3987 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3988 if (tx_seq_offset < 0)
3989 tx_seq_offset += 64;
3991 /* invalid tx_seq */
3992 if (tx_seq_offset >= pi->tx_win) {
3993 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* NOTE(review): '==' compares the whole conn_state bitmask against a
 * single flag; this looks like it should be '& L2CAP_CONN_LOCAL_BUSY'
 * (fixed that way in later kernels) - verify before changing. */
3997 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
/* SREJ recovery already in progress. */
4000 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4001 struct srej_list *first;
4003 first = list_first_entry(SREJ_LIST(sk),
4004 struct srej_list, list);
/* This is the oldest missing frame: buffer it and deliver the run. */
4005 if (tx_seq == first->tx_seq) {
4006 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4007 l2cap_check_srej_gap(sk, tx_seq);
4009 list_del(&first->list);
/* All gaps filled: leave SREJ_SENT state. */
4012 if (list_empty(SREJ_LIST(sk))) {
4013 pi->buffer_seq = pi->buffer_seq_srej;
4014 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
4016 BT_DBG("sk %p, Exit SREJ_SENT", sk);
4019 struct srej_list *l;
4021 /* duplicated tx_seq */
4022 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
4025 list_for_each_entry(l, SREJ_LIST(sk), list) {
4026 if (l->tx_seq == tx_seq) {
4027 l2cap_resend_srejframe(sk, tx_seq);
4031 l2cap_send_srejframe(sk, tx_seq);
/* First out-of-sequence frame: enter SREJ recovery. */
4034 expected_tx_seq_offset =
4035 (pi->expected_tx_seq - pi->buffer_seq) % 64;
4036 if (expected_tx_seq_offset < 0)
4037 expected_tx_seq_offset += 64;
4039 /* duplicated tx_seq */
4040 if (tx_seq_offset < expected_tx_seq_offset)
4043 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
4045 BT_DBG("sk %p, Enter SREJ", sk);
4047 INIT_LIST_HEAD(SREJ_LIST(sk));
4048 pi->buffer_seq_srej = pi->buffer_seq;
4050 __skb_queue_head_init(SREJ_QUEUE(sk));
4051 __skb_queue_head_init(BUSY_QUEUE(sk));
4052 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4054 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
4056 l2cap_send_srejframe(sk, tx_seq);
4058 del_timer(&pi->ack_timer);
/* In-sequence frame ("expected" label path): accept it. */
4063 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4065 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4066 bt_cb(skb)->tx_seq = tx_seq;
4067 bt_cb(skb)->sar = sar;
4068 __skb_queue_tail(SREJ_QUEUE(sk), skb);
4072 err = l2cap_push_rx_skb(sk, skb, rx_control);
4076 if (rx_control & L2CAP_CTRL_FINAL) {
4077 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4078 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4080 l2cap_retransmit_frames(sk);
/* Ack every num_to_ack in-sequence frames. */
4085 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
4086 if (pi->num_acked == num_to_ack - 1)
/* Handle a Receiver Ready S-frame: process the ack (ReqSeq), answer a
 * poll with F-bit traffic, and on an F-bit response resume/retransmit
 * as the REJ/SREJ state requires.
 */
4096 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
4098 struct l2cap_pinfo *pi = l2cap_pi(sk);
4100 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
4103 pi->expected_ack_seq = __get_reqseq(rx_control);
4104 l2cap_drop_acked_frames(sk);
/* Peer is polling us (P-bit): we must answer with the F-bit set. */
4106 if (rx_control & L2CAP_CTRL_POLL) {
4107 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4108 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4109 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4110 (pi->unacked_frames > 0))
4111 __mod_retrans_timer();
4113 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4114 l2cap_send_srejtail(sk);
4116 l2cap_send_i_or_rr_or_rnr(sk);
/* F-bit response to our poll. */
4119 } else if (rx_control & L2CAP_CTRL_FINAL) {
4120 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4122 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4123 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4125 l2cap_retransmit_frames(sk);
/* Plain RR: restart retransmission timer if frames are in flight. */
4128 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4129 (pi->unacked_frames > 0))
4130 __mod_retrans_timer();
4132 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4133 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4136 l2cap_ertm_send(sk);
/* Handle a Reject S-frame: ack up to ReqSeq, then retransmit everything
 * from there.  REJ_ACT suppresses a duplicate retransmission when the
 * F-bit version of the same REJ arrives during WAIT_F.
 */
4141 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4143 struct l2cap_pinfo *pi = l2cap_pi(sk);
4144 u8 tx_seq = __get_reqseq(rx_control);
4146 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4148 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4150 pi->expected_ack_seq = tx_seq;
4151 l2cap_drop_acked_frames(sk);
4153 if (rx_control & L2CAP_CTRL_FINAL) {
/* Already acted on this REJ; just clear the marker. */
4154 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4155 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4157 l2cap_retransmit_frames(sk);
4159 l2cap_retransmit_frames(sk);
/* Remember we honoured a REJ while a poll is outstanding. */
4161 if (pi->conn_state & L2CAP_CONN_WAIT_F)
4162 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective Reject S-frame: retransmit exactly the requested
 * frame.  P-bit variants also ack and demand an F-bit reply; SREJ_ACT /
 * srej_save_reqseq deduplicate the P/F exchange during WAIT_F.
 */
4165 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
4167 struct l2cap_pinfo *pi = l2cap_pi(sk);
4168 u8 tx_seq = __get_reqseq(rx_control);
4170 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4172 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4174 if (rx_control & L2CAP_CTRL_POLL) {
4175 pi->expected_ack_seq = tx_seq;
4176 l2cap_drop_acked_frames(sk);
4178 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4179 l2cap_retransmit_one_frame(sk, tx_seq);
4181 l2cap_ertm_send(sk);
4183 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4184 pi->srej_save_reqseq = tx_seq;
4185 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4187 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* Duplicate of a SREJ we already served during this poll cycle. */
4188 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4189 pi->srej_save_reqseq == tx_seq)
4190 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4192 l2cap_retransmit_one_frame(sk, tx_seq);
4194 l2cap_retransmit_one_frame(sk, tx_seq);
4195 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4196 pi->srej_save_reqseq = tx_seq;
4197 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver Not Ready S-frame: mark the peer busy, ack ReqSeq,
 * and answer a poll appropriately (RR/RNR with F-bit, or the pending
 * SREJ tail when selective reject recovery is active).
 */
4202 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4204 struct l2cap_pinfo *pi = l2cap_pi(sk);
4205 u8 tx_seq = __get_reqseq(rx_control);
4207 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4209 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4210 pi->expected_ack_seq = tx_seq;
4211 l2cap_drop_acked_frames(sk);
4213 if (rx_control & L2CAP_CTRL_POLL)
4214 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
/* Not in SREJ recovery: stop retransmitting while the peer is busy. */
4216 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
4217 del_timer(&pi->retrans_timer);
4218 if (rx_control & L2CAP_CTRL_POLL)
4219 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4223 if (rx_control & L2CAP_CTRL_POLL)
4224 l2cap_send_srejtail(sk);
4226 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* Dispatch a received S-frame by its supervisory function (RR/REJ/SREJ/
 * RNR), first handling an F-bit that answers our outstanding poll.
 */
4229 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4231 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* F-bit ends the WAIT_F poll cycle; restart retransmission if needed. */
4233 if (L2CAP_CTRL_FINAL & rx_control &&
4234 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4235 del_timer(&l2cap_pi(sk)->monitor_timer);
4236 if (l2cap_pi(sk)->unacked_frames > 0)
4237 __mod_retrans_timer();
4238 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4241 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4242 case L2CAP_SUPER_RCV_READY:
4243 l2cap_data_channel_rrframe(sk, rx_control);
4246 case L2CAP_SUPER_REJECT:
4247 l2cap_data_channel_rejframe(sk, rx_control);
4250 case L2CAP_SUPER_SELECT_REJECT:
4251 l2cap_data_channel_srejframe(sk, rx_control);
4254 case L2CAP_SUPER_RCV_NOT_READY:
4255 l2cap_data_channel_rnrframe(sk, rx_control);
/* Validate and route one received ERTM frame: check FCS, strip the SAR
 * length field for SDU-start I-frames, enforce the MPS and the ReqSeq
 * window, then hand off to the I-frame or S-frame handler.
 */
4263 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4265 struct l2cap_pinfo *pi = l2cap_pi(sk);
4268 int len, next_tx_seq_offset, req_seq_offset;
4270 control = get_unaligned_le16(skb->data);
4275 * We can just drop the corrupted I-frame here.
4276 * Receiver will miss it and start proper recovery
4277 * procedures and ask retransmission.
4279 if (l2cap_check_fcs(pi, skb))
/* SDU-start frames carry a 2-byte SDU length after the control field. */
4282 if (__is_sar_start(control) && __is_iframe(control))
4285 if (pi->fcs == L2CAP_FCS_CRC16)
/* Payload larger than the negotiated MPS: protocol violation. */
4288 if (len > pi->mps) {
4289 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4293 req_seq = __get_reqseq(control);
4294 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4295 if (req_seq_offset < 0)
4296 req_seq_offset += 64;
4298 next_tx_seq_offset =
4299 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4300 if (next_tx_seq_offset < 0)
4301 next_tx_seq_offset += 64;
4303 /* check for invalid req-seq */
/* Peer acked frames we never sent: reset the channel. */
4304 if (req_seq_offset > next_tx_seq_offset) {
4305 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4309 if (__is_iframe(control)) {
/* I-frame with no payload is invalid. */
4311 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4315 l2cap_data_channel_iframe(sk, control, skb);
/* S-frame must have no payload. */
4319 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4323 l2cap_data_channel_sframe(sk, control, skb);
/* Deliver a connection-oriented data frame to the channel identified by
 * its CID, branching on the channel mode: basic (direct queue), ERTM
 * (full reliable state machine, possibly via the socket backlog), or
 * streaming (lossy, sequence numbers only used for resync).
 */
4333 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4336 struct l2cap_pinfo *pi;
4341 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4343 BT_DBG("unknown cid 0x%4.4x", cid);
4349 BT_DBG("sk %p, len %d", sk, skb->len);
4351 if (sk->sk_state != BT_CONNECTED)
4355 case L2CAP_MODE_BASIC:
4356 /* If socket recv buffers overflows we drop data here
4357 * which is *bad* because L2CAP has to be reliable.
4358 * But we don't have any other choice. L2CAP doesn't
4359 * provide flow control mechanism. */
4361 if (pi->imtu < skb->len)
4364 if (!sock_queue_rcv_skb(sk, skb))
4368 case L2CAP_MODE_ERTM:
/* Socket locked by userspace: defer processing to the backlog. */
4369 if (!sock_owned_by_user(sk)) {
4370 l2cap_ertm_data_rcv(sk, skb);
4372 if (sk_add_backlog(sk, skb))
4378 case L2CAP_MODE_STREAMING:
4379 control = get_unaligned_le16(skb->data);
4383 if (l2cap_check_fcs(pi, skb))
4386 if (__is_sar_start(control))
4389 if (pi->fcs == L2CAP_FCS_CRC16)
/* Oversized, negative length or stray S-frame: drop silently. */
4392 if (len > pi->mps || len < 0 || __is_sframe(control))
4395 tx_seq = __get_txseq(control);
/* Streaming mode tolerates loss: just resynchronise the counter. */
4397 if (pi->expected_tx_seq == tx_seq)
4398 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4400 pi->expected_tx_seq = (tx_seq + 1) % 64;
4402 l2cap_streaming_reassembly_sdu(sk, skb, control);
4407 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Deliver a connectionless (CID 0x0002) frame to a socket bound to the
 * PSM, subject to state and incoming-MTU checks.
 */
4421 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4425 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4429 BT_DBG("sk %p, len %d", sk, skb->len);
4431 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4434 if (l2cap_pi(sk)->imtu < skb->len)
4437 if (!sock_queue_rcv_skb(sk, skb))
/* Top-level dispatch for a complete L2CAP frame: validate the header
 * length and route by CID to the signalling, connectionless, or
 * connection-oriented data handler.
 */
4449 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4451 struct l2cap_hdr *lh = (void *) skb->data;
4455 skb_pull(skb, L2CAP_HDR_SIZE);
4456 cid = __le16_to_cpu(lh->cid);
4457 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length. */
4459 if (len != skb->len) {
4464 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4467 case L2CAP_CID_SIGNALING:
4468 l2cap_sig_channel(conn, skb);
/* Connectionless data starts with a 2-byte PSM. */
4471 case L2CAP_CID_CONN_LESS:
4472 psm = get_unaligned_le16(skb->data);
4474 l2cap_conless_channel(conn, psm, skb);
4478 l2cap_data_channel(conn, cid, skb);
4483 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection?  Scans
 * listening L2CAP sockets; an exact source-address match (lm1) wins over
 * a wildcard BDADDR_ANY match (lm2).  Returns the HCI link-mode flags.
 */
4485 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4487 int exact = 0, lm1 = 0, lm2 = 0;
4488 register struct sock *sk;
4489 struct hlist_node *node;
/* L2CAP only runs over ACL links. */
4491 if (type != ACL_LINK)
4494 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4496 /* Find listening sockets and check their link_mode */
4497 read_lock(&l2cap_sk_list.lock);
4498 sk_for_each(sk, node, &l2cap_sk_list.head) {
4499 if (sk->sk_state != BT_LISTEN)
/* Listener bound to this adapter's own address: exact match. */
4502 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4503 lm1 |= HCI_LM_ACCEPT;
4504 if (l2cap_pi(sk)->role_switch)
4505 lm1 |= HCI_LM_MASTER;
4507 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4508 lm2 |= HCI_LM_ACCEPT;
4509 if (l2cap_pi(sk)->role_switch)
4510 lm2 |= HCI_LM_MASTER;
4513 read_unlock(&l2cap_sk_list.lock);
4515 return exact ? lm1 : lm2;
/* HCI callback: an outgoing ACL connection completed.  On success attach
 * an l2cap_conn and start channel setup; on failure tear everything down.
 */
4518 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4520 struct l2cap_conn *conn;
4522 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4524 if (hcon->type != ACL_LINK)
4528 conn = l2cap_conn_add(hcon, status);
4530 l2cap_conn_ready(conn);
/* Connection failed: propagate the HCI status as an errno. */
4532 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: report the L2CAP-preferred reason code for an imminent
 * ACL disconnect (recorded earlier in conn->disc_reason).
 */
4537 static int l2cap_disconn_ind(struct hci_conn *hcon)
4539 struct l2cap_conn *conn = hcon->l2cap_data;
4541 BT_DBG("hcon %p", hcon);
4543 if (hcon->type != ACL_LINK || !conn)
4546 return conn->disc_reason;
/* HCI callback: the ACL link went down; destroy the L2CAP connection and
 * fail all its channels with the mapped errno.
 */
4549 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4551 BT_DBG("hcon %p reason %d", hcon, reason);
4553 if (hcon->type != ACL_LINK)
4556 l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on a connected channel: losing encryption
 * gives MEDIUM-security channels a 5s grace timer and closes HIGH-security
 * ones immediately; regaining it cancels the grace timer.
 */
4561 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
/* Only connection-oriented sockets carry a security requirement here. */
4563 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4566 if (encrypt == 0x00) {
4567 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4568 l2cap_sock_clear_timer(sk);
4569 l2cap_sock_set_timer(sk, HZ * 5);
4570 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4571 __l2cap_sock_close(sk, ECONNREFUSED);
4573 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4574 l2cap_sock_clear_timer(sk);
/* HCI callback: authentication/encryption completed with 'status'.  Walk
 * every channel on the connection: update encryption state on established
 * channels, and for channels blocked on security either issue the pending
 * Connect Request (BT_CONNECT) or answer the pending incoming request
 * (BT_CONNECT2) with success or a security block.
 */
4578 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4580 struct l2cap_chan_list *l;
4581 struct l2cap_conn *conn = hcon->l2cap_data;
4587 l = &conn->chan_list;
4589 BT_DBG("conn %p", conn);
4591 read_lock(&l->lock);
4593 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Channel has its own connect pending; skip it here. */
4596 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4601 if (!status && (sk->sk_state == BT_CONNECTED ||
4602 sk->sk_state == BT_CONFIG)) {
4603 l2cap_check_encryption(sk, encrypt);
/* Security now satisfied: send the deferred Connect Request. */
4608 if (sk->sk_state == BT_CONNECT) {
4610 struct l2cap_conn_req req;
4611 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4612 req.psm = l2cap_pi(sk)->psm;
4614 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4615 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
4617 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4618 L2CAP_CONN_REQ, sizeof(req), &req);
4620 l2cap_sock_clear_timer(sk);
4621 l2cap_sock_set_timer(sk, HZ / 10);
/* Incoming connection was waiting on security: answer it now. */
4623 } else if (sk->sk_state == BT_CONNECT2) {
4624 struct l2cap_conn_rsp rsp;
4628 sk->sk_state = BT_CONFIG;
4629 result = L2CAP_CR_SUCCESS;
/* Security failed: reject and schedule teardown. */
4631 sk->sk_state = BT_DISCONN;
4632 l2cap_sock_set_timer(sk, HZ / 10);
4633 result = L2CAP_CR_SEC_BLOCK;
4636 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4637 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4638 rsp.result = cpu_to_le16(result);
4639 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4640 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4641 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4647 read_unlock(&l->lock);
/* HCI callback: reassemble ACL fragments into complete L2CAP frames.
 * ACL_START fragments carry the L2CAP header (giving the total expected
 * length); continuation fragments are appended to conn->rx_skb until
 * rx_len reaches zero, then the whole frame goes to l2cap_recv_frame().
 * Any inconsistency marks the connection unreliable (ECOMM).
 */
4652 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4654 struct l2cap_conn *conn = hcon->l2cap_data;
4656 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4659 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4661 if (flags & ACL_START) {
4662 struct l2cap_hdr *hdr;
/* A new start while reassembly is in progress: drop the old frame. */
4666 BT_ERR("Unexpected start frame (len %d)", skb->len);
4667 kfree_skb(conn->rx_skb);
4668 conn->rx_skb = NULL;
4670 l2cap_conn_unreliable(conn, ECOMM);
/* Start fragment must at least hold the L2CAP header. */
4674 BT_ERR("Frame is too short (len %d)", skb->len);
4675 l2cap_conn_unreliable(conn, ECOMM);
4679 hdr = (struct l2cap_hdr *) skb->data;
4680 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4682 if (len == skb->len) {
4683 /* Complete frame received */
4684 l2cap_recv_frame(conn, skb);
4688 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4690 if (skb->len > len) {
4691 BT_ERR("Frame is too long (len %d, expected len %d)",
4693 l2cap_conn_unreliable(conn, ECOMM);
4697 /* Allocate skb for the complete frame (with header) */
4698 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4702 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* Bytes still owed by subsequent continuation fragments. */
4704 conn->rx_len = len - skb->len;
4706 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with no reassembly in progress: protocol error. */
4708 if (!conn->rx_len) {
4709 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4710 l2cap_conn_unreliable(conn, ECOMM);
4714 if (skb->len > conn->rx_len) {
4715 BT_ERR("Fragment is too long (len %d, expected %d)",
4716 skb->len, conn->rx_len);
4717 kfree_skb(conn->rx_skb);
4718 conn->rx_skb = NULL;
4720 l2cap_conn_unreliable(conn, ECOMM);
4724 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4726 conn->rx_len -= skb->len;
4728 if (!conn->rx_len) {
4729 /* Complete frame received */
4730 l2cap_recv_frame(conn, conn->rx_skb);
4731 conn->rx_skb = NULL;
/*
 * l2cap_debugfs_show - seq_file "show" callback backing the l2cap
 * debugfs file: prints one line per L2CAP socket (source/dest BD
 * addresses, socket state, PSM, CIDs, MTUs, security level).
 *
 * NOTE(review): fragmentary listing -- the declaration of 'sk', some
 * seq_printf arguments and the return statement are not visible.
 */
4740 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4743 struct hlist_node *node;
/* Walk the global socket list under its read lock (BH-safe variant,
 * since the list is also touched from softirq context). */
4745 read_lock_bh(&l2cap_sk_list.lock);
4747 sk_for_each(sk, node, &l2cap_sk_list.head) {
4748 struct l2cap_pinfo *pi = l2cap_pi(sk);
4750 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4751 batostr(&bt_sk(sk)->src),
4752 batostr(&bt_sk(sk)->dst),
/* PSM is stored little-endian on the wire; convert for display. */
4753 sk->sk_state, __le16_to_cpu(pi->psm),
4755 pi->imtu, pi->omtu, pi->sec_level);
4758 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open: standard single_open() wrapper around the show fn. */
4763 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4765 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry; read side is the
 * usual seq_file plumbing.
 * NOTE(review): fragmentary listing -- the .read line and the closing
 * brace of this initializer are not visible here. */
4768 static const struct file_operations l2cap_debugfs_fops = {
4769 .open = l2cap_debugfs_open,
4771 .llseek = seq_lseek,
4772 .release = single_release,
/* Dentry for the debugfs file, created in l2cap_init() and removed
 * in l2cap_exit(). */
4775 static struct dentry *l2cap_debugfs;
/* Socket operations table for PF_BLUETOOTH/BTPROTO_L2CAP sockets.
 * mmap and socketpair are unsupported (sock_no_* stubs); poll and
 * ioctl fall through to the generic Bluetooth socket helpers. */
4777 static const struct proto_ops l2cap_sock_ops = {
4778 .family = PF_BLUETOOTH,
4779 .owner = THIS_MODULE,
4780 .release = l2cap_sock_release,
4781 .bind = l2cap_sock_bind,
4782 .connect = l2cap_sock_connect,
4783 .listen = l2cap_sock_listen,
4784 .accept = l2cap_sock_accept,
4785 .getname = l2cap_sock_getname,
4786 .sendmsg = l2cap_sock_sendmsg,
4787 .recvmsg = l2cap_sock_recvmsg,
4788 .poll = bt_sock_poll,
4789 .ioctl = bt_sock_ioctl,
4790 .mmap = sock_no_mmap,
4791 .socketpair = sock_no_socketpair,
4792 .shutdown = l2cap_sock_shutdown,
4793 .setsockopt = l2cap_sock_setsockopt,
4794 .getsockopt = l2cap_sock_getsockopt
/* Registered with bt_sock_register() so socket(PF_BLUETOOTH,
 * BTPROTO_L2CAP) creation is routed to l2cap_sock_create(). */
4797 static const struct net_proto_family l2cap_sock_family_ops = {
4798 .family = PF_BLUETOOTH,
4799 .owner = THIS_MODULE,
4800 .create = l2cap_sock_create,
/* HCI protocol hooks: how the HCI core delivers connection,
 * security and ACL-data events up into the L2CAP layer. */
4803 static struct hci_proto l2cap_hci_proto = {
4805 .id = HCI_PROTO_L2CAP,
4806 .connect_ind = l2cap_connect_ind,
4807 .connect_cfm = l2cap_connect_cfm,
4808 .disconn_ind = l2cap_disconn_ind,
4809 .disconn_cfm = l2cap_disconn_cfm,
4810 .security_cfm = l2cap_security_cfm,
4811 .recv_acldata = l2cap_recv_acldata
/*
 * l2cap_init - module init: register the proto, create the busy-work
 * workqueue, register the Bluetooth socket family and the HCI
 * protocol hooks, then create the debugfs file.
 *
 * NOTE(review): fragmentary listing -- the error checks, goto labels
 * and intermediate unwind steps between these lines are not visible;
 * only the tail proto_unregister() of the error path shows.
 */
4814 static int __init l2cap_init(void)
4818 err = proto_register(&l2cap_proto, 0);
/* Single-threaded workqueue used by the ERTM busy-state handling. */
4822 _busy_wq = create_singlethread_workqueue("l2cap");
4826 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4828 BT_ERR("L2CAP socket registration failed");
4832 err = hci_register_proto(&l2cap_hci_proto);
4834 BT_ERR("L2CAP protocol registration failed");
/* Unwind the socket-family registration on HCI-proto failure. */
4835 bt_sock_unregister(BTPROTO_L2CAP);
4840 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4841 bt_debugfs, NULL, &l2cap_debugfs_fops);
/* debugfs failure is logged but non-fatal. */
4843 BT_ERR("Failed to create L2CAP debug file");
4846 BT_INFO("L2CAP ver %s", VERSION);
4847 BT_INFO("L2CAP socket layer initialized");
/* Error-path tail: undo the initial proto_register(). */
4852 proto_unregister(&l2cap_proto);
/* Module exit: tear down in reverse order of l2cap_init() --
 * debugfs file, workqueue (drained first), socket family, HCI
 * protocol hooks, and finally the proto itself. */
4856 static void __exit l2cap_exit(void)
4858 debugfs_remove(l2cap_debugfs);
/* Drain pending busy-work before destroying the queue. */
4860 flush_workqueue(_busy_wq);
4861 destroy_workqueue(_busy_wq);
4863 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4864 BT_ERR("L2CAP socket unregistration failed");
4866 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4867 BT_ERR("L2CAP protocol unregistration failed");
4869 proto_unregister(&l2cap_proto);
/* Empty exported function: callers reference the l2cap_load symbol
 * purely so that symbol resolution auto-loads this module. */
4872 void l2cap_load(void)
4874 /* Dummy function to trigger automatic L2CAP module loading by
4875 * other modules that use L2CAP sockets but don't use any other
4876 * symbols from it. */
4878 EXPORT_SYMBOL(l2cap_load);
/* Module entry/exit points and metadata. */
4880 module_init(l2cap_init);
4881 module_exit(l2cap_exit);
/* NOTE(review): disable_ertm is declared 'static int' earlier in this
 * file but registered here with type 'bool'; later kernels require
 * the variable itself to be bool -- confirm against the target
 * kernel's moduleparam rules. */
4883 module_param(disable_ertm, bool, 0644);
4884 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4886 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4887 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4888 MODULE_VERSION(VERSION);
4889 MODULE_LICENSE("GPL");
/* Alias matches the bt-proto-%d request from the Bluetooth core. */
4890 MODULE_ALIAS("bt-proto-0");