2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
/* Module parameter: Enhanced Retransmission Mode is opt-in (off by default). */
58 static int enable_ertm = 0;
/* Advertised L2CAP feature mask and fixed-channel map (bit 1 = signalling). */
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 static const struct proto_ops l2cap_sock_ops;
/* Workqueue servicing deferred "local busy" processing (l2cap_busy_work). */
65 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, guarded by its own rwlock. */
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for routines used before their definitions. */
71 static void l2cap_busy_work(struct work_struct *work);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
78 u8 code, u8 ident, u16 dlen, void *data);
80 /* ---- L2CAP timers ---- */
/* sk->sk_timer callback: the pending L2CAP operation on this socket has
 * timed out.  Choose an errno from the socket state and close the channel.
 * A timeout while connected/configuring, or while connecting above SDP
 * security level, is reported as ECONNREFUSED. */
81 static void l2cap_sock_timeout(unsigned long arg)
83 struct sock *sk = (struct sock *) arg;
86 BT_DBG("sock %p state %d", sk, sk->sk_state);
90 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
91 reason = ECONNREFUSED;
92 else if (sk->sk_state == BT_CONNECT &&
93 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
94 reason = ECONNREFUSED;
98 __l2cap_sock_close(sk, reason);
/* (Re)arm the socket timer to fire `timeout` jiffies from now. */
106 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
108 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
109 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timer, if armed. */
112 static void l2cap_sock_clear_timer(struct sock *sk)
114 BT_DBG("sock %p state %d", sk, sk->sk_state);
115 sk_stop_timer(sk, &sk->sk_timer);
118 /* ---- L2CAP channels ---- */
/* Linear scan of the per-connection channel list by destination CID.
 * Lock-free variant: callers are expected to hold l->lock (cf. the
 * locked wrappers below). */
119 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->dcid == cid)
/* Linear scan of the per-connection channel list by source CID.
 * Lock-free variant: callers are expected to hold l->lock. */
129 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
132 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
133 if (l2cap_pi(s)->scid == cid)
139 /* Find channel with given SCID.
140 * Returns locked socket */
/* Takes the channel-list read lock around the lookup; the matching
 * socket is locked before the list lock is released. */
141 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
145 s = __l2cap_get_chan_by_scid(l, cid);
148 read_unlock(&l->lock);
/* Find the channel waiting on the signalling command identified by `ident`.
 * Lock-free variant: callers are expected to hold l->lock. */
152 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
155 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
156 if (l2cap_pi(s)->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(); returns the matched
 * socket locked, mirroring l2cap_get_chan_by_scid(). */
162 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
166 s = __l2cap_get_chan_by_ident(l, ident);
169 read_unlock(&l->lock);
/* Allocate the first unused dynamic source CID in
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END) for a new
 * connection-oriented channel. */
173 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
175 u16 cid = L2CAP_CID_DYN_START;
177 for (; cid < L2CAP_CID_DYN_END; cid++) {
178 if (!__l2cap_get_chan_by_scid(l, cid))
/* Insert sk at the head of the connection's doubly linked channel list.
 * Callers are expected to hold the list write lock (cf. l2cap_chan_add()). */
185 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
190 l2cap_pi(l->head)->prev_c = sk;
192 l2cap_pi(sk)->next_c = l->head;
193 l2cap_pi(sk)->prev_c = NULL;
/* Remove sk from the connection's channel list, splicing its neighbours
 * together under the list write lock. */
197 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
199 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
201 write_lock_bh(&l->lock);
206 l2cap_pi(next)->prev_c = prev;
208 l2cap_pi(prev)->next_c = next;
209 write_unlock_bh(&l->lock);
/* Attach sk to conn's channel list and assign CIDs appropriate to the
 * socket type.  Lock-free variant: callers hold the list write lock
 * (see l2cap_chan_add()).  `parent` is non-NULL for channels created
 * from an incoming connection on a listening socket. */
214 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
216 struct l2cap_chan_list *l = &conn->chan_list;
218 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
219 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* Default HCI disconnect reason (0x13). */
221 conn->disc_reason = 0x13;
223 l2cap_pi(sk)->conn = conn;
225 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
226 /* Alloc CID for connection-oriented socket */
227 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
228 } else if (sk->sk_type == SOCK_DGRAM) {
229 /* Connectionless socket */
230 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
231 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
232 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
234 /* Raw socket can send/recv signalling messages only */
235 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
236 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
237 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
240 __l2cap_chan_link(l, sk);
/* Queue on the listening parent so accept() can pick it up. */
243 bt_accept_enqueue(parent, sk);
247 * Must be called on the locked socket. */
/* Detach the channel from its connection (dropping the hcon reference),
 * mark the socket closed/zapped with `err`, and notify: a still-pending
 * child is unlinked from its listening parent and the parent woken,
 * otherwise the socket's own state-change callback fires. */
248 static void l2cap_chan_del(struct sock *sk, int err)
250 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
251 struct sock *parent = bt_sk(sk)->parent;
253 l2cap_sock_clear_timer(sk);
255 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
258 /* Unlink from channel list */
259 l2cap_chan_unlink(&conn->chan_list, sk);
260 l2cap_pi(sk)->conn = NULL;
261 hci_conn_put(conn->hcon);
264 sk->sk_state = BT_CLOSED;
265 sock_set_flag(sk, SOCK_ZAPPED);
271 bt_accept_unlink(sk);
272 parent->sk_data_ready(parent, 0);
274 sk->sk_state_change(sk);
277 /* Service level security */
/* Map the channel's security level to an HCI authentication requirement
 * and ask the HCI layer to enforce it on the underlying ACL link.
 * PSM 0x0001 (SDP) never pairs: it gets no-bonding auth types and its
 * security level is clamped down to BT_SECURITY_SDP. */
278 static inline int l2cap_check_security(struct sock *sk)
280 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
283 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
284 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
285 auth_type = HCI_AT_NO_BONDING_MITM;
287 auth_type = HCI_AT_NO_BONDING;
289 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
290 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
292 switch (l2cap_pi(sk)->sec_level) {
293 case BT_SECURITY_HIGH:
294 auth_type = HCI_AT_GENERAL_BONDING_MITM;
296 case BT_SECURITY_MEDIUM:
297 auth_type = HCI_AT_GENERAL_BONDING;
300 auth_type = HCI_AT_NO_BONDING;
305 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel-reserved range under conn->lock. */
309 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
313 /* Get next available identificator.
314 * 1 - 128 are used by kernel.
315 * 129 - 199 are reserved.
316 * 200 - 254 are used by utilities like l2ping, etc.
319 spin_lock_bh(&conn->lock);
321 if (++conn->tx_ident > 128)
326 spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command and push it onto the ACL link. */
331 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
333 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
335 BT_DBG("code 0x%2.2x", code);
340 hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM S-frame (supervisory frame) carrying
 * `control`.  Pending F-bit/P-bit requests latched in conn_state are
 * folded into the control field and then cleared.  When FCS is enabled
 * the header length grows by 2 and a CRC-16 trailer is appended. */
343 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
346 struct l2cap_hdr *lh;
347 struct l2cap_conn *conn = pi->conn;
/* Base header: L2CAP header + 2-byte control field. */
348 int count, hlen = L2CAP_HDR_SIZE + 2;
350 if (pi->fcs == L2CAP_FCS_CRC16)
353 BT_DBG("pi %p, control 0x%2.2x", pi, control);
355 count = min_t(unsigned int, conn->mtu, hlen);
356 control |= L2CAP_CTRL_FRAME_TYPE;
358 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
359 control |= L2CAP_CTRL_FINAL;
360 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
363 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
364 control |= L2CAP_CTRL_POLL;
365 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
368 skb = bt_skb_alloc(count, GFP_ATOMIC);
372 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
373 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
374 lh->cid = cpu_to_le16(pi->dcid);
375 put_unaligned_le16(control, skb_put(skb, 2));
377 if (pi->fcs == L2CAP_FCS_CRC16) {
/* CRC covers everything before the 2-byte FCS trailer. */
378 u16 fcs = crc16(0, (u8 *)lh, count - 2);
379 put_unaligned_le16(fcs, skb_put(skb, 2));
382 hci_send_acl(pi->conn->hcon, skb, 0);
/* Send a Receiver-Ready, or Receiver-Not-Ready when we are locally busy
 * (remembering via RNR_SENT that peer flow is stopped); always reports
 * our current buffer_seq as ReqSeq. */
385 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
387 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
388 control |= L2CAP_SUPER_RCV_NOT_READY;
389 pi->conn_state |= L2CAP_CONN_RNR_SENT;
391 control |= L2CAP_SUPER_RCV_READY;
393 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
395 l2cap_send_sframe(pi, control);
/* True when no Connect Request is already outstanding for this channel. */
398 static inline int __l2cap_no_conn_pending(struct sock *sk)
400 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment.  If the peer's feature mask is already
 * known (or being fetched) send a Connect Request once security allows;
 * otherwise first issue an Information Request for the feature mask and
 * arm the info timer — l2cap_conn_start() resumes connects later. */
403 static void l2cap_do_start(struct sock *sk)
405 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
407 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight: wait for it to finish. */
408 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
411 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
412 struct l2cap_conn_req req;
413 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
414 req.psm = l2cap_pi(sk)->psm;
416 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
417 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
419 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
420 L2CAP_CONN_REQ, sizeof(req), &req);
423 struct l2cap_info_req req;
424 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
426 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
427 conn->info_ident = l2cap_get_ident(conn);
429 mod_timer(&conn->info_timer, jiffies +
430 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
432 l2cap_send_cmd(conn, conn->info_ident,
433 L2CAP_INFO_REQ, sizeof(req), &req);
/* Send a Disconnect Request for this channel's CID pair. */
437 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
439 struct l2cap_disconn_req req;
441 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
442 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
443 l2cap_send_cmd(conn, l2cap_get_ident(conn),
444 L2CAP_DISCONN_REQ, sizeof(req), &req);
447 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and advance its state machine:
 * channels in BT_CONNECT send their (possibly deferred) Connect Request
 * once security clears; channels in BT_CONNECT2 answer the peer's
 * Connect Request — success, authorization-pending (defer_setup) or
 * authentication-pending depending on security status. */
448 static void l2cap_conn_start(struct l2cap_conn *conn)
450 struct l2cap_chan_list *l = &conn->chan_list;
453 BT_DBG("conn %p", conn);
457 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented sockets take part in connect/config. */
460 if (sk->sk_type != SOCK_SEQPACKET &&
461 sk->sk_type != SOCK_STREAM) {
466 if (sk->sk_state == BT_CONNECT) {
467 if (l2cap_check_security(sk) &&
468 __l2cap_no_conn_pending(sk)) {
469 struct l2cap_conn_req req;
470 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
471 req.psm = l2cap_pi(sk)->psm;
473 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
474 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
476 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
477 L2CAP_CONN_REQ, sizeof(req), &req);
479 } else if (sk->sk_state == BT_CONNECT2) {
480 struct l2cap_conn_rsp rsp;
/* Note: in the response our dcid/scid swap roles. */
481 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
482 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
484 if (l2cap_check_security(sk)) {
485 if (bt_sk(sk)->defer_setup) {
486 struct sock *parent = bt_sk(sk)->parent;
487 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
488 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
/* Wake the listener so userspace can authorize. */
489 parent->sk_data_ready(parent, 0);
492 sk->sk_state = BT_CONFIG;
493 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
494 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
497 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
498 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
501 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
502 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
508 read_unlock(&l->lock);
/* The underlying ACL link just came up: connectionless/raw sockets become
 * connected immediately; connection-oriented channels in BT_CONNECT
 * proceed with channel establishment. */
511 static void l2cap_conn_ready(struct l2cap_conn *conn)
513 struct l2cap_chan_list *l = &conn->chan_list;
516 BT_DBG("conn %p", conn);
520 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
523 if (sk->sk_type != SOCK_SEQPACKET &&
524 sk->sk_type != SOCK_STREAM) {
525 l2cap_sock_clear_timer(sk);
526 sk->sk_state = BT_CONNECTED;
527 sk->sk_state_change(sk);
528 } else if (sk->sk_state == BT_CONNECT)
534 read_unlock(&l->lock);
537 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate `err` to every channel that asked for reliable delivery
 * (force_reliable), under the channel-list read lock. */
538 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
540 struct l2cap_chan_list *l = &conn->chan_list;
543 BT_DBG("conn %p", conn);
547 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
548 if (l2cap_pi(sk)->force_reliable)
552 read_unlock(&l->lock);
/* Info-request timer expired without a response: give up on the feature
 * exchange, mark it done, and resume any channels that were waiting. */
555 static void l2cap_info_timeout(unsigned long arg)
557 struct l2cap_conn *conn = (void *) arg;
559 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
560 conn->info_ident = 0;
562 l2cap_conn_start(conn);
/* Find or create the L2CAP connection object attached to an ACL link.
 * Allocated atomically (may run in interrupt context from HCI events);
 * initializes locks and the info timer, and inherits the ACL MTU and
 * endpoint addresses from the HCI device/connection. */
565 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
567 struct l2cap_conn *conn = hcon->l2cap_data;
572 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
576 hcon->l2cap_data = conn;
579 BT_DBG("hcon %p conn %p", hcon, conn);
581 conn->mtu = hcon->hdev->acl_mtu;
582 conn->src = &hcon->hdev->bdaddr;
583 conn->dst = &hcon->dst;
587 spin_lock_init(&conn->lock);
588 rwlock_init(&conn->chan_list.lock);
590 setup_timer(&conn->info_timer, l2cap_info_timeout,
591 (unsigned long) conn);
/* Default HCI disconnect reason (0x13). */
593 conn->disc_reason = 0x13;
/* Tear down the L2CAP connection attached to an ACL link: drop any
 * partially reassembled frame, close every remaining channel with `err`,
 * stop the info timer if it was armed, and detach from the hcon. */
598 static void l2cap_conn_del(struct hci_conn *hcon, int err)
600 struct l2cap_conn *conn = hcon->l2cap_data;
606 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
608 kfree_skb(conn->rx_skb);
/* l2cap_chan_del() unlinks the head, so this drains the list. */
611 while ((sk = conn->chan_list.head)) {
613 l2cap_chan_del(sk, err);
618 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
619 del_timer_sync(&conn->info_timer);
621 hcon->l2cap_data = NULL;
/* Locked wrapper: attach a channel to a connection under the channel-list
 * write lock (see __l2cap_chan_add()). */
625 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
627 struct l2cap_chan_list *l = &conn->chan_list;
628 write_lock_bh(&l->lock);
629 __l2cap_chan_add(conn, sk, parent);
630 write_unlock_bh(&l->lock);
633 /* ---- Socket interface ---- */
/* Exact-match lookup in the global socket list by bound (sport, src
 * address) pair; used to detect bind conflicts.  Callers hold
 * l2cap_sk_list.lock. */
634 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
637 struct hlist_node *node;
638 sk_for_each(sk, node, &l2cap_sk_list.head)
639 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
646 /* Find socket with psm and source bdaddr.
647 * Returns closest match.
/* Prefer an exact source-address match; fall back to a wildcard
 * (BDADDR_ANY) listener remembered in sk1.  `state`, when non-zero,
 * restricts the search to sockets in that state. */
649 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
651 struct sock *sk = NULL, *sk1 = NULL;
652 struct hlist_node *node;
654 sk_for_each(sk, node, &l2cap_sk_list.head) {
655 if (state && sk->sk_state != state)
658 if (l2cap_pi(sk)->psm == psm) {
660 if (!bacmp(&bt_sk(sk)->src, src))
664 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node non-NULL means the loop broke on an exact match. */
668 return node ? sk : sk1;
671 /* Find socket with given address (psm, src).
672 * Returns locked socket */
/* Locked wrapper over __l2cap_get_sock_by_psm(); serialized by the
 * global socket-list read lock. */
673 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
676 read_lock(&l2cap_sk_list.lock);
677 s = __l2cap_get_sock_by_psm(state, psm, src);
680 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: free any skbs still queued on the socket. */
684 static void l2cap_sock_destruct(struct sock *sk)
688 skb_queue_purge(&sk->sk_receive_queue);
689 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent itself closed and zapped. */
692 static void l2cap_sock_cleanup_listen(struct sock *parent)
696 BT_DBG("parent %p", parent);
698 /* Close not yet accepted channels */
699 while ((sk = bt_accept_dequeue(parent, NULL)))
700 l2cap_sock_close(sk);
702 parent->sk_state = BT_CLOSED;
703 sock_set_flag(parent, SOCK_ZAPPED);
706 /* Kill socket (only if zapped and orphan)
707 * Must be called on unlocked socket.
/* Final teardown: only a zapped socket with no attached struct socket
 * may be unlinked from the global list and marked dead. */
709 static void l2cap_sock_kill(struct sock *sk)
711 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
714 BT_DBG("sk %p state %d", sk, sk->sk_state);
716 /* Kill poor orphan */
717 bt_sock_unlink(&l2cap_sk_list, sk);
718 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close.  Listening sockets sweep their accept queue;
 * established connection-oriented channels start a graceful disconnect
 * (Disconnect Request + timer); channels still in connection setup
 * answer the pending Connect Request with a reject before being torn
 * down; anything else is simply zapped.  Caller holds the socket lock. */
722 static void __l2cap_sock_close(struct sock *sk, int reason)
724 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
726 switch (sk->sk_state) {
728 l2cap_sock_cleanup_listen(sk);
733 if (sk->sk_type == SOCK_SEQPACKET ||
734 sk->sk_type == SOCK_STREAM) {
735 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
737 sk->sk_state = BT_DISCONN;
738 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
739 l2cap_send_disconn_req(conn, sk);
741 l2cap_chan_del(sk, reason);
745 if (sk->sk_type == SOCK_SEQPACKET ||
746 sk->sk_type == SOCK_STREAM) {
747 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
748 struct l2cap_conn_rsp rsp;
/* defer_setup closes report "security block", others "bad PSM". */
751 if (bt_sk(sk)->defer_setup)
752 result = L2CAP_CR_SEC_BLOCK;
754 result = L2CAP_CR_BAD_PSM;
756 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
757 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
758 rsp.result = cpu_to_le16(result);
759 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
760 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
761 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
763 l2cap_chan_del(sk, reason);
768 l2cap_chan_del(sk, reason);
772 sock_set_flag(sk, SOCK_ZAPPED);
777 /* Must be called on unlocked socket. */
/* Public close path: stop the timer and close with ECONNRESET (the
 * socket lock is taken/released around __l2cap_sock_close()). */
778 static void l2cap_sock_close(struct sock *sk)
780 l2cap_sock_clear_timer(sk);
782 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize a fresh L2CAP socket.  A child of an accepting socket
 * inherits the parent's type and channel options; a standalone socket
 * gets defaults (basic mode unless ERTM is enabled for SOCK_STREAM). */
787 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
789 struct l2cap_pinfo *pi = l2cap_pi(sk);
794 sk->sk_type = parent->sk_type;
795 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
797 pi->imtu = l2cap_pi(parent)->imtu;
798 pi->omtu = l2cap_pi(parent)->omtu;
799 pi->mode = l2cap_pi(parent)->mode;
800 pi->fcs = l2cap_pi(parent)->fcs;
801 pi->max_tx = l2cap_pi(parent)->max_tx;
802 pi->tx_win = l2cap_pi(parent)->tx_win;
803 pi->sec_level = l2cap_pi(parent)->sec_level;
804 pi->role_switch = l2cap_pi(parent)->role_switch;
805 pi->force_reliable = l2cap_pi(parent)->force_reliable;
807 pi->imtu = L2CAP_DEFAULT_MTU;
809 if (enable_ertm && sk->sk_type == SOCK_STREAM)
810 pi->mode = L2CAP_MODE_ERTM;
812 pi->mode = L2CAP_MODE_BASIC;
813 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
814 pi->fcs = L2CAP_FCS_CRC16;
815 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
816 pi->sec_level = BT_SECURITY_LOW;
818 pi->force_reliable = 0;
821 /* Default config options */
823 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* ERTM bookkeeping: transmit, selective-reject and busy queues/list. */
824 skb_queue_head_init(TX_QUEUE(sk));
825 skb_queue_head_init(SREJ_QUEUE(sk));
826 skb_queue_head_init(BUSY_QUEUE(sk));
827 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor: sized so each sock embeds a struct l2cap_pinfo. */
830 static struct proto l2cap_proto = {
832 .owner = THIS_MODULE,
833 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialize an L2CAP socket: destructor, connect
 * timeout, state, sk_timer, and linkage into the global socket list. */
836 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
840 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
844 sock_init_data(sock, sk);
845 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
847 sk->sk_destruct = l2cap_sock_destruct;
848 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
850 sock_reset_flag(sk, SOCK_ZAPPED);
852 sk->sk_protocol = proto;
853 sk->sk_state = BT_OPEN;
855 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
857 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate the socket type, require CAP_NET_RAW for
 * user-created raw sockets, then allocate and initialize the sock. */
861 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
866 BT_DBG("sock %p", sock);
868 sock->state = SS_UNCONNECTED;
870 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
871 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
872 return -ESOCKTNOSUPPORT;
874 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
877 sock->ops = &l2cap_sock_ops;
879 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
883 l2cap_sock_init(sk, NULL);
/* bind(2) backend: copy in the (possibly short) sockaddr_l2, require
 * BT_OPEN state, gate privileged PSMs (< 0x1001) on
 * CAP_NET_BIND_SERVICE, reject duplicate (psm, bdaddr) bindings under
 * the socket-list write lock, then record the local address/PSM.
 * SDP (0x0001) and RFCOMM (0x0003) PSMs default to SDP security. */
887 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
889 struct sock *sk = sock->sk;
890 struct sockaddr_l2 la;
895 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Tolerate callers passing a shorter sockaddr: zero-fill then copy. */
898 memset(&la, 0, sizeof(la));
899 len = min_t(unsigned int, sizeof(la), alen);
900 memcpy(&la, addr, len);
907 if (sk->sk_state != BT_OPEN) {
912 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
913 !capable(CAP_NET_BIND_SERVICE)) {
918 write_lock_bh(&l2cap_sk_list.lock);
920 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
923 /* Save source address */
924 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
925 l2cap_pi(sk)->psm = la.l2_psm;
926 l2cap_pi(sk)->sport = la.l2_psm;
927 sk->sk_state = BT_BOUND;
929 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
930 __le16_to_cpu(la.l2_psm) == 0x0003)
931 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
934 write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve a route to the destination, derive the HCI authentication
 * requirement from socket type / PSM / security level (mirroring
 * l2cap_check_security()), create or reuse the ACL link, attach this
 * channel to its connection, and either wait for the link (timer armed)
 * or complete immediately if the ACL is already up. */
941 static int l2cap_do_connect(struct sock *sk)
943 bdaddr_t *src = &bt_sk(sk)->src;
944 bdaddr_t *dst = &bt_sk(sk)->dst;
945 struct l2cap_conn *conn;
946 struct hci_conn *hcon;
947 struct hci_dev *hdev;
951 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
954 hdev = hci_get_route(dst, src);
956 return -EHOSTUNREACH;
958 hci_dev_lock_bh(hdev);
/* Raw sockets use dedicated bonding; SDP never pairs; everything else
 * maps sec_level to general bonding variants. */
962 if (sk->sk_type == SOCK_RAW) {
963 switch (l2cap_pi(sk)->sec_level) {
964 case BT_SECURITY_HIGH:
965 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
967 case BT_SECURITY_MEDIUM:
968 auth_type = HCI_AT_DEDICATED_BONDING;
971 auth_type = HCI_AT_NO_BONDING;
974 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
975 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
976 auth_type = HCI_AT_NO_BONDING_MITM;
978 auth_type = HCI_AT_NO_BONDING;
980 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
981 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
983 switch (l2cap_pi(sk)->sec_level) {
984 case BT_SECURITY_HIGH:
985 auth_type = HCI_AT_GENERAL_BONDING_MITM;
987 case BT_SECURITY_MEDIUM:
988 auth_type = HCI_AT_GENERAL_BONDING;
991 auth_type = HCI_AT_NO_BONDING;
996 hcon = hci_connect(hdev, ACL_LINK, dst,
997 l2cap_pi(sk)->sec_level, auth_type);
1001 conn = l2cap_conn_add(hcon, 0);
1009 /* Update source addr of the socket */
1010 bacpy(src, conn->src);
1012 l2cap_chan_add(conn, sk, NULL);
1014 sk->sk_state = BT_CONNECT;
1015 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1017 if (hcon->state == BT_CONNECTED) {
/* Raw/dgram sockets are connected as soon as the ACL is up. */
1018 if (sk->sk_type != SOCK_SEQPACKET &&
1019 sk->sk_type != SOCK_STREAM) {
1020 l2cap_sock_clear_timer(sk);
1021 sk->sk_state = BT_CONNECTED;
1027 hci_dev_unlock_bh(hdev);
/* connect(2) backend: validate the address and channel mode (ERTM and
 * streaming only when enable_ertm is set), check the socket state,
 * record the destination, start the connect, then block until
 * BT_CONNECTED or timeout unless non-blocking. */
1032 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1034 struct sock *sk = sock->sk;
1035 struct sockaddr_l2 la;
1038 BT_DBG("sk %p", sk);
1040 if (!addr || alen < sizeof(addr->sa_family) ||
1041 addr->sa_family != AF_BLUETOOTH)
/* Tolerate short sockaddr: zero-fill then copy what was given. */
1044 memset(&la, 0, sizeof(la));
1045 len = min_t(unsigned int, sizeof(la), alen);
1046 memcpy(&la, addr, len);
1053 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1059 switch (l2cap_pi(sk)->mode) {
1060 case L2CAP_MODE_BASIC:
1062 case L2CAP_MODE_ERTM:
1063 case L2CAP_MODE_STREAMING:
1072 switch (sk->sk_state) {
1076 /* Already connecting */
1080 /* Already connected */
1093 /* Set destination address and psm */
1094 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1095 l2cap_pi(sk)->psm = la.l2_psm;
1097 err = l2cap_do_connect(sk);
1102 err = bt_sock_wait_state(sk, BT_CONNECTED,
1103 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2) backend: only bound SEQPACKET/STREAM sockets may listen and
 * only in a supported mode.  A socket bound without a PSM is assigned
 * the first free odd dynamic PSM in [0x1001, 0x1100) under the
 * socket-list write lock. */
1109 static int l2cap_sock_listen(struct socket *sock, int backlog)
1111 struct sock *sk = sock->sk;
1114 BT_DBG("sk %p backlog %d", sk, backlog);
1118 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1119 || sk->sk_state != BT_BOUND) {
1124 switch (l2cap_pi(sk)->mode) {
1125 case L2CAP_MODE_BASIC:
1127 case L2CAP_MODE_ERTM:
1128 case L2CAP_MODE_STREAMING:
1137 if (!l2cap_pi(sk)->psm) {
1138 bdaddr_t *src = &bt_sk(sk)->src;
1143 write_lock_bh(&l2cap_sk_list.lock);
/* Valid dynamic PSMs are odd, hence the += 2 stride. */
1145 for (psm = 0x1001; psm < 0x1100; psm += 2)
1146 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1147 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1148 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1153 write_unlock_bh(&l2cap_sk_list.lock);
1159 sk->sk_max_ack_backlog = backlog;
1160 sk->sk_ack_backlog = 0;
1161 sk->sk_state = BT_LISTEN;
/* accept(2) backend: sleep (wake-one, interruptible) on the listening
 * socket until a child appears in the accept queue, honoring the
 * receive timeout and pending signals; re-check BT_LISTEN after each
 * wakeup since the socket lock is dropped while sleeping. */
1168 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1170 DECLARE_WAITQUEUE(wait, current);
1171 struct sock *sk = sock->sk, *nsk;
1175 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1177 if (sk->sk_state != BT_LISTEN) {
1182 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1184 BT_DBG("sk %p timeo %ld", sk, timeo);
1186 /* Wait for an incoming connection. (wake-one). */
1187 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1188 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1189 set_current_state(TASK_INTERRUPTIBLE);
1196 timeo = schedule_timeout(timeo);
1197 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1199 if (sk->sk_state != BT_LISTEN) {
1204 if (signal_pending(current)) {
1205 err = sock_intr_errno(timeo);
1209 set_current_state(TASK_RUNNING);
1210 remove_wait_queue(sk_sleep(sk), &wait);
1215 newsock->state = SS_CONNECTED;
1217 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername backend: fill a sockaddr_l2 with either the
 * peer's (psm, bdaddr, dcid) or our own (sport, bdaddr, scid). */
1224 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1226 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1227 struct sock *sk = sock->sk;
1229 BT_DBG("sock %p, sk %p", sock, sk);
1231 addr->sa_family = AF_BLUETOOTH;
1232 *len = sizeof(struct sockaddr_l2);
1235 la->l2_psm = l2cap_pi(sk)->psm;
1236 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1237 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1239 la->l2_psm = l2cap_pi(sk)->sport;
1240 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1241 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* Block (interruptibly) until the peer has acknowledged all outstanding
 * ERTM I-frames, the channel detaches, a signal arrives, or a socket
 * error is raised. */
1247 static int __l2cap_wait_ack(struct sock *sk)
1249 DECLARE_WAITQUEUE(wait, current);
1253 add_wait_queue(sk_sleep(sk), &wait);
1254 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1255 set_current_state(TASK_INTERRUPTIBLE);
1260 if (signal_pending(current)) {
1261 err = sock_intr_errno(timeo);
1266 timeo = schedule_timeout(timeo);
1269 err = sock_error(sk);
1273 set_current_state(TASK_RUNNING);
1274 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: the peer failed to answer a poll.  Disconnect once
 * retry_count reaches the remote's MaxTransmit; otherwise re-arm and
 * poll again with an RR/RNR carrying the P bit. */
1278 static void l2cap_monitor_timeout(unsigned long arg)
1280 struct sock *sk = (void *) arg;
1283 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1284 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1289 l2cap_pi(sk)->retry_count++;
1290 __mod_monitor_timer();
1292 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* ERTM retransmission timer: no ack arrived in time.  Enter the
 * WAIT_F recovery state, start the monitor timer (retry_count = 1)
 * and poll the peer with the P bit set. */
1296 static void l2cap_retrans_timeout(unsigned long arg)
1298 struct sock *sk = (void *) arg;
1301 l2cap_pi(sk)->retry_count = 1;
1302 __mod_monitor_timer();
1304 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1306 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Free queued I-frames up to (but not including) expected_ack_seq —
 * they have been acknowledged by the peer.  Stops the retransmission
 * timer once nothing unacked remains. */
1310 static void l2cap_drop_acked_frames(struct sock *sk)
1312 struct sk_buff *skb;
1314 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1315 l2cap_pi(sk)->unacked_frames) {
1316 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1319 skb = skb_dequeue(TX_QUEUE(sk));
1322 l2cap_pi(sk)->unacked_frames--;
1325 if (!l2cap_pi(sk)->unacked_frames)
1326 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand a fully built L2CAP frame to the HCI layer on this channel's
 * ACL link. */
1329 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1331 struct l2cap_pinfo *pi = l2cap_pi(sk);
1333 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1335 hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming-mode transmit: clone each queued frame, stamp TxSeq (mod 64)
 * into the control field, append the CRC-16 FCS if enabled, send the
 * clone, then dequeue and free the original — streaming mode never
 * retransmits. */
1338 static int l2cap_streaming_send(struct sock *sk)
1340 struct sk_buff *skb, *tx_skb;
1341 struct l2cap_pinfo *pi = l2cap_pi(sk);
1344 while ((skb = sk->sk_send_head)) {
1345 tx_skb = skb_clone(skb, GFP_ATOMIC);
1347 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1348 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1349 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1351 if (pi->fcs == L2CAP_FCS_CRC16) {
1352 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1353 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1356 l2cap_do_send(sk, tx_skb);
1358 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1360 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1361 sk->sk_send_head = NULL;
1363 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1365 skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the single queued I-frame carrying `tx_seq` (SREJ recovery):
 * locate it in the TX queue, disconnect if its retry count has hit the
 * remote's MaxTransmit, otherwise clone it, refresh ReqSeq/TxSeq in the
 * control field, recompute the FCS and send the clone. */
1371 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1373 struct l2cap_pinfo *pi = l2cap_pi(sk);
1374 struct sk_buff *skb, *tx_skb;
1377 skb = skb_peek(TX_QUEUE(sk));
1382 if (bt_cb(skb)->tx_seq == tx_seq)
1385 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1388 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1390 if (pi->remote_max_tx &&
1391 bt_cb(skb)->retries == pi->remote_max_tx) {
1392 l2cap_send_disconn_req(pi->conn, sk);
1396 tx_skb = skb_clone(skb, GFP_ATOMIC);
1397 bt_cb(skb)->retries++;
1398 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1399 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1400 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1401 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1403 if (pi->fcs == L2CAP_FCS_CRC16) {
1404 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1405 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1408 l2cap_do_send(sk, tx_skb);
1411 static int l2cap_ertm_send(struct sock *sk)
1413 struct sk_buff *skb, *tx_skb;
1414 struct l2cap_pinfo *pi = l2cap_pi(sk);
1419 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1421 if (pi->remote_max_tx &&
1422 bt_cb(skb)->retries == pi->remote_max_tx) {
1423 l2cap_send_disconn_req(pi->conn, sk);
1427 tx_skb = skb_clone(skb, GFP_ATOMIC);
1429 bt_cb(skb)->retries++;
1431 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1432 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1433 control |= L2CAP_CTRL_FINAL;
1434 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1436 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1437 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1438 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1441 if (pi->fcs == L2CAP_FCS_CRC16) {
1442 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1443 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1446 l2cap_do_send(sk, tx_skb);
1448 __mod_retrans_timer();
1450 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1451 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1453 pi->unacked_frames++;
1456 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1457 sk->sk_send_head = NULL;
1459 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* REJ recovery: rewind the send pointer to the head of the TX queue,
 * reset next_tx_seq to the last acked sequence number, and resend the
 * whole window under the send lock. */
1467 static int l2cap_retransmit_frames(struct sock *sk)
1469 struct l2cap_pinfo *pi = l2cap_pi(sk);
1472 spin_lock_bh(&pi->send_lock);
1474 if (!skb_queue_empty(TX_QUEUE(sk)))
1475 sk->sk_send_head = TX_QUEUE(sk)->next;
1477 pi->next_tx_seq = pi->expected_ack_seq;
1478 ret = l2cap_ertm_send(sk);
1480 spin_unlock_bh(&pi->send_lock);
/* Acknowledge received I-frames.  When locally busy send RNR; otherwise
 * try to piggyback the ack on pending I-frames (l2cap_ertm_send) and
 * only fall back to an explicit RR when nothing was sent. */
1485 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1487 struct sock *sk = (struct sock *)pi;
1491 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1493 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1494 control |= L2CAP_SUPER_RCV_NOT_READY;
1495 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1496 l2cap_send_sframe(pi, control);
1500 spin_lock_bh(&pi->send_lock);
1501 nframes = l2cap_ertm_send(sk);
1502 spin_unlock_bh(&pi->send_lock);
1507 control |= L2CAP_SUPER_RCV_READY;
1508 l2cap_send_sframe(pi, control);
/* Send a Selective Reject S-frame with the F bit set, requesting the
 * sequence number of the last (tail) entry on the SREJ list. */
1511 static void l2cap_send_srejtail(struct sock *sk)
1513 struct srej_list *tail;
1516 control = L2CAP_SUPER_SELECT_REJECT;
1517 control |= L2CAP_CTRL_FINAL;
1519 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1520 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1522 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy user data from msg->msg_iov into skb: `count` bytes into the
 * main skb, then the remaining `len` as a chain of MTU-sized fragment
 * skbs hung off frag_list (continuation fragments have no L2CAP
 * header).  Returns a negative errno on copy/alloc failure. */
1525 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1527 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1528 struct sk_buff **frag;
1531 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1537 /* Continuation fragments (no L2CAP header) */
1538 frag = &skb_shinfo(skb)->frag_list;
1540 count = min_t(unsigned int, conn->mtu, len);
1542 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1545 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1551 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM,
 * followed by the user payload gathered via l2cap_skbuff_fromiovec().
 * Returns the skb or an ERR_PTR on allocation/copy failure. */
1557 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1559 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1560 struct sk_buff *skb;
/* hlen = L2CAP header plus the 2-byte PSM field. */
1561 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1562 struct l2cap_hdr *lh;
1564 BT_DBG("sk %p len %d", sk, (int)len);
1566 count = min_t(unsigned int, (conn->mtu - hlen), len);
1567 skb = bt_skb_send_alloc(sk, count + hlen,
1568 msg->msg_flags & MSG_DONTWAIT, &err);
1570 return ERR_PTR(-ENOMEM);
1572 /* Create L2CAP header */
1573 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1574 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1575 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1576 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1578 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1579 if (unlikely(err < 0)) {
1581 return ERR_PTR(err);
/* Build a basic-mode PDU: plain L2CAP header followed by the user
 * payload.  Returns the skb or an ERR_PTR on failure. */
1586 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1588 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1589 struct sk_buff *skb;
1590 int err, count, hlen = L2CAP_HDR_SIZE;
1591 struct l2cap_hdr *lh;
1593 BT_DBG("sk %p len %d", sk, (int)len);
1595 count = min_t(unsigned int, (conn->mtu - hlen), len);
1596 skb = bt_skb_send_alloc(sk, count + hlen,
1597 msg->msg_flags & MSG_DONTWAIT, &err);
1599 return ERR_PTR(-ENOMEM);
1601 /* Create L2CAP header */
1602 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1603 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1604 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1606 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1607 if (unlikely(err < 0)) {
1609 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 16-bit control
 * field, optional 2-byte SDU length (first frame of a segmented SDU),
 * payload, and room for a trailing FCS when CRC16 is in use. */
1614 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1616 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1617 struct sk_buff *skb;
/* hlen = L2CAP header + 2-byte control field; grown below for
 * sdulen/FCS (those additions are elided in this view). */
1618 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1619 struct l2cap_hdr *lh;
1621 BT_DBG("sk %p len %d", sk, (int)len);
1624 return ERR_PTR(-ENOTCONN);
1629 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1632 count = min_t(unsigned int, (conn->mtu - hlen), len);
1633 skb = bt_skb_send_alloc(sk, count + hlen,
1634 msg->msg_flags & MSG_DONTWAIT, &err);
1636 return ERR_PTR(-ENOMEM);
1638 /* Create L2CAP header */
1639 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1640 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1641 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1642 put_unaligned_le16(control, skb_put(skb, 2));
1644 put_unaligned_le16(sdulen, skb_put(skb, 2));
1646 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1647 if (unlikely(err < 0)) {
1649 return ERR_PTR(err);
/* Placeholder FCS; the real CRC is computed when the frame is sent. */
1652 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1653 put_unaligned_le16(0, skb_put(skb, 2));
1655 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START frame
 * (carrying the total SDU length), CONTINUE frames, and an END frame,
 * building them on a private queue and splicing onto the tx queue only
 * once the whole SDU has been segmented successfully. */
1659 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1661 struct l2cap_pinfo *pi = l2cap_pi(sk);
1662 struct sk_buff *skb;
1663 struct sk_buff_head sar_queue;
1667 skb_queue_head_init(&sar_queue);
1668 control = L2CAP_SDU_START;
/* START frame: payload = remote MPS, sdulen = total SDU length. */
1669 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1671 return PTR_ERR(skb);
1673 __skb_queue_tail(&sar_queue, skb);
1674 len -= pi->remote_mps;
1675 size += pi->remote_mps;
1680 if (len > pi->remote_mps) {
1681 control = L2CAP_SDU_CONTINUE;
1682 buflen = pi->remote_mps;
1684 control = L2CAP_SDU_END;
1688 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* On mid-SDU failure, drop everything built so far. */
1690 skb_queue_purge(&sar_queue);
1691 return PTR_ERR(skb);
1694 __skb_queue_tail(&sar_queue, skb);
1698 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1699 spin_lock_bh(&pi->send_lock);
1700 if (sk->sk_send_head == NULL)
1701 sk->sk_send_head = sar_queue.next;
1702 spin_unlock_bh(&pi->send_lock);
1707 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1709 struct sock *sk = sock->sk;
1710 struct l2cap_pinfo *pi = l2cap_pi(sk);
1711 struct sk_buff *skb;
1715 BT_DBG("sock %p, sk %p", sock, sk);
1717 err = sock_error(sk);
1721 if (msg->msg_flags & MSG_OOB)
1726 if (sk->sk_state != BT_CONNECTED) {
1731 /* Connectionless channel */
1732 if (sk->sk_type == SOCK_DGRAM) {
1733 skb = l2cap_create_connless_pdu(sk, msg, len);
1737 l2cap_do_send(sk, skb);
1744 case L2CAP_MODE_BASIC:
1745 /* Check outgoing MTU */
1746 if (len > pi->omtu) {
1751 /* Create a basic PDU */
1752 skb = l2cap_create_basic_pdu(sk, msg, len);
1758 l2cap_do_send(sk, skb);
1762 case L2CAP_MODE_ERTM:
1763 case L2CAP_MODE_STREAMING:
1764 /* Entire SDU fits into one PDU */
1765 if (len <= pi->remote_mps) {
1766 control = L2CAP_SDU_UNSEGMENTED;
1767 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1772 __skb_queue_tail(TX_QUEUE(sk), skb);
1774 if (pi->mode == L2CAP_MODE_ERTM)
1775 spin_lock_bh(&pi->send_lock);
1777 if (sk->sk_send_head == NULL)
1778 sk->sk_send_head = skb;
1780 if (pi->mode == L2CAP_MODE_ERTM)
1781 spin_unlock_bh(&pi->send_lock);
1783 /* Segment SDU into multiples PDUs */
1784 err = l2cap_sar_segment_sdu(sk, msg, len);
1789 if (pi->mode == L2CAP_MODE_STREAMING) {
1790 err = l2cap_streaming_send(sk);
1792 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1793 pi->conn_state && L2CAP_CONN_WAIT_F) {
1797 spin_lock_bh(&pi->send_lock);
1798 err = l2cap_ertm_send(sk);
1799 spin_unlock_bh(&pi->send_lock);
1807 BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg() for L2CAP sockets.  A read on a deferred-setup channel in
 * BT_CONNECT2 implicitly accepts the connection: the delayed connect
 * response is sent here, then reception falls through to the common
 * bluetooth socket receive path. */
1816 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1818 struct sock *sk = sock->sk;
1822 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1823 struct l2cap_conn_rsp rsp;
1825 sk->sk_state = BT_CONFIG;
/* Response carries our scid/dcid pair back to the initiator. */
1827 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1828 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1829 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1830 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1831 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1832 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1840 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS / L2CAP_LM). */
1843 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1845 struct sock *sk = sock->sk;
1846 struct l2cap_options opts;
1850 BT_DBG("sk %p", sk);
/* Preload current values so a short user buffer leaves the rest of
 * the options unchanged. */
1856 opts.imtu = l2cap_pi(sk)->imtu;
1857 opts.omtu = l2cap_pi(sk)->omtu;
1858 opts.flush_to = l2cap_pi(sk)->flush_to;
1859 opts.mode = l2cap_pi(sk)->mode;
1860 opts.fcs = l2cap_pi(sk)->fcs;
1861 opts.max_tx = l2cap_pi(sk)->max_tx;
1862 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1864 len = min_t(unsigned int, sizeof(opts), optlen);
1865 if (copy_from_user((char *) &opts, optval, len)) {
/* Only basic/ERTM/streaming are accepted; other modes rejected
 * (rejection path elided in this view). */
1870 l2cap_pi(sk)->mode = opts.mode;
1871 switch (l2cap_pi(sk)->mode) {
1872 case L2CAP_MODE_BASIC:
1874 case L2CAP_MODE_ERTM:
1875 case L2CAP_MODE_STREAMING:
1884 l2cap_pi(sk)->imtu = opts.imtu;
1885 l2cap_pi(sk)->omtu = opts.omtu;
1886 l2cap_pi(sk)->fcs = opts.fcs;
1887 l2cap_pi(sk)->max_tx = opts.max_tx;
1888 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1892 if (get_user(opt, (u32 __user *) optval)) {
/* Map legacy link-mode flags to security levels; the checks cascade
 * so the strongest requested level wins. */
1897 if (opt & L2CAP_LM_AUTH)
1898 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1899 if (opt & L2CAP_LM_ENCRYPT)
1900 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1901 if (opt & L2CAP_LM_SECURE)
1902 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1904 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1905 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* setsockopt() entry point: SOL_L2CAP goes to the legacy handler,
 * SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP here. */
1917 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1919 struct sock *sk = sock->sk;
1920 struct bt_security sec;
1924 BT_DBG("sk %p", sk);
1926 if (level == SOL_L2CAP)
1927 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1929 if (level != SOL_BLUETOOTH)
1930 return -ENOPROTOOPT;
/* BT_SECURITY only applies to seqpacket/stream/raw sockets. */
1936 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
1937 && sk->sk_type != SOCK_RAW) {
1942 sec.level = BT_SECURITY_LOW;
1944 len = min_t(unsigned int, sizeof(sec), optlen);
1945 if (copy_from_user((char *) &sec, optval, len)) {
1950 if (sec.level < BT_SECURITY_LOW ||
1951 sec.level > BT_SECURITY_HIGH) {
1956 l2cap_pi(sk)->sec_level = sec.level;
/* Deferred setup may only be toggled before the channel connects. */
1959 case BT_DEFER_SETUP:
1960 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1965 if (get_user(opt, (u32 __user *) optval)) {
1970 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt handler (L2CAP_OPTIONS / L2CAP_LM /
 * L2CAP_CONNINFO). */
1982 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1984 struct sock *sk = sock->sk;
1985 struct l2cap_options opts;
1986 struct l2cap_conninfo cinfo;
1990 BT_DBG("sk %p", sk);
1992 if (get_user(len, optlen))
1999 opts.imtu = l2cap_pi(sk)->imtu;
2000 opts.omtu = l2cap_pi(sk)->omtu;
2001 opts.flush_to = l2cap_pi(sk)->flush_to;
2002 opts.mode = l2cap_pi(sk)->mode;
2003 opts.fcs = l2cap_pi(sk)->fcs;
2004 opts.max_tx = l2cap_pi(sk)->max_tx;
2005 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2007 len = min_t(unsigned int, len, sizeof(opts));
2008 if (copy_to_user(optval, (char *) &opts, len))
/* Translate security level back into legacy link-mode flag bits. */
2014 switch (l2cap_pi(sk)->sec_level) {
2015 case BT_SECURITY_LOW:
2016 opt = L2CAP_LM_AUTH;
2018 case BT_SECURITY_MEDIUM:
2019 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2021 case BT_SECURITY_HIGH:
2022 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2030 if (l2cap_pi(sk)->role_switch)
2031 opt |= L2CAP_LM_MASTER;
2033 if (l2cap_pi(sk)->force_reliable)
2034 opt |= L2CAP_LM_RELIABLE;
2036 if (put_user(opt, (u32 __user *) optval))
/* CONNINFO is valid once connected, or in BT_CONNECT2 when setup is
 * deferred (the ACL link already exists then). */
2040 case L2CAP_CONNINFO:
2041 if (sk->sk_state != BT_CONNECTED &&
2042 !(sk->sk_state == BT_CONNECT2 &&
2043 bt_sk(sk)->defer_setup)) {
2048 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2049 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2051 len = min_t(unsigned int, len, sizeof(cinfo));
2052 if (copy_to_user(optval, (char *) &cinfo, len))
/* getsockopt() entry point: SOL_L2CAP goes to the legacy handler,
 * SOL_BLUETOOTH reports BT_SECURITY and BT_DEFER_SETUP here. */
2066 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2068 struct sock *sk = sock->sk;
2069 struct bt_security sec;
2072 BT_DBG("sk %p", sk);
2074 if (level == SOL_L2CAP)
2075 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2077 if (level != SOL_BLUETOOTH)
2078 return -ENOPROTOOPT;
2080 if (get_user(len, optlen))
2087 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2088 && sk->sk_type != SOCK_RAW) {
2093 sec.level = l2cap_pi(sk)->sec_level;
2095 len = min_t(unsigned int, len, sizeof(sec));
2096 if (copy_to_user(optval, (char *) &sec, len))
2101 case BT_DEFER_SETUP:
2102 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2107 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown(): in ERTM, wait for outstanding frames to be acked before
 * closing; then close the channel and, with SO_LINGER set, wait for it
 * to reach BT_CLOSED. */
2121 static int l2cap_sock_shutdown(struct socket *sock, int how)
2123 struct sock *sk = sock->sk;
2126 BT_DBG("sock %p, sk %p", sock, sk);
2132 if (!sk->sk_shutdown) {
2133 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2134 err = __l2cap_wait_ack(sk);
2136 sk->sk_shutdown = SHUTDOWN_MASK;
2137 l2cap_sock_clear_timer(sk);
2138 __l2cap_sock_close(sk, 0);
2140 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2141 err = bt_sock_wait_state(sk, BT_CLOSED,
/* release(): full shutdown followed by killing the socket. */
2148 static int l2cap_sock_release(struct socket *sock)
2150 struct sock *sk = sock->sk;
2153 BT_DBG("sock %p, sk %p", sock, sk);
2158 err = l2cap_sock_shutdown(sock, 2);
2161 l2cap_sock_kill(sk);
/* Channel configuration finished: clear config state and wake whoever
 * is waiting — the connecting task for an outgoing channel, or the
 * listening parent for an incoming one. */
2165 static void l2cap_chan_ready(struct sock *sk)
2167 struct sock *parent = bt_sk(sk)->parent;
2169 BT_DBG("sk %p, parent %p", sk, parent);
2171 l2cap_pi(sk)->conf_state = 0;
2172 l2cap_sock_clear_timer(sk);
2175 /* Outgoing channel.
2176 * Wake up socket sleeping on connect.
2178 sk->sk_state = BT_CONNECTED;
2179 sk->sk_state_change(sk);
2181 /* Incoming channel.
2182 * Wake up socket sleeping on accept.
2184 parent->sk_data_ready(parent, 0);
/* Copy frame to all raw sockets on that connection */
2189 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2191 struct l2cap_chan_list *l = &conn->chan_list;
2192 struct sk_buff *nskb;
2195 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under the read lock, cloning the
 * frame for every SOCK_RAW socket. */
2197 read_lock(&l->lock);
2198 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2199 if (sk->sk_type != SOCK_RAW)
2202 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: may run in softirq context. */
2205 nskb = skb_clone(skb, GFP_ATOMIC);
/* If the receive queue rejects the clone, it must be freed
 * (free path elided in this view). */
2209 if (sock_queue_rcv_skb(sk, nskb))
2212 read_unlock(&l->lock);
2215 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel PDU: L2CAP header (CID 0x0001), command
 * header, then 'dlen' bytes of command payload, fragmented over the
 * ACL MTU via a frag_list as needed. */
2216 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2217 u8 code, u8 ident, u16 dlen, void *data)
2219 struct sk_buff *skb, **frag;
2220 struct l2cap_cmd_hdr *cmd;
2221 struct l2cap_hdr *lh;
2224 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2225 conn, code, ident, dlen);
2227 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2228 count = min_t(unsigned int, conn->mtu, len);
2230 skb = bt_skb_alloc(count, GFP_ATOMIC);
2234 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2235 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2236 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2238 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2241 cmd->len = cpu_to_le16(dlen);
/* Remaining space in the head skb after both headers. */
2244 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2245 memcpy(skb_put(skb, count), data, count);
2251 /* Continuation fragments (no L2CAP header) */
2252 frag = &skb_shinfo(skb)->frag_list;
2254 count = min_t(unsigned int, conn->mtu, len);
2256 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2260 memcpy(skb_put(*frag, count), data, count);
2265 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: returns its total size and
 * reports type/length/value.  1-, 2- and 4-byte values are converted
 * from little endian; larger values are returned by pointer. */
2275 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2277 struct l2cap_conf_opt *opt = *ptr;
2280 len = L2CAP_CONF_OPT_SIZE + opt->len;
2288 *val = *((u8 *) opt->val);
2292 *val = __le16_to_cpu(*((__le16 *) opt->val));
2296 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Variable-length option: hand back a pointer to the raw bytes. */
2300 *val = (unsigned long) opt->val;
2304 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr and advance the cursor.
 * 1-, 2- and 4-byte values are stored little endian; larger values are
 * copied verbatim from the pointer passed in 'val'. */
2308 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2310 struct l2cap_conf_opt *opt = *ptr;
2312 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2319 *((u8 *) opt->val) = val;
2323 *((__le16 *) opt->val) = cpu_to_le16(val);
2327 *((__le32 *) opt->val) = cpu_to_le32(val);
2331 memcpy(opt->val, (void *) val, len);
2335 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Ack timer expiry: send any pending acknowledgement for the channel
 * (socket locking around the call is elided in this view). */
2338 static void l2cap_ack_timeout(unsigned long arg)
2340 struct sock *sk = (void *) arg;
2343 l2cap_send_ack(l2cap_pi(sk));
/* Initialize per-channel ERTM state: sequence counters, the retransmit,
 * monitor and ack timers, the SREJ/busy queues, the transmit lock and
 * the local-busy deferred work. */
2347 static inline void l2cap_ertm_init(struct sock *sk)
2349 l2cap_pi(sk)->expected_ack_seq = 0;
2350 l2cap_pi(sk)->unacked_frames = 0;
2351 l2cap_pi(sk)->buffer_seq = 0;
2352 l2cap_pi(sk)->num_acked = 0;
2353 l2cap_pi(sk)->frames_sent = 0;
2355 setup_timer(&l2cap_pi(sk)->retrans_timer,
2356 l2cap_retrans_timeout, (unsigned long) sk);
2357 setup_timer(&l2cap_pi(sk)->monitor_timer,
2358 l2cap_monitor_timeout, (unsigned long) sk);
2359 setup_timer(&l2cap_pi(sk)->ack_timer,
2360 l2cap_ack_timeout, (unsigned long) sk);
2362 __skb_queue_head_init(SREJ_QUEUE(sk));
2363 __skb_queue_head_init(BUSY_QUEUE(sk));
2364 spin_lock_init(&l2cap_pi(sk)->send_lock);
2366 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
/* Non-zero iff 'mode' is supported both locally and by the remote
 * feature mask.  ERTM/streaming are enabled locally only behind the
 * enable_ertm module parameter (the gating test is elided here). */
2369 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2371 u32 local_feat_mask = l2cap_feat_mask;
2373 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2376 case L2CAP_MODE_ERTM:
2377 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2378 case L2CAP_MODE_STREAMING:
2379 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Pick an operating mode: keep the requested ERTM/streaming mode when
 * the remote supports it, otherwise fall back to basic mode. */
2385 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2388 case L2CAP_MODE_STREAMING:
2389 case L2CAP_MODE_ERTM:
2390 if (l2cap_mode_supported(mode, remote_feat_mask))
2394 return L2CAP_MODE_BASIC;
/* Build an outgoing configuration request for this channel into 'data':
 * MTU option for basic mode, RFC (and optionally FCS) options for
 * ERTM/streaming.  Returns the request length (return elided here). */
2398 static int l2cap_build_conf_req(struct sock *sk, void *data)
2400 struct l2cap_pinfo *pi = l2cap_pi(sk);
2401 struct l2cap_conf_req *req = data;
2402 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2403 void *ptr = req->data;
2405 BT_DBG("sk %p", sk);
/* Mode is negotiated only on the first request/response exchange. */
2407 if (pi->num_conf_req || pi->num_conf_rsp)
2411 case L2CAP_MODE_STREAMING:
2412 case L2CAP_MODE_ERTM:
2413 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2414 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2415 l2cap_send_disconn_req(pi->conn, sk);
2418 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2424 case L2CAP_MODE_BASIC:
/* Only include MTU when it differs from the spec default. */
2425 if (pi->imtu != L2CAP_DEFAULT_MTU)
2426 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2429 case L2CAP_MODE_ERTM:
2430 rfc.mode = L2CAP_MODE_ERTM;
2431 rfc.txwin_size = pi->tx_win;
2432 rfc.max_transmit = pi->max_tx;
2433 rfc.retrans_timeout = 0;
2434 rfc.monitor_timeout = 0;
/* Cap MPS so a full PDU (hdr + control + FCS overhead = 10 bytes)
 * still fits into the ACL MTU. */
2435 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2436 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2437 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2439 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2440 sizeof(rfc), (unsigned long) &rfc);
2442 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2445 if (pi->fcs == L2CAP_FCS_NONE ||
2446 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2447 pi->fcs = L2CAP_FCS_NONE;
2448 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2452 case L2CAP_MODE_STREAMING:
2453 rfc.mode = L2CAP_MODE_STREAMING;
2455 rfc.max_transmit = 0;
2456 rfc.retrans_timeout = 0;
2457 rfc.monitor_timeout = 0;
2458 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2459 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2460 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2462 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2463 sizeof(rfc), (unsigned long) &rfc);
2465 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2468 if (pi->fcs == L2CAP_FCS_NONE ||
2469 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2470 pi->fcs = L2CAP_FCS_NONE;
2471 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2476 /* FIXME: Need actual value of the flush timeout */
2477 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2478 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2480 req->dcid = cpu_to_le16(pi->dcid);
2481 req->flags = cpu_to_le16(0);
/* Parse the accumulated peer configuration request and build our
 * response into 'data'.  Decodes each option, negotiates mode/MTU/RFC
 * parameters, and echoes accepted or corrected values back. */
2486 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2488 struct l2cap_pinfo *pi = l2cap_pi(sk);
2489 struct l2cap_conf_rsp *rsp = data;
2490 void *ptr = rsp->data;
2491 void *req = pi->conf_req;
2492 int len = pi->conf_len;
2493 int type, hint, olen;
2495 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2496 u16 mtu = L2CAP_DEFAULT_MTU;
2497 u16 result = L2CAP_CONF_SUCCESS;
2499 BT_DBG("sk %p", sk);
2501 while (len >= L2CAP_CONF_OPT_SIZE) {
2502 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently ignored; non-hints we don't know
 * must be reported back as unknown. */
2504 hint = type & L2CAP_CONF_HINT;
2505 type &= L2CAP_CONF_MASK;
2508 case L2CAP_CONF_MTU:
2512 case L2CAP_CONF_FLUSH_TO:
2516 case L2CAP_CONF_QOS:
2519 case L2CAP_CONF_RFC:
2520 if (olen == sizeof(rfc))
2521 memcpy(&rfc, (void *) val, olen);
2524 case L2CAP_CONF_FCS:
2525 if (val == L2CAP_FCS_NONE)
2526 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2534 result = L2CAP_CONF_UNKNOWN;
2535 *((u8 *) ptr++) = type;
2540 if (pi->num_conf_rsp || pi->num_conf_req)
2544 case L2CAP_MODE_STREAMING:
2545 case L2CAP_MODE_ERTM:
2546 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2547 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2548 return -ECONNREFUSED;
2551 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2556 if (pi->mode != rfc.mode) {
2557 result = L2CAP_CONF_UNACCEPT;
2558 rfc.mode = pi->mode;
/* Refuse outright if the second exchange still disagrees on mode. */
2560 if (pi->num_conf_rsp == 1)
2561 return -ECONNREFUSED;
2563 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2564 sizeof(rfc), (unsigned long) &rfc);
2568 if (result == L2CAP_CONF_SUCCESS) {
2569 /* Configure output options and let the other side know
2570 * which ones we don't like. */
2572 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2573 result = L2CAP_CONF_UNACCEPT;
2576 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2578 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2581 case L2CAP_MODE_BASIC:
2582 pi->fcs = L2CAP_FCS_NONE;
2583 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2586 case L2CAP_MODE_ERTM:
2587 pi->remote_tx_win = rfc.txwin_size;
2588 pi->remote_max_tx = rfc.max_transmit;
/* NOTE(review): rfc.max_pdu_size is wire-endian (__le16) but is
 * compared raw against a host value here, and the assignment uses
 * le16_to_cpu() on a host value where cpu_to_le16() is meant.
 * Broken on big-endian hosts — should be fixed upstream. */
2589 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2590 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2592 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* NOTE(review): these store host constants converted with
 * le16_to_cpu(); cpu_to_le16() is the intended direction since the
 * rfc struct is echoed back on the wire. */
2594 rfc.retrans_timeout =
2595 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2596 rfc.monitor_timeout =
2597 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2599 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2601 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2602 sizeof(rfc), (unsigned long) &rfc);
2606 case L2CAP_MODE_STREAMING:
/* NOTE(review): same endianness confusion as the ERTM case above. */
2607 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2608 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2610 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2612 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2614 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2615 sizeof(rfc), (unsigned long) &rfc);
2620 result = L2CAP_CONF_UNACCEPT;
2622 memset(&rfc, 0, sizeof(rfc));
2623 rfc.mode = pi->mode;
2626 if (result == L2CAP_CONF_SUCCESS)
2627 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2629 rsp->scid = cpu_to_le16(pi->dcid);
2630 rsp->result = cpu_to_le16(result);
2631 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's configuration response and build a follow-up request
 * in 'data', adopting accepted values into the channel state. */
2636 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2638 struct l2cap_pinfo *pi = l2cap_pi(sk);
2639 struct l2cap_conf_req *req = data;
2640 void *ptr = req->data;
2643 struct l2cap_conf_rfc rfc;
2645 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2647 while (len >= L2CAP_CONF_OPT_SIZE) {
2648 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2651 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the minimum: reject and counter
 * with the minimum instead. */
2652 if (val < L2CAP_DEFAULT_MIN_MTU) {
2653 *result = L2CAP_CONF_UNACCEPT;
2654 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2657 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2660 case L2CAP_CONF_FLUSH_TO:
2662 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2666 case L2CAP_CONF_RFC:
2667 if (olen == sizeof(rfc))
2668 memcpy(&rfc, (void *)val, olen);
/* A state-2 device must not let the peer change the mode. */
2670 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2671 rfc.mode != pi->mode)
2672 return -ECONNREFUSED;
2674 pi->mode = rfc.mode;
2677 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2678 sizeof(rfc), (unsigned long) &rfc);
2683 if (*result == L2CAP_CONF_SUCCESS) {
2685 case L2CAP_MODE_ERTM:
2686 pi->remote_tx_win = rfc.txwin_size;
2687 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2688 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2689 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2691 case L2CAP_MODE_STREAMING:
2692 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2696 req->dcid = cpu_to_le16(pi->dcid);
2697 req->flags = cpu_to_le16(0x0000);
/* Build a minimal configuration response (no options) with the given
 * result and continuation flags. */
2702 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2704 struct l2cap_conf_rsp *rsp = data;
2705 void *ptr = rsp->data;
2707 BT_DBG("sk %p", sk);
2709 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2710 rsp->result = cpu_to_le16(result);
2711 rsp->flags = cpu_to_le16(flags);
/* Extract the RFC option from a successful configuration response and
 * adopt its timing/MPS parameters.  No-op for basic mode. */
2716 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2718 struct l2cap_pinfo *pi = l2cap_pi(sk);
2721 struct l2cap_conf_rfc rfc;
2723 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2725 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2728 while (len >= L2CAP_CONF_OPT_SIZE) {
2729 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2732 case L2CAP_CONF_RFC:
2733 if (olen == sizeof(rfc))
2734 memcpy(&rfc, (void *)val, olen);
2741 case L2CAP_MODE_ERTM:
2742 pi->remote_tx_win = rfc.txwin_size;
2743 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2744 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2745 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2747 case L2CAP_MODE_STREAMING:
2748 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject on the signalling channel.  A reject of our
 * pending information request ends feature-mask discovery and lets
 * queued channels proceed. */
2752 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2754 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 = "command not understood"; other reasons are ignored. */
2756 if (rej->reason != 0x0000)
2759 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2760 cmd->ident == conn->info_ident) {
2761 del_timer(&conn->info_timer);
2763 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2764 conn->info_ident = 0;
2766 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: locate a listening socket for
 * the PSM, enforce link security (except for SDP), allocate and attach
 * a child channel, and reply with success / pending / error.  When the
 * remote feature mask is still unknown, start an information request. */
2772 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2774 struct l2cap_chan_list *list = &conn->chan_list;
2775 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2776 struct l2cap_conn_rsp rsp;
2777 struct sock *sk, *parent;
2778 int result, status = L2CAP_CS_NO_INFO;
2780 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2781 __le16 psm = req->psm;
2783 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2785 /* Check if we have socket listening on psm */
2786 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2788 result = L2CAP_CR_BAD_PSM;
2792 /* Check if the ACL is secure enough (if not SDP) */
/* 0x0001 = SDP, exempt from the link-mode check. */
2793 if (psm != cpu_to_le16(0x0001) &&
2794 !hci_conn_check_link_mode(conn->hcon)) {
2795 conn->disc_reason = 0x05;
2796 result = L2CAP_CR_SEC_BLOCK;
2800 result = L2CAP_CR_NO_MEM;
2802 /* Check for backlog size */
2803 if (sk_acceptq_is_full(parent)) {
2804 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2808 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2812 write_lock_bh(&list->lock);
2814 /* Check if we already have channel with that dcid */
2815 if (__l2cap_get_chan_by_dcid(list, scid)) {
2816 write_unlock_bh(&list->lock);
2817 sock_set_flag(sk, SOCK_ZAPPED);
2818 l2cap_sock_kill(sk);
2822 hci_conn_hold(conn->hcon);
2824 l2cap_sock_init(sk, parent);
2825 bacpy(&bt_sk(sk)->src, conn->src);
2826 bacpy(&bt_sk(sk)->dst, conn->dst);
2827 l2cap_pi(sk)->psm = psm;
2828 l2cap_pi(sk)->dcid = scid;
2830 __l2cap_chan_add(conn, sk, parent);
2831 dcid = l2cap_pi(sk)->scid;
2833 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2835 l2cap_pi(sk)->ident = cmd->ident;
/* Only answer definitively once the remote feature mask is known. */
2837 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2838 if (l2cap_check_security(sk)) {
2839 if (bt_sk(sk)->defer_setup) {
2840 sk->sk_state = BT_CONNECT2;
2841 result = L2CAP_CR_PEND;
2842 status = L2CAP_CS_AUTHOR_PEND;
2843 parent->sk_data_ready(parent, 0);
2845 sk->sk_state = BT_CONFIG;
2846 result = L2CAP_CR_SUCCESS;
2847 status = L2CAP_CS_NO_INFO;
2850 sk->sk_state = BT_CONNECT2;
2851 result = L2CAP_CR_PEND;
2852 status = L2CAP_CS_AUTHEN_PEND;
2855 sk->sk_state = BT_CONNECT2;
2856 result = L2CAP_CR_PEND;
2857 status = L2CAP_CS_NO_INFO;
2860 write_unlock_bh(&list->lock);
2863 bh_unlock_sock(parent);
2866 rsp.scid = cpu_to_le16(scid);
2867 rsp.dcid = cpu_to_le16(dcid);
2868 rsp.result = cpu_to_le16(result);
2869 rsp.status = cpu_to_le16(status);
2870 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2872 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2873 struct l2cap_info_req info;
2874 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2876 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2877 conn->info_ident = l2cap_get_ident(conn);
2879 mod_timer(&conn->info_timer, jiffies +
2880 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2882 l2cap_send_cmd(conn, conn->info_ident,
2883 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle a Connection Response: on success move to BT_CONFIG and send
 * our configuration request; on pending just mark the channel; any
 * other result tears the channel down. */
2889 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2891 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2892 u16 scid, dcid, result, status;
2896 scid = __le16_to_cpu(rsp->scid);
2897 dcid = __le16_to_cpu(rsp->dcid);
2898 result = __le16_to_cpu(rsp->result);
2899 status = __le16_to_cpu(rsp->status);
2901 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* A pending response may arrive before the peer learned our scid,
 * so fall back to matching on the command ident. */
2904 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2908 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2914 case L2CAP_CR_SUCCESS:
2915 sk->sk_state = BT_CONFIG;
2916 l2cap_pi(sk)->ident = 0;
2917 l2cap_pi(sk)->dcid = dcid;
2918 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2919 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2921 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2922 l2cap_build_conf_req(sk, req), req);
2923 l2cap_pi(sk)->num_conf_req++;
2927 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2931 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle a Configuration Request: accumulate option data (requests may
 * span several commands via the continuation flag), parse the complete
 * set, respond, and bring the channel up once both directions are
 * configured. */
2939 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2941 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2947 dcid = __le16_to_cpu(req->dcid);
2948 flags = __le16_to_cpu(req->flags);
2950 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2952 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2956 if (sk->sk_state == BT_DISCONN)
2959 /* Reject if config buffer is too small. */
2960 len = cmd_len - sizeof(*req);
2961 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2962 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2963 l2cap_build_conf_rsp(sk, rsp,
2964 L2CAP_CONF_REJECT, flags), rsp);
2969 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2970 l2cap_pi(sk)->conf_len += len;
2972 if (flags & 0x0001) {
2973 /* Incomplete config. Send empty response. */
2974 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2975 l2cap_build_conf_rsp(sk, rsp,
2976 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2980 /* Complete config. */
2981 len = l2cap_parse_conf_req(sk, rsp);
2983 l2cap_send_disconn_req(conn, sk);
2987 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2988 l2cap_pi(sk)->num_conf_rsp++;
2990 /* Reset config buffer. */
2991 l2cap_pi(sk)->conf_len = 0;
2993 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: finalize FCS choice, reset sequence
 * state and declare the channel ready. */
2996 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2997 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2998 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2999 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3001 sk->sk_state = BT_CONNECTED;
3003 l2cap_pi(sk)->next_tx_seq = 0;
3004 l2cap_pi(sk)->expected_tx_seq = 0;
3005 __skb_queue_head_init(TX_QUEUE(sk));
3006 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3007 l2cap_ertm_init(sk);
3009 l2cap_chan_ready(sk);
3013 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3015 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3016 l2cap_build_conf_req(sk, buf), buf);
3017 l2cap_pi(sk)->num_conf_req++;
/* Handle a Configuration Response: adopt accepted parameters, retry
 * with adjusted options on UNACCEPT (bounded by the retry limit), and
 * otherwise disconnect.  Brings the channel up once both directions
 * are configured. */
3025 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3027 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3028 u16 scid, flags, result;
/* NOTE(review): cmd->len is wire-endian (__le16) but used raw here;
 * should be __le16_to_cpu(cmd->len) — broken on big-endian hosts. */
3030 int len = cmd->len - sizeof(*rsp);
3032 scid = __le16_to_cpu(rsp->scid);
3033 flags = __le16_to_cpu(rsp->flags);
3034 result = __le16_to_cpu(rsp->result);
3036 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3037 scid, flags, result);
3039 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3044 case L2CAP_CONF_SUCCESS:
3045 l2cap_conf_rfc_get(sk, rsp->data, len);
3048 case L2CAP_CONF_UNACCEPT:
3049 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3052 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3053 l2cap_send_disconn_req(conn, sk);
3057 /* throw out any old stored conf requests */
3058 result = L2CAP_CONF_SUCCESS;
3059 len = l2cap_parse_conf_rsp(sk, rsp->data,
3062 l2cap_send_disconn_req(conn, sk);
3066 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3067 L2CAP_CONF_REQ, len, req);
3068 l2cap_pi(sk)->num_conf_req++;
3069 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable result: reset the link after a grace period. */
3075 sk->sk_state = BT_DISCONN;
3076 sk->sk_err = ECONNRESET;
3077 l2cap_sock_set_timer(sk, HZ * 5);
3078 l2cap_send_disconn_req(conn, sk);
3085 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3087 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3088 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3089 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3090 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3092 sk->sk_state = BT_CONNECTED;
3093 l2cap_pi(sk)->next_tx_seq = 0;
3094 l2cap_pi(sk)->expected_tx_seq = 0;
3095 __skb_queue_head_init(TX_QUEUE(sk));
3096 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3097 l2cap_ertm_init(sk);
3099 l2cap_chan_ready(sk);
/* Handle a Disconnection Request: acknowledge it, drain all transmit
 * and ERTM state (queues and timers), then delete the channel. */
3107 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3109 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3110 struct l2cap_disconn_rsp rsp;
3114 scid = __le16_to_cpu(req->scid);
3115 dcid = __le16_to_cpu(req->dcid);
3117 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid — look the channel up by it. */
3119 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3123 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3124 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3125 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3127 sk->sk_shutdown = SHUTDOWN_MASK;
3129 skb_queue_purge(TX_QUEUE(sk));
3131 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3132 skb_queue_purge(SREJ_QUEUE(sk));
3133 skb_queue_purge(BUSY_QUEUE(sk));
3134 del_timer(&l2cap_pi(sk)->retrans_timer);
3135 del_timer(&l2cap_pi(sk)->monitor_timer);
3136 del_timer(&l2cap_pi(sk)->ack_timer);
3139 l2cap_chan_del(sk, ECONNRESET);
3142 l2cap_sock_kill(sk);
/* Handle an incoming L2CAP Disconnection Response: the peer confirmed
 * a disconnect we initiated.  Purge queues/timers and delete the
 * channel with error 0 (clean, locally-initiated shutdown).
 * NOTE(review): interior lines elided in this excerpt. */
3146 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3148 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3152 scid = __le16_to_cpu(rsp->scid);
3153 dcid = __le16_to_cpu(rsp->dcid);
3155 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3157 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3161 skb_queue_purge(TX_QUEUE(sk));
3163 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3164 skb_queue_purge(SREJ_QUEUE(sk));
3165 skb_queue_purge(BUSY_QUEUE(sk));
3166 del_timer(&l2cap_pi(sk)->retrans_timer);
3167 del_timer(&l2cap_pi(sk)->monitor_timer);
3168 del_timer(&l2cap_pi(sk)->ack_timer);
/* err == 0: we asked for this disconnect, so no error is reported. */
3171 l2cap_chan_del(sk, 0);
3174 l2cap_sock_kill(sk);
/* Answer an L2CAP Information Request.  Supported queries:
 *  - FEAT_MASK: advertise our feature mask (ERTM/streaming bits ORed in;
 *    the guarding condition for that appears elided in this excerpt);
 *  - FIXED_CHAN: report the fixed-channel bitmap (l2cap_fixed_chan);
 *  - anything else: reply NOTSUPP.
 * NOTE(review): buffer declarations and break statements are elided. */
3178 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3180 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3183 type = __le16_to_cpu(req->type);
3185 BT_DBG("type 0x%4.4x", type);
3187 if (type == L2CAP_IT_FEAT_MASK) {
3189 u32 feat_mask = l2cap_feat_mask;
3190 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3191 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3192 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3194 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Feature mask is little-endian on the wire; rsp->data may be unaligned. */
3196 put_unaligned_le32(feat_mask, rsp->data);
3197 l2cap_send_cmd(conn, cmd->ident,
3198 L2CAP_INFO_RSP, sizeof(buf), buf);
3199 } else if (type == L2CAP_IT_FIXED_CHAN) {
3201 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3202 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3203 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* buf + 4 skips the type/result header; 8-byte fixed-channel bitmap. */
3204 memcpy(buf + 4, l2cap_fixed_chan, 8);
3205 l2cap_send_cmd(conn, cmd->ident,
3206 L2CAP_INFO_RSP, sizeof(buf), buf);
3208 struct l2cap_info_rsp rsp;
3209 rsp.type = cpu_to_le16(type);
3210 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3211 l2cap_send_cmd(conn, cmd->ident,
3212 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an L2CAP Information Response from the peer.  On a FEAT_MASK
 * reply, cache the peer's feature mask; if it advertises fixed channels,
 * chain a FIXED_CHAN request, otherwise mark the info exchange done and
 * kick pending channel setup via l2cap_conn_start().
 * NOTE(review): interior lines (ident checks, braces) elided. */
3218 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3220 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3223 type = __le16_to_cpu(rsp->type);
3224 result = __le16_to_cpu(rsp->result);
3226 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* Stop the info-request timeout now that a response arrived. */
3228 del_timer(&conn->info_timer);
3230 if (type == L2CAP_IT_FEAT_MASK) {
3231 conn->feat_mask = get_unaligned_le32(rsp->data);
3233 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3234 struct l2cap_info_req req;
3235 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3237 conn->info_ident = l2cap_get_ident(conn);
3239 l2cap_send_cmd(conn, conn->info_ident,
3240 L2CAP_INFO_REQ, sizeof(req), &req);
3242 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3243 conn->info_ident = 0;
3245 l2cap_conn_start(conn);
3247 } else if (type == L2CAP_IT_FIXED_CHAN) {
3248 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3249 conn->info_ident = 0;
3251 l2cap_conn_start(conn);
/* Parse and dispatch all signaling commands carried in one frame on the
 * L2CAP signaling channel (CID 0x0001).  A frame may contain several
 * commands back to back; each is validated (length fits, non-zero ident)
 * and routed to its handler.  On handler error a Command Reject is sent.
 * NOTE(review): the skb_pull/len bookkeeping and switch braces are
 * elided in this excerpt. */
3257 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3259 u8 *data = skb->data;
3261 struct l2cap_cmd_hdr cmd;
/* Give raw sockets a copy of the signaling traffic first. */
3264 l2cap_raw_recv(conn, skb);
3266 while (len >= L2CAP_CMD_HDR_SIZE) {
3268 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3269 data += L2CAP_CMD_HDR_SIZE;
3270 len -= L2CAP_CMD_HDR_SIZE;
3272 cmd_len = le16_to_cpu(cmd.len);
3274 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Reject truncated commands and the reserved ident 0. */
3276 if (cmd_len > len || !cmd.ident) {
3277 BT_DBG("corrupted command");
3282 case L2CAP_COMMAND_REJ:
3283 l2cap_command_rej(conn, &cmd, data);
3286 case L2CAP_CONN_REQ:
3287 err = l2cap_connect_req(conn, &cmd, data);
3290 case L2CAP_CONN_RSP:
3291 err = l2cap_connect_rsp(conn, &cmd, data);
3294 case L2CAP_CONF_REQ:
3295 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3298 case L2CAP_CONF_RSP:
3299 err = l2cap_config_rsp(conn, &cmd, data);
3302 case L2CAP_DISCONN_REQ:
3303 err = l2cap_disconnect_req(conn, &cmd, data);
3306 case L2CAP_DISCONN_RSP:
3307 err = l2cap_disconnect_rsp(conn, &cmd, data);
3310 case L2CAP_ECHO_REQ:
/* Echo simply reflects the payload back with the same ident. */
3311 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3314 case L2CAP_ECHO_RSP:
3317 case L2CAP_INFO_REQ:
3318 err = l2cap_information_req(conn, &cmd, data);
3321 case L2CAP_INFO_RSP:
3322 err = l2cap_information_rsp(conn, &cmd, data);
3326 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3332 struct l2cap_cmd_rej rej;
3333 BT_DBG("error %d", err);
3335 /* FIXME: Map err to a valid reason */
3336 rej.reason = cpu_to_le16(0);
3337 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the 2-byte CRC16 FCS trailing an ERTM/streaming frame.
 * skb_trim() removes the FCS from the payload length; the bytes are
 * still present in the buffer just past the new tail, which is why
 * reading at skb->data + skb->len after the trim is valid.
 * hdr_size covers the L2CAP header plus the 2-byte control field,
 * both of which are included in the CRC computation.
 * Returns 0 on match (return statements elided in this excerpt). */
3347 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3349 u16 our_fcs, rcv_fcs;
3350 int hdr_size = L2CAP_HDR_SIZE + 2;
3352 if (pi->fcs == L2CAP_FCS_CRC16) {
3353 skb_trim(skb, skb->len - 2);
3354 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3355 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3357 if (our_fcs != rcv_fcs)
/* Answer a poll (P-bit) from the peer: if we are locally busy send an
 * RNR with the F-bit set; otherwise try to send pending I-frames, and
 * if nothing was sent, acknowledge with a plain RR carrying our
 * buffer_seq.  SEND_FBIT makes the next outgoing frame carry F=1. */
3363 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3365 struct l2cap_pinfo *pi = l2cap_pi(sk);
3368 pi->frames_sent = 0;
3369 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3371 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3373 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3374 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3375 l2cap_send_sframe(pi, control);
3376 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3377 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3380 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
3381 __mod_retrans_timer();
/* send_lock serializes I-frame transmission with other senders. */
3383 spin_lock_bh(&pi->send_lock);
3384 l2cap_ertm_send(sk);
3385 spin_unlock_bh(&pi->send_lock);
/* No I-frame carried the F-bit — answer the poll with an RR instead. */
3387 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3388 pi->frames_sent == 0) {
3389 control |= L2CAP_SUPER_RCV_READY;
3390 l2cap_send_sframe(pi, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq.  An empty queue takes the skb directly;
 * a matching tx_seq indicates a duplicate (handling elided here);
 * otherwise the skb is inserted before the first larger entry or
 * appended at the tail.
 * NOTE(review): return statements are elided in this excerpt —
 * presumably <0 for duplicates; confirm against the full source. */
3394 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3396 struct sk_buff *next_skb;
3398 bt_cb(skb)->tx_seq = tx_seq;
3399 bt_cb(skb)->sar = sar;
3401 next_skb = skb_peek(SREJ_QUEUE(sk));
3403 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3408 if (bt_cb(next_skb)->tx_seq == tx_seq)
3411 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3412 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3416 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3419 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3421 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble an SDU from ERTM I-frames according to the SAR bits:
 *  - UNSEGMENTED: deliver the skb to the socket directly;
 *  - START: read the 2-byte SDU length, allocate pi->sdu, copy in;
 *  - CONTINUE: append, tracking partial_sdu_len against sdu_len;
 *  - END (implicit final case): append, clone and queue to the socket;
 *    SAR_RETRY lets a failed sock_queue_rcv_skb be retried without
 *    double-appending the final fragment.
 * On protocol violations the channel is disconnected (label elided).
 * NOTE(review): error labels, skb_pull calls and several braces are
 * elided in this excerpt. */
3426 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3428 struct l2cap_pinfo *pi = l2cap_pi(sk);
3429 struct sk_buff *_skb;
3432 switch (control & L2CAP_CTRL_SAR) {
3433 case L2CAP_SDU_UNSEGMENTED:
/* A new unsegmented SDU while reassembly is in progress is an error. */
3434 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3437 err = sock_queue_rcv_skb(sk, skb);
3443 case L2CAP_SDU_START:
3444 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3447 pi->sdu_len = get_unaligned_le16(skb->data);
/* Reject SDUs larger than the negotiated incoming MTU. */
3449 if (pi->sdu_len > pi->imtu)
3452 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3456 /* pull sdu_len bytes only after alloc, because of Local Busy
3457 * condition we have to be sure that this will be executed
3458 * only once, i.e., when alloc does not fail */
3461 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3463 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3464 pi->partial_sdu_len = skb->len;
3467 case L2CAP_SDU_CONTINUE:
3468 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3474 pi->partial_sdu_len += skb->len;
3475 if (pi->partial_sdu_len > pi->sdu_len)
3478 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3483 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
/* Only append the final fragment once; on retry skip straight to
 * re-queuing the already-assembled SDU. */
3489 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3490 pi->partial_sdu_len += skb->len;
3492 if (pi->partial_sdu_len > pi->imtu)
3495 if (pi->partial_sdu_len != pi->sdu_len)
3498 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3501 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3503 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3507 err = sock_queue_rcv_skb(sk, _skb);
3510 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3514 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3515 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3529 l2cap_send_disconn_req(pi->conn, sk);
/* Workqueue handler for the local-busy condition.  While frames are
 * parked on BUSY_QUEUE, sleep in bounded intervals waiting for socket
 * receive space; give up and disconnect after L2CAP_LOCAL_BUSY_TRIES.
 * Once the queue drains, exit local-busy: if an RNR was sent, poll the
 * peer with RR+P and arm the monitor timer, then clear the busy flags.
 * NOTE(review): lock/unlock of the socket and several braces are
 * elided in this excerpt. */
3534 static void l2cap_busy_work(struct work_struct *work)
3536 DECLARE_WAITQUEUE(wait, current);
3537 struct l2cap_pinfo *pi =
3538 container_of(work, struct l2cap_pinfo, busy_work);
/* struct l2cap_pinfo embeds the sock as its first member, so this
 * cast recovers the owning socket. */
3539 struct sock *sk = (struct sock *)pi;
3540 int n_tries = 0, timeo = HZ/5, err;
3541 struct sk_buff *skb;
3546 add_wait_queue(sk_sleep(sk), &wait);
3547 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3548 set_current_state(TASK_INTERRUPTIBLE);
3550 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3552 l2cap_send_disconn_req(pi->conn, sk);
3559 if (signal_pending(current)) {
3560 err = sock_intr_errno(timeo);
3565 timeo = schedule_timeout(timeo);
3568 err = sock_error(sk);
/* Try to push every parked frame through reassembly; on failure put
 * it back at the head and wait again. */
3572 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3573 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3574 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3576 skb_queue_head(BUSY_QUEUE(sk), skb);
/* Sequence numbers are modulo-64 in ERTM. */
3580 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3587 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
3590 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3591 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3592 l2cap_send_sframe(pi, control);
3593 l2cap_pi(sk)->retry_count = 1;
3595 del_timer(&pi->retrans_timer);
3596 __mod_monitor_timer();
3598 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3601 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3602 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3604 set_current_state(TASK_RUNNING);
3605 remove_wait_queue(sk_sleep(sk), &wait);
/* Push a received I-frame toward the socket.  If already in local-busy,
 * just park the frame on BUSY_QUEUE.  Otherwise attempt reassembly;
 * on success bump buffer_seq.  If the socket cannot take more data,
 * enter the busy condition: park the frame, send RNR, and schedule
 * l2cap_busy_work to drain later.
 * NOTE(review): return paths elided in this excerpt. */
3610 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3612 struct l2cap_pinfo *pi = l2cap_pi(sk);
3615 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3616 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3617 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3621 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3623 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3627 /* Busy Condition */
3628 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3629 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3630 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3632 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3633 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3634 l2cap_send_sframe(pi, sctrl);
3636 pi->conn_state |= L2CAP_CONN_RNR_SENT;
/* _busy_wq is the module-global single-thread workqueue created in
 * l2cap_init(). */
3638 queue_work(_busy_wq, &pi->busy_work);
/* Reassemble an SDU in streaming mode.  Same SAR state machine as the
 * ERTM variant, but streaming is lossy: on a SAR violation the partial
 * SDU is simply dropped (kfree paths elided) rather than the channel
 * being disconnected, and there is no retry logic. */
3643 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3645 struct l2cap_pinfo *pi = l2cap_pi(sk);
3646 struct sk_buff *_skb;
3650 * TODO: We have to notify the userland if some data is lost with the
3654 switch (control & L2CAP_CTRL_SAR) {
3655 case L2CAP_SDU_UNSEGMENTED:
3656 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3661 err = sock_queue_rcv_skb(sk, skb);
3667 case L2CAP_SDU_START:
3668 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* First two payload bytes carry the total SDU length. */
3673 pi->sdu_len = get_unaligned_le16(skb->data);
3676 if (pi->sdu_len > pi->imtu) {
3681 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3687 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3689 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3690 pi->partial_sdu_len = skb->len;
3694 case L2CAP_SDU_CONTINUE:
3695 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3698 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3700 pi->partial_sdu_len += skb->len;
3701 if (pi->partial_sdu_len > pi->sdu_len)
3709 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3712 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3714 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3715 pi->partial_sdu_len += skb->len;
3717 if (pi->partial_sdu_len > pi->imtu)
/* Deliver only if the assembled size matches the advertised length;
 * mismatches are silently dropped in streaming mode. */
3720 if (pi->partial_sdu_len == pi->sdu_len) {
3721 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3722 err = sock_queue_rcv_skb(sk, _skb);
/* After a missing frame was recovered via SREJ, flush the run of
 * consecutive in-order frames now sitting at the head of SREJ_QUEUE
 * into reassembly, advancing buffer_seq_srej and tx_seq (mod 64)
 * until the next gap. */
3737 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3739 struct sk_buff *skb;
3742 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3743 if (bt_cb(skb)->tx_seq != tx_seq)
3746 skb = skb_dequeue(SREJ_QUEUE(sk));
3747 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3748 l2cap_ertm_reassembly_sdu(sk, skb, control);
3749 l2cap_pi(sk)->buffer_seq_srej =
3750 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3751 tx_seq = (tx_seq + 1) % 64;
/* Re-send pending SREJ frames from SREJ_LIST.  The entry matching
 * tx_seq is removed (its frame has arrived — free/removal lines are
 * elided here); the rest are re-sent and moved to the list tail,
 * preserving the pending order. */
3755 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3757 struct l2cap_pinfo *pi = l2cap_pi(sk);
3758 struct srej_list *l, *tmp;
3761 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3762 if (l->tx_seq == tx_seq) {
3767 control = L2CAP_SUPER_SELECT_REJECT;
3768 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3769 l2cap_send_sframe(pi, control);
3771 list_add_tail(&l->list, SREJ_LIST(sk));
/* Send one SREJ per missing sequence number between expected_tx_seq
 * and the received tx_seq, recording each on SREJ_LIST, then advance
 * expected_tx_seq past the received frame.
 * NOTE(review): the kzalloc return is not checked on the visible
 * lines — confirm against the full source whether this is handled. */
3775 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3777 struct l2cap_pinfo *pi = l2cap_pi(sk);
3778 struct srej_list *new;
3781 while (tx_seq != pi->expected_tx_seq) {
3782 control = L2CAP_SUPER_SELECT_REJECT;
3783 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3784 l2cap_send_sframe(pi, control);
3786 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3787 new->tx_seq = pi->expected_tx_seq;
3788 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3789 list_add_tail(&new->list, SREJ_LIST(sk));
3791 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* Core ERTM receive path for I-frames.  Handles, in order:
 *  - F-bit answering our WAIT_F poll (stop monitor, restart retrans);
 *  - acking our outstanding frames via req_seq;
 *  - in-sequence delivery (goto target elided) vs. out-of-window drop;
 *  - SREJ bookkeeping: filling gaps, detecting duplicates, or entering
 *    SREJ_SENT state and soliciting the missing frames;
 *  - piggy-backed acks every (tx_win/6)+1 frames.
 * NOTE(review): labels, gotos and many braces are elided in this
 * excerpt; the control flow shown is partial. */
3794 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3796 struct l2cap_pinfo *pi = l2cap_pi(sk);
3797 u8 tx_seq = __get_txseq(rx_control);
3798 u8 req_seq = __get_reqseq(rx_control);
3799 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3800 int tx_seq_offset, expected_tx_seq_offset;
/* Ack threshold: acknowledge after roughly a sixth of the window. */
3801 int num_to_ack = (pi->tx_win/6) + 1;
3804 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3806 if (L2CAP_CTRL_FINAL & rx_control &&
3807 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3808 del_timer(&pi->monitor_timer);
3809 if (pi->unacked_frames > 0)
3810 __mod_retrans_timer();
3811 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3814 pi->expected_ack_seq = req_seq;
3815 l2cap_drop_acked_frames(sk);
3817 if (tx_seq == pi->expected_tx_seq)
/* Offsets are computed modulo 64 relative to buffer_seq. */
3820 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3821 if (tx_seq_offset < 0)
3822 tx_seq_offset += 64;
3824 /* invalid tx_seq */
3825 if (tx_seq_offset >= pi->tx_win) {
3826 l2cap_send_disconn_req(pi->conn, sk);
/* NOTE(review): '==' on a bitmask flag looks suspicious ('&' is used
 * elsewhere for conn_state tests) — verify against the full source. */
3830 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3833 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3834 struct srej_list *first;
3836 first = list_first_entry(SREJ_LIST(sk),
3837 struct srej_list, list);
3838 if (tx_seq == first->tx_seq) {
3839 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3840 l2cap_check_srej_gap(sk, tx_seq);
3842 list_del(&first->list);
/* All SREJs answered: leave SREJ state and resync buffer_seq. */
3845 if (list_empty(SREJ_LIST(sk))) {
3846 pi->buffer_seq = pi->buffer_seq_srej;
3847 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3851 struct srej_list *l;
3853 /* duplicated tx_seq */
3854 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3857 list_for_each_entry(l, SREJ_LIST(sk), list) {
3858 if (l->tx_seq == tx_seq) {
3859 l2cap_resend_srejframe(sk, tx_seq);
3863 l2cap_send_srejframe(sk, tx_seq);
3866 expected_tx_seq_offset =
3867 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3868 if (expected_tx_seq_offset < 0)
3869 expected_tx_seq_offset += 64;
3871 /* duplicated tx_seq */
3872 if (tx_seq_offset < expected_tx_seq_offset)
/* First gap detected: enter SREJ_SENT state and start recovery. */
3875 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3877 INIT_LIST_HEAD(SREJ_LIST(sk));
3878 pi->buffer_seq_srej = pi->buffer_seq;
3880 __skb_queue_head_init(SREJ_QUEUE(sk));
3881 __skb_queue_head_init(BUSY_QUEUE(sk));
3882 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3884 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3886 l2cap_send_srejframe(sk, tx_seq);
3891 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3893 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3894 bt_cb(skb)->tx_seq = tx_seq;
3895 bt_cb(skb)->sar = sar;
3896 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3900 if (rx_control & L2CAP_CTRL_FINAL) {
3901 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3902 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3904 l2cap_retransmit_frames(sk);
3907 err = l2cap_push_rx_skb(sk, skb, rx_control);
3913 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3914 if (pi->num_acked == num_to_ack - 1)
/* Handle a Receiver Ready S-frame: ack outstanding frames, then
 *  - P-bit: answer the poll (with srejtail if in SREJ state, otherwise
 *    via l2cap_send_i_or_rr_or_rnr);
 *  - F-bit: clear remote-busy and retransmit if a REJ was pending;
 *  - plain RR: clear remote-busy and resume transmission.
 * NOTE(review): several braces/else lines elided in this excerpt. */
3924 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3926 struct l2cap_pinfo *pi = l2cap_pi(sk);
3928 pi->expected_ack_seq = __get_reqseq(rx_control);
3929 l2cap_drop_acked_frames(sk);
3931 if (rx_control & L2CAP_CTRL_POLL) {
3932 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3933 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3934 (pi->unacked_frames > 0))
3935 __mod_retrans_timer();
3937 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3938 l2cap_send_srejtail(sk);
3940 l2cap_send_i_or_rr_or_rnr(sk);
3943 } else if (rx_control & L2CAP_CTRL_FINAL) {
3944 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3946 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3947 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3949 l2cap_retransmit_frames(sk);
3952 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3953 (pi->unacked_frames > 0))
3954 __mod_retrans_timer();
3956 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3957 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3960 spin_lock_bh(&pi->send_lock);
3961 l2cap_ertm_send(sk);
3962 spin_unlock_bh(&pi->send_lock);
/* Handle a Reject S-frame: the peer asks for retransmission starting
 * at req_seq.  Ack everything before it, retransmit, and track REJ_ACT
 * so a later F-bit doesn't trigger a second retransmission while a
 * WAIT_F poll is outstanding. */
3967 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3969 struct l2cap_pinfo *pi = l2cap_pi(sk);
3970 u8 tx_seq = __get_reqseq(rx_control);
3972 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3974 pi->expected_ack_seq = tx_seq;
3975 l2cap_drop_acked_frames(sk);
3977 if (rx_control & L2CAP_CTRL_FINAL) {
3978 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3979 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3981 l2cap_retransmit_frames(sk);
3983 l2cap_retransmit_frames(sk);
3985 if (pi->conn_state & L2CAP_CONN_WAIT_F)
3986 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective Reject S-frame: retransmit exactly the requested
 * frame.  With P-bit: also ack up to tx_seq and resume sending; with
 * F-bit: clear SREJ_ACT if this answers the saved poll; otherwise
 * remember the reqseq so a later F-bit can be matched (SREJ_ACT). */
3989 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3991 struct l2cap_pinfo *pi = l2cap_pi(sk);
3992 u8 tx_seq = __get_reqseq(rx_control);
3994 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3996 if (rx_control & L2CAP_CTRL_POLL) {
3997 pi->expected_ack_seq = tx_seq;
3998 l2cap_drop_acked_frames(sk);
3999 l2cap_retransmit_one_frame(sk, tx_seq);
4001 spin_lock_bh(&pi->send_lock);
4002 l2cap_ertm_send(sk);
4003 spin_unlock_bh(&pi->send_lock);
4005 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4006 pi->srej_save_reqseq = tx_seq;
4007 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4009 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* Only skip the retransmit if this F answers the saved SREJ poll. */
4010 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4011 pi->srej_save_reqseq == tx_seq)
4012 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4014 l2cap_retransmit_one_frame(sk, tx_seq);
4016 l2cap_retransmit_one_frame(sk, tx_seq);
4017 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4018 pi->srej_save_reqseq = tx_seq;
4019 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver Not Ready S-frame: mark the peer busy, ack up to
 * reqseq, stop retransmissions (unless we are mid-SREJ recovery), and
 * answer a P-bit poll with RR/RNR or the SREJ tail as appropriate. */
4024 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4026 struct l2cap_pinfo *pi = l2cap_pi(sk);
4027 u8 tx_seq = __get_reqseq(rx_control);
4029 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4030 pi->expected_ack_seq = tx_seq;
4031 l2cap_drop_acked_frames(sk);
4033 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
4034 del_timer(&pi->retrans_timer);
4035 if (rx_control & L2CAP_CTRL_POLL)
4036 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4040 if (rx_control & L2CAP_CTRL_POLL)
4041 l2cap_send_srejtail(sk);
4043 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* Dispatch a received S-frame to its supervisory-type handler
 * (RR/REJ/SREJ/RNR).  An F-bit answering our WAIT_F poll stops the
 * monitor timer and re-arms retransmission first.
 * NOTE(review): break/return lines elided in this excerpt. */
4046 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4048 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4050 if (L2CAP_CTRL_FINAL & rx_control &&
4051 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4052 del_timer(&l2cap_pi(sk)->monitor_timer);
4053 if (l2cap_pi(sk)->unacked_frames > 0)
4054 __mod_retrans_timer();
4055 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4058 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4059 case L2CAP_SUPER_RCV_READY:
4060 l2cap_data_channel_rrframe(sk, rx_control);
4063 case L2CAP_SUPER_REJECT:
4064 l2cap_data_channel_rejframe(sk, rx_control);
4067 case L2CAP_SUPER_SELECT_REJECT:
4068 l2cap_data_channel_srejframe(sk, rx_control);
4071 case L2CAP_SUPER_RCV_NOT_READY:
4072 l2cap_data_channel_rnrframe(sk, rx_control);
/* Entry point for data on a connection-oriented channel: look up the
 * socket by CID and dispatch by channel mode.
 *  - BASIC: bounds-check against imtu, queue straight to the socket;
 *  - ERTM: strip control/FCS, validate length and req_seq window, then
 *    hand to the I-frame or S-frame handler;
 *  - STREAMING: validate, track tx_seq (tolerating loss), reassemble.
 * NOTE(review): skb_pull calls, len computation, goto labels and the
 * drop/done paths are elided in this excerpt. */
4080 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4083 struct l2cap_pinfo *pi;
4086 int next_tx_seq_offset, req_seq_offset;
4088 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4090 BT_DBG("unknown cid 0x%4.4x", cid);
4096 BT_DBG("sk %p, len %d", sk, skb->len);
4098 if (sk->sk_state != BT_CONNECTED)
4102 case L2CAP_MODE_BASIC:
4103 /* If socket recv buffers overflows we drop data here
4104 * which is *bad* because L2CAP has to be reliable.
4105 * But we don't have any other choice. L2CAP doesn't
4106 * provide flow control mechanism. */
4108 if (pi->imtu < skb->len)
4111 if (!sock_queue_rcv_skb(sk, skb))
4115 case L2CAP_MODE_ERTM:
/* First two payload bytes are the ERTM control field. */
4116 control = get_unaligned_le16(skb->data);
4120 if (__is_sar_start(control) && __is_iframe(control))
4123 if (pi->fcs == L2CAP_FCS_CRC16)
4127 * We can just drop the corrupted I-frame here.
4128 * Receiver will miss it and start proper recovery
4129 * procedures and ask retransmission.
4131 if (len > pi->mps) {
4132 l2cap_send_disconn_req(pi->conn, sk);
4136 if (l2cap_check_fcs(pi, skb))
4139 req_seq = __get_reqseq(control);
4140 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4141 if (req_seq_offset < 0)
4142 req_seq_offset += 64;
4144 next_tx_seq_offset =
4145 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4146 if (next_tx_seq_offset < 0)
4147 next_tx_seq_offset += 64;
4149 /* check for invalid req-seq */
/* A req_seq acking frames we never sent is a protocol violation. */
4150 if (req_seq_offset > next_tx_seq_offset) {
4151 l2cap_send_disconn_req(pi->conn, sk);
4155 if (__is_iframe(control)) {
4157 l2cap_send_disconn_req(pi->conn, sk);
4161 l2cap_data_channel_iframe(sk, control, skb);
4164 l2cap_send_disconn_req(pi->conn, sk);
4168 l2cap_data_channel_sframe(sk, control, skb);
4173 case L2CAP_MODE_STREAMING:
4174 control = get_unaligned_le16(skb->data);
4178 if (__is_sar_start(control))
4181 if (pi->fcs == L2CAP_FCS_CRC16)
4184 if (len > pi->mps || len < 4 || __is_sframe(control))
4187 if (l2cap_check_fcs(pi, skb))
4190 tx_seq = __get_txseq(control);
/* Streaming tolerates loss: resync expected_tx_seq on a gap. */
4192 if (pi->expected_tx_seq == tx_seq)
4193 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4195 pi->expected_tx_seq = (tx_seq + 1) % 64;
4197 l2cap_streaming_reassembly_sdu(sk, skb, control);
4202 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Deliver a connectionless (CID 0x0002) frame to the socket bound to
 * the given PSM, subject to state and incoming-MTU checks. */
4216 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4220 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4224 BT_DBG("sk %p, len %d", sk, skb->len);
4226 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4229 if (l2cap_pi(sk)->imtu < skb->len)
4232 if (!sock_queue_rcv_skb(sk, skb))
/* Route one complete L2CAP frame: strip the basic header, verify that
 * the header length matches the skb, then dispatch by CID — signaling,
 * connectionless (PSM-addressed), or connection-oriented data. */
4244 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4246 struct l2cap_hdr *lh = (void *) skb->data;
4250 skb_pull(skb, L2CAP_HDR_SIZE);
4251 cid = __le16_to_cpu(lh->cid);
4252 len = __le16_to_cpu(lh->len);
4254 if (len != skb->len) {
4259 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4262 case L2CAP_CID_SIGNALING:
4263 l2cap_sig_channel(conn, skb);
4266 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a 2-byte PSM before the payload. */
4267 psm = get_unaligned_le16(skb->data);
4269 l2cap_conless_channel(conn, psm, skb);
4273 l2cap_data_channel(conn, cid, skb);
4278 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection?  Scan the
 * listening L2CAP sockets; an exact local-address match wins over a
 * BDADDR_ANY wildcard match.  The returned link-mode bits include
 * HCI_LM_MASTER when a matching socket requested role switch. */
4280 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4282 int exact = 0, lm1 = 0, lm2 = 0;
4283 register struct sock *sk;
4284 struct hlist_node *node;
4286 if (type != ACL_LINK)
4289 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4291 /* Find listening sockets and check their link_mode */
4292 read_lock(&l2cap_sk_list.lock);
4293 sk_for_each(sk, node, &l2cap_sk_list.head) {
4294 if (sk->sk_state != BT_LISTEN)
4297 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4298 lm1 |= HCI_LM_ACCEPT;
4299 if (l2cap_pi(sk)->role_switch)
4300 lm1 |= HCI_LM_MASTER;
4302 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4303 lm2 |= HCI_LM_ACCEPT;
4304 if (l2cap_pi(sk)->role_switch)
4305 lm2 |= HCI_LM_MASTER;
4308 read_unlock(&l2cap_sk_list.lock);
4310 return exact ? lm1 : lm2;
/* HCI callback: ACL connection attempt completed.  On success attach
 * an l2cap_conn to the hcon and mark it ready; on failure tear down
 * any L2CAP state with the mapped error code. */
4313 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4315 struct l2cap_conn *conn;
4317 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4319 if (hcon->type != ACL_LINK)
4323 conn = l2cap_conn_add(hcon, status);
4325 l2cap_conn_ready(conn);
4327 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: report the disconnect reason recorded on this
 * connection (fallback return for the non-ACL/no-conn case elided). */
4332 static int l2cap_disconn_ind(struct hci_conn *hcon)
4334 struct l2cap_conn *conn = hcon->l2cap_data;
4336 BT_DBG("hcon %p", hcon);
4338 if (hcon->type != ACL_LINK || !conn)
4341 return conn->disc_reason;
/* HCI callback: ACL link went down — destroy the L2CAP connection and
 * propagate the mapped error to its channels. */
4344 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4346 BT_DBG("hcon %p reason %d", hcon, reason);
4348 if (hcon->type != ACL_LINK)
4351 l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on a connected channel.  Encryption
 * dropping: MEDIUM security gets a 5 s grace timer before teardown,
 * HIGH security is closed immediately.  Encryption coming up clears
 * any pending grace timer on MEDIUM-security channels.  Raw/dgram
 * sockets are exempt. */
4356 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4358 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4361 if (encrypt == 0x00) {
4362 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4363 l2cap_sock_clear_timer(sk);
4364 l2cap_sock_set_timer(sk, HZ * 5);
4365 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4366 __l2cap_sock_close(sk, ECONNREFUSED);
4368 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4369 l2cap_sock_clear_timer(sk);
/* HCI callback: authentication/encryption procedure finished.  Walk
 * every channel on the connection:
 *  - established channels get l2cap_check_encryption();
 *  - BT_CONNECT channels that were waiting on security now send their
 *    Connection Request (or are timed out on failure — path elided);
 *  - BT_CONNECT2 (incoming, pending security) channels are answered
 *    with success or SEC_BLOCK.
 * NOTE(review): bh_lock/unlock of each sk and the status checks around
 * the BT_CONNECT branch are elided in this excerpt. */
4373 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4375 struct l2cap_chan_list *l;
4376 struct l2cap_conn *conn = hcon->l2cap_data;
4382 l = &conn->chan_list;
4384 BT_DBG("conn %p", conn);
4386 read_lock(&l->lock);
4388 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
4391 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4396 if (!status && (sk->sk_state == BT_CONNECTED ||
4397 sk->sk_state == BT_CONFIG)) {
4398 l2cap_check_encryption(sk, encrypt);
4403 if (sk->sk_state == BT_CONNECT) {
4405 struct l2cap_conn_req req;
4406 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4407 req.psm = l2cap_pi(sk)->psm;
4409 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4410 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
4412 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4413 L2CAP_CONN_REQ, sizeof(req), &req);
4415 l2cap_sock_clear_timer(sk);
4416 l2cap_sock_set_timer(sk, HZ / 10);
4418 } else if (sk->sk_state == BT_CONNECT2) {
4419 struct l2cap_conn_rsp rsp;
4423 sk->sk_state = BT_CONFIG;
4424 result = L2CAP_CR_SUCCESS;
4426 sk->sk_state = BT_DISCONN;
4427 l2cap_sock_set_timer(sk, HZ / 10);
/* Security check failed: refuse the pending incoming connection. */
4428 result = L2CAP_CR_SEC_BLOCK;
4431 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4432 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4433 rsp.result = cpu_to_le16(result);
4434 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4435 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4436 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4442 read_unlock(&l->lock);
/* HCI callback: reassemble L2CAP frames from ACL fragments.
 * ACL_START begins a frame: if it is complete, process immediately;
 * otherwise allocate conn->rx_skb for the full length and accumulate.
 * Continuation fragments are appended until rx_len reaches zero.
 * Any inconsistency (unexpected start/continuation, short header,
 * oversized fragment) drops the partial frame and marks the
 * connection unreliable (ECOMM).
 * NOTE(review): goto drop/free labels and some length checks are
 * elided in this excerpt. */
4447 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4449 struct l2cap_conn *conn = hcon->l2cap_data;
4451 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4454 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4456 if (flags & ACL_START) {
4457 struct l2cap_hdr *hdr;
/* A start fragment while rx_skb is pending means we lost the end of
 * the previous frame — discard it. */
4461 BT_ERR("Unexpected start frame (len %d)", skb->len);
4462 kfree_skb(conn->rx_skb);
4463 conn->rx_skb = NULL;
4465 l2cap_conn_unreliable(conn, ECOMM);
4469 BT_ERR("Frame is too short (len %d)", skb->len);
4470 l2cap_conn_unreliable(conn, ECOMM);
4474 hdr = (struct l2cap_hdr *) skb->data;
4475 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4477 if (len == skb->len) {
4478 /* Complete frame received */
4479 l2cap_recv_frame(conn, skb);
4483 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4485 if (skb->len > len) {
4486 BT_ERR("Frame is too long (len %d, expected len %d)",
4488 l2cap_conn_unreliable(conn, ECOMM);
4492 /* Allocate skb for the complete frame (with header) */
4493 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4497 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4499 conn->rx_len = len - skb->len;
4501 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4503 if (!conn->rx_len) {
4504 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4505 l2cap_conn_unreliable(conn, ECOMM);
4509 if (skb->len > conn->rx_len) {
4510 BT_ERR("Fragment is too long (len %d, expected %d)",
4511 skb->len, conn->rx_len);
4512 kfree_skb(conn->rx_skb);
4513 conn->rx_skb = NULL;
4515 l2cap_conn_unreliable(conn, ECOMM);
4519 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4521 conn->rx_len -= skb->len;
4523 if (!conn->rx_len) {
4524 /* Complete frame received */
4525 l2cap_recv_frame(conn, conn->rx_skb);
4526 conn->rx_skb = NULL;
/* seq_file show callback for /sys/kernel/debug/bluetooth/l2cap:
 * one line per L2CAP socket with addresses, state, PSM, CIDs, MTUs
 * and security level. */
4535 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4538 struct hlist_node *node;
4540 read_lock_bh(&l2cap_sk_list.lock);
4542 sk_for_each(sk, node, &l2cap_sk_list.head) {
4543 struct l2cap_pinfo *pi = l2cap_pi(sk);
4545 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4546 batostr(&bt_sk(sk)->src),
4547 batostr(&bt_sk(sk)->dst),
4548 sk->sk_state, __le16_to_cpu(pi->psm),
4550 pi->imtu, pi->omtu, pi->sec_level);
4553 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open: bind the seq_file single-record show callback. */
4558 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4560 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (single_open/seq_file
 * based; .read line elided in this excerpt). */
4563 static const struct file_operations l2cap_debugfs_fops = {
4564 .open = l2cap_debugfs_open,
4566 .llseek = seq_lseek,
4567 .release = single_release,
/* Dentry for the debugfs file, created in l2cap_init(). */
4570 static struct dentry *l2cap_debugfs;
/* proto_ops vtable for PF_BLUETOOTH/BTPROTO_L2CAP sockets; generic
 * bt_sock helpers cover poll/ioctl, unsupported ops use sock_no_*. */
4572 static const struct proto_ops l2cap_sock_ops = {
4573 .family = PF_BLUETOOTH,
4574 .owner = THIS_MODULE,
4575 .release = l2cap_sock_release,
4576 .bind = l2cap_sock_bind,
4577 .connect = l2cap_sock_connect,
4578 .listen = l2cap_sock_listen,
4579 .accept = l2cap_sock_accept,
4580 .getname = l2cap_sock_getname,
4581 .sendmsg = l2cap_sock_sendmsg,
4582 .recvmsg = l2cap_sock_recvmsg,
4583 .poll = bt_sock_poll,
4584 .ioctl = bt_sock_ioctl,
4585 .mmap = sock_no_mmap,
4586 .socketpair = sock_no_socketpair,
4587 .shutdown = l2cap_sock_shutdown,
4588 .setsockopt = l2cap_sock_setsockopt,
4589 .getsockopt = l2cap_sock_getsockopt
/* Socket-family registration: routes socket(PF_BLUETOOTH, …,
 * BTPROTO_L2CAP) to l2cap_sock_create. */
4592 static const struct net_proto_family l2cap_sock_family_ops = {
4593 .family = PF_BLUETOOTH,
4594 .owner = THIS_MODULE,
4595 .create = l2cap_sock_create,
/* HCI protocol hooks: how the HCI core delivers connection events and
 * ACL data to the L2CAP layer. */
4598 static struct hci_proto l2cap_hci_proto = {
4600 .id = HCI_PROTO_L2CAP,
4601 .connect_ind = l2cap_connect_ind,
4602 .connect_cfm = l2cap_connect_cfm,
4603 .disconn_ind = l2cap_disconn_ind,
4604 .disconn_cfm = l2cap_disconn_cfm,
4605 .security_cfm = l2cap_security_cfm,
4606 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, create the busy-work workqueue,
 * register the socket family and the HCI protocol, then create the
 * debugfs entry.  Later failures unwind earlier registrations
 * (error/goto lines elided in this excerpt). */
4609 static int __init l2cap_init(void)
4613 err = proto_register(&l2cap_proto, 0);
/* Single-thread workqueue used by l2cap_busy_work (local-busy drain). */
4617 _busy_wq = create_singlethread_workqueue("l2cap");
4621 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4623 BT_ERR("L2CAP socket registration failed");
4627 err = hci_register_proto(&l2cap_hci_proto);
4629 BT_ERR("L2CAP protocol registration failed");
4630 bt_sock_unregister(BTPROTO_L2CAP);
/* Debugfs is best-effort: failure only logs, init still succeeds. */
4635 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4636 bt_debugfs, NULL, &l2cap_debugfs_fops);
4638 BT_ERR("Failed to create L2CAP debug file");
4641 BT_INFO("L2CAP ver %s", VERSION);
4642 BT_INFO("L2CAP socket layer initialized");
4647 proto_unregister(&l2cap_proto);
/* Module exit: tear down in reverse of l2cap_init — debugfs, the busy
 * workqueue (flushed first so no work runs after destruction), socket
 * family, HCI protocol, and finally the proto itself. */
4651 static void __exit l2cap_exit(void)
4653 debugfs_remove(l2cap_debugfs);
4655 flush_workqueue(_busy_wq);
4656 destroy_workqueue(_busy_wq);
4658 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4659 BT_ERR("L2CAP socket unregistration failed");
4661 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4662 BT_ERR("L2CAP protocol unregistration failed");
4664 proto_unregister(&l2cap_proto);
/* Exported no-op whose only purpose is to give dependent modules a
 * symbol to reference, so modprobe pulls this module in. */
4667 void l2cap_load(void)
4669 /* Dummy function to trigger automatic L2CAP module loading by
4670 * other modules that use L2CAP sockets but don't use any other
4671 * symbols from it. */
4673 EXPORT_SYMBOL(l2cap_load);
/* Module entry/exit hookup, the enable_ertm parameter (bool, mode 0644
 * so it is toggleable via sysfs), and module metadata.  "bt-proto-0"
 * aliases BTPROTO_L2CAP for automatic loading. */
4675 module_init(l2cap_init);
4676 module_exit(l2cap_exit);
4678 module_param(enable_ertm, bool, 0644);
4679 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4681 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4682 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4683 MODULE_VERSION(VERSION);
4684 MODULE_LICENSE("GPL");
4685 MODULE_ALIAS("bt-proto-0");