2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
/* Module version string reported by this L2CAP implementation. */
56 #define VERSION "2.14"
/* ERTM (Enhanced Retransmission Mode) support is off by default;
 * module parameter — NOTE(review): parameter registration not visible here. */
58 static int enable_ertm = 0;
/* Feature mask advertised in L2CAP information responses. */
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel bitmap; 0x02 presumably marks the signalling channel — verify. */
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 static const struct proto_ops l2cap_sock_ops;
/* Workqueue used by the ERTM local-busy handling (see l2cap_busy_work). */
65 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, guarded by its rwlock. */
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for routines defined later in the file. */
71 static void l2cap_busy_work(struct work_struct *work);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
78 u8 code, u8 ident, u16 dlen, void *data);
80 /* ---- L2CAP timers ---- */
/* Socket timer callback: maps the socket's state to an error reason and
 * tears the channel down via __l2cap_sock_close().
 * NOTE(review): listing elides the lock/unlock and default-reason lines. */
81 static void l2cap_sock_timeout(unsigned long arg)
83 struct sock *sk = (struct sock *) arg;
86 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* Connected/configuring, or connecting past the SDP security phase,
 * times out as "connection refused". */
90 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
91 reason = ECONNREFUSED;
92 else if (sk->sk_state == BT_CONNECT &&
93 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
94 reason = ECONNREFUSED;
98 __l2cap_sock_close(sk, reason);
/* Arm the socket timer to fire `timeout` jiffies from now. */
106 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
108 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
109 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel any pending socket timer. */
112 static void l2cap_sock_clear_timer(struct sock *sk)
114 BT_DBG("sock %p state %d", sk, sk->sk_state);
115 sk_stop_timer(sk, &sk->sk_timer);
118 /* ---- L2CAP channels ---- */
/* Linear scan of the connection's channel list for a matching destination
 * CID. Caller must hold the channel-list lock (no locking here). */
119 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->dcid == cid)
/* As above, but match on the source CID. Caller holds the list lock. */
129 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
132 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
133 if (l2cap_pi(s)->scid == cid)
139 /* Find channel with given SCID.
140 * Returns locked socket */
/* Locked wrapper: takes the list read lock, finds the channel by SCID.
 * Per the comment above, returns with the socket locked (bh_lock_sock
 * elided from this listing). */
141 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
145 s = __l2cap_get_chan_by_scid(l, cid);
148 read_unlock(&l->lock);
/* Find the channel whose pending signalling request used `ident`.
 * Caller holds the list lock. */
152 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
155 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
156 if (l2cap_pi(s)->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(); presumably also
 * locks the returned socket, mirroring l2cap_get_chan_by_scid(). */
162 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
166 s = __l2cap_get_chan_by_ident(l, ident);
169 read_unlock(&l->lock);
/* Allocate a free source CID from the dynamic range by scanning for the
 * first value not already in use on this connection. */
173 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
175 u16 cid = L2CAP_CID_DYN_START;
177 for (; cid < L2CAP_CID_DYN_END; cid++) {
178 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push `sk` onto the head of the connection's doubly linked channel list.
 * Caller must hold the list write lock. */
185 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
190 l2cap_pi(l->head)->prev_c = sk;
192 l2cap_pi(sk)->next_c = l->head;
193 l2cap_pi(sk)->prev_c = NULL;
/* Unlink `sk` from the channel list, fixing up neighbours' prev/next
 * pointers under the list write lock (head update elided from listing). */
197 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
199 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
201 write_lock_bh(&l->lock);
206 l2cap_pi(next)->prev_c = prev;
208 l2cap_pi(prev)->next_c = next;
209 write_unlock_bh(&l->lock);
/* Attach socket `sk` to connection `conn`: assign CIDs according to the
 * socket type, link it into the channel list, and (if `parent` is given)
 * queue it on the parent's accept queue. Callers hold the list write lock. */
214 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
216 struct l2cap_chan_list *l = &conn->chan_list;
218 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
219 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 — presumably "remote user terminated connection"; verify against
 * the HCI disconnect-reason codes. */
221 conn->disc_reason = 0x13;
223 l2cap_pi(sk)->conn = conn;
225 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
226 /* Alloc CID for connection-oriented socket */
227 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
228 } else if (sk->sk_type == SOCK_DGRAM) {
229 /* Connectionless socket */
230 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
231 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
232 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
234 /* Raw socket can send/recv signalling messages only */
235 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
236 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
237 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
240 __l2cap_chan_link(l, sk);
243 bt_accept_enqueue(parent, sk);
247 * Must be called on the locked socket. */
/* Delete channel: detach from the connection, mark the socket closed/zapped,
 * notify the parent (for accepted-but-unclaimed children), purge queues and,
 * for ERTM channels, stop all retransmission machinery.
 * Must be called with the socket locked (see comment above). */
248 static void l2cap_chan_del(struct sock *sk, int err)
250 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
251 struct sock *parent = bt_sk(sk)->parent;
253 l2cap_sock_clear_timer(sk);
255 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
258 /* Unlink from channel list */
259 l2cap_chan_unlink(&conn->chan_list, sk);
260 l2cap_pi(sk)->conn = NULL;
/* Drop the reference taken on the underlying ACL link. */
261 hci_conn_put(conn->hcon);
264 sk->sk_state = BT_CLOSED;
265 sock_set_flag(sk, SOCK_ZAPPED);
/* If still on a parent's accept queue: unlink and wake the listener. */
271 bt_accept_unlink(sk);
272 parent->sk_data_ready(parent, 0);
274 sk->sk_state_change(sk);
276 skb_queue_purge(TX_QUEUE(sk));
278 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
279 struct srej_list *l, *tmp;
/* Stop ERTM timers before freeing its queues and SREJ list. */
281 del_timer(&l2cap_pi(sk)->retrans_timer);
282 del_timer(&l2cap_pi(sk)->monitor_timer);
283 del_timer(&l2cap_pi(sk)->ack_timer);
285 skb_queue_purge(SREJ_QUEUE(sk));
286 skb_queue_purge(BUSY_QUEUE(sk));
288 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
295 /* Service level security */
/* Map the channel's PSM and requested security level onto an HCI
 * authentication type, then ask the HCI layer to enforce it.
 * PSM 0x0001 (SDP) never triggers bonding. */
296 static inline int l2cap_check_security(struct sock *sk)
298 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
301 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
302 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
303 auth_type = HCI_AT_NO_BONDING_MITM;
305 auth_type = HCI_AT_NO_BONDING;
/* SDP channels get downgraded from LOW to the dedicated SDP level. */
307 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
308 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
310 switch (l2cap_pi(sk)->sec_level) {
311 case BT_SECURITY_HIGH:
312 auth_type = HCI_AT_GENERAL_BONDING_MITM;
314 case BT_SECURITY_MEDIUM:
315 auth_type = HCI_AT_GENERAL_BONDING;
318 auth_type = HCI_AT_NO_BONDING;
323 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Return the next signalling-command identifier for this connection,
 * cycling through the kernel-reserved 1..128 range under conn->lock. */
327 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
331 /* Get next available identificator.
332 * 1 - 128 are used by kernel.
333 * 129 - 199 are reserved.
334 * 200 - 254 are used by utilities like l2ping, etc.
337 spin_lock_bh(&conn->lock);
339 if (++conn->tx_ident > 128)
344 spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command skb and push it out over the ACL link.
 * NOTE(review): the NULL check on the built skb is elided from this listing. */
349 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
351 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
353 BT_DBG("code 0x%2.2x", code);
358 hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM S-frame (supervisory frame): L2CAP header,
 * 16-bit control field, optional CRC16 FCS. Consumes pending F-/P-bit
 * requests from conn_state. No-op unless the socket is connected. */
361 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
364 struct l2cap_hdr *lh;
365 struct l2cap_conn *conn = pi->conn;
366 struct sock *sk = (struct sock *)pi;
/* hlen = L2CAP header + 2-byte control; FCS adds 2 more below. */
367 int count, hlen = L2CAP_HDR_SIZE + 2;
369 if (sk->sk_state != BT_CONNECTED)
372 if (pi->fcs == L2CAP_FCS_CRC16)
375 BT_DBG("pi %p, control 0x%2.2x", pi, control);
377 count = min_t(unsigned int, conn->mtu, hlen);
378 control |= L2CAP_CTRL_FRAME_TYPE;
380 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
381 control |= L2CAP_CTRL_FINAL;
382 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
385 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
386 control |= L2CAP_CTRL_POLL;
387 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
390 skb = bt_skb_alloc(count, GFP_ATOMIC);
394 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
395 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
396 lh->cid = cpu_to_le16(pi->dcid);
397 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything before the trailing 2 FCS bytes. */
399 if (pi->fcs == L2CAP_FCS_CRC16) {
400 u16 fcs = crc16(0, (u8 *)lh, count - 2);
401 put_unaligned_le16(fcs, skb_put(skb, 2));
404 hci_send_acl(pi->conn->hcon, skb, 0);
/* Send a Receiver-Ready, or Receiver-Not-Ready if we are locally busy,
 * acknowledging up to buffer_seq. Records that an RNR was sent. */
407 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
409 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
410 control |= L2CAP_SUPER_RCV_NOT_READY;
411 pi->conn_state |= L2CAP_CONN_RNR_SENT;
413 control |= L2CAP_SUPER_RCV_READY;
415 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
417 l2cap_send_sframe(pi, control);
/* True when no L2CAP connect request is outstanding for this socket. */
420 static inline int __l2cap_no_conn_pending(struct sock *sk)
422 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment. If the remote feature mask is already
 * known (or being fetched), send a Connect Request once security passes;
 * otherwise first issue an Information Request for the feature mask and
 * arm the info timer. */
425 static void l2cap_do_start(struct sock *sk)
427 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
429 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature-mask exchange still in flight: wait for its completion. */
430 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
433 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
434 struct l2cap_conn_req req;
435 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
436 req.psm = l2cap_pi(sk)->psm;
438 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
439 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
441 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
442 L2CAP_CONN_REQ, sizeof(req), &req);
445 struct l2cap_info_req req;
446 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
448 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
449 conn->info_ident = l2cap_get_ident(conn);
451 mod_timer(&conn->info_timer, jiffies +
452 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
454 l2cap_send_cmd(conn, conn->info_ident,
455 L2CAP_INFO_REQ, sizeof(req), &req);
/* Tear down an established channel: flush the TX queue, stop ERTM timers,
 * send a Disconnect Request and move the socket to BT_DISCONN.
 * NOTE(review): use of `err` (sk->sk_err assignment) elided from listing. */
459 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
461 struct l2cap_disconn_req req;
466 skb_queue_purge(TX_QUEUE(sk));
468 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
469 del_timer(&l2cap_pi(sk)->retrans_timer);
470 del_timer(&l2cap_pi(sk)->monitor_timer);
471 del_timer(&l2cap_pi(sk)->ack_timer);
474 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
475 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
476 l2cap_send_cmd(conn, l2cap_get_ident(conn),
477 L2CAP_DISCONN_REQ, sizeof(req), &req);
479 sk->sk_state = BT_DISCONN;
483 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection once the link is usable:
 * BT_CONNECT channels send their Connect Request (after security),
 * BT_CONNECT2 channels answer the peer's pending Connect Request with
 * success, authorization-pending (defer_setup) or authentication-pending. */
484 static void l2cap_conn_start(struct l2cap_conn *conn)
486 struct l2cap_chan_list *l = &conn->chan_list;
489 BT_DBG("conn %p", conn);
493 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented sockets take part in signalling here. */
496 if (sk->sk_type != SOCK_SEQPACKET &&
497 sk->sk_type != SOCK_STREAM) {
502 if (sk->sk_state == BT_CONNECT) {
503 if (l2cap_check_security(sk) &&
504 __l2cap_no_conn_pending(sk)) {
505 struct l2cap_conn_req req;
506 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
507 req.psm = l2cap_pi(sk)->psm;
509 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
510 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
512 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
513 L2CAP_CONN_REQ, sizeof(req), &req);
515 } else if (sk->sk_state == BT_CONNECT2) {
516 struct l2cap_conn_rsp rsp;
/* Response carries our CIDs swapped into the peer's view. */
517 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
518 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
520 if (l2cap_check_security(sk)) {
521 if (bt_sk(sk)->defer_setup) {
522 struct sock *parent = bt_sk(sk)->parent;
523 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
524 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
/* Wake the listener so userspace can authorize. */
525 parent->sk_data_ready(parent, 0);
528 sk->sk_state = BT_CONFIG;
529 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
530 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
533 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
534 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
537 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
538 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
544 read_unlock(&l->lock);
/* ACL link is up: mark connectionless/raw sockets connected immediately;
 * connection-oriented sockets in BT_CONNECT proceed with channel setup
 * (call site elided from this listing). */
547 static void l2cap_conn_ready(struct l2cap_conn *conn)
549 struct l2cap_chan_list *l = &conn->chan_list;
552 BT_DBG("conn %p", conn);
556 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
559 if (sk->sk_type != SOCK_SEQPACKET &&
560 sk->sk_type != SOCK_STREAM) {
561 l2cap_sock_clear_timer(sk);
562 sk->sk_state = BT_CONNECTED;
563 sk->sk_state_change(sk);
564 } else if (sk->sk_state == BT_CONNECT)
570 read_unlock(&l->lock);
573 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate a link-reliability failure: sockets that demanded reliability
 * (force_reliable) get the error reported (report elided from listing). */
574 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
576 struct l2cap_chan_list *l = &conn->chan_list;
579 BT_DBG("conn %p", conn);
583 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
584 if (l2cap_pi(sk)->force_reliable)
588 read_unlock(&l->lock);
/* Information-request timer expired: give up on the feature-mask exchange,
 * mark it done and continue channel establishment anyway. */
591 static void l2cap_info_timeout(unsigned long arg)
593 struct l2cap_conn *conn = (void *) arg;
595 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
596 conn->info_ident = 0;
598 l2cap_conn_start(conn);
/* Get-or-create the L2CAP connection object attached to an HCI link.
 * Returns the existing conn if present; otherwise allocates (GFP_ATOMIC),
 * initializes locks/timer and links it to the hcon. */
601 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
603 struct l2cap_conn *conn = hcon->l2cap_data;
608 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
612 hcon->l2cap_data = conn;
615 BT_DBG("hcon %p conn %p", hcon, conn);
617 conn->mtu = hcon->hdev->acl_mtu;
618 conn->src = &hcon->hdev->bdaddr;
619 conn->dst = &hcon->dst;
623 spin_lock_init(&conn->lock);
624 rwlock_init(&conn->chan_list.lock);
626 setup_timer(&conn->info_timer, l2cap_info_timeout,
627 (unsigned long) conn);
/* Default disconnect reason — presumably "remote user terminated". */
629 conn->disc_reason = 0x13;
/* Destroy the L2CAP connection: free any partial RX skb, delete every
 * channel with `err`, stop the info timer and detach from the hcon.
 * (kfree of conn itself elided from this listing.) */
634 static void l2cap_conn_del(struct hci_conn *hcon, int err)
636 struct l2cap_conn *conn = hcon->l2cap_data;
642 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
644 kfree_skb(conn->rx_skb);
647 while ((sk = conn->chan_list.head)) {
649 l2cap_chan_del(sk, err);
654 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
655 del_timer_sync(&conn->info_timer);
657 hcon->l2cap_data = NULL;
/* Locked wrapper: add a channel to the connection under the list write lock. */
661 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
663 struct l2cap_chan_list *l = &conn->chan_list;
664 write_lock_bh(&l->lock);
665 __l2cap_chan_add(conn, sk, parent);
666 write_unlock_bh(&l->lock);
669 /* ---- Socket interface ---- */
/* Find a socket bound to exactly this (psm, source bdaddr) pair.
 * Caller holds l2cap_sk_list.lock. */
670 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
673 struct hlist_node *node;
674 sk_for_each(sk, node, &l2cap_sk_list.head)
675 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
682 /* Find socket with psm and source bdaddr.
683 * Returns closest match.
/* Find the listener for (state, psm, src): an exact source-address match
 * wins; a BDADDR_ANY wildcard binding (sk1) is kept as fallback. */
687 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
689 struct sock *sk = NULL, *sk1 = NULL;
688 struct hlist_node *node;
690 sk_for_each(sk, node, &l2cap_sk_list.head) {
691 if (state && sk->sk_state != state)
694 if (l2cap_pi(sk)->psm == psm) {
696 if (!bacmp(&bt_sk(sk)->src, src))
700 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node non-NULL => loop broke on an exact match; else wildcard. */
704 return node ? sk : sk1;
707 /* Find socket with given address (psm, src).
708 * Returns locked socket */
/* Locked wrapper around __l2cap_get_sock_by_psm(); per the comment above,
 * returns with the socket locked (bh_lock_sock elided from listing). */
709 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
712 read_lock(&l2cap_sk_list.lock);
713 s = __l2cap_get_sock_by_psm(state, psm, src);
716 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct hook: release any skbs still queued on the socket. */
720 static void l2cap_sock_destruct(struct sock *sk)
724 skb_queue_purge(&sk->sk_receive_queue);
725 skb_queue_purge(&sk->sk_write_queue);
/* Shut down a listening socket: close every not-yet-accepted child, then
 * mark the parent closed and zapped. */
728 static void l2cap_sock_cleanup_listen(struct sock *parent)
732 BT_DBG("parent %p", parent);
734 /* Close not yet accepted channels */
735 while ((sk = bt_accept_dequeue(parent, NULL)))
736 l2cap_sock_close(sk);
738 parent->sk_state = BT_CLOSED;
739 sock_set_flag(parent, SOCK_ZAPPED);
742 /* Kill socket (only if zapped and orphan)
743 * Must be called on unlocked socket.
/* Free the socket — only if it is zapped and has no owning struct socket.
 * Must be called on an unlocked socket (see comment above). */
745 static void l2cap_sock_kill(struct sock *sk)
747 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
750 BT_DBG("sk %p state %d", sk, sk->sk_state);
752 /* Kill poor orphan */
753 bt_sock_unlink(&l2cap_sk_list, sk);
754 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close: listeners clean their accept queue; connected/
 * configuring channels send a Disconnect Request (with timeout) or are
 * deleted directly; BT_CONNECT2 answers the pending Connect Request with
 * a rejection before deletion; remaining states just zap the socket. */
758 static void __l2cap_sock_close(struct sock *sk, int reason)
760 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
762 switch (sk->sk_state) {
764 l2cap_sock_cleanup_listen(sk);
769 if (sk->sk_type == SOCK_SEQPACKET ||
770 sk->sk_type == SOCK_STREAM) {
771 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* Give the disconnect a send-timeout's worth of time to complete. */
773 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
774 l2cap_send_disconn_req(conn, sk, reason);
776 l2cap_chan_del(sk, reason);
780 if (sk->sk_type == SOCK_SEQPACKET ||
781 sk->sk_type == SOCK_STREAM) {
782 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
783 struct l2cap_conn_rsp rsp;
/* defer_setup => closing means userspace denied authorization. */
786 if (bt_sk(sk)->defer_setup)
787 result = L2CAP_CR_SEC_BLOCK;
789 result = L2CAP_CR_BAD_PSM;
791 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
792 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
793 rsp.result = cpu_to_le16(result);
794 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
795 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
796 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
798 l2cap_chan_del(sk, reason);
803 l2cap_chan_del(sk, reason);
807 sock_set_flag(sk, SOCK_ZAPPED);
812 /* Must be called on unlocked socket. */
/* Public close: clear the timer and close with ECONNRESET.
 * Must be called on an unlocked socket (locking elided from listing). */
813 static void l2cap_sock_close(struct sock *sk)
815 l2cap_sock_clear_timer(sk);
817 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize per-socket L2CAP state. Children inherit MTUs, mode, FCS,
 * window and security settings from `parent`; fresh sockets get defaults
 * (ERTM for SOCK_STREAM when enable_ertm, else basic mode). */
822 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
824 struct l2cap_pinfo *pi = l2cap_pi(sk);
829 sk->sk_type = parent->sk_type;
830 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
832 pi->imtu = l2cap_pi(parent)->imtu;
833 pi->omtu = l2cap_pi(parent)->omtu;
834 pi->conf_state = l2cap_pi(parent)->conf_state;
835 pi->mode = l2cap_pi(parent)->mode;
836 pi->fcs = l2cap_pi(parent)->fcs;
837 pi->max_tx = l2cap_pi(parent)->max_tx;
838 pi->tx_win = l2cap_pi(parent)->tx_win;
839 pi->sec_level = l2cap_pi(parent)->sec_level;
840 pi->role_switch = l2cap_pi(parent)->role_switch;
841 pi->force_reliable = l2cap_pi(parent)->force_reliable;
843 pi->imtu = L2CAP_DEFAULT_MTU;
845 if (enable_ertm && sk->sk_type == SOCK_STREAM) {
846 pi->mode = L2CAP_MODE_ERTM;
/* STATE2_DEVICE: this side drives the mode during configuration. */
847 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
849 pi->mode = L2CAP_MODE_BASIC;
851 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
852 pi->fcs = L2CAP_FCS_CRC16;
853 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
854 pi->sec_level = BT_SECURITY_LOW;
856 pi->force_reliable = 0;
859 /* Default config options */
861 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
862 skb_queue_head_init(TX_QUEUE(sk));
863 skb_queue_head_init(SREJ_QUEUE(sk));
864 skb_queue_head_init(BUSY_QUEUE(sk));
865 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor: sizes sk allocations to hold struct l2cap_pinfo. */
868 static struct proto l2cap_proto = {
870 .owner = THIS_MODULE,
871 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and initialize a bare L2CAP socket: destructor, connect
 * timeout, state BT_OPEN, socket timer, and link into the global list. */
874 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
878 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
882 sock_init_data(sock, sk);
883 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
885 sk->sk_destruct = l2cap_sock_destruct;
886 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
888 sock_reset_flag(sk, SOCK_ZAPPED);
890 sk->sk_protocol = proto;
891 sk->sk_state = BT_OPEN;
893 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
895 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate the socket type, require CAP_NET_RAW for
 * user-created raw sockets, then allocate and initialize the sock. */
899 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
904 BT_DBG("sock %p", sock);
906 sock->state = SS_UNCONNECTED;
908 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
909 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
910 return -ESOCKTNOSUPPORT;
912 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
915 sock->ops = &l2cap_sock_ops;
917 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
921 l2cap_sock_init(sk, NULL);
/* bind(2): copy in the sockaddr_l2, require BT_OPEN state, restrict
 * privileged PSMs (< 0x1001) to CAP_NET_BIND_SERVICE, reject duplicate
 * (psm, bdaddr) bindings, then record the source address and PSM. */
925 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
927 struct sock *sk = sock->sk;
928 struct sockaddr_l2 la;
933 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Copy only as much as the caller supplied, zero-filling the rest. */
936 memset(&la, 0, sizeof(la));
937 len = min_t(unsigned int, sizeof(la), alen);
938 memcpy(&la, addr, len);
945 if (sk->sk_state != BT_OPEN) {
950 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
951 !capable(CAP_NET_BIND_SERVICE)) {
956 write_lock_bh(&l2cap_sk_list.lock);
958 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
961 /* Save source address */
962 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
963 l2cap_pi(sk)->psm = la.l2_psm;
964 l2cap_pi(sk)->sport = la.l2_psm;
965 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) PSMs get SDP-level security. */
967 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
968 __le16_to_cpu(la.l2_psm) == 0x0003)
969 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
972 write_unlock_bh(&l2cap_sk_list.lock);
/* Establish the outgoing path: resolve the HCI device routing src->dst,
 * derive an auth type from socket type / PSM / security level, create the
 * ACL link, attach an l2cap_conn and this channel, and — if the link is
 * already up — proceed immediately (raw/dgram go straight to connected). */
979 static int l2cap_do_connect(struct sock *sk)
981 bdaddr_t *src = &bt_sk(sk)->src;
982 bdaddr_t *dst = &bt_sk(sk)->dst;
983 struct l2cap_conn *conn;
984 struct hci_conn *hcon;
985 struct hci_dev *hdev;
989 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
992 hdev = hci_get_route(dst, src);
994 return -EHOSTUNREACH;
996 hci_dev_lock_bh(hdev);
/* Raw sockets map security levels onto dedicated-bonding auth types. */
1000 if (sk->sk_type == SOCK_RAW) {
1001 switch (l2cap_pi(sk)->sec_level) {
1002 case BT_SECURITY_HIGH:
1003 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1005 case BT_SECURITY_MEDIUM:
1006 auth_type = HCI_AT_DEDICATED_BONDING;
1009 auth_type = HCI_AT_NO_BONDING;
/* PSM 0x0001 (SDP): never bond; LOW is upgraded to the SDP level. */
1012 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
1013 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
1014 auth_type = HCI_AT_NO_BONDING_MITM;
1016 auth_type = HCI_AT_NO_BONDING;
1018 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
1019 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1021 switch (l2cap_pi(sk)->sec_level) {
1022 case BT_SECURITY_HIGH:
1023 auth_type = HCI_AT_GENERAL_BONDING_MITM;
1025 case BT_SECURITY_MEDIUM:
1026 auth_type = HCI_AT_GENERAL_BONDING;
1029 auth_type = HCI_AT_NO_BONDING;
1034 hcon = hci_connect(hdev, ACL_LINK, dst,
1035 l2cap_pi(sk)->sec_level, auth_type);
1039 conn = l2cap_conn_add(hcon, 0);
1047 /* Update source addr of the socket */
1048 bacpy(src, conn->src);
1050 l2cap_chan_add(conn, sk, NULL);
1052 sk->sk_state = BT_CONNECT;
1053 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1055 if (hcon->state == BT_CONNECTED) {
1056 if (sk->sk_type != SOCK_SEQPACKET &&
1057 sk->sk_type != SOCK_STREAM) {
1058 l2cap_sock_clear_timer(sk);
1059 sk->sk_state = BT_CONNECTED;
1065 hci_dev_unlock_bh(hdev);
/* connect(2): validate the address, require a PSM for connection-oriented
 * sockets, reject unsupported modes, check state, store the destination,
 * start the connect and optionally wait for BT_CONNECTED. */
1070 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1072 struct sock *sk = sock->sk;
1073 struct sockaddr_l2 la;
1076 BT_DBG("sk %p", sk);
1078 if (!addr || alen < sizeof(addr->sa_family) ||
1079 addr->sa_family != AF_BLUETOOTH)
1082 memset(&la, 0, sizeof(la));
1083 len = min_t(unsigned int, sizeof(la), alen);
1084 memcpy(&la, addr, len);
/* SEQPACKET/STREAM require a PSM (condition tail elided from listing). */
1091 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1097 switch (l2cap_pi(sk)->mode) {
1098 case L2CAP_MODE_BASIC:
/* ERTM/streaming presumably gated on enable_ertm — elided here. */
1100 case L2CAP_MODE_ERTM:
1101 case L2CAP_MODE_STREAMING:
1110 switch (sk->sk_state) {
1114 /* Already connecting */
1118 /* Already connected */
1131 /* Set destination address and psm */
1132 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1133 l2cap_pi(sk)->psm = la.l2_psm;
1135 err = l2cap_do_connect(sk);
/* Block (unless O_NONBLOCK) until connected or timeout. */
1140 err = bt_sock_wait_state(sk, BT_CONNECTED,
1141 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET/STREAM sockets in a supported mode; if
 * no PSM was bound, auto-allocate an odd dynamic PSM in 0x1001..0x10ff. */
1147 static int l2cap_sock_listen(struct socket *sock, int backlog)
1149 struct sock *sk = sock->sk;
1152 BT_DBG("sk %p backlog %d", sk, backlog);
1156 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1157 || sk->sk_state != BT_BOUND) {
1162 switch (l2cap_pi(sk)->mode) {
1163 case L2CAP_MODE_BASIC:
1165 case L2CAP_MODE_ERTM:
1166 case L2CAP_MODE_STREAMING:
1175 if (!l2cap_pi(sk)->psm) {
1176 bdaddr_t *src = &bt_sk(sk)->src;
1181 write_lock_bh(&l2cap_sk_list.lock);
/* Valid dynamic PSMs are odd (LSB set); step by 2. */
1183 for (psm = 0x1001; psm < 0x1100; psm += 2)
1184 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1185 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1186 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1191 write_unlock_bh(&l2cap_sk_list.lock);
1197 sk->sk_max_ack_backlog = backlog;
1198 sk->sk_ack_backlog = 0;
1199 sk->sk_state = BT_LISTEN;
/* accept(2): sleep (wake-one, interruptible) on the listener's wait queue
 * until a child appears in the accept queue, honoring the receive timeout
 * and pending signals; re-check BT_LISTEN after each wakeup. */
1206 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1208 DECLARE_WAITQUEUE(wait, current);
1209 struct sock *sk = sock->sk, *nsk;
1213 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1215 if (sk->sk_state != BT_LISTEN) {
1220 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1222 BT_DBG("sk %p timeo %ld", sk, timeo);
1224 /* Wait for an incoming connection. (wake-one). */
1225 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1226 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1227 set_current_state(TASK_INTERRUPTIBLE);
/* Drop the socket lock while sleeping (release elided from listing). */
1234 timeo = schedule_timeout(timeo);
1235 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1237 if (sk->sk_state != BT_LISTEN) {
1242 if (signal_pending(current)) {
1243 err = sock_intr_errno(timeo);
1247 set_current_state(TASK_RUNNING);
1248 remove_wait_queue(sk_sleep(sk), &wait);
1253 newsock->state = SS_CONNECTED;
1255 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with either the peer's
 * (psm, dst, dcid) or the local (sport, src, scid) identity. */
1262 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1264 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1265 struct sock *sk = sock->sk;
1267 BT_DBG("sock %p, sk %p", sock, sk);
1269 addr->sa_family = AF_BLUETOOTH;
1270 *len = sizeof(struct sockaddr_l2);
1273 la->l2_psm = l2cap_pi(sk)->psm;
1274 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1275 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1277 la->l2_psm = l2cap_pi(sk)->sport;
1278 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1279 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* Block until every ERTM I-frame has been acknowledged (unacked_frames
 * reaches 0) or the channel detaches, honoring signals and sock errors.
 * NOTE(review): timeo initialization elided from this listing. */
1285 static int __l2cap_wait_ack(struct sock *sk)
1287 DECLARE_WAITQUEUE(wait, current);
1291 add_wait_queue(sk_sleep(sk), &wait);
1292 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1293 set_current_state(TASK_INTERRUPTIBLE);
1298 if (signal_pending(current)) {
1299 err = sock_intr_errno(timeo);
1304 timeo = schedule_timeout(timeo);
1307 err = sock_error(sk);
1311 set_current_state(TASK_RUNNING);
1312 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: if the peer's max-transmit budget is exhausted,
 * disconnect; otherwise re-arm and poll again with an RR/RNR (P-bit). */
1316 static void l2cap_monitor_timeout(unsigned long arg)
1318 struct sock *sk = (void *) arg;
1320 BT_DBG("sk %p", sk);
1323 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1324 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
1329 l2cap_pi(sk)->retry_count++;
1330 __mod_monitor_timer();
1332 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* ERTM retransmission timer: switch to the monitor phase — start counting
 * retries, set WAIT_F (expecting an F-bit reply), and poll the peer. */
1336 static void l2cap_retrans_timeout(unsigned long arg)
1338 struct sock *sk = (void *) arg;
1340 BT_DBG("sk %p", sk);
1343 l2cap_pi(sk)->retry_count = 1;
1344 __mod_monitor_timer();
1346 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1348 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Drop TX-queue frames the peer has acknowledged, stopping at
 * expected_ack_seq; cancel the retransmit timer once none remain. */
1352 static void l2cap_drop_acked_frames(struct sock *sk)
1354 struct sk_buff *skb;
1356 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1357 l2cap_pi(sk)->unacked_frames) {
1358 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1361 skb = skb_dequeue(TX_QUEUE(sk));
1364 l2cap_pi(sk)->unacked_frames--;
1367 if (!l2cap_pi(sk)->unacked_frames)
1368 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand a fully built skb to the HCI layer for transmission. */
1371 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1373 struct l2cap_pinfo *pi = l2cap_pi(sk);
1375 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1377 hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode TX: for each queued frame, clone it, stamp the TxSeq
 * into the control field, append CRC16 FCS if enabled, send, then drop
 * the original (streaming never retransmits). TxSeq wraps modulo 64. */
1380 static int l2cap_streaming_send(struct sock *sk)
1382 struct sk_buff *skb, *tx_skb;
1383 struct l2cap_pinfo *pi = l2cap_pi(sk);
1386 while ((skb = sk->sk_send_head)) {
1387 tx_skb = skb_clone(skb, GFP_ATOMIC);
1389 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1390 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1391 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1393 if (pi->fcs == L2CAP_FCS_CRC16) {
1394 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1395 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1398 l2cap_do_send(sk, tx_skb);
1400 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1402 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1403 sk->sk_send_head = NULL;
1405 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Frame sent — remove and free it (kfree_skb elided from listing). */
1407 skb = skb_dequeue(TX_QUEUE(sk));
/* ERTM selective retransmit: locate the queued frame with `tx_seq`,
 * enforce the peer's max-transmit limit (disconnect on exhaustion),
 * rebuild its control field (ReqSeq, optional F-bit), refresh the FCS
 * and resend a clone. */
1413 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1415 struct l2cap_pinfo *pi = l2cap_pi(sk);
1416 struct sk_buff *skb, *tx_skb;
1419 skb = skb_peek(TX_QUEUE(sk));
1424 if (bt_cb(skb)->tx_seq == tx_seq)
1427 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1430 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1432 if (pi->remote_max_tx &&
1433 bt_cb(skb)->retries == pi->remote_max_tx) {
1434 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1438 tx_skb = skb_clone(skb, GFP_ATOMIC);
1439 bt_cb(skb)->retries++;
1440 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1442 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1443 control |= L2CAP_CTRL_FINAL;
1444 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1447 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1448 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1450 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1452 if (pi->fcs == L2CAP_FCS_CRC16) {
1453 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1454 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1457 l2cap_do_send(sk, tx_skb);
/* ERTM TX: send queued I-frames while the TX window has room. Each clone
 * gets TxSeq/ReqSeq (and possibly F-bit) in its control field and an
 * updated FCS; originals stay queued for retransmission with a bumped
 * retry count, and the retransmit timer is (re)armed.
 * NOTE(review): unlike l2cap_retransmit_one_frame(), the FCS here is
 * computed over/written into `skb` (the original) rather than `tx_skb`
 * (the clone) — looks inconsistent; verify against the upstream source. */
1460 static int l2cap_ertm_send(struct sock *sk)
1462 struct sk_buff *skb, *tx_skb;
1463 struct l2cap_pinfo *pi = l2cap_pi(sk);
1467 if (sk->sk_state != BT_CONNECTED)
1470 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1472 if (pi->remote_max_tx &&
1473 bt_cb(skb)->retries == pi->remote_max_tx) {
1474 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1478 tx_skb = skb_clone(skb, GFP_ATOMIC);
1480 bt_cb(skb)->retries++;
/* Preserve only the SAR bits from the pre-built control field. */
1482 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1483 control &= L2CAP_CTRL_SAR;
1485 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1486 control |= L2CAP_CTRL_FINAL;
1487 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1489 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1490 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1491 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1494 if (pi->fcs == L2CAP_FCS_CRC16) {
1495 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1496 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1499 l2cap_do_send(sk, tx_skb);
1501 __mod_retrans_timer();
1503 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1504 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1506 pi->unacked_frames++;
1509 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1510 sk->sk_send_head = NULL;
1512 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Retransmit everything from the last acknowledged frame: rewind the
 * send head to the queue front, reset next_tx_seq to expected_ack_seq
 * and rerun the ERTM sender, all under the send lock. */
1520 static int l2cap_retransmit_frames(struct sock *sk)
1522 struct l2cap_pinfo *pi = l2cap_pi(sk);
1525 spin_lock_bh(&pi->send_lock);
1527 if (!skb_queue_empty(TX_QUEUE(sk)))
1528 sk->sk_send_head = TX_QUEUE(sk)->next;
1530 pi->next_tx_seq = pi->expected_ack_seq;
1531 ret = l2cap_ertm_send(sk);
1533 spin_unlock_bh(&pi->send_lock);
/* Acknowledge received I-frames: if locally busy, send RNR; otherwise try
 * to piggyback the ack on outgoing I-frames (l2cap_ertm_send) and fall
 * back to an explicit RR when nothing was sent. */
1538 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1540 struct sock *sk = (struct sock *)pi;
1544 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1546 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1547 control |= L2CAP_SUPER_RCV_NOT_READY;
1548 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1549 l2cap_send_sframe(pi, control);
1553 spin_lock_bh(&pi->send_lock);
1554 nframes = l2cap_ertm_send(sk);
1555 spin_unlock_bh(&pi->send_lock);
1560 control |= L2CAP_SUPER_RCV_READY;
1561 l2cap_send_sframe(pi, control);
/* Send a final (F-bit) SREJ for the last entry on the SREJ list. */
1564 static void l2cap_send_srejtail(struct sock *sk)
1566 struct srej_list *tail;
1569 control = L2CAP_SUPER_SELECT_REJECT;
1570 control |= L2CAP_CTRL_FINAL;
1572 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1573 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1575 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy user data from msg into skb: the first 'count' bytes go into
 * the linear part, the remainder into frag_list continuation skbs of
 * at most conn->mtu bytes each.
 * NOTE(review): loop structure and error returns are elided in this
 * extract; only the visible statements are annotated. */
1578 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1580 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1581 struct sk_buff **frag;
1584 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1590 /* Continuation fragments (no L2CAP header) */
1591 frag = &skb_shinfo(skb)->frag_list;
/* Each fragment holds at most one ACL MTU worth of payload. */
1593 count = min_t(unsigned int, conn->mtu, len);
1595 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1598 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1604 frag = &(*frag)->next;
/* Build a connectionless (SOCK_DGRAM) PDU: L2CAP header plus a
 * 2-byte PSM field, followed by the user payload.
 * Returns the skb or an ERR_PTR on failure.
 * NOTE(review): the error path after l2cap_skbuff_fromiovec() is
 * partly elided here — confirm the skb is freed before returning
 * ERR_PTR(err). */
1610 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1612 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1613 struct sk_buff *skb;
/* hlen = basic header + 2 bytes for the PSM. */
1614 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1615 struct l2cap_hdr *lh;
1617 BT_DBG("sk %p len %d", sk, (int)len);
/* Linear part is capped by the ACL MTU; the rest goes to frag_list. */
1619 count = min_t(unsigned int, (conn->mtu - hlen), len);
1620 skb = bt_skb_send_alloc(sk, count + hlen,
1621 msg->msg_flags & MSG_DONTWAIT, &err);
1623 return ERR_PTR(-ENOMEM);
1625 /* Create L2CAP header */
1626 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1627 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
/* Length covers payload + PSM but not the basic header itself. */
1628 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1629 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1631 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1632 if (unlikely(err < 0)) {
1634 return ERR_PTR(err);
/* Build a basic-mode PDU: plain L2CAP header followed by the user
 * payload. Returns the skb or an ERR_PTR on failure.
 * NOTE(review): error path after l2cap_skbuff_fromiovec() is partly
 * elided — confirm the skb is freed before returning ERR_PTR(err). */
1639 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1641 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1642 struct sk_buff *skb;
1643 int err, count, hlen = L2CAP_HDR_SIZE;
1644 struct l2cap_hdr *lh;
1646 BT_DBG("sk %p len %d", sk, (int)len);
/* Linear part capped by the ACL MTU; remainder goes to frag_list. */
1648 count = min_t(unsigned int, (conn->mtu - hlen), len);
1649 skb = bt_skb_send_alloc(sk, count + hlen,
1650 msg->msg_flags & MSG_DONTWAIT, &err);
1652 return ERR_PTR(-ENOMEM);
1654 /* Create L2CAP header */
1655 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1656 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1657 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1659 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1660 if (unlikely(err < 0)) {
1662 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 16-bit control
 * field, optional 16-bit SDU length (SAR start frames), payload and
 * an FCS placeholder when CRC16 is enabled (filled in at send time).
 * Returns the skb or an ERR_PTR on failure. */
1667 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1669 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1670 struct sk_buff *skb;
/* hlen = basic header + 2-byte control field; sdulen and FCS are
 * added below when present (elided lines adjust hlen). */
1671 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1672 struct l2cap_hdr *lh;
1674 BT_DBG("sk %p len %d", sk, (int)len);
1677 return ERR_PTR(-ENOTCONN);
1682 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1685 count = min_t(unsigned int, (conn->mtu - hlen), len);
1686 skb = bt_skb_send_alloc(sk, count + hlen,
1687 msg->msg_flags & MSG_DONTWAIT, &err);
1689 return ERR_PTR(-ENOMEM);
1691 /* Create L2CAP header */
1692 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1693 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1694 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1695 put_unaligned_le16(control, skb_put(skb, 2));
/* SDU length is only present in SAR "start" frames (elided guard). */
1697 put_unaligned_le16(sdulen, skb_put(skb, 2));
1699 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1700 if (unlikely(err < 0)) {
1702 return ERR_PTR(err);
/* Reserve space for the FCS; the real CRC is computed on transmit. */
1705 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1706 put_unaligned_le16(0, skb_put(skb, 2));
1708 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START frame
 * (carrying the total SDU length), CONTINUE frames, and an END frame,
 * then splice the whole batch onto the socket TX queue atomically. */
1712 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1714 struct l2cap_pinfo *pi = l2cap_pi(sk);
1715 struct sk_buff *skb;
1716 struct sk_buff_head sar_queue;
1720 skb_queue_head_init(&sar_queue);
1721 control = L2CAP_SDU_START;
/* First PDU carries remote_mps bytes of payload and the full SDU
 * length in its sdulen field. */
1722 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1724 return PTR_ERR(skb);
1726 __skb_queue_tail(&sar_queue, skb);
1727 len -= pi->remote_mps;
1728 size += pi->remote_mps;
/* Middle frames are full-MPS CONTINUE; the last one is END (elided
 * loop walks the remaining length). */
1733 if (len > pi->remote_mps) {
1734 control = L2CAP_SDU_CONTINUE;
1735 buflen = pi->remote_mps;
1737 control = L2CAP_SDU_END;
1741 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* On failure drop every segment built so far — all or nothing. */
1743 skb_queue_purge(&sar_queue);
1744 return PTR_ERR(skb);
1747 __skb_queue_tail(&sar_queue, skb);
1751 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1752 spin_lock_bh(&pi->send_lock);
1753 if (sk->sk_send_head == NULL)
1754 sk->sk_send_head = sar_queue.next;
1755 spin_unlock_bh(&pi->send_lock);
/* Socket sendmsg: build and transmit L2CAP PDUs according to the
 * channel mode (connectionless datagram, basic, ERTM or streaming).
 * NOTE(review): lines are elided in this extract; only the visible
 * statements are annotated. */
1760 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1762 struct sock *sk = sock->sk;
1763 struct l2cap_pinfo *pi = l2cap_pi(sk);
1764 struct sk_buff *skb;
1768 BT_DBG("sock %p, sk %p", sock, sk);
1770 err = sock_error(sk);
/* L2CAP has no out-of-band data. */
1774 if (msg->msg_flags & MSG_OOB)
1779 if (sk->sk_state != BT_CONNECTED) {
1784 /* Connectionless channel */
1785 if (sk->sk_type == SOCK_DGRAM) {
1786 skb = l2cap_create_connless_pdu(sk, msg, len)
1790 l2cap_do_send(sk, skb);
1797 case L2CAP_MODE_BASIC:
1798 /* Check outgoing MTU */
1799 if (len > pi->omtu) {
1804 /* Create a basic PDU */
1805 skb = l2cap_create_basic_pdu(sk, msg, len);
1811 l2cap_do_send(sk, skb);
1815 case L2CAP_MODE_ERTM:
1816 case L2CAP_MODE_STREAMING:
1817 /* Entire SDU fits into one PDU */
1818 if (len <= pi->remote_mps) {
1819 control = L2CAP_SDU_UNSEGMENTED;
1820 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1825 __skb_queue_tail(TX_QUEUE(sk), skb);
/* Only ERTM serializes the TX queue; streaming needs no lock. */
1827 if (pi->mode == L2CAP_MODE_ERTM)
1828 spin_lock_bh(&pi->send_lock);
1830 if (sk->sk_send_head == NULL)
1831 sk->sk_send_head = skb;
1833 if (pi->mode == L2CAP_MODE_ERTM)
1834 spin_unlock_bh(&pi->send_lock);
1836 /* Segment SDU into multiples PDUs */
1837 err = l2cap_sar_segment_sdu(sk, msg, len);
1842 if (pi->mode == L2CAP_MODE_STREAMING) {
1843 err = l2cap_streaming_send(sk);
/* FIX: test the WAIT_F flag with bitwise '&'. The original used the
 * logical '&&' operator, so the condition was true whenever
 * conn_state was non-zero, not when the WAIT_F bit was set. */
1845 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1846 pi->conn_state & L2CAP_CONN_WAIT_F) {
1850 spin_lock_bh(&pi->send_lock);
1851 err = l2cap_ertm_send(sk);
1852 spin_unlock_bh(&pi->send_lock);
1860 BT_DBG("bad state %1.1x", pi->mode);
/* Socket recvmsg: if a deferred incoming connection is pending
 * (BT_CONNECT2 with defer_setup), accept it now by sending the
 * delayed connect response, then fall through to the generic
 * Bluetooth receive path. */
1869 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1871 struct sock *sk = sock->sk;
1875 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1876 struct l2cap_conn_rsp rsp;
1878 sk->sk_state = BT_CONFIG;
/* In the response, scid/dcid are from the peer's point of view:
 * our dcid is the peer's source CID and vice versa. */
1880 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1881 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1882 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1883 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1884 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1885 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1893 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt: L2CAP_OPTIONS (mtu/mode/fcs/...) and
 * L2CAP_LM link-mode flags. Current values seed 'opts' so a short
 * copy_from_user leaves unspecified fields unchanged.
 * NOTE(review): switch/case framing is elided in this extract. */
1896 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1898 struct sock *sk = sock->sk;
1899 struct l2cap_options opts;
1903 BT_DBG("sk %p", sk);
/* Pre-load with current settings; partial user structs keep the
 * remaining fields as-is. */
1909 opts.imtu = l2cap_pi(sk)->imtu;
1910 opts.omtu = l2cap_pi(sk)->omtu;
1911 opts.flush_to = l2cap_pi(sk)->flush_to;
1912 opts.mode = l2cap_pi(sk)->mode;
1913 opts.fcs = l2cap_pi(sk)->fcs;
1914 opts.max_tx = l2cap_pi(sk)->max_tx;
1915 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1917 len = min_t(unsigned int, sizeof(opts), optlen);
1918 if (copy_from_user((char *) &opts, optval, len)) {
/* tx_win is stored in a __u8, so reject windows above the spec max. */
1923 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
1928 l2cap_pi(sk)->mode = opts.mode;
1929 switch (l2cap_pi(sk)->mode) {
1930 case L2CAP_MODE_BASIC:
1931 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
1933 case L2CAP_MODE_ERTM:
1934 case L2CAP_MODE_STREAMING:
1943 l2cap_pi(sk)->imtu = opts.imtu;
1944 l2cap_pi(sk)->omtu = opts.omtu;
1945 l2cap_pi(sk)->fcs = opts.fcs;
1946 l2cap_pi(sk)->max_tx = opts.max_tx;
1947 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
/* L2CAP_LM: map link-mode bits onto the security level (highest
 * requested bit wins because of the assignment order). */
1951 if (get_user(opt, (u32 __user *) optval)) {
1956 if (opt & L2CAP_LM_AUTH)
1957 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1958 if (opt & L2CAP_LM_ENCRYPT)
1959 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1960 if (opt & L2CAP_LM_SECURE)
1961 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1963 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1964 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* SOL_BLUETOOTH setsockopt: BT_SECURITY and BT_DEFER_SETUP.
 * SOL_L2CAP is delegated to the legacy handler. */
1976 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1978 struct sock *sk = sock->sk;
1979 struct bt_security sec;
1983 BT_DBG("sk %p", sk);
1985 if (level == SOL_L2CAP)
1986 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1988 if (level != SOL_BLUETOOTH)
1989 return -ENOPROTOOPT;
/* BT_SECURITY applies only to seqpacket/stream/raw L2CAP sockets. */
1995 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
1996 && sk->sk_type != SOCK_RAW) {
2001 sec.level = BT_SECURITY_LOW;
2003 len = min_t(unsigned int, sizeof(sec), optlen);
2004 if (copy_from_user((char *) &sec, optval, len)) {
2009 if (sec.level < BT_SECURITY_LOW ||
2010 sec.level > BT_SECURITY_HIGH) {
2015 l2cap_pi(sk)->sec_level = sec.level;
2018 case BT_DEFER_SETUP:
/* Deferred setup only makes sense before/while listening. */
2019 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2024 if (get_user(opt, (u32 __user *) optval)) {
2029 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt: L2CAP_OPTIONS, L2CAP_LM and
 * L2CAP_CONNINFO. Output is truncated to the user-supplied length.
 * NOTE(review): switch framing and break statements are elided. */
2041 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2043 struct sock *sk = sock->sk;
2044 struct l2cap_options opts;
2045 struct l2cap_conninfo cinfo;
2049 BT_DBG("sk %p", sk);
2051 if (get_user(len, optlen))
2058 opts.imtu = l2cap_pi(sk)->imtu;
2059 opts.omtu = l2cap_pi(sk)->omtu;
2060 opts.flush_to = l2cap_pi(sk)->flush_to;
2061 opts.mode = l2cap_pi(sk)->mode;
2062 opts.fcs = l2cap_pi(sk)->fcs;
2063 opts.max_tx = l2cap_pi(sk)->max_tx;
2064 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2066 len = min_t(unsigned int, len, sizeof(opts));
2067 if (copy_to_user(optval, (char *) &opts, len))
/* L2CAP_LM: translate the security level back into link-mode bits. */
2073 switch (l2cap_pi(sk)->sec_level) {
2074 case BT_SECURITY_LOW:
2075 opt = L2CAP_LM_AUTH;
2077 case BT_SECURITY_MEDIUM:
2078 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2080 case BT_SECURITY_HIGH:
2081 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2089 if (l2cap_pi(sk)->role_switch)
2090 opt |= L2CAP_LM_MASTER;
2092 if (l2cap_pi(sk)->force_reliable)
2093 opt |= L2CAP_LM_RELIABLE;
2095 if (put_user(opt, (u32 __user *) optval))
2099 case L2CAP_CONNINFO:
/* Connection info is valid once connected, or for a deferred
 * incoming connection awaiting accept. */
2100 if (sk->sk_state != BT_CONNECTED &&
2101 !(sk->sk_state == BT_CONNECT2 &&
2102 bt_sk(sk)->defer_setup)) {
2107 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2108 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2110 len = min_t(unsigned int, len, sizeof(cinfo));
2111 if (copy_to_user(optval, (char *) &cinfo, len))
/* SOL_BLUETOOTH getsockopt: BT_SECURITY and BT_DEFER_SETUP.
 * SOL_L2CAP is delegated to the legacy handler. */
2125 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2127 struct sock *sk = sock->sk;
2128 struct bt_security sec;
2131 BT_DBG("sk %p", sk);
2133 if (level == SOL_L2CAP)
2134 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2136 if (level != SOL_BLUETOOTH)
2137 return -ENOPROTOOPT;
2139 if (get_user(len, optlen))
2146 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2147 && sk->sk_type != SOCK_RAW) {
2152 sec.level = l2cap_pi(sk)->sec_level;
2154 len = min_t(unsigned int, len, sizeof(sec));
2155 if (copy_to_user(optval, (char *) &sec, len))
2160 case BT_DEFER_SETUP:
2161 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2166 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* Shut down the socket: in ERTM mode first wait for outstanding
 * frames to be acked, then close the channel; honor SO_LINGER by
 * waiting for BT_CLOSED. */
2180 static int l2cap_sock_shutdown(struct socket *sock, int how)
2182 struct sock *sk = sock->sk;
2185 BT_DBG("sock %p, sk %p", sock, sk);
2191 if (!sk->sk_shutdown) {
/* Don't tear down an ERTM channel with unacked data in flight. */
2192 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2193 err = __l2cap_wait_ack(sk);
2195 sk->sk_shutdown = SHUTDOWN_MASK;
2196 l2cap_sock_clear_timer(sk);
2197 __l2cap_sock_close(sk, 0);
2199 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2200 err = bt_sock_wait_state(sk, BT_CLOSED,
2204 if (!err && sk->sk_err)
/* Release the socket: full shutdown, then kill (free) the sock. */
2211 static int l2cap_sock_release(struct socket *sock)
2213 struct sock *sk = sock->sk;
2216 BT_DBG("sock %p, sk %p", sock, sk);
2221 err = l2cap_sock_shutdown(sock, 2);
2224 l2cap_sock_kill(sk);
/* Channel configuration finished: mark the channel connected and
 * wake whoever is waiting — the connecting task (no parent) or the
 * accepting listener (parent set). */
2228 static void l2cap_chan_ready(struct sock *sk)
2230 struct sock *parent = bt_sk(sk)->parent;
2232 BT_DBG("sk %p, parent %p", sk, parent);
2234 l2cap_pi(sk)->conf_state = 0;
2235 l2cap_sock_clear_timer(sk);
2238 /* Outgoing channel.
2239 * Wake up socket sleeping on connect.
2241 sk->sk_state = BT_CONNECTED;
2242 sk->sk_state_change(sk);
2244 /* Incoming channel.
2245 * Wake up socket sleeping on accept.
2247 parent->sk_data_ready(parent, 0);
2251 /* Copy frame to all raw sockets on that connection */
2252 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2254 struct l2cap_chan_list *l = &conn->chan_list;
2255 struct sk_buff *nskb;
2258 BT_DBG("conn %p", conn);
/* Walk the per-connection channel list under its read lock. */
2260 read_lock(&l->lock);
2261 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2262 if (sk->sk_type != SOCK_RAW)
2265 /* Don't send frame to the socket it came from */
/* Clone per receiver; sock_queue_rcv_skb takes ownership on success
 * (the failure branch presumably frees nskb — elided here). */
2268 nskb = skb_clone(skb, GFP_ATOMIC);
2272 if (sock_queue_rcv_skb(sk, nskb))
2275 read_unlock(&l->lock);
2278 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header on the
 * signalling CID, command header, then 'dlen' bytes of payload,
 * fragmented into frag_list skbs when it exceeds the ACL MTU. */
2279 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2280 u8 code, u8 ident, u16 dlen, void *data)
2282 struct sk_buff *skb, **frag;
2283 struct l2cap_cmd_hdr *cmd;
2284 struct l2cap_hdr *lh;
2287 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2288 conn, code, ident, dlen);
2290 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2291 count = min_t(unsigned int, conn->mtu, len);
2293 skb = bt_skb_alloc(count, GFP_ATOMIC);
2297 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2298 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2299 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2301 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2304 cmd->len = cpu_to_le16(dlen);
/* First fragment carries whatever payload fits after the headers. */
2307 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2308 memcpy(skb_put(skb, count), data, count);
2314 /* Continuation fragments (no L2CAP header) */
2315 frag = &skb_shinfo(skb)->frag_list;
2317 count = min_t(unsigned int, conn->mtu, len);
2319 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2323 memcpy(skb_put(*frag, count), data, count);
2328 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: return its total length
 * (header + value) and deliver the value via *val — as an integer
 * for 1/2/4-byte options, otherwise as a pointer to the raw bytes.
 * NOTE(review): opt->val may be unaligned in a received packet; the
 * direct __le16/__le32 dereferences here assume the platform (or an
 * elided guard) tolerates that — confirm against the full source. */
2338 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2340 struct l2cap_conf_opt *opt = *ptr;
2343 len = L2CAP_CONF_OPT_SIZE + opt->len;
2351 *val = *((u8 *) opt->val);
2355 *val = __le16_to_cpu(*((__le16 *) opt->val));
2359 *val = __le32_to_cpu(*((__le32 *) opt->val));
2363 *val = (unsigned long) opt->val;
2367 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr, encoding 1/2/4-byte
 * values in little-endian wire order and copying larger values
 * verbatim; advances *ptr past the option. */
2371 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2373 struct l2cap_conf_opt *opt = *ptr;
2375 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2382 *((u8 *) opt->val) = val;
2386 *((__le16 *) opt->val) = cpu_to_le16(val);
2390 *((__le32 *) opt->val) = cpu_to_le32(val);
/* For other lengths 'val' is a pointer to the raw option body. */
2394 memcpy(opt->val, (void *) val, len);
2398 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* ERTM ack timer expiry: send any pending acknowledgement. */
2401 static void l2cap_ack_timeout(unsigned long arg)
2403 struct sock *sk = (void *) arg;
2406 l2cap_send_ack(l2cap_pi(sk));
/* Initialize per-channel ERTM state: sequence counters, the
 * retransmission/monitor/ack timers, the SREJ and busy queues, the
 * send lock and the local-busy work item. */
2410 static inline void l2cap_ertm_init(struct sock *sk)
2412 l2cap_pi(sk)->expected_ack_seq = 0;
2413 l2cap_pi(sk)->unacked_frames = 0;
2414 l2cap_pi(sk)->buffer_seq = 0;
2415 l2cap_pi(sk)->num_acked = 0;
2416 l2cap_pi(sk)->frames_sent = 0;
2418 setup_timer(&l2cap_pi(sk)->retrans_timer,
2419 l2cap_retrans_timeout, (unsigned long) sk);
2420 setup_timer(&l2cap_pi(sk)->monitor_timer,
2421 l2cap_monitor_timeout, (unsigned long) sk);
2422 setup_timer(&l2cap_pi(sk)->ack_timer,
2423 l2cap_ack_timeout, (unsigned long) sk);
2425 __skb_queue_head_init(SREJ_QUEUE(sk));
2426 __skb_queue_head_init(BUSY_QUEUE(sk));
2427 spin_lock_init(&l2cap_pi(sk)->send_lock);
2429 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
/* Return non-zero if 'mode' is supported by both the remote feature
 * mask and our local one (ERTM/streaming bits are enabled locally
 * by an elided enable_ertm guard). */
2432 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2434 u32 local_feat_mask = l2cap_feat_mask;
2436 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2439 case L2CAP_MODE_ERTM:
2440 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2441 case L2CAP_MODE_STREAMING:
2442 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Pick the channel mode to use: keep the requested ERTM/streaming
 * mode when the peer supports it, otherwise fall back to basic. */
2448 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2451 case L2CAP_MODE_STREAMING:
2452 case L2CAP_MODE_ERTM:
2453 if (l2cap_mode_supported(mode, remote_feat_mask))
2457 return L2CAP_MODE_BASIC;
/* Build our outgoing configuration request into 'data': MTU option
 * (basic mode), RFC option describing the chosen mode, and an FCS
 * option when we want to drop the CRC. Returns the request length
 * (elided tail: ptr - data). */
2461 static int l2cap_build_conf_req(struct sock *sk, void *data)
2463 struct l2cap_pinfo *pi = l2cap_pi(sk);
2464 struct l2cap_conf_req *req = data;
2465 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2466 void *ptr = req->data;
2468 BT_DBG("sk %p", sk);
/* Only negotiate the mode on the first config exchange. */
2470 if (pi->num_conf_req || pi->num_conf_rsp)
2474 case L2CAP_MODE_STREAMING:
2475 case L2CAP_MODE_ERTM:
2476 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
2481 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2487 case L2CAP_MODE_BASIC:
2488 if (pi->imtu != L2CAP_DEFAULT_MTU)
2489 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2491 rfc.mode = L2CAP_MODE_BASIC;
2493 rfc.max_transmit = 0;
2494 rfc.retrans_timeout = 0;
2495 rfc.monitor_timeout = 0;
2496 rfc.max_pdu_size = 0;
2500 case L2CAP_MODE_ERTM:
2501 rfc.mode = L2CAP_MODE_ERTM;
2502 rfc.txwin_size = pi->tx_win;
2503 rfc.max_transmit = pi->max_tx;
/* Timeouts are set by the responder; request zero. */
2504 rfc.retrans_timeout = 0;
2505 rfc.monitor_timeout = 0;
2506 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Clamp MPS so a full PDU plus overhead fits in one ACL MTU. */
2507 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2508 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2510 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2513 if (pi->fcs == L2CAP_FCS_NONE ||
2514 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2515 pi->fcs = L2CAP_FCS_NONE;
2516 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2520 case L2CAP_MODE_STREAMING:
2521 rfc.mode = L2CAP_MODE_STREAMING;
2523 rfc.max_transmit = 0;
2524 rfc.retrans_timeout = 0;
2525 rfc.monitor_timeout = 0;
2526 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2527 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2528 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2530 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2533 if (pi->fcs == L2CAP_FCS_NONE ||
2534 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2535 pi->fcs = L2CAP_FCS_NONE;
2536 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2541 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2542 (unsigned long) &rfc);
2544 /* FIXME: Need actual value of the flush timeout */
2545 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2546 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2548 req->dcid = cpu_to_le16(pi->dcid);
2549 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated configuration request (pi->conf_req)
 * and build our response into 'data'. Negotiates mode, MTU, FCS and
 * the RFC parameters; returns the response length (elided tail) or
 * -ECONNREFUSED when the modes cannot be reconciled.
 * FIX(endianness): rfc.max_pdu_size, rfc.retrans_timeout and
 * rfc.monitor_timeout are __le16 wire-format fields. The original
 * compared the raw __le16 against the host-order mtu and used
 * le16_to_cpu() when *storing* host constants into the option —
 * both are wrong on big-endian; use le16_to_cpu() for reads and
 * cpu_to_le16() for writes. */
2554 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2556 struct l2cap_pinfo *pi = l2cap_pi(sk);
2557 struct l2cap_conf_rsp *rsp = data;
2558 void *ptr = rsp->data;
2559 void *req = pi->conf_req;
2560 int len = pi->conf_len;
2561 int type, hint, olen;
2563 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2564 u16 mtu = L2CAP_DEFAULT_MTU;
2565 u16 result = L2CAP_CONF_SUCCESS;
2567 BT_DBG("sk %p", sk);
2569 while (len >= L2CAP_CONF_OPT_SIZE) {
2570 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; unknown non-hints must be rejected. */
2572 hint = type & L2CAP_CONF_HINT;
2573 type &= L2CAP_CONF_MASK;
2576 case L2CAP_CONF_MTU:
2580 case L2CAP_CONF_FLUSH_TO:
2584 case L2CAP_CONF_QOS:
2587 case L2CAP_CONF_RFC:
2588 if (olen == sizeof(rfc))
2589 memcpy(&rfc, (void *) val, olen);
2592 case L2CAP_CONF_FCS:
2593 if (val == L2CAP_FCS_NONE)
2594 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown option: echo its type back with CONF_UNKNOWN. */
2602 result = L2CAP_CONF_UNKNOWN;
2603 *((u8 *) ptr++) = type;
2608 if (pi->num_conf_rsp || pi->num_conf_req)
2612 case L2CAP_MODE_STREAMING:
2613 case L2CAP_MODE_ERTM:
2614 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2615 pi->mode = l2cap_select_mode(rfc.mode,
2616 pi->conn->feat_mask);
2620 if (pi->mode != rfc.mode)
2621 return -ECONNREFUSED;
2627 if (pi->mode != rfc.mode) {
2628 result = L2CAP_CONF_UNACCEPT;
2629 rfc.mode = pi->mode;
/* Give up after the peer ignored one counter-proposal already. */
2631 if (pi->num_conf_rsp == 1)
2632 return -ECONNREFUSED;
2634 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2635 sizeof(rfc), (unsigned long) &rfc);
2639 if (result == L2CAP_CONF_SUCCESS) {
2640 /* Configure output options and let the other side know
2641 * which ones we don't like. */
2643 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2644 result = L2CAP_CONF_UNACCEPT;
2647 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2649 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2652 case L2CAP_MODE_BASIC:
2653 pi->fcs = L2CAP_FCS_NONE;
2654 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2657 case L2CAP_MODE_ERTM:
2658 pi->remote_tx_win = rfc.txwin_size;
2659 pi->remote_max_tx = rfc.max_transmit;
/* FIX: compare in host order, store in wire order. */
2660 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2661 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2663 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* FIX: host constants must be converted TO wire order here. */
2665 rfc.retrans_timeout =
2666 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2667 rfc.monitor_timeout =
2668 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2670 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2672 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2673 sizeof(rfc), (unsigned long) &rfc);
2677 case L2CAP_MODE_STREAMING:
/* FIX: same host/wire-order correction as the ERTM branch. */
2678 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2679 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2681 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2683 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2685 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2686 sizeof(rfc), (unsigned long) &rfc);
2691 result = L2CAP_CONF_UNACCEPT;
2693 memset(&rfc, 0, sizeof(rfc));
2694 rfc.mode = pi->mode;
2697 if (result == L2CAP_CONF_SUCCESS)
2698 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2700 rsp->scid = cpu_to_le16(pi->dcid);
2701 rsp->result = cpu_to_le16(result);
2702 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's configuration *response* and build a new request
 * in 'data' adopting its counter-proposals (MTU, flush timeout,
 * RFC). Returns the new request length (elided tail) or
 * -ECONNREFUSED on an unacceptable mode change.
 * NOTE(review): 'rfc' is declared without an initializer; if the
 * response contains no RFC option the mode/MPS reads below use
 * uninitialized stack data — verify against the full source. */
2707 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2709 struct l2cap_pinfo *pi = l2cap_pi(sk);
2710 struct l2cap_conf_req *req = data;
2711 void *ptr = req->data;
2714 struct l2cap_conf_rfc rfc;
2716 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2718 while (len >= L2CAP_CONF_OPT_SIZE) {
2719 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2722 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the minimum: counter with the floor. */
2723 if (val < L2CAP_DEFAULT_MIN_MTU) {
2724 *result = L2CAP_CONF_UNACCEPT;
2725 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2728 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2731 case L2CAP_CONF_FLUSH_TO:
2733 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2737 case L2CAP_CONF_RFC:
2738 if (olen == sizeof(rfc))
2739 memcpy(&rfc, (void *)val, olen);
/* A device-mandated mode may not be renegotiated by the peer. */
2741 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2742 rfc.mode != pi->mode)
2743 return -ECONNREFUSED;
2747 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2748 sizeof(rfc), (unsigned long) &rfc);
2753 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2754 return -ECONNREFUSED;
2756 pi->mode = rfc.mode;
2758 if (*result == L2CAP_CONF_SUCCESS) {
2760 case L2CAP_MODE_ERTM:
2761 pi->remote_tx_win = rfc.txwin_size;
2762 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2763 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2764 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2766 case L2CAP_MODE_STREAMING:
2767 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2771 req->dcid = cpu_to_le16(pi->dcid);
2772 req->flags = cpu_to_le16(0x0000);
/* Fill in a minimal configuration response header (scid, result,
 * flags) with no options; returns its length (elided tail). */
2777 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2779 struct l2cap_conf_rsp *rsp = data;
2780 void *ptr = rsp->data;
2782 BT_DBG("sk %p", sk);
/* scid in the response is the peer's source CID, i.e. our dcid. */
2784 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2785 rsp->result = cpu_to_le16(result);
2786 rsp->flags = cpu_to_le16(flags);
/* Extract the final RFC parameters from a successful configuration
 * response and commit them to the channel state (ERTM/streaming
 * channels only).
 * NOTE(review): 'rfc' has no initializer; if the response carries no
 * RFC option the reads below use uninitialized stack data — verify
 * against the full source. */
2791 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2793 struct l2cap_pinfo *pi = l2cap_pi(sk);
2796 struct l2cap_conf_rfc rfc;
2798 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2800 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2803 while (len >= L2CAP_CONF_OPT_SIZE) {
2804 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2807 case L2CAP_CONF_RFC:
2808 if (olen == sizeof(rfc))
2809 memcpy(&rfc, (void *)val, olen);
2816 case L2CAP_MODE_ERTM:
2817 pi->remote_tx_win = rfc.txwin_size;
2818 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2819 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2820 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2822 case L2CAP_MODE_STREAMING:
2823 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject: if it answers our outstanding feature-
 * mask info request, treat the exchange as done and proceed with
 * channel setup anyway. */
2827 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2829 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 = "command not understood"; ignore other reject reasons. */
2831 if (rej->reason != 0x0000)
2834 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2835 cmd->ident == conn->info_ident) {
2836 del_timer(&conn->info_timer);
2838 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2839 conn->info_ident = 0;
2841 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listener on the PSM,
 * check security and backlog, allocate and register a child socket,
 * then answer with success/pending/refusal and, if the feature-mask
 * exchange is still outstanding, kick off an Information Request.
 * NOTE(review): error-path labels and braces are elided in this
 * extract. */
2847 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2849 struct l2cap_chan_list *list = &conn->chan_list;
2850 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2851 struct l2cap_conn_rsp rsp;
2852 struct sock *sk, *parent;
2853 int result, status = L2CAP_CS_NO_INFO;
2855 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2856 __le16 psm = req->psm;
2858 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2860 /* Check if we have socket listening on psm */
2861 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2863 result = L2CAP_CR_BAD_PSM;
2867 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is always allowed without security. */
2868 if (psm != cpu_to_le16(0x0001) &&
2869 !hci_conn_check_link_mode(conn->hcon)) {
2870 conn->disc_reason = 0x05;
2871 result = L2CAP_CR_SEC_BLOCK;
2875 result = L2CAP_CR_NO_MEM;
2877 /* Check for backlog size */
2878 if (sk_acceptq_is_full(parent)) {
2879 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2883 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2887 write_lock_bh(&list->lock);
2889 /* Check if we already have channel with that dcid */
2890 if (__l2cap_get_chan_by_dcid(list, scid)) {
2891 write_unlock_bh(&list->lock);
2892 sock_set_flag(sk, SOCK_ZAPPED);
2893 l2cap_sock_kill(sk);
2897 hci_conn_hold(conn->hcon);
2899 l2cap_sock_init(sk, parent);
2900 bacpy(&bt_sk(sk)->src, conn->src);
2901 bacpy(&bt_sk(sk)->dst, conn->dst);
2902 l2cap_pi(sk)->psm = psm;
/* Peer's source CID becomes our destination CID. */
2903 l2cap_pi(sk)->dcid = scid;
2905 __l2cap_chan_add(conn, sk, parent);
2906 dcid = l2cap_pi(sk)->scid;
2908 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2910 l2cap_pi(sk)->ident = cmd->ident;
2912 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2913 if (l2cap_check_security(sk)) {
/* defer_setup: hold the response until userspace accepts. */
2914 if (bt_sk(sk)->defer_setup) {
2915 sk->sk_state = BT_CONNECT2;
2916 result = L2CAP_CR_PEND;
2917 status = L2CAP_CS_AUTHOR_PEND;
2918 parent->sk_data_ready(parent, 0);
2920 sk->sk_state = BT_CONFIG;
2921 result = L2CAP_CR_SUCCESS;
2922 status = L2CAP_CS_NO_INFO;
2925 sk->sk_state = BT_CONNECT2;
2926 result = L2CAP_CR_PEND;
2927 status = L2CAP_CS_AUTHEN_PEND;
2930 sk->sk_state = BT_CONNECT2;
2931 result = L2CAP_CR_PEND;
2932 status = L2CAP_CS_NO_INFO;
2935 write_unlock_bh(&list->lock);
2938 bh_unlock_sock(parent);
2941 rsp.scid = cpu_to_le16(scid);
2942 rsp.dcid = cpu_to_le16(dcid);
2943 rsp.result = cpu_to_le16(result);
2944 rsp.status = cpu_to_le16(status);
2945 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Feature mask still unknown: ask for it before configuring. */
2947 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2948 struct l2cap_info_req info;
2949 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2951 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2952 conn->info_ident = l2cap_get_ident(conn);
2954 mod_timer(&conn->info_timer, jiffies +
2955 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2957 l2cap_send_cmd(conn, conn->info_ident,
2958 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle a Connection Response to our request: on success move to
 * BT_CONFIG and send the first configuration request; on pending
 * just note it; otherwise tear the channel down. */
2964 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2966 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2967 u16 scid, dcid, result, status;
2971 scid = __le16_to_cpu(rsp->scid);
2972 dcid = __le16_to_cpu(rsp->dcid);
2973 result = __le16_to_cpu(rsp->result);
2974 status = __le16_to_cpu(rsp->status);
2976 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* Pending responses may carry scid 0; fall back to the ident. */
2979 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2983 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2989 case L2CAP_CR_SUCCESS:
2990 sk->sk_state = BT_CONFIG;
2991 l2cap_pi(sk)->ident = 0;
2992 l2cap_pi(sk)->dcid = dcid;
2993 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2994 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2996 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2997 l2cap_build_conf_req(sk, req), req);
2998 l2cap_pi(sk)->num_conf_req++;
/* L2CAP_CR_PEND (elided case label): remember we are waiting. */
3002 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
/* Any other result refuses the connection. */
3006 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle a Configuration Request: accumulate option data across
 * continuation packets in pi->conf_req, then parse the complete set,
 * send our response, and if both directions are configured bring the
 * channel up (initializing ERTM state when needed). */
3014 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3016 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3022 dcid = __le16_to_cpu(req->dcid);
3023 flags = __le16_to_cpu(req->flags);
3025 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3027 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3031 if (sk->sk_state == BT_DISCONN)
3034 /* Reject if config buffer is too small. */
3035 len = cmd_len - sizeof(*req);
3036 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
3037 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3038 l2cap_build_conf_rsp(sk, rsp,
3039 L2CAP_CONF_REJECT, flags), rsp);
/* Append this packet's options to the accumulated request. */
3044 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
3045 l2cap_pi(sk)->conf_len += len;
3047 if (flags & 0x0001) {
3048 /* Incomplete config. Send empty response. */
3049 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3050 l2cap_build_conf_rsp(sk, rsp,
3051 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3055 /* Complete config. */
3056 len = l2cap_parse_conf_req(sk, rsp);
3058 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3062 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3063 l2cap_pi(sk)->num_conf_rsp++;
3065 /* Reset config buffer. */
3066 l2cap_pi(sk)->conf_len = 0;
3068 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
3071 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
/* Default to CRC16 unless both sides agreed to drop the FCS. */
3072 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3073 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3074 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3076 sk->sk_state = BT_CONNECTED;
3078 l2cap_pi(sk)->next_tx_seq = 0;
3079 l2cap_pi(sk)->expected_tx_seq = 0;
3080 __skb_queue_head_init(TX_QUEUE(sk));
3081 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3082 l2cap_ertm_init(sk);
3084 l2cap_chan_ready(sk);
/* We haven't sent our own config request yet — do it now. */
3088 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3090 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3091 l2cap_build_conf_req(sk, buf), buf);
3092 l2cap_pi(sk)->num_conf_req++;
/* Handle a Configuration Response: on success commit the RFC
 * parameters; on "unaccept" re-negotiate with a new request; on
 * anything else disconnect. When both directions finish, bring the
 * channel up.
 * FIX(endianness): cmd->len is a __le16 straight off the wire; it
 * must be converted with le16_to_cpu() before host arithmetic, or
 * 'len' is garbage on big-endian machines. */
3100 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3102 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3103 u16 scid, flags, result;
3105 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3107 scid = __le16_to_cpu(rsp->scid);
3108 flags = __le16_to_cpu(rsp->flags);
3109 result = __le16_to_cpu(rsp->result);
3111 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3112 scid, flags, result);
3114 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3119 case L2CAP_CONF_SUCCESS:
3120 l2cap_conf_rfc_get(sk, rsp->data, len);
3123 case L2CAP_CONF_UNACCEPT:
3124 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Counter-proposal must fit in our request buffer. */
3127 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3128 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3132 /* throw out any old stored conf requests */
3133 result = L2CAP_CONF_SUCCESS;
3134 len = l2cap_parse_conf_rsp(sk, rsp->data,
3137 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3141 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3142 L2CAP_CONF_REQ, len, req);
3143 l2cap_pi(sk)->num_conf_req++;
3144 if (result != L2CAP_CONF_SUCCESS)
/* Default / reject: give the peer 5s, then force disconnect. */
3150 sk->sk_err = ECONNRESET;
3151 l2cap_sock_set_timer(sk, HZ * 5);
3152 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3159 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3161 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
/* Default to CRC16 unless both sides agreed to drop the FCS. */
3162 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3163 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3164 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3166 sk->sk_state = BT_CONNECTED;
3167 l2cap_pi(sk)->next_tx_seq = 0;
3168 l2cap_pi(sk)->expected_tx_seq = 0;
3169 __skb_queue_head_init(TX_QUEUE(sk));
3170 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3171 l2cap_ertm_init(sk);
3173 l2cap_chan_ready(sk);
/* Handle a Disconnect Request: look up the channel by our CID (the
 * peer's dcid), echo a Disconnect Response, shut the socket down and
 * delete/kill the channel. */
3181 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3183 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3184 struct l2cap_disconn_rsp rsp;
3188 scid = __le16_to_cpu(req->scid);
3189 dcid = __le16_to_cpu(req->dcid);
3191 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's destination CID is our source CID. */
3193 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
/* Response carries the CIDs swapped back into the peer's view. */
3197 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3198 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3199 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3201 sk->sk_shutdown = SHUTDOWN_MASK;
3203 l2cap_chan_del(sk, ECONNRESET);
3206 l2cap_sock_kill(sk);
/* Handle a Disconnect Response to our earlier request: find the channel
 * by source CID and finish tearing it down (no error is reported since
 * the disconnect was locally initiated). */
3210 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3212 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3216 scid = __le16_to_cpu(rsp->scid);
3217 dcid = __le16_to_cpu(rsp->dcid);
3219 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3221 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3225 l2cap_chan_del(sk, 0);
3228 l2cap_sock_kill(sk);
/* Answer an Information Request from the peer.
 *
 * FEAT_MASK requests get our feature mask (the ERTM/streaming/FCS bits
 * are OR-ed in on line 3248; the guarding condition is elided in this
 * listing -- presumably keyed off enable_ertm, confirm in full source).
 * FIXED_CHAN requests get the fixed channel bitmap; any other type is
 * answered with L2CAP_IR_NOTSUPP. */
3232 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3234 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3237 type = __le16_to_cpu(req->type);
3239 BT_DBG("type 0x%4.4x", type);
3241 if (type == L2CAP_IT_FEAT_MASK) {
3243 u32 feat_mask = l2cap_feat_mask;
3244 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3245 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3246 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3248 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Feature mask is little-endian on the wire, buffer may be unaligned. */
3250 put_unaligned_le32(feat_mask, rsp->data);
3251 l2cap_send_cmd(conn, cmd->ident,
3252 L2CAP_INFO_RSP, sizeof(buf), buf);
3253 } else if (type == L2CAP_IT_FIXED_CHAN) {
3255 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3256 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3257 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Fixed channel map follows the 4-byte info_rsp header. */
3258 memcpy(buf + 4, l2cap_fixed_chan, 8);
3259 l2cap_send_cmd(conn, cmd->ident,
3260 L2CAP_INFO_RSP, sizeof(buf), buf);
/* Unknown info type: reply "not supported". */
3262 struct l2cap_info_rsp rsp;
3263 rsp.type = cpu_to_le16(type);
3264 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3265 l2cap_send_cmd(conn, cmd->ident,
3266 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an Information Response to our own query.
 *
 * A FEAT_MASK answer is stored on the connection; if the peer supports
 * fixed channels we chain a follow-up FIXED_CHAN query, otherwise (and
 * after a FIXED_CHAN answer) the info exchange is marked done and
 * pending channels are started. */
3272 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3274 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3277 type = __le16_to_cpu(rsp->type);
3278 result = __le16_to_cpu(rsp->result);
3280 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* The answer arrived, stop the info-request timeout. */
3282 del_timer(&conn->info_timer);
3284 if (type == L2CAP_IT_FEAT_MASK) {
3285 conn->feat_mask = get_unaligned_le32(rsp->data);
3287 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
/* Peer has fixed channels: ask for the channel map next. */
3288 struct l2cap_info_req req;
3289 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3291 conn->info_ident = l2cap_get_ident(conn);
3293 l2cap_send_cmd(conn, conn->info_ident,
3294 L2CAP_INFO_REQ, sizeof(req), &req);
3296 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3297 conn->info_ident = 0;
3299 l2cap_conn_start(conn);
3301 } else if (type == L2CAP_IT_FIXED_CHAN) {
3302 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3303 conn->info_ident = 0;
3305 l2cap_conn_start(conn);
/* Demultiplex the L2CAP signalling channel (CID 0x0001).
 *
 * The skb may carry several concatenated commands; each is copied into
 * a local header, validated (length within the remaining data, non-zero
 * ident) and dispatched to its handler.  A handler error is answered
 * with a Command Reject.  Raw sockets get a copy of the whole frame
 * first via l2cap_raw_recv(). */
3311 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3313 u8 *data = skb->data;
3315 struct l2cap_cmd_hdr cmd;
3318 l2cap_raw_recv(conn, skb);
/* Walk every command in the frame. */
3320 while (len >= L2CAP_CMD_HDR_SIZE) {
3322 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3323 data += L2CAP_CMD_HDR_SIZE;
3324 len -= L2CAP_CMD_HDR_SIZE;
3326 cmd_len = le16_to_cpu(cmd.len);
3328 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Reject truncated payloads or the reserved ident 0. */
3330 if (cmd_len > len || !cmd.ident) {
3331 BT_DBG("corrupted command");
3336 case L2CAP_COMMAND_REJ:
3337 l2cap_command_rej(conn, &cmd, data);
3340 case L2CAP_CONN_REQ:
3341 err = l2cap_connect_req(conn, &cmd, data);
3344 case L2CAP_CONN_RSP:
3345 err = l2cap_connect_rsp(conn, &cmd, data);
3348 case L2CAP_CONF_REQ:
3349 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3352 case L2CAP_CONF_RSP:
3353 err = l2cap_config_rsp(conn, &cmd, data);
3356 case L2CAP_DISCONN_REQ:
3357 err = l2cap_disconnect_req(conn, &cmd, data);
3360 case L2CAP_DISCONN_RSP:
3361 err = l2cap_disconnect_rsp(conn, &cmd, data);
3364 case L2CAP_ECHO_REQ:
/* Echo request: reflect the payload straight back. */
3365 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3368 case L2CAP_ECHO_RSP:
3371 case L2CAP_INFO_REQ:
3372 err = l2cap_information_req(conn, &cmd, data);
3375 case L2CAP_INFO_RSP:
3376 err = l2cap_information_rsp(conn, &cmd, data);
3380 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* Handler failed: send a Command Reject back to the peer. */
3386 struct l2cap_cmd_rej rej;
3387 BT_DBG("error %d", err);
3389 /* FIXME: Map err to a valid reason */
3390 rej.reason = cpu_to_le16(0);
3391 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the 2-byte CRC16 FCS at the tail of an ERTM/streaming frame.
 * skb_trim() first shortens the skb by 2, so skb->data + skb->len then
 * points exactly at the (still present in memory) received FCS, and the
 * CRC is computed over the L2CAP header plus the trimmed payload. */
3401 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3403 u16 our_fcs, rcv_fcs;
/* CRC covers the basic L2CAP header plus the 2-byte control field. */
3404 int hdr_size = L2CAP_HDR_SIZE + 2;
3406 if (pi->fcs == L2CAP_FCS_CRC16) {
3407 skb_trim(skb, skb->len - 2);
3408 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3409 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3411 if (our_fcs != rcv_fcs)
/* Answer a poll: send pending I-frames if any, otherwise an RR
 * supervisory frame; if we are locally busy send RNR instead and
 * remember that via L2CAP_CONN_RNR_SENT. */
3417 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3419 struct l2cap_pinfo *pi = l2cap_pi(sk);
3422 pi->frames_sent = 0;
/* Acknowledge everything received so far in the ReqSeq field. */
3424 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3426 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3427 control |= L2CAP_SUPER_RCV_NOT_READY;
3428 l2cap_send_sframe(pi, control);
3429 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3432 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3433 l2cap_retransmit_frames(sk);
/* Try to flush queued I-frames under the send lock. */
3435 spin_lock_bh(&pi->send_lock);
3436 l2cap_ertm_send(sk);
3437 spin_unlock_bh(&pi->send_lock);
/* Nothing was sent and we are not busy: answer with a plain RR. */
3439 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3440 pi->frames_sent == 0) {
3441 control |= L2CAP_SUPER_RCV_READY;
3442 l2cap_send_sframe(pi, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq.  Ordering is decided on offsets relative to
 * buffer_seq, computed modulo the 64-value ERTM sequence space so that
 * wrap-around compares correctly.  Returns non-trivially on duplicate
 * tx_seq (comparison on line 3466; elided line presumably bails out). */
3446 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3448 struct sk_buff *next_skb;
3449 struct l2cap_pinfo *pi = l2cap_pi(sk);
3450 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequencing metadata in the skb control block. */
3452 bt_cb(skb)->tx_seq = tx_seq;
3453 bt_cb(skb)->sar = sar;
3455 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue: just append. */
3457 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Normalise the new frame's distance from buffer_seq into [0,63]. */
3461 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3462 if (tx_seq_offset < 0)
3463 tx_seq_offset += 64;
/* Duplicate of a frame already queued. */
3466 if (bt_cb(next_skb)->tx_seq == tx_seq)
3469 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3470 pi->buffer_seq) % 64;
3471 if (next_tx_seq_offset < 0)
3472 next_tx_seq_offset += 64;
/* Found the first queued frame that follows us: insert before it. */
3474 if (next_tx_seq_offset > tx_seq_offset) {
3475 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3479 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3482 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
/* Largest sequence so far: append at the tail. */
3484 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble an SDU from ERTM I-frames according to the SAR bits.
 *
 * UNSEGMENTED frames are queued to the socket directly.  START frames
 * allocate pi->sdu with the announced SDU length (checked against the
 * incoming MTU); CONTINUE/END frames append, with END delivering a
 * clone to the socket.  The SAR_RETRY flag lets a delivery that failed
 * (e.g. receive buffer full) be retried without re-copying the data.
 * A framing violation falls through to a disconnect (line 3592). */
3489 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3491 struct l2cap_pinfo *pi = l2cap_pi(sk);
3492 struct sk_buff *_skb;
3495 switch (control & L2CAP_CTRL_SAR) {
3496 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame while mid-SDU is a protocol violation. */
3497 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3500 err = sock_queue_rcv_skb(sk, skb);
3506 case L2CAP_SDU_START:
3507 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
/* First two payload bytes carry the total SDU length. */
3510 pi->sdu_len = get_unaligned_le16(skb->data);
3512 if (pi->sdu_len > pi->imtu)
3515 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3519 /* pull sdu_len bytes only after alloc, because of Local Busy
3520 * condition we have to be sure that this will be executed
3521 * only once, i.e., when alloc does not fail */
3524 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3526 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3527 pi->partial_sdu_len = skb->len;
3530 case L2CAP_SDU_CONTINUE:
/* CONTINUE without a preceding START is invalid. */
3531 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3537 pi->partial_sdu_len += skb->len;
/* Peer must not exceed the SDU length it announced. */
3538 if (pi->partial_sdu_len > pi->sdu_len)
3541 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* L2CAP_SDU_END handling from here on. */
3546 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
/* Skip the bookkeeping when this END is a redelivery attempt. */
3552 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3553 pi->partial_sdu_len += skb->len;
3555 if (pi->partial_sdu_len > pi->imtu)
/* Total received must match the announced SDU length exactly. */
3558 if (pi->partial_sdu_len != pi->sdu_len)
3561 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3564 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
/* Clone failed: mark for retry instead of losing the SDU. */
3566 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3570 err = sock_queue_rcv_skb(sk, _skb);
/* Queueing failed: keep the SDU and retry later. */
3573 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
/* Delivered: clear reassembly state for the next SDU. */
3577 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3578 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
/* Framing violation: abort the channel. */
3592 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* Workqueue handler that drains the local-busy backlog.
 *
 * While frames are parked on BUSY_QUEUE the worker sleeps in 200ms
 * slices waiting for receive space; after L2CAP_LOCAL_BUSY_TRIES
 * attempts it gives up and disconnects.  Once the backlog is drained
 * it leaves the local-busy state, sending an RR+Poll if we had
 * previously advertised RNR to the peer. */
3597 static void l2cap_busy_work(struct work_struct *work)
3599 DECLARE_WAITQUEUE(wait, current);
3600 struct l2cap_pinfo *pi =
3601 container_of(work, struct l2cap_pinfo, busy_work);
/* struct sock is the first member of l2cap_pinfo's containing socket. */
3602 struct sock *sk = (struct sock *)pi;
3603 int n_tries = 0, timeo = HZ/5, err;
3604 struct sk_buff *skb;
3609 add_wait_queue(sk_sleep(sk), &wait);
3610 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3611 set_current_state(TASK_INTERRUPTIBLE);
/* Too many retries: give up and kill the channel. */
3613 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3615 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
3622 if (signal_pending(current)) {
3623 err = sock_intr_errno(timeo);
/* Sleep, then re-check the backlog and socket state. */
3628 timeo = schedule_timeout(timeo);
3631 err = sock_error(sk);
/* Try to push the parked frames through reassembly. */
3635 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3636 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3637 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
/* Still no room: put the frame back and keep waiting. */
3639 skb_queue_head(BUSY_QUEUE(sk), skb);
3643 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
/* If we never told the peer we were busy there is nothing to undo. */
3650 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
/* Poll the peer so it resumes transmission, await the F-bit reply. */
3653 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3654 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3655 l2cap_send_sframe(pi, control);
3656 l2cap_pi(sk)->retry_count = 1;
3658 del_timer(&pi->retrans_timer);
3659 __mod_monitor_timer();
3661 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3664 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3665 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3667 BT_DBG("sk %p, Exit local busy", sk);
3669 set_current_state(TASK_RUNNING);
3670 remove_wait_queue(sk_sleep(sk), &wait);
/* Deliver a received I-frame, entering local-busy mode on back-pressure.
 * If already busy, just park the frame on BUSY_QUEUE.  Otherwise try
 * reassembly; on failure (receive buffer full) enter the busy state,
 * tell the peer with an RNR S-frame and hand recovery to the
 * busy_work workqueue. */
3675 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3677 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* Already busy: queue behind the frames that are waiting. */
3680 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3681 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3682 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3686 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3688 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3692 /* Busy Condition */
3693 BT_DBG("sk %p, Enter local busy", sk);
3695 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3696 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3697 __skb_queue_tail(BUSY_QUEUE(sk), skb);
/* Advertise Receiver-Not-Ready to stall the peer. */
3699 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3700 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3701 l2cap_send_sframe(pi, sctrl);
3703 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3705 del_timer(&pi->ack_timer);
/* Recovery is driven asynchronously from the busy workqueue. */
3707 queue_work(_busy_wq, &pi->busy_work);
/* Reassemble an SDU in streaming mode (best-effort, no retransmission).
 * Mirrors the ERTM reassembly but errors simply drop the partial SDU
 * rather than disconnecting. */
3712 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3714 struct l2cap_pinfo *pi = l2cap_pi(sk);
3715 struct sk_buff *_skb;
3719 * TODO: We have to notify the userland if some data is lost with the
3723 switch (control & L2CAP_CTRL_SAR) {
3724 case L2CAP_SDU_UNSEGMENTED:
/* Unsegmented frame aborts any SDU in progress. */
3725 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3730 err = sock_queue_rcv_skb(sk, skb);
3736 case L2CAP_SDU_START:
3737 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* First two payload bytes announce the total SDU length. */
3742 pi->sdu_len = get_unaligned_le16(skb->data);
3745 if (pi->sdu_len > pi->imtu) {
3750 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3756 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3758 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3759 pi->partial_sdu_len = skb->len;
3763 case L2CAP_SDU_CONTINUE:
3764 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3767 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3769 pi->partial_sdu_len += skb->len;
3770 if (pi->partial_sdu_len > pi->sdu_len)
/* L2CAP_SDU_END from here on. */
3778 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3781 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3783 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3784 pi->partial_sdu_len += skb->len;
3786 if (pi->partial_sdu_len > pi->imtu)
/* Only deliver when the size matches; short SDUs are dropped. */
3789 if (pi->partial_sdu_len == pi->sdu_len) {
3790 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
/* NOTE(review): _skb from skb_clone() looks unchecked for NULL
 * before queueing -- confirm against the full source. */
3791 err = sock_queue_rcv_skb(sk, _skb);
/* After a selectively-rejected frame arrives, flush the run of now
 * in-order frames from the SREJ queue into reassembly, advancing
 * buffer_seq_srej and tx_seq until the next gap. */
3806 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3808 struct sk_buff *skb;
3811 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* Stop at the first gap in the sequence. */
3812 if (bt_cb(skb)->tx_seq != tx_seq)
3815 skb = skb_dequeue(SREJ_QUEUE(sk));
3816 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3817 l2cap_ertm_reassembly_sdu(sk, skb, control);
3818 l2cap_pi(sk)->buffer_seq_srej =
3819 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3820 tx_seq = (tx_seq + 1) % 64;
/* Re-send SREJ S-frames for every sequence number still outstanding,
 * stopping once the entry matching tx_seq is reached; each re-sent
 * entry is moved to the tail of the SREJ list. */
3824 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3826 struct l2cap_pinfo *pi = l2cap_pi(sk);
3827 struct srej_list *l, *tmp;
3830 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
/* Reached the frame that just arrived: its SREJ is satisfied. */
3831 if (l->tx_seq == tx_seq) {
3836 control = L2CAP_SUPER_SELECT_REJECT;
3837 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3838 l2cap_send_sframe(pi, control);
3840 list_add_tail(&l->list, SREJ_LIST(sk));
/* Send one SREJ per missing sequence number between expected_tx_seq
 * and the tx_seq that actually arrived, recording each requested
 * number on SREJ_LIST so the retransmissions can be matched later. */
3844 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3846 struct l2cap_pinfo *pi = l2cap_pi(sk);
3847 struct srej_list *new;
3850 while (tx_seq != pi->expected_tx_seq) {
3851 control = L2CAP_SUPER_SELECT_REJECT;
3852 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3853 l2cap_send_sframe(pi, control);
/* NOTE(review): kzalloc() result appears to be dereferenced without
 * a NULL check (GFP_ATOMIC can fail) -- confirm against full source. */
3855 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3856 new->tx_seq = pi->expected_tx_seq;
3857 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3858 list_add_tail(&new->list, SREJ_LIST(sk));
/* Skip past the frame that did arrive. */
3860 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* Core ERTM receive path for I-frames.
 *
 * Handles F-bit resolution of a poll, acking via ReqSeq, sequence
 * validation against the tx window, the SREJ_SENT recovery state
 * (gap filling / duplicate detection / issuing new SREJs), in-order
 * delivery through l2cap_push_rx_skb(), and periodic acknowledgement
 * after num_to_ack frames. */
3863 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3865 struct l2cap_pinfo *pi = l2cap_pi(sk);
3866 u8 tx_seq = __get_txseq(rx_control);
3867 u8 req_seq = __get_reqseq(rx_control);
3868 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3869 int tx_seq_offset, expected_tx_seq_offset;
/* Ack roughly every sixth of the transmit window. */
3870 int num_to_ack = (pi->tx_win/6) + 1;
3873 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
/* F-bit answers our poll: stop the monitor timer. */
3876 if (L2CAP_CTRL_FINAL & rx_control &&
3877 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3878 del_timer(&pi->monitor_timer);
3879 if (pi->unacked_frames > 0)
3880 __mod_retrans_timer();
3881 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
/* ReqSeq acknowledges our outstanding I-frames. */
3884 pi->expected_ack_seq = req_seq;
3885 l2cap_drop_acked_frames(sk);
3887 if (tx_seq == pi->expected_tx_seq)
3890 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3891 if (tx_seq_offset < 0)
3892 tx_seq_offset += 64;
3894 /* invalid tx_seq */
3895 if (tx_seq_offset >= pi->tx_win) {
3896 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* NOTE(review): '==' compares the whole conn_state word against a
 * single flag; a bitwise '&' test looks intended -- confirm. */
3900 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3903 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3904 struct srej_list *first;
3906 first = list_first_entry(SREJ_LIST(sk),
3907 struct srej_list, list);
/* The oldest missing frame arrived: fill the gap. */
3908 if (tx_seq == first->tx_seq) {
3909 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3910 l2cap_check_srej_gap(sk, tx_seq);
3912 list_del(&first->list);
/* All gaps filled: leave SREJ recovery. */
3915 if (list_empty(SREJ_LIST(sk))) {
3916 pi->buffer_seq = pi->buffer_seq_srej;
3917 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3919 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3922 struct srej_list *l;
3924 /* duplicated tx_seq */
3925 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
/* If this frame was already SREJ'ed, re-issue the requests. */
3928 list_for_each_entry(l, SREJ_LIST(sk), list) {
3929 if (l->tx_seq == tx_seq) {
3930 l2cap_resend_srejframe(sk, tx_seq);
3934 l2cap_send_srejframe(sk, tx_seq);
3937 expected_tx_seq_offset =
3938 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3939 if (expected_tx_seq_offset < 0)
3940 expected_tx_seq_offset += 64;
3942 /* duplicated tx_seq */
3943 if (tx_seq_offset < expected_tx_seq_offset)
/* First out-of-sequence frame: enter SREJ recovery. */
3946 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3948 BT_DBG("sk %p, Enter SREJ", sk);
3950 INIT_LIST_HEAD(SREJ_LIST(sk));
3951 pi->buffer_seq_srej = pi->buffer_seq;
3953 __skb_queue_head_init(SREJ_QUEUE(sk));
3954 __skb_queue_head_init(BUSY_QUEUE(sk));
3955 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3957 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3959 l2cap_send_srejframe(sk, tx_seq);
3961 del_timer(&pi->ack_timer);
/* Expected in-sequence frame path. */
3966 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3968 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3969 bt_cb(skb)->tx_seq = tx_seq;
3970 bt_cb(skb)->sar = sar;
3971 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3975 err = l2cap_push_rx_skb(sk, skb, rx_control);
/* F-bit may release a pending REJ retransmission. */
3979 if (rx_control & L2CAP_CTRL_FINAL) {
3980 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3981 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3983 l2cap_retransmit_frames(sk);
/* Ack after every num_to_ack in-sequence frames. */
3988 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3989 if (pi->num_acked == num_to_ack - 1)
/* Handle a Receiver-Ready S-frame: ack our frames via ReqSeq, then
 * answer a P-bit poll (with SREJ tail or I/RR/RNR), honour an F-bit
 * reply, and resume transmission when the peer leaves remote-busy. */
3999 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
4001 struct l2cap_pinfo *pi = l2cap_pi(sk);
4003 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
/* RR acknowledges everything up to ReqSeq. */
4006 pi->expected_ack_seq = __get_reqseq(rx_control);
4007 l2cap_drop_acked_frames(sk);
4009 if (rx_control & L2CAP_CTRL_POLL) {
/* Poll must be answered with the F-bit set. */
4010 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4011 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4012 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4013 (pi->unacked_frames > 0))
4014 __mod_retrans_timer();
4016 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4017 l2cap_send_srejtail(sk);
4019 l2cap_send_i_or_rr_or_rnr(sk);
4022 } else if (rx_control & L2CAP_CTRL_FINAL) {
4023 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* F-bit answer may release a pending REJ retransmission. */
4025 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4026 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4028 l2cap_retransmit_frames(sk);
4031 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4032 (pi->unacked_frames > 0))
4033 __mod_retrans_timer();
4035 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4036 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
/* Peer is ready again: flush queued I-frames. */
4039 spin_lock_bh(&pi->send_lock);
4040 l2cap_ertm_send(sk);
4041 spin_unlock_bh(&pi->send_lock);
/* Handle a Reject S-frame: ack up to ReqSeq and retransmit from there.
 * With the F-bit the retransmission only happens when no REJ is
 * already being acted on; while waiting for an F-bit the REJ is
 * remembered via L2CAP_CONN_REJ_ACT. */
4046 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4048 struct l2cap_pinfo *pi = l2cap_pi(sk);
4049 u8 tx_seq = __get_reqseq(rx_control);
4051 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4053 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4055 pi->expected_ack_seq = tx_seq;
4056 l2cap_drop_acked_frames(sk);
4058 if (rx_control & L2CAP_CTRL_FINAL) {
4059 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4060 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4062 l2cap_retransmit_frames(sk);
4064 l2cap_retransmit_frames(sk);
/* Defer clearing until the F-bit reply arrives. */
4066 if (pi->conn_state & L2CAP_CONN_WAIT_F)
4067 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective-Reject S-frame: retransmit exactly the requested
 * frame.  P-bit variants also ack and flush the send queue; while a
 * poll answer is pending the SREJ is tracked via SREJ_ACT together
 * with the saved ReqSeq. */
4070 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
4072 struct l2cap_pinfo *pi = l2cap_pi(sk);
4073 u8 tx_seq = __get_reqseq(rx_control);
4075 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4077 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4079 if (rx_control & L2CAP_CTRL_POLL) {
4080 pi->expected_ack_seq = tx_seq;
4081 l2cap_drop_acked_frames(sk);
/* The poll answer must carry the F-bit. */
4083 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4084 l2cap_retransmit_one_frame(sk, tx_seq);
4086 spin_lock_bh(&pi->send_lock);
4087 l2cap_ertm_send(sk);
4088 spin_unlock_bh(&pi->send_lock);
4090 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4091 pi->srej_save_reqseq = tx_seq;
4092 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4094 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* Only retransmit if this F-bit doesn't answer the saved SREJ. */
4095 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4096 pi->srej_save_reqseq == tx_seq)
4097 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4099 l2cap_retransmit_one_frame(sk, tx_seq);
4101 l2cap_retransmit_one_frame(sk, tx_seq);
4102 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4103 pi->srej_save_reqseq = tx_seq;
4104 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver-Not-Ready S-frame: mark the peer busy, ack up to
 * ReqSeq, stop the retransmission timer, and answer a P-bit poll
 * (with the SREJ tail if SREJ recovery is active). */
4109 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4111 struct l2cap_pinfo *pi = l2cap_pi(sk);
4112 u8 tx_seq = __get_reqseq(rx_control);
4114 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4116 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4117 pi->expected_ack_seq = tx_seq;
4118 l2cap_drop_acked_frames(sk);
4120 if (rx_control & L2CAP_CTRL_POLL)
4121 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4123 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
/* Peer can't receive: no point retransmitting on timeout. */
4124 del_timer(&pi->retrans_timer);
4125 if (rx_control & L2CAP_CTRL_POLL)
4126 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4130 if (rx_control & L2CAP_CTRL_POLL)
4131 l2cap_send_srejtail(sk);
4133 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* Dispatch a received S-frame to its per-type handler (RR/REJ/SREJ/RNR)
 * after resolving an F-bit answer to a pending poll. */
4136 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4138 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* F-bit answers our poll: stop the monitor timer. */
4140 if (L2CAP_CTRL_FINAL & rx_control &&
4141 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4142 del_timer(&l2cap_pi(sk)->monitor_timer);
4143 if (l2cap_pi(sk)->unacked_frames > 0)
4144 __mod_retrans_timer();
4145 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4148 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4149 case L2CAP_SUPER_RCV_READY:
4150 l2cap_data_channel_rrframe(sk, rx_control);
4153 case L2CAP_SUPER_REJECT:
4154 l2cap_data_channel_rejframe(sk, rx_control);
4157 case L2CAP_SUPER_SELECT_REJECT:
4158 l2cap_data_channel_srejframe(sk, rx_control);
4161 case L2CAP_SUPER_RCV_NOT_READY:
4162 l2cap_data_channel_rnrframe(sk, rx_control);
/* Entry point for data on a connection-oriented channel (by CID).
 *
 * BASIC mode queues straight to the socket (bounded by the incoming
 * MTU).  ERTM validates FCS, length against MPS and ReqSeq against
 * the unacked window, then routes I-frames and S-frames to their
 * handlers.  STREAMING validates similarly but tolerates sequence
 * gaps, resynchronising expected_tx_seq. */
4170 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4173 struct l2cap_pinfo *pi;
4176 int len, next_tx_seq_offset, req_seq_offset;
4178 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4180 BT_DBG("unknown cid 0x%4.4x", cid);
4186 BT_DBG("sk %p, len %d", sk, skb->len);
4188 if (sk->sk_state != BT_CONNECTED)
4192 case L2CAP_MODE_BASIC:
4193 /* If socket recv buffers overflows we drop data here
4194 * which is *bad* because L2CAP has to be reliable.
4195 * But we don't have any other choice. L2CAP doesn't
4196 * provide flow control mechanism. */
4198 if (pi->imtu < skb->len)
4201 if (!sock_queue_rcv_skb(sk, skb))
4205 case L2CAP_MODE_ERTM:
/* First two bytes of the payload are the control field. */
4206 control = get_unaligned_le16(skb->data)
4211 * We can just drop the corrupted I-frame here.
4212 * Receiver will miss it and start proper recovery
4213 * procedures and ask retransmission.
4215 if (l2cap_check_fcs(pi, skb))
/* SAR start frames carry an extra 2-byte SDU length field. */
4218 if (__is_sar_start(control) && __is_iframe(control))
4221 if (pi->fcs == L2CAP_FCS_CRC16)
4224 if (len > pi->mps) {
4225 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* ReqSeq must lie inside the window of unacked frames. */
4229 req_seq = __get_reqseq(control);
4230 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4231 if (req_seq_offset < 0)
4232 req_seq_offset += 64;
4234 next_tx_seq_offset =
4235 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4236 if (next_tx_seq_offset < 0)
4237 next_tx_seq_offset += 64;
4239 /* check for invalid req-seq */
4240 if (req_seq_offset > next_tx_seq_offset) {
4241 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4245 if (__is_iframe(control)) {
4247 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4251 l2cap_data_channel_iframe(sk, control, skb);
4254 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4258 l2cap_data_channel_sframe(sk, control, skb);
4263 case L2CAP_MODE_STREAMING:
4264 control = get_unaligned_le16(skb->data);
4268 if (l2cap_check_fcs(pi, skb))
4271 if (__is_sar_start(control))
4274 if (pi->fcs == L2CAP_FCS_CRC16)
/* S-frames are not valid in streaming mode. */
4277 if (len > pi->mps || len < 0 || __is_sframe(control))
4280 tx_seq = __get_txseq(control);
/* Tolerate lost frames: resynchronise on the received tx_seq. */
4282 if (pi->expected_tx_seq == tx_seq)
4283 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4285 pi->expected_tx_seq = (tx_seq + 1) % 64;
4287 l2cap_streaming_reassembly_sdu(sk, skb, control);
4292 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Deliver a connectionless (CID 0x0002) frame to the socket bound to
 * the given PSM, subject to socket state and the incoming MTU. */
4306 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4310 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4314 BT_DBG("sk %p, len %d", sk, skb->len);
4316 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4319 if (l2cap_pi(sk)->imtu < skb->len)
4322 if (!sock_queue_rcv_skb(sk, skb))
/* Route a complete, reassembled L2CAP frame by CID: signalling channel,
 * connectionless channel (PSM prefixed to the payload) or a
 * connection-oriented data channel.  The header length must match the
 * actual skb length. */
4334 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4336 struct l2cap_hdr *lh = (void *) skb->data;
4340 skb_pull(skb, L2CAP_HDR_SIZE);
4341 cid = __le16_to_cpu(lh->cid);
4342 len = __le16_to_cpu(lh->len);
/* Drop frames whose announced length disagrees with reality. */
4344 if (len != skb->len) {
4349 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4352 case L2CAP_CID_SIGNALING:
4353 l2cap_sig_channel(conn, skb);
4356 case L2CAP_CID_CONN_LESS:
/* Connectionless frames start with the 2-byte PSM. */
4357 psm = get_unaligned_le16(skb->data);
4359 l2cap_conless_channel(conn, psm, skb);
4363 l2cap_data_channel(conn, cid, skb);
4368 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection.
 * Scans listening L2CAP sockets; an exact local-address match wins
 * over BDADDR_ANY wildcards.  Returns the accept/role-switch link
 * mode bits for the best match, or 0 to refuse. */
4370 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4372 int exact = 0, lm1 = 0, lm2 = 0;
4373 register struct sock *sk;
4374 struct hlist_node *node;
/* L2CAP only runs over ACL links. */
4376 if (type != ACL_LINK)
4379 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4381 /* Find listening sockets and check their link_mode */
4382 read_lock(&l2cap_sk_list.lock);
4383 sk_for_each(sk, node, &l2cap_sk_list.head) {
4384 if (sk->sk_state != BT_LISTEN)
/* Exact match on the local adapter address. */
4387 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4388 lm1 |= HCI_LM_ACCEPT;
4389 if (l2cap_pi(sk)->role_switch)
4390 lm1 |= HCI_LM_MASTER;
/* Wildcard listener. */
4392 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4393 lm2 |= HCI_LM_ACCEPT;
4394 if (l2cap_pi(sk)->role_switch)
4395 lm2 |= HCI_LM_MASTER;
4398 read_unlock(&l2cap_sk_list.lock);
4400 return exact ? lm1 : lm2;
/* HCI callback: an ACL connection attempt finished.  On success create
 * the L2CAP connection object and mark it ready; on failure tear down
 * any existing state with the mapped error. */
4403 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4405 struct l2cap_conn *conn;
4407 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4409 if (hcon->type != ACL_LINK)
4413 conn = l2cap_conn_add(hcon, status);
4415 l2cap_conn_ready(conn);
4417 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: report the reason code to use for a pending disconnect
 * of this ACL link (stored earlier on the L2CAP connection). */
4422 static int l2cap_disconn_ind(struct hci_conn *hcon)
4424 struct l2cap_conn *conn = hcon->l2cap_data;
4426 BT_DBG("hcon %p", hcon);
4428 if (hcon->type != ACL_LINK || !conn)
4431 return conn->disc_reason;
/* HCI callback: the ACL link went down -- tear down the whole L2CAP
 * connection with the mapped error code. */
4434 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4436 BT_DBG("hcon %p reason %d", hcon, reason);
4438 if (hcon->type != ACL_LINK)
4441 l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on a connected channel.  Losing
 * encryption gives MEDIUM-security channels a 5s grace timer and
 * closes HIGH-security channels immediately; regaining it cancels
 * the grace timer. */
4446 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
/* Only connection-oriented sockets carry a security requirement here. */
4448 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4451 if (encrypt == 0x00) {
4452 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4453 l2cap_sock_clear_timer(sk);
4454 l2cap_sock_set_timer(sk, HZ * 5);
4455 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4456 __l2cap_sock_close(sk, ECONNREFUSED);
4458 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4459 l2cap_sock_clear_timer(sk);
/* HCI callback: an authentication/encryption request completed.
 *
 * Walks every channel on the connection: established channels get
 * their encryption state re-checked; channels in BT_CONNECT now send
 * their deferred Connect Request; channels in BT_CONNECT2 answer the
 * peer's pending Connect Request with success or a security block. */
4463 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4465 struct l2cap_chan_list *l;
4466 struct l2cap_conn *conn = hcon->l2cap_data;
4472 l = &conn->chan_list;
4474 BT_DBG("conn %p", conn);
4476 read_lock(&l->lock);
4478 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* A connect is already pending for this channel; skip it. */
4481 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4486 if (!status && (sk->sk_state == BT_CONNECTED ||
4487 sk->sk_state == BT_CONFIG)) {
4488 l2cap_check_encryption(sk, encrypt)
4493 if (sk->sk_state == BT_CONNECT) {
/* Security is now in place: issue the deferred Connect Request. */
4495 struct l2cap_conn_req req;
4496 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4497 req.psm = l2cap_pi(sk)->psm;
4499 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4500 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
4502 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4503 L2CAP_CONN_REQ, sizeof(req), &req);
4505 l2cap_sock_clear_timer(sk);
4506 l2cap_sock_set_timer(sk, HZ / 10);
4508 } else if (sk->sk_state == BT_CONNECT2) {
/* Answer the peer's held Connect Request. */
4509 struct l2cap_conn_rsp rsp;
4513 sk->sk_state = BT_CONFIG;
4514 result = L2CAP_CR_SUCCESS;
/* Security failed: refuse and schedule teardown. */
4516 sk->sk_state = BT_DISCONN;
4517 l2cap_sock_set_timer(sk, HZ / 10);
4518 result = L2CAP_CR_SEC_BLOCK;
4521 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4522 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4523 rsp.result = cpu_to_le16(result);
4524 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4525 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4526 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4532 read_unlock(&l->lock);
/* HCI callback: reassemble ACL fragments into complete L2CAP frames.
 *
 * ACL_START fragments carry the L2CAP header; if the whole frame is
 * present it is processed immediately, otherwise an rx_skb of the full
 * size is allocated and continuation fragments are appended until
 * rx_len reaches zero.  Any length inconsistency marks the connection
 * unreliable (ECOMM) and drops the partial frame. */
4537 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4539 struct l2cap_conn *conn = hcon->l2cap_data;
/* Lazily create the L2CAP connection on first data. */
4541 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4544 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4546 if (flags & ACL_START) {
4547 struct l2cap_hdr *hdr;
/* A start while a frame is pending means we lost fragments. */
4551 BT_ERR("Unexpected start frame (len %d)", skb->len);
4552 kfree_skb(conn->rx_skb);
4553 conn->rx_skb = NULL;
4555 l2cap_conn_unreliable(conn, ECOMM);
4559 BT_ERR("Frame is too short (len %d)", skb->len);
4560 l2cap_conn_unreliable(conn, ECOMM);
4564 hdr = (struct l2cap_hdr *) skb->data;
4565 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4567 if (len == skb->len) {
4568 /* Complete frame received */
4569 l2cap_recv_frame(conn, skb);
4573 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4575 if (skb->len > len) {
4576 BT_ERR("Frame is too long (len %d, expected len %d)",
4578 l2cap_conn_unreliable(conn, ECOMM);
4582 /* Allocate skb for the complete frame (with header) */
4583 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4587 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* Remaining bytes still expected for this frame. */
4589 conn->rx_len = len - skb->len;
4591 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4593 if (!conn->rx_len) {
4594 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4595 l2cap_conn_unreliable(conn, ECOMM);
4599 if (skb->len > conn->rx_len) {
4600 BT_ERR("Fragment is too long (len %d, expected %d)",
4601 skb->len, conn->rx_len);
4602 kfree_skb(conn->rx_skb);
4603 conn->rx_skb = NULL;
4605 l2cap_conn_unreliable(conn, ECOMM);
4609 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4611 conn->rx_len -= skb->len;
4613 if (!conn->rx_len) {
4614 /* Complete frame received */
4615 l2cap_recv_frame(conn, conn->rx_skb);
4616 conn->rx_skb = NULL;
/* debugfs seq_file show: dump one line per L2CAP socket (addresses,
 * state, PSM, CIDs, MTUs and security level). */
4625 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4628 struct hlist_node *node;
4630 read_lock_bh(&l2cap_sk_list.lock);
4632 sk_for_each(sk, node, &l2cap_sk_list.head) {
4633 struct l2cap_pinfo *pi = l2cap_pi(sk);
4635 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4636 batostr(&bt_sk(sk)->src),
4637 batostr(&bt_sk(sk)->dst),
4638 sk->sk_state, __le16_to_cpu(pi->psm),
4640 pi->imtu, pi->omtu, pi->sec_level);
4643 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open: bind the seq_file single-show helper. */
4648 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4650 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the /sys/kernel/debug/.../l2cap entry. */
4653 static const struct file_operations l2cap_debugfs_fops = {
4654 .open = l2cap_debugfs_open,
4656 .llseek = seq_lseek,
4657 .release = single_release,
/* Dentry of the debugfs file, created in l2cap_init(). */
4660 static struct dentry *l2cap_debugfs;
/* Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets;
 * poll and ioctl are shared with the generic Bluetooth socket code. */
4662 static const struct proto_ops l2cap_sock_ops = {
4663 .family = PF_BLUETOOTH,
4664 .owner = THIS_MODULE,
4665 .release = l2cap_sock_release,
4666 .bind = l2cap_sock_bind,
4667 .connect = l2cap_sock_connect,
4668 .listen = l2cap_sock_listen,
4669 .accept = l2cap_sock_accept,
4670 .getname = l2cap_sock_getname,
4671 .sendmsg = l2cap_sock_sendmsg,
4672 .recvmsg = l2cap_sock_recvmsg,
4673 .poll = bt_sock_poll,
4674 .ioctl = bt_sock_ioctl,
4675 .mmap = sock_no_mmap,
4676 .socketpair = sock_no_socketpair,
4677 .shutdown = l2cap_sock_shutdown,
4678 .setsockopt = l2cap_sock_setsockopt,
4679 .getsockopt = l2cap_sock_getsockopt
/* Protocol-family hook: creates L2CAP sockets for the Bluetooth family. */
4682 static const struct net_proto_family l2cap_sock_family_ops = {
4683 .family = PF_BLUETOOTH,
4684 .owner = THIS_MODULE,
4685 .create = l2cap_sock_create,
/* HCI protocol registration: the callbacks the HCI core invokes for
 * connection lifecycle, security changes and incoming ACL data. */
4688 static struct hci_proto l2cap_hci_proto = {
4690 .id = HCI_PROTO_L2CAP,
4691 .connect_ind = l2cap_connect_ind,
4692 .connect_cfm = l2cap_connect_cfm,
4693 .disconn_ind = l2cap_disconn_ind,
4694 .disconn_cfm = l2cap_disconn_cfm,
4695 .security_cfm = l2cap_security_cfm,
4696 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, create the busy workqueue, register
 * the Bluetooth socket family and the HCI protocol, then create the
 * debugfs entry.  Registration failures unwind in reverse order
 * (cleanup labels are elided in this listing). */
4699 static int __init l2cap_init(void)
4703 err = proto_register(&l2cap_proto, 0);
/* Single-threaded workqueue servicing local-busy recovery. */
4707 _busy_wq = create_singlethread_workqueue("l2cap");
4711 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4713 BT_ERR("L2CAP socket registration failed");
4717 err = hci_register_proto(&l2cap_hci_proto);
4719 BT_ERR("L2CAP protocol registration failed");
4720 bt_sock_unregister(BTPROTO_L2CAP);
/* debugfs is best-effort; failure only logs an error. */
4725 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4726 bt_debugfs, NULL, &l2cap_debugfs_fops);
4728 BT_ERR("Failed to create L2CAP debug file");
4731 BT_INFO("L2CAP ver %s", VERSION);
4732 BT_INFO("L2CAP socket layer initialized");
4737 proto_unregister(&l2cap_proto);
/* Module exit: remove debugfs, drain and destroy the busy workqueue,
 * then unregister the socket family, HCI protocol and proto. */
4741 static void __exit l2cap_exit(void)
4743 debugfs_remove(l2cap_debugfs);
/* Flush before destroy so no busy_work runs after teardown. */
4745 flush_workqueue(_busy_wq);
4746 destroy_workqueue(_busy_wq);
4748 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4749 BT_ERR("L2CAP socket unregistration failed");
4751 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4752 BT_ERR("L2CAP protocol unregistration failed");
4754 proto_unregister(&l2cap_proto);
/* Exported no-op whose only purpose is module auto-loading: callers
 * referencing this symbol pull the L2CAP module in via modprobe. */
4757 void l2cap_load(void)
4759 /* Dummy function to trigger automatic L2CAP module loading by
4760 * other modules that use L2CAP sockets but don't use any other
4761 * symbols from it. */
4763 EXPORT_SYMBOL(l2cap_load);
/* Module entry points, the enable_ertm parameter (writable at runtime
 * via sysfs, mode 0644) and module metadata. */
4765 module_init(l2cap_init);
4766 module_exit(l2cap_exit);
4768 module_param(enable_ertm, bool, 0644);
4769 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4771 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4772 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4773 MODULE_VERSION(VERSION);
4774 MODULE_LICENSE("GPL");
/* Auto-load alias matched by the Bluetooth socket core. */
4775 MODULE_ALIAS("bt-proto-0");