2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
44 #include <linux/crc16.h>
47 #include <asm/system.h>
48 #include <asm/unaligned.h>
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
52 #include <net/bluetooth/l2cap.h>
54 #define VERSION "2.14"
/* Module tunables: ERTM support is off by default; max_transmit bounds
 * I-frame retransmissions (L2CAP_DEFAULT_MAX_TX). */
56 static int enable_ertm = 0;
57 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
/* Advertised feature mask and fixed-channel bitmap (0x02 = signalling). */
59 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
60 static u8 l2cap_fixed_chan[8] = { 0x02, };
61 
62 static const struct proto_ops l2cap_sock_ops;
/* Global list of all L2CAP sockets, guarded by its rwlock. */
64 static struct bt_sock_list l2cap_sk_list = {
65 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for the socket-teardown paths used below. */
68 static void __l2cap_sock_close(struct sock *sk, int reason);
69 static void l2cap_sock_close(struct sock *sk);
70 static void l2cap_sock_kill(struct sock *sk);
71 
72 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
73 u8 code, u8 ident, u16 dlen, void *data);
75 /* ---- L2CAP timers ---- */
/* Per-socket timer expiry: picks an errno for the failed/stalled state and
 * tears the socket down via __l2cap_sock_close().
 * NOTE(review): dump is subsampled — locking and the default-reason branch
 * are not visible here. */
76 static void l2cap_sock_timeout(unsigned long arg)
78 struct sock *sk = (struct sock *) arg;
81 BT_DBG("sock %p state %d", sk, sk->sk_state);
85 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
86 reason = ECONNREFUSED;
87 else if (sk->sk_state == BT_CONNECT &&
88 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
89 reason = ECONNREFUSED;
93 __l2cap_sock_close(sk, reason);
/* (Re)arm the per-socket timer to fire `timeout` jiffies from now. */
101 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
103 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
104 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel the per-socket timer if pending. */
107 static void l2cap_sock_clear_timer(struct sock *sk)
109 BT_DBG("sock %p state %d", sk, sk->sk_state);
110 sk_stop_timer(sk, &sk->sk_timer);
113 /* ---- L2CAP channels ---- */
/* Linear scan of a connection's channel list by destination CID.
 * Caller must hold l->lock. */
114 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
117 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
118 if (l2cap_pi(s)->dcid == cid)
/* Linear scan by source CID. Caller must hold l->lock. */
124 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
127 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
128 if (l2cap_pi(s)->scid == cid)
134 /* Find channel with given SCID.
135 * Returns locked socket */
136 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
140 s = __l2cap_get_chan_by_scid(l, cid);
143 read_unlock(&l->lock);
/* Linear scan by pending signalling-command ident. Caller holds l->lock. */
147 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
150 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
151 if (l2cap_pi(s)->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(); returns locked socket. */
157 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
161 s = __l2cap_get_chan_by_ident(l, ident);
164 read_unlock(&l->lock);
/* Allocate the first free dynamic CID in [DYN_START, DYN_END).
 * Caller must hold the channel-list lock (uses the unlocked lookup). */
168 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
170 u16 cid = L2CAP_CID_DYN_START;
172 for (; cid < L2CAP_CID_DYN_END; cid++) {
173 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push sk onto the head of the doubly linked channel list.
 * Caller must hold l->lock for writing. */
180 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
185 l2cap_pi(l->head)->prev_c = sk;
187 l2cap_pi(sk)->next_c = l->head;
188 l2cap_pi(sk)->prev_c = NULL;
/* Remove sk from the channel list; takes the write lock itself. */
192 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
194 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
196 write_lock_bh(&l->lock);
201 l2cap_pi(next)->prev_c = prev;
203 l2cap_pi(prev)->next_c = next;
204 write_unlock_bh(&l->lock);
/* Attach a socket to a connection: assign CIDs by socket type and link it
 * into the connection's channel list. Caller holds the list write lock
 * (see l2cap_chan_add below). 0x13 = "remote user terminated connection",
 * the default HCI disconnect reason. */
209 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
211 struct l2cap_chan_list *l = &conn->chan_list;
213 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
214 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
216 conn->disc_reason = 0x13;
218 l2cap_pi(sk)->conn = conn;
220 if (sk->sk_type == SOCK_SEQPACKET) {
221 /* Alloc CID for connection-oriented socket */
222 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
223 } else if (sk->sk_type == SOCK_DGRAM) {
224 /* Connectionless socket */
225 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
226 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
227 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
229 /* Raw socket can send/recv signalling messages only */
230 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
231 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
232 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
235 __l2cap_chan_link(l, sk);
238 bt_accept_enqueue(parent, sk);
/* Detach a channel from its connection and mark the socket closed.
242 * Must be called on the locked socket. */
243 static void l2cap_chan_del(struct sock *sk, int err)
245 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
246 struct sock *parent = bt_sk(sk)->parent;
248 l2cap_sock_clear_timer(sk);
250 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
253 /* Unlink from channel list */
254 l2cap_chan_unlink(&conn->chan_list, sk);
255 l2cap_pi(sk)->conn = NULL;
256 hci_conn_put(conn->hcon);
259 sk->sk_state = BT_CLOSED;
260 sock_set_flag(sk, SOCK_ZAPPED);
/* If this was a not-yet-accepted child, drop it from the accept queue
 * and wake the listener; otherwise just notify state change. */
266 bt_accept_unlink(sk);
267 parent->sk_data_ready(parent, 0);
269 sk->sk_state_change(sk);
272 /* Service level security */
/* Map the channel's PSM/sec_level to an HCI auth type and request that
 * security level on the underlying ACL link. PSM 0x0001 is SDP: never
 * bonds, and LOW is downgraded to the special SDP level. */
273 static inline int l2cap_check_security(struct sock *sk)
275 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
278 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
279 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
280 auth_type = HCI_AT_NO_BONDING_MITM;
282 auth_type = HCI_AT_NO_BONDING;
284 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
285 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
287 switch (l2cap_pi(sk)->sec_level) {
288 case BT_SECURITY_HIGH:
289 auth_type = HCI_AT_GENERAL_BONDING_MITM;
291 case BT_SECURITY_MEDIUM:
292 auth_type = HCI_AT_GENERAL_BONDING;
295 auth_type = HCI_AT_NO_BONDING;
300 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command ident under conn->lock. */
304 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
308 /* Get next available identificator.
309 * 1 - 128 are used by kernel.
310 * 310 - 199 are reserved.
311 * 200 - 254 are used by utilities like l2ping, etc.
314 spin_lock_bh(&conn->lock);
316 if (++conn->tx_ident > 128)
321 spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command and push it out over ACL. */
326 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
328 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
330 BT_DBG("code 0x%2.2x", code);
335 return hci_send_acl(conn->hcon, skb, 0);
/* Build and send an ERTM S-frame (supervisory frame): L2CAP header +
 * 16-bit control field, plus a trailing CRC16 FCS when negotiated.
 * NOTE(review): the `hlen += 2` for the FCS case is outside the sampled
 * lines — confirm against the full source. */
338 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
341 struct l2cap_hdr *lh;
342 struct l2cap_conn *conn = pi->conn;
343 int count, hlen = L2CAP_HDR_SIZE + 2;
345 if (pi->fcs == L2CAP_FCS_CRC16)
348 BT_DBG("pi %p, control 0x%2.2x", pi, control);
350 count = min_t(unsigned int, conn->mtu, hlen);
351 control |= L2CAP_CTRL_FRAME_TYPE;
353 skb = bt_skb_alloc(count, GFP_ATOMIC);
357 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
358 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
359 lh->cid = cpu_to_le16(pi->dcid);
360 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers header + control (everything before the FCS itself). */
362 if (pi->fcs == L2CAP_FCS_CRC16) {
363 u16 fcs = crc16(0, (u8 *)lh, count - 2);
364 put_unaligned_le16(fcs, skb_put(skb, 2));
367 return hci_send_acl(pi->conn->hcon, skb, 0);
/* Send RR, or RNR when we are locally busy, with our current ReqSeq. */
370 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
372 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
373 control |= L2CAP_SUPER_RCV_NOT_READY;
375 control |= L2CAP_SUPER_RCV_READY;
377 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
379 return l2cap_send_sframe(pi, control);
/* Kick off channel establishment: send Connect Req once the feature-mask
 * exchange is done (and security passes); otherwise start the info
 * request/timer to learn the remote's features first. */
382 static void l2cap_do_start(struct sock *sk)
384 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
386 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
387 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
390 if (l2cap_check_security(sk)) {
391 struct l2cap_conn_req req;
392 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
393 req.psm = l2cap_pi(sk)->psm;
395 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
397 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
398 L2CAP_CONN_REQ, sizeof(req), &req);
401 struct l2cap_info_req req;
402 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
404 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
405 conn->info_ident = l2cap_get_ident(conn);
407 mod_timer(&conn->info_timer, jiffies +
408 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
410 l2cap_send_cmd(conn, conn->info_ident,
411 L2CAP_INFO_REQ, sizeof(req), &req);
/* Send a Disconnect Request for this channel's CID pair. */
415 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
417 struct l2cap_disconn_req req;
419 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
420 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
421 l2cap_send_cmd(conn, l2cap_get_ident(conn),
422 L2CAP_DISCONN_REQ, sizeof(req), &req);
425 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and advance its state machine:
 * outgoing channels send Connect Req, incoming (CONNECT2) channels are
 * answered with success / authorization-pending / authentication-pending. */
426 static void l2cap_conn_start(struct l2cap_conn *conn)
428 struct l2cap_chan_list *l = &conn->chan_list;
431 BT_DBG("conn %p", conn);
435 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
438 if (sk->sk_type != SOCK_SEQPACKET) {
443 if (sk->sk_state == BT_CONNECT) {
444 if (l2cap_check_security(sk)) {
445 struct l2cap_conn_req req;
446 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
447 req.psm = l2cap_pi(sk)->psm;
449 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
451 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
452 L2CAP_CONN_REQ, sizeof(req), &req);
454 } else if (sk->sk_state == BT_CONNECT2) {
455 struct l2cap_conn_rsp rsp;
456 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
457 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
459 if (l2cap_check_security(sk)) {
460 if (bt_sk(sk)->defer_setup) {
461 struct sock *parent = bt_sk(sk)->parent;
462 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
463 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
464 parent->sk_data_ready(parent, 0);
467 sk->sk_state = BT_CONFIG;
468 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
469 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
472 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
473 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
476 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
477 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
483 read_unlock(&l->lock);
/* ACL link is up: mark non-SEQPACKET channels connected immediately and
 * start connection setup for pending SEQPACKET channels. */
486 static void l2cap_conn_ready(struct l2cap_conn *conn)
488 struct l2cap_chan_list *l = &conn->chan_list;
491 BT_DBG("conn %p", conn);
495 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
498 if (sk->sk_type != SOCK_SEQPACKET) {
499 l2cap_sock_clear_timer(sk);
500 sk->sk_state = BT_CONNECTED;
501 sk->sk_state_change(sk);
502 } else if (sk->sk_state == BT_CONNECT)
508 read_unlock(&l->lock);
511 /* Notify sockets that we cannot guaranty reliability anymore */
512 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
514 struct l2cap_chan_list *l = &conn->chan_list;
517 BT_DBG("conn %p", conn);
521 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
522 if (l2cap_pi(sk)->force_reliable)
526 read_unlock(&l->lock);
/* Info-request timer expired: give up on the feature exchange and
 * proceed with whatever channels are pending. */
529 static void l2cap_info_timeout(unsigned long arg)
531 struct l2cap_conn *conn = (void *) arg;
533 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
534 conn->info_ident = 0;
536 l2cap_conn_start(conn);
/* Get-or-create the L2CAP connection object hanging off an HCI conn. */
539 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
541 struct l2cap_conn *conn = hcon->l2cap_data;
546 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
550 hcon->l2cap_data = conn;
553 BT_DBG("hcon %p conn %p", hcon, conn);
555 conn->mtu = hcon->hdev->acl_mtu;
556 conn->src = &hcon->hdev->bdaddr;
557 conn->dst = &hcon->dst;
561 spin_lock_init(&conn->lock);
562 rwlock_init(&conn->chan_list.lock);
564 setup_timer(&conn->info_timer, l2cap_info_timeout,
565 (unsigned long) conn);
567 conn->disc_reason = 0x13;
/* Tear down the connection: free any partial reassembly skb, kill every
 * channel with `err`, stop the info timer, detach from the HCI conn. */
572 static void l2cap_conn_del(struct hci_conn *hcon, int err)
574 struct l2cap_conn *conn = hcon->l2cap_data;
580 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
582 kfree_skb(conn->rx_skb);
585 while ((sk = conn->chan_list.head)) {
587 l2cap_chan_del(sk, err);
592 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
593 del_timer_sync(&conn->info_timer);
595 hcon->l2cap_data = NULL;
/* Locked wrapper: add a channel under the chan_list write lock. */
599 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
601 struct l2cap_chan_list *l = &conn->chan_list;
602 write_lock_bh(&l->lock);
603 __l2cap_chan_add(conn, sk, parent);
604 write_unlock_bh(&l->lock);
607 /* ---- Socket interface ---- */
/* Exact-match lookup by (source port/PSM, source bdaddr).
 * Caller must hold l2cap_sk_list.lock. */
608 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
611 struct hlist_node *node;
612 sk_for_each(sk, node, &l2cap_sk_list.head)
613 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
620 /* Find socket with psm and source bdaddr.
621 * Returns closest match. */
/* Exact source-address match wins; a BDADDR_ANY listener (sk1) is the
 * fallback. Caller must hold l2cap_sk_list.lock. */
623 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
625 struct sock *sk = NULL, *sk1 = NULL;
626 struct hlist_node *node;
628 sk_for_each(sk, node, &l2cap_sk_list.head) {
629 if (state && sk->sk_state != state)
632 if (l2cap_pi(sk)->psm == psm) {
634 if (!bacmp(&bt_sk(sk)->src, src))
638 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
642 return node ? sk : sk1;
645 /* Find socket with given address (psm, src).
646 * Returns locked socket */
647 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
650 read_lock(&l2cap_sk_list.lock);
651 s = __l2cap_get_sock_by_psm(state, psm, src);
654 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct hook: drop any queued rx/tx skbs before the sock is freed. */
658 static void l2cap_sock_destruct(struct sock *sk)
662 skb_queue_purge(&sk->sk_receive_queue);
663 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then zap
 * the listener itself. */
666 static void l2cap_sock_cleanup_listen(struct sock *parent)
670 BT_DBG("parent %p", parent);
672 /* Close not yet accepted channels */
673 while ((sk = bt_accept_dequeue(parent, NULL)))
674 l2cap_sock_close(sk);
676 parent->sk_state = BT_CLOSED;
677 sock_set_flag(parent, SOCK_ZAPPED);
680 /* Kill socket (only if zapped and orphan)
681 * Must be called on unlocked socket. */
683 static void l2cap_sock_kill(struct sock *sk)
685 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
688 BT_DBG("sk %p state %d", sk, sk->sk_state);
690 /* Kill poor orphan */
691 bt_sock_unlink(&l2cap_sk_list, sk);
692 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close: listeners clean their accept queue; connected
 * SEQPACKET channels send Disconnect Req and wait in BT_DISCONN;
 * half-open incoming channels are refused with an appropriate Connect
 * Rsp result; everything else is deleted outright, then zapped. */
696 static void __l2cap_sock_close(struct sock *sk, int reason)
698 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
700 switch (sk->sk_state) {
702 l2cap_sock_cleanup_listen(sk);
707 if (sk->sk_type == SOCK_SEQPACKET) {
708 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
710 sk->sk_state = BT_DISCONN;
711 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
712 l2cap_send_disconn_req(conn, sk);
714 l2cap_chan_del(sk, reason);
718 if (sk->sk_type == SOCK_SEQPACKET) {
719 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
720 struct l2cap_conn_rsp rsp;
723 if (bt_sk(sk)->defer_setup)
724 result = L2CAP_CR_SEC_BLOCK;
726 result = L2CAP_CR_BAD_PSM;
728 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
729 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
730 rsp.result = cpu_to_le16(result);
731 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
732 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
733 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
735 l2cap_chan_del(sk, reason);
740 l2cap_chan_del(sk, reason);
744 sock_set_flag(sk, SOCK_ZAPPED);
749 /* Must be called on unlocked socket. */
750 static void l2cap_sock_close(struct sock *sk)
752 l2cap_sock_clear_timer(sk);
754 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize a new L2CAP socket. Children inherit their settings from
 * the listening parent; fresh sockets get the module defaults. */
759 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
761 struct l2cap_pinfo *pi = l2cap_pi(sk);
766 sk->sk_type = parent->sk_type;
767 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
769 pi->imtu = l2cap_pi(parent)->imtu;
770 pi->omtu = l2cap_pi(parent)->omtu;
771 pi->mode = l2cap_pi(parent)->mode;
772 pi->fcs = l2cap_pi(parent)->fcs;
773 pi->sec_level = l2cap_pi(parent)->sec_level;
774 pi->role_switch = l2cap_pi(parent)->role_switch;
775 pi->force_reliable = l2cap_pi(parent)->force_reliable;
777 pi->imtu = L2CAP_DEFAULT_MTU;
779 pi->mode = L2CAP_MODE_BASIC;
780 pi->fcs = L2CAP_FCS_CRC16;
781 pi->sec_level = BT_SECURITY_LOW;
783 pi->force_reliable = 0;
786 /* Default config options */
788 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* ERTM bookkeeping: transmit queue, SREJ queue and SREJ list. */
789 skb_queue_head_init(TX_QUEUE(sk));
790 skb_queue_head_init(SREJ_QUEUE(sk));
791 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor used by sk_alloc for L2CAP sockets. */
794 static struct proto l2cap_proto = {
796 .owner = THIS_MODULE,
797 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialize a new L2CAP sock, hook up the
 * destructor/timeout callbacks and link it into the global list. */
800 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
804 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
808 sock_init_data(sock, sk);
809 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
811 sk->sk_destruct = l2cap_sock_destruct;
812 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
814 sock_reset_flag(sk, SOCK_ZAPPED);
816 sk->sk_protocol = proto;
817 sk->sk_state = BT_OPEN;
819 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
821 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend. The #if selects the post-2.6.32 signature that
 * carries the `kern` flag; SOCK_RAW needs CAP_NET_RAW. */
825 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32))
826 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
829 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
834 BT_DBG("sock %p", sock);
836 sock->state = SS_UNCONNECTED;
838 if (sock->type != SOCK_SEQPACKET &&
839 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
840 return -ESOCKTNOSUPPORT;
842 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32))
843 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
845 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
849 sock->ops = &l2cap_sock_ops;
851 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
855 l2cap_sock_init(sk, NULL);
/* bind(2): copy in the (possibly short) sockaddr_l2, check privileges
 * for reserved PSMs (< 0x1001), reject duplicate (psm, src) bindings,
 * then record source address and PSM. PSMs 0x0001 (SDP) and 0x0003
 * (RFCOMM) get the SDP security level. */
859 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
861 struct sock *sk = sock->sk;
862 struct sockaddr_l2 la;
867 if (!addr || addr->sa_family != AF_BLUETOOTH)
870 memset(&la, 0, sizeof(la));
871 len = min_t(unsigned int, sizeof(la), alen);
872 memcpy(&la, addr, len);
879 if (sk->sk_state != BT_OPEN) {
884 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
885 !capable(CAP_NET_BIND_SERVICE)) {
890 write_lock_bh(&l2cap_sk_list.lock);
892 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
895 /* Save source address */
896 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
897 l2cap_pi(sk)->psm = la.l2_psm;
898 l2cap_pi(sk)->sport = la.l2_psm;
899 sk->sk_state = BT_BOUND;
901 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
902 __le16_to_cpu(la.l2_psm) == 0x0003)
903 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
906 write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve the route, pick an auth type from socket type / PSM /
 * sec_level (mirrors l2cap_check_security), create the ACL link and
 * attach this channel to it. If the ACL is already up, non-SEQPACKET
 * sockets become connected immediately. */
913 static int l2cap_do_connect(struct sock *sk)
915 bdaddr_t *src = &bt_sk(sk)->src;
916 bdaddr_t *dst = &bt_sk(sk)->dst;
917 struct l2cap_conn *conn;
918 struct hci_conn *hcon;
919 struct hci_dev *hdev;
923 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
926 hdev = hci_get_route(dst, src);
928 return -EHOSTUNREACH;
930 hci_dev_lock_bh(hdev);
934 if (sk->sk_type == SOCK_RAW) {
935 switch (l2cap_pi(sk)->sec_level) {
936 case BT_SECURITY_HIGH:
937 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
939 case BT_SECURITY_MEDIUM:
940 auth_type = HCI_AT_DEDICATED_BONDING;
943 auth_type = HCI_AT_NO_BONDING;
946 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
947 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
948 auth_type = HCI_AT_NO_BONDING_MITM;
950 auth_type = HCI_AT_NO_BONDING;
952 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
953 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
955 switch (l2cap_pi(sk)->sec_level) {
956 case BT_SECURITY_HIGH:
957 auth_type = HCI_AT_GENERAL_BONDING_MITM;
959 case BT_SECURITY_MEDIUM:
960 auth_type = HCI_AT_GENERAL_BONDING;
963 auth_type = HCI_AT_NO_BONDING;
968 hcon = hci_connect(hdev, ACL_LINK, dst,
969 l2cap_pi(sk)->sec_level, auth_type);
973 conn = l2cap_conn_add(hcon, 0);
981 /* Update source addr of the socket */
982 bacpy(src, conn->src);
984 l2cap_chan_add(conn, sk, NULL);
986 sk->sk_state = BT_CONNECT;
987 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
989 if (hcon->state == BT_CONNECTED) {
990 if (sk->sk_type != SOCK_SEQPACKET) {
991 l2cap_sock_clear_timer(sk);
992 sk->sk_state = BT_CONNECTED;
998 hci_dev_unlock_bh(hdev);
/* connect(2): validate address and mode (ERTM/streaming only when the
 * module allows it — gating lines not visible in this dump), set
 * destination, kick l2cap_do_connect() and wait for BT_CONNECTED. */
1003 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1005 struct sock *sk = sock->sk;
1006 struct sockaddr_l2 la;
1009 BT_DBG("sk %p", sk);
1011 if (!addr || addr->sa_family != AF_BLUETOOTH)
1014 memset(&la, 0, sizeof(la));
1015 len = min_t(unsigned int, sizeof(la), alen);
1016 memcpy(&la, addr, len);
1023 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
1028 switch (l2cap_pi(sk)->mode) {
1029 case L2CAP_MODE_BASIC:
1031 case L2CAP_MODE_ERTM:
1032 case L2CAP_MODE_STREAMING:
1041 switch (sk->sk_state) {
1045 /* Already connecting */
1049 /* Already connected */
1062 /* Set destination address and psm */
1063 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1064 l2cap_pi(sk)->psm = la.l2_psm;
1066 err = l2cap_do_connect(sk);
1071 err = bt_sock_wait_state(sk, BT_CONNECTED,
1072 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): SEQPACKET+BT_BOUND only; auto-assigns an odd dynamic PSM
 * in [0x1001, 0x1100) if none was bound. */
1078 static int l2cap_sock_listen(struct socket *sock, int backlog)
1080 struct sock *sk = sock->sk;
1083 BT_DBG("sk %p backlog %d", sk, backlog);
1087 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1092 switch (l2cap_pi(sk)->mode) {
1093 case L2CAP_MODE_BASIC:
1095 case L2CAP_MODE_ERTM:
1096 case L2CAP_MODE_STREAMING:
1105 if (!l2cap_pi(sk)->psm) {
1106 bdaddr_t *src = &bt_sk(sk)->src;
1111 write_lock_bh(&l2cap_sk_list.lock);
/* Valid dynamic PSMs are odd (LSB of low byte set, LSB of high byte
 * clear), hence the += 2 stride. */
1113 for (psm = 0x1001; psm < 0x1100; psm += 2)
1114 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1115 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1116 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1121 write_unlock_bh(&l2cap_sk_list.lock);
1127 sk->sk_max_ack_backlog = backlog;
1128 sk->sk_ack_backlog = 0;
1129 sk->sk_state = BT_LISTEN;
/* accept(2): classic exclusive-wait loop on the accept queue, dropping
 * the socket lock across schedule_timeout(). */
1136 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1138 DECLARE_WAITQUEUE(wait, current);
1139 struct sock *sk = sock->sk, *nsk;
1143 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1145 if (sk->sk_state != BT_LISTEN) {
1150 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1152 BT_DBG("sk %p timeo %ld", sk, timeo);
1154 /* Wait for an incoming connection. (wake-one). */
1155 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1156 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1157 set_current_state(TASK_INTERRUPTIBLE);
1164 timeo = schedule_timeout(timeo);
1165 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1167 if (sk->sk_state != BT_LISTEN) {
1172 if (signal_pending(current)) {
1173 err = sock_intr_errno(timeo);
1177 set_current_state(TASK_RUNNING);
1178 remove_wait_queue(sk->sk_sleep, &wait);
1183 newsock->state = SS_CONNECTED;
1185 BT_DBG("new socket %p", nsk);
/* getname(2): report peer (psm/dst/dcid) or local (sport/src/scid). */
1192 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1194 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1195 struct sock *sk = sock->sk;
1197 BT_DBG("sock %p, sk %p", sock, sk);
1199 addr->sa_family = AF_BLUETOOTH;
1200 *len = sizeof(struct sockaddr_l2);
1203 la->l2_psm = l2cap_pi(sk)->psm;
1204 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1205 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1207 la->l2_psm = l2cap_pi(sk)->sport;
1208 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1209 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* ERTM monitor timer: no response to our poll — disconnect once
 * retry_count reaches the remote's MaxTx, otherwise poll again (RR/RNR
 * with the P bit) and re-arm. */
1215 static void l2cap_monitor_timeout(unsigned long arg)
1217 struct sock *sk = (void *) arg;
1221 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1222 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1227 l2cap_pi(sk)->retry_count++;
1228 __mod_monitor_timer();
1230 control = L2CAP_CTRL_POLL;
1231 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* ERTM retransmission timer: start the poll/monitor cycle and flag that
 * we are waiting for an F-bit response. */
1235 static void l2cap_retrans_timeout(unsigned long arg)
1237 struct sock *sk = (void *) arg;
1241 l2cap_pi(sk)->retry_count = 1;
1242 __mod_monitor_timer();
1244 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1246 control = L2CAP_CTRL_POLL;
1247 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* Drop frames from the tx queue that the peer has acked (everything up
 * to expected_ack_seq); stop the retrans timer when none remain. */
1251 static void l2cap_drop_acked_frames(struct sock *sk)
1253 struct sk_buff *skb;
1255 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1256 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1259 skb = skb_dequeue(TX_QUEUE(sk));
1262 l2cap_pi(sk)->unacked_frames--;
1265 if (!l2cap_pi(sk)->unacked_frames)
1266 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Push one skb to the controller over ACL. */
1271 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1273 struct l2cap_pinfo *pi = l2cap_pi(sk);
1276 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1278 err = hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode: transmit each queued frame once (clone per send),
 * stamping TxSeq and FCS into the clone; frames are dequeued and freed,
 * never retransmitted. */
1285 static int l2cap_streaming_send(struct sock *sk)
1287 struct sk_buff *skb, *tx_skb;
1288 struct l2cap_pinfo *pi = l2cap_pi(sk);
1292 while ((skb = sk->sk_send_head)) {
1293 tx_skb = skb_clone(skb, GFP_ATOMIC);
1295 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1296 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1297 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1299 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1300 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1301 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1304 err = l2cap_do_send(sk, tx_skb);
1306 l2cap_send_disconn_req(pi->conn, sk);
/* TxSeq is modulo-64 per the ERTM control-field format. */
1310 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1312 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1313 sk->sk_send_head = NULL;
1315 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1317 skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the queued I-frame with the given TxSeq (SREJ/REJ path);
 * disconnect if the frame already hit the remote's MaxTx limit. */
1323 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1325 struct l2cap_pinfo *pi = l2cap_pi(sk);
1326 struct sk_buff *skb, *tx_skb;
1330 skb = skb_peek(TX_QUEUE(sk));
1332 if (bt_cb(skb)->tx_seq != tx_seq) {
1333 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1335 skb = skb_queue_next(TX_QUEUE(sk), skb);
1339 if (pi->remote_max_tx &&
1340 bt_cb(skb)->retries == pi->remote_max_tx) {
1341 l2cap_send_disconn_req(pi->conn, sk);
1345 tx_skb = skb_clone(skb, GFP_ATOMIC);
1346 bt_cb(skb)->retries++;
1347 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1348 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1349 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1350 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1352 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1353 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1354 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1357 err = l2cap_do_send(sk, tx_skb);
1359 l2cap_send_disconn_req(pi->conn, sk);
/* ERTM transmit path: while there is a frame to send, the tx window is
 * not full and the remote is not busy, clone the queued I-frame, stamp
 * ReqSeq/TxSeq and FCS into the clone, send it and keep the original
 * queued for possible retransmission.
 *
 * Fix: the FCS was computed from and written through skb->data (the
 * queued original) instead of tx_skb->data (the clone being sent).
 * All sibling paths (l2cap_streaming_send, l2cap_retransmit_frame)
 * operate on the clone; use tx_skb here as well so the checksum covers
 * exactly the buffer that is stamped and transmitted. */
1367 static int l2cap_ertm_send(struct sock *sk)
1369 struct sk_buff *skb, *tx_skb;
1370 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* If we are waiting for an F-bit response, transmission is suspended. */
1374 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1377 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1378 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
/* Remote MaxTx exhausted for this frame: give up on the channel. */
1380 if (pi->remote_max_tx &&
1381 bt_cb(skb)->retries == pi->remote_max_tx) {
1382 l2cap_send_disconn_req(pi->conn, sk);
1386 tx_skb = skb_clone(skb, GFP_ATOMIC);
1388 bt_cb(skb)->retries++;
1390 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1391 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1392 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1393 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1396 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1397 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1398 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1401 err = l2cap_do_send(sk, tx_skb);
1403 l2cap_send_disconn_req(pi->conn, sk);
1406 __mod_retrans_timer();
/* TxSeq is modulo-64 per the ERTM control-field format. */
1408 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1409 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1411 pi->unacked_frames++;
1413 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1414 sk->sk_send_head = NULL;
1416 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Copy user iovec data into skb: `count` bytes into the head, the
 * remainder into MTU-sized continuation fragments on the frag_list. */
1422 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1424 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1425 struct sk_buff **frag;
1428 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1435 /* Continuation fragments (no L2CAP header) */
1436 frag = &skb_shinfo(skb)->frag_list;
1438 count = min_t(unsigned int, conn->mtu, len);
1440 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1443 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1449 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic header + 16-bit PSM
 * prefix (hence hlen = header + 2), then the payload. */
1455 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1457 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1458 struct sk_buff *skb;
1459 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1460 struct l2cap_hdr *lh;
1462 BT_DBG("sk %p len %d", sk, (int)len);
1464 count = min_t(unsigned int, (conn->mtu - hlen), len);
1465 skb = bt_skb_send_alloc(sk, count + hlen,
1466 msg->msg_flags & MSG_DONTWAIT, &err);
1468 return ERR_PTR(-ENOMEM);
1470 /* Create L2CAP header */
1471 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1472 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1473 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1474 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1476 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1477 if (unlikely(err < 0)) {
1479 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header + payload. */
1484 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1486 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1487 struct sk_buff *skb;
1488 int err, count, hlen = L2CAP_HDR_SIZE;
1489 struct l2cap_hdr *lh;
1491 BT_DBG("sk %p len %d", sk, (int)len);
1493 count = min_t(unsigned int, (conn->mtu - hlen), len);
1494 skb = bt_skb_send_alloc(sk, count + hlen,
1495 msg->msg_flags & MSG_DONTWAIT, &err);
1497 return ERR_PTR(-ENOMEM);
1499 /* Create L2CAP header */
1500 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1501 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1502 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1504 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1505 if (unlikely(err < 0)) {
1507 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: header + control field, an
 * optional SDU-length field (SAR start frame, sdulen != 0) and a
 * zero placeholder FCS — the real FCS is stamped at transmit time. */
1512 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1514 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1515 struct sk_buff *skb;
1516 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1517 struct l2cap_hdr *lh;
1519 BT_DBG("sk %p len %d", sk, (int)len);
1524 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1527 count = min_t(unsigned int, (conn->mtu - hlen), len);
1528 skb = bt_skb_send_alloc(sk, count + hlen,
1529 msg->msg_flags & MSG_DONTWAIT, &err);
1531 return ERR_PTR(-ENOMEM);
1533 /* Create L2CAP header */
1534 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1535 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1536 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1537 put_unaligned_le16(control, skb_put(skb, 2));
1539 put_unaligned_le16(sdulen, skb_put(skb, 2));
1541 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1542 if (unlikely(err < 0)) {
1544 return ERR_PTR(err);
1547 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1548 put_unaligned_le16(0, skb_put(skb, 2));
1550 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than max_pdu_size into a START frame (carrying
 * the total SDU length), CONTINUE frames and a final END frame, built
 * on a local queue and spliced onto the tx queue atomically. */
1554 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1556 struct l2cap_pinfo *pi = l2cap_pi(sk);
1557 struct sk_buff *skb;
1558 struct sk_buff_head sar_queue;
1562 __skb_queue_head_init(&sar_queue);
1563 control = L2CAP_SDU_START;
1564 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
1566 return PTR_ERR(skb);
1568 __skb_queue_tail(&sar_queue, skb);
1569 len -= pi->max_pdu_size;
1570 size +=pi->max_pdu_size;
1576 if (len > pi->max_pdu_size) {
1577 control |= L2CAP_SDU_CONTINUE;
1578 buflen = pi->max_pdu_size;
1580 control |= L2CAP_SDU_END;
/* Failure mid-SDU: drop every segment built so far. */
1584 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1586 skb_queue_purge(&sar_queue);
1587 return PTR_ERR(skb);
1590 __skb_queue_tail(&sar_queue, skb);
1595 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1596 if (sk->sk_send_head == NULL)
1597 sk->sk_send_head = sar_queue.next;
/*
 * sendmsg() entry point for L2CAP sockets. Rejects MSG_OOB, enforces
 * the outgoing MTU for basic mode, then dispatches on channel mode:
 * SOCK_DGRAM -> connectionless PDU; basic mode -> single PDU;
 * ERTM/streaming -> single I-frame or SAR segmentation, followed by a
 * mode-specific transmit pass.
 * NOTE(review): truncated — lock_sock/release_sock, several error
 * branches (IS_ERR checks, -EOPNOTSUPP, -ENOTCONN, -EMSGSIZE), the
 * switch header on pi->mode and "return err;" are not visible.
 */
1602 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1604 struct sock *sk = sock->sk;
1605 struct l2cap_pinfo *pi = l2cap_pi(sk);
1606 struct sk_buff *skb;
1610 BT_DBG("sock %p, sk %p", sock, sk);
1612 err = sock_error(sk);
1616 if (msg->msg_flags & MSG_OOB)
1619 /* Check outgoing MTU */
1620 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC &&
1626 if (sk->sk_state != BT_CONNECTED) {
1631 /* Connectionless channel */
1632 if (sk->sk_type == SOCK_DGRAM) {
1633 skb = l2cap_create_connless_pdu(sk, msg, len);
1634 err = l2cap_do_send(sk, skb);
1639 case L2CAP_MODE_BASIC:
1640 /* Create a basic PDU */
1641 skb = l2cap_create_basic_pdu(sk, msg, len);
1647 err = l2cap_do_send(sk, skb);
1652 case L2CAP_MODE_ERTM:
1653 case L2CAP_MODE_STREAMING:
1654 /* Entire SDU fits into one PDU */
1655 if (len <= pi->max_pdu_size) {
1656 control = L2CAP_SDU_UNSEGMENTED;
1657 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1662 __skb_queue_tail(TX_QUEUE(sk), skb);
1663 if (sk->sk_send_head == NULL)
1664 sk->sk_send_head = skb;
1666 /* Segment SDU into multiples PDUs */
1667 err = l2cap_sar_segment_sdu(sk, msg, len);
/* Frames are queued above; kick the mode-specific transmitter. */
1672 if (pi->mode == L2CAP_MODE_STREAMING)
1673 err = l2cap_streaming_send(sk);
1675 err = l2cap_ertm_send(sk);
1682 BT_DBG("bad state %1.1x", pi->mode);
/*
 * recvmsg() entry point. If the socket is a deferred-setup incoming
 * connection (BT_CONNECT2), the first read accepts it: send the pending
 * CONN_RSP success and move to BT_CONFIG. Then fall through to the
 * generic bt_sock_recvmsg().
 * NOTE(review): truncated — lock_sock/release_sock around the state
 * transition are not visible here.
 */
1691 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1693 struct sock *sk = sock->sk;
1697 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1698 struct l2cap_conn_rsp rsp;
1700 sk->sk_state = BT_CONFIG;
/* In the response our dcid/scid swap roles: peer's source is our dest. */
1702 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1703 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1704 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1705 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1706 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1707 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1715 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/*
 * Legacy SOL_L2CAP setsockopt handler: L2CAP_OPTIONS (mtu/mode/fcs,
 * seeded with current values so a short copy keeps defaults) and
 * L2CAP_LM link-mode flags mapped onto sec_level/role_switch/
 * force_reliable.
 * NOTE(review): truncated — the switch(optname) header, lock_sock/
 * release_sock, -EFAULT assignments in the copy error paths, and the
 * default/-ENOPROTOOPT branch are not visible.
 */
1718 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1720 struct sock *sk = sock->sk;
1721 struct l2cap_options opts;
1725 BT_DBG("sk %p", sk);
/* Pre-fill with current settings so fields the caller does not supply
 * (short optlen) are left unchanged. */
1731 opts.imtu = l2cap_pi(sk)->imtu;
1732 opts.omtu = l2cap_pi(sk)->omtu;
1733 opts.flush_to = l2cap_pi(sk)->flush_to;
1734 opts.mode = l2cap_pi(sk)->mode;
1735 opts.fcs = l2cap_pi(sk)->fcs;
1737 len = min_t(unsigned int, sizeof(opts), optlen);
1738 if (copy_from_user((char *) &opts, optval, len)) {
1743 l2cap_pi(sk)->imtu = opts.imtu;
1744 l2cap_pi(sk)->omtu = opts.omtu;
1745 l2cap_pi(sk)->mode = opts.mode;
1746 l2cap_pi(sk)->fcs = opts.fcs;
1750 if (get_user(opt, (u32 __user *) optval)) {
/* Map legacy link-mode bits to the security-level model; the highest
 * requested level wins (checks run LOW -> MEDIUM -> HIGH). */
1755 if (opt & L2CAP_LM_AUTH)
1756 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1757 if (opt & L2CAP_LM_ENCRYPT)
1758 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1759 if (opt & L2CAP_LM_SECURE)
1760 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1762 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1763 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/*
 * SOL_BLUETOOTH setsockopt handler (BT_SECURITY, BT_DEFER_SETUP);
 * SOL_L2CAP is forwarded to the legacy handler above. The #if picks
 * the prototype matching the kernel's optlen type change in 2.6.32.
 * NOTE(review): truncated — #else/#endif around the two prototypes,
 * the switch(optname) header, lock_sock/release_sock, and several
 * -EINVAL/-EFAULT error branches are not visible.
 */
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31))
1776 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1778 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1781 struct sock *sk = sock->sk;
1782 struct bt_security sec;
1786 BT_DBG("sk %p", sk);
1788 if (level == SOL_L2CAP)
1789 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1791 if (level != SOL_BLUETOOTH)
1792 return -ENOPROTOOPT;
/* BT_SECURITY only applies to connection-oriented and raw sockets. */
1798 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1803 sec.level = BT_SECURITY_LOW;
1805 len = min_t(unsigned int, sizeof(sec), optlen);
1806 if (copy_from_user((char *) &sec, optval, len)) {
1811 if (sec.level < BT_SECURITY_LOW ||
1812 sec.level > BT_SECURITY_HIGH) {
1817 l2cap_pi(sk)->sec_level = sec.level;
1820 case BT_DEFER_SETUP:
/* Defer-setup may only be toggled before the channel is connected. */
1821 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1826 if (get_user(opt, (u32 __user *) optval)) {
1831 bt_sk(sk)->defer_setup = opt;
/*
 * Legacy SOL_L2CAP getsockopt handler: L2CAP_OPTIONS, L2CAP_LM
 * (sec_level mapped back to legacy flag bits) and L2CAP_CONNINFO
 * (HCI handle + device class, valid once connected or in deferred
 * setup).
 * NOTE(review): truncated — the switch(optname) header, lock_sock/
 * release_sock, break statements and -EFAULT/-ENOTCONN error paths
 * are not visible; the BT_SECURITY_MEDIUM->HIGH cases appear to rely
 * on fallthrough in the original.
 */
1843 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1845 struct sock *sk = sock->sk;
1846 struct l2cap_options opts;
1847 struct l2cap_conninfo cinfo;
1851 BT_DBG("sk %p", sk);
1853 if (get_user(len, optlen))
1860 opts.imtu = l2cap_pi(sk)->imtu;
1861 opts.omtu = l2cap_pi(sk)->omtu;
1862 opts.flush_to = l2cap_pi(sk)->flush_to;
1863 opts.mode = l2cap_pi(sk)->mode;
1864 opts.fcs = l2cap_pi(sk)->fcs;
1866 len = min_t(unsigned int, len, sizeof(opts));
1867 if (copy_to_user(optval, (char *) &opts, len))
/* Translate sec_level back into the legacy L2CAP_LM bitmask. */
1873 switch (l2cap_pi(sk)->sec_level) {
1874 case BT_SECURITY_LOW:
1875 opt = L2CAP_LM_AUTH;
1877 case BT_SECURITY_MEDIUM:
1878 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1880 case BT_SECURITY_HIGH:
1881 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1889 if (l2cap_pi(sk)->role_switch)
1890 opt |= L2CAP_LM_MASTER;
1892 if (l2cap_pi(sk)->force_reliable)
1893 opt |= L2CAP_LM_RELIABLE;
1895 if (put_user(opt, (u32 __user *) optval))
1899 case L2CAP_CONNINFO:
/* Connection info exists only while connected, or for a deferred
 * incoming connection awaiting accept. */
1900 if (sk->sk_state != BT_CONNECTED &&
1901 !(sk->sk_state == BT_CONNECT2 &&
1902 bt_sk(sk)->defer_setup)) {
1907 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1908 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1910 len = min_t(unsigned int, len, sizeof(cinfo));
1911 if (copy_to_user(optval, (char *) &cinfo, len))
/*
 * SOL_BLUETOOTH getsockopt handler (BT_SECURITY, BT_DEFER_SETUP);
 * SOL_L2CAP is forwarded to the legacy handler above.
 * NOTE(review): truncated — the switch(optname) header, lock_sock/
 * release_sock and the -EINVAL/-EFAULT branches are not visible.
 */
1925 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1927 struct sock *sk = sock->sk;
1928 struct bt_security sec;
1931 BT_DBG("sk %p", sk);
1933 if (level == SOL_L2CAP)
1934 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1936 if (level != SOL_BLUETOOTH)
1937 return -ENOPROTOOPT;
1939 if (get_user(len, optlen))
1946 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1951 sec.level = l2cap_pi(sk)->sec_level;
1953 len = min_t(unsigned int, len, sizeof(sec));
1954 if (copy_to_user(optval, (char *) &sec, len))
1959 case BT_DEFER_SETUP:
1960 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1965 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/*
 * shutdown() for L2CAP sockets: mark both directions shut, cancel the
 * channel timer, start channel teardown, and honour SO_LINGER by
 * waiting (up to sk_lingertime) for BT_CLOSED.
 * NOTE(review): truncated — the err declaration, lock_sock/release_sock
 * and the final "return err;" are not visible.
 */
1979 static int l2cap_sock_shutdown(struct socket *sock, int how)
1981 struct sock *sk = sock->sk;
1984 BT_DBG("sock %p, sk %p", sock, sk);
/* Only run teardown once; later calls just fall through to lingering. */
1990 if (!sk->sk_shutdown) {
1991 sk->sk_shutdown = SHUTDOWN_MASK;
1992 l2cap_sock_clear_timer(sk);
1993 __l2cap_sock_close(sk, 0);
1995 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1996 err = bt_sock_wait_state(sk, BT_CLOSED,
/*
 * release() (close) for L2CAP sockets: full shutdown then kill the
 * socket.
 * NOTE(review): truncated — the NULL-sk guard, err declaration and
 * "return err;" are not visible.
 */
2003 static int l2cap_sock_release(struct socket *sock)
2005 struct sock *sk = sock->sk;
2008 BT_DBG("sock %p, sk %p", sock, sk);
2013 err = l2cap_sock_shutdown(sock, 2);
2016 l2cap_sock_kill(sk);
/*
 * Channel configuration is complete: clear config state and the timer,
 * then wake whoever is waiting — the connect()er for outgoing channels
 * (no parent) or the accept()er for incoming ones.
 * NOTE(review): truncated — the "if (!parent) { ... } else { ... }"
 * structure around the two wakeup paths is not visible here.
 */
2020 static void l2cap_chan_ready(struct sock *sk)
2022 struct sock *parent = bt_sk(sk)->parent;
2024 BT_DBG("sk %p, parent %p", sk, parent);
2026 l2cap_pi(sk)->conf_state = 0;
2027 l2cap_sock_clear_timer(sk);
2030 /* Outgoing channel.
2031 * Wake up socket sleeping on connect.
2033 sk->sk_state = BT_CONNECTED;
2034 sk->sk_state_change(sk);
2036 /* Incoming channel.
2037 * Wake up socket sleeping on accept.
2039 parent->sk_data_ready(parent, 0);
2043 /* Copy frame to all raw sockets on that connection */
/*
 * Walk the connection's channel list under its read lock and clone the
 * skb into every SOCK_RAW socket's receive queue (sniffer support),
 * skipping the socket the frame originated from.
 * NOTE(review): truncated — the sk declaration, the "continue" after
 * the SOCK_RAW test, the originator-skip test, the !nskb check and the
 * kfree_skb on queue failure are not visible.
 */
2044 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2046 struct l2cap_chan_list *l = &conn->chan_list;
2047 struct sk_buff *nskb;
2050 BT_DBG("conn %p", conn);
2052 read_lock(&l->lock);
2053 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2054 if (sk->sk_type != SOCK_RAW)
2057 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: we are under a read lock, possibly in softirq context. */
2060 nskb = skb_clone(skb, GFP_ATOMIC);
2064 if (sock_queue_rcv_skb(sk, nskb))
2067 read_unlock(&l->lock);
2070 /* ---- L2CAP signalling commands ---- */
/*
 * Build a signalling-channel PDU: L2CAP header (CID 0x0001) + command
 * header + @dlen bytes of @data, fragmenting the payload across
 * frag_list skbs when it exceeds the connection MTU.
 * Returns the skb, or NULL on allocation failure (partially built
 * chains are freed in the error path not visible here).
 * NOTE(review): truncated — len/count declarations, cmd->code/ident
 * assignments, the "if (dlen)" guard, the fragmentation while-loop
 * header, the fail: cleanup label and "return skb;" are not visible.
 */
2071 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2072 u8 code, u8 ident, u16 dlen, void *data)
2074 struct sk_buff *skb, **frag;
2075 struct l2cap_cmd_hdr *cmd;
2076 struct l2cap_hdr *lh;
2079 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2080 conn, code, ident, dlen);
2082 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2083 count = min_t(unsigned int, conn->mtu, len);
2085 skb = bt_skb_alloc(count, GFP_ATOMIC);
2089 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2090 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2091 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2093 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2096 cmd->len = cpu_to_le16(dlen);
/* First fragment: whatever payload fits after the two headers. */
2099 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2100 memcpy(skb_put(skb, count), data, count);
2106 /* Continuation fragments (no L2CAP header) */
2107 frag = &skb_shinfo(skb)->frag_list;
2109 count = min_t(unsigned int, conn->mtu, len);
2111 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2115 memcpy(skb_put(*frag, count), data, count);
2120 frag = &(*frag)->next;
/*
 * Decode one configuration option at *ptr: return its total encoded
 * length (header + value), write the option type/length to *type/*olen
 * and its value — widened per size, or as a pointer for variable-length
 * options — to *val.
 * NOTE(review): truncated — the *type/*olen assignments, the
 * switch(opt->len) header, break statements, the *ptr advance and
 * "return len;" are not visible.
 */
2130 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2132 struct l2cap_conf_opt *opt = *ptr;
2135 len = L2CAP_CONF_OPT_SIZE + opt->len;
2143 *val = *((u8 *) opt->val);
2147 *val = __le16_to_cpu(*((__le16 *) opt->val));
2151 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Larger options are returned by reference rather than by value. */
2155 *val = (unsigned long) opt->val;
2159 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option (type, len, value) at *ptr, encoding
 * the value little-endian by size or memcpy'ing variable-length data,
 * then advance *ptr past the option.
 * NOTE(review): truncated — the opt->type/opt->len assignments, the
 * switch(len) header and break statements are not visible.
 */
2163 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2165 struct l2cap_conf_opt *opt = *ptr;
2167 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2174 *((u8 *) opt->val) = val;
2178 *((__le16 *) opt->val) = cpu_to_le16(val);
2182 *((__le32 *) opt->val) = cpu_to_le32(val);
/* Variable-length options pass a pointer through @val. */
2186 memcpy(opt->val, (void *) val, len);
2190 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * Reset per-channel ERTM state when a channel enters connected state:
 * zero the sequence/ack counters, arm the retransmission and monitor
 * timers (not started here), and init the SREJ reordering queue.
 */
2193 static inline void l2cap_ertm_init(struct sock *sk)
2195 l2cap_pi(sk)->expected_ack_seq = 0;
2196 l2cap_pi(sk)->unacked_frames = 0;
2197 l2cap_pi(sk)->buffer_seq = 0;
2198 l2cap_pi(sk)->num_to_ack = 0;
2200 setup_timer(&l2cap_pi(sk)->retrans_timer,
2201 l2cap_retrans_timeout, (unsigned long) sk);
2202 setup_timer(&l2cap_pi(sk)->monitor_timer,
2203 l2cap_monitor_timeout, (unsigned long) sk);
2205 __skb_queue_head_init(SREJ_QUEUE(sk));
/*
 * Non-zero iff @mode is advertised by both the remote feature mask and
 * our local one. ERTM/streaming bits are only added to the local mask
 * when the enable_ertm module parameter is set (guard not visible).
 * NOTE(review): truncated — the "if (enable_ertm)" guard, the
 * switch(mode) header and the default return are not visible.
 */
2208 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2210 u32 local_feat_mask = l2cap_feat_mask;
2212 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2215 case L2CAP_MODE_ERTM:
2216 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2217 case L2CAP_MODE_STREAMING:
2218 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/*
 * Pick the channel mode to request: keep ERTM/streaming when the
 * remote supports it, otherwise fall back to basic mode.
 * NOTE(review): truncated — the switch(mode) header and the
 * "return mode;" for the supported case are not visible.
 */
2224 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2227 case L2CAP_MODE_STREAMING:
2228 case L2CAP_MODE_ERTM:
2229 if (l2cap_mode_supported(mode, remote_feat_mask))
2233 return L2CAP_MODE_BASIC;
/*
 * Build an outgoing CONFIG_REQ into @data for this channel: on the
 * first request, lock in the channel mode (falling back per the remote
 * feature mask); then emit mode-specific options — MTU for basic mode,
 * an RFC option (plus optional FCS) for ERTM/streaming.
 * Returns the number of bytes written (ptr - data; return not visible).
 * NOTE(review): truncated — the "goto done"/"done:" control flow, the
 * switch(pi->mode) headers, break statements, rfc.txwin_size for
 * streaming, and the final return are not visible.
 */
2237 static int l2cap_build_conf_req(struct sock *sk, void *data)
2239 struct l2cap_pinfo *pi = l2cap_pi(sk);
2240 struct l2cap_conf_req *req = data;
2241 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2242 void *ptr = req->data;
2244 BT_DBG("sk %p", sk);
/* Mode selection happens only on the very first config exchange. */
2246 if (pi->num_conf_req || pi->num_conf_rsp)
2250 case L2CAP_MODE_STREAMING:
2251 case L2CAP_MODE_ERTM:
2252 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2253 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2254 l2cap_send_disconn_req(pi->conn, sk);
2257 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2263 case L2CAP_MODE_BASIC:
/* MTU option only needs sending when it differs from the default. */
2264 if (pi->imtu != L2CAP_DEFAULT_MTU)
2265 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2268 case L2CAP_MODE_ERTM:
2269 rfc.mode = L2CAP_MODE_ERTM;
2270 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2271 rfc.max_transmit = max_transmit;
/* Timeouts are set by the acceptor, so request them as 0 here. */
2272 rfc.retrans_timeout = 0;
2273 rfc.monitor_timeout = 0;
2274 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2276 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2277 sizeof(rfc), (unsigned long) &rfc);
2279 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2282 if (pi->fcs == L2CAP_FCS_NONE ||
2283 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2284 pi->fcs = L2CAP_FCS_NONE;
2285 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2289 case L2CAP_MODE_STREAMING:
2290 rfc.mode = L2CAP_MODE_STREAMING;
2292 rfc.max_transmit = 0;
2293 rfc.retrans_timeout = 0;
2294 rfc.monitor_timeout = 0;
2295 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2297 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2298 sizeof(rfc), (unsigned long) &rfc);
2300 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2303 if (pi->fcs == L2CAP_FCS_NONE ||
2304 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2305 pi->fcs = L2CAP_FCS_NONE;
2306 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2311 /* FIXME: Need actual value of the flush timeout */
2312 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2313 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2315 req->dcid = cpu_to_le16(pi->dcid);
2316 req->flags = cpu_to_le16(0);
/*
 * Parse the peer's accumulated CONFIG_REQ options (pi->conf_req /
 * conf_len) and build our CONFIG_RSP into @data: settle the channel
 * mode on the first exchange, validate the MTU, record remote ERTM /
 * streaming parameters from the RFC option, and echo back the options
 * we (dis)agree with. Returns the response length (return not visible)
 * or -ECONNREFUSED when the mode cannot be agreed.
 * NOTE(review): truncated — the val declaration, switch headers,
 * several break statements, the "done:" label and the final return
 * are not visible.
 */
2321 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2323 struct l2cap_pinfo *pi = l2cap_pi(sk);
2324 struct l2cap_conf_rsp *rsp = data;
2325 void *ptr = rsp->data;
2326 void *req = pi->conf_req;
2327 int len = pi->conf_len;
2328 int type, hint, olen;
2330 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2331 u16 mtu = L2CAP_DEFAULT_MTU;
2332 u16 result = L2CAP_CONF_SUCCESS;
2334 BT_DBG("sk %p", sk);
/* First pass: walk every option, remembering the ones we act on. */
2336 while (len >= L2CAP_CONF_OPT_SIZE) {
2337 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; non-hint unknowns must be rejected. */
2339 hint = type & L2CAP_CONF_HINT;
2340 type &= L2CAP_CONF_MASK;
2343 case L2CAP_CONF_MTU:
2347 case L2CAP_CONF_FLUSH_TO:
2351 case L2CAP_CONF_QOS:
2354 case L2CAP_CONF_RFC:
2355 if (olen == sizeof(rfc))
2356 memcpy(&rfc, (void *) val, olen);
2359 case L2CAP_CONF_FCS:
2360 if (val == L2CAP_FCS_NONE)
2361 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2369 result = L2CAP_CONF_UNKNOWN;
2370 *((u8 *) ptr++) = type;
2375 if (pi->num_conf_rsp || pi->num_conf_req)
2379 case L2CAP_MODE_STREAMING:
2380 case L2CAP_MODE_ERTM:
2381 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2382 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2383 return -ECONNREFUSED;
2386 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
/* Mode mismatch: propose ours once, refuse on the second round. */
2391 if (pi->mode != rfc.mode) {
2392 result = L2CAP_CONF_UNACCEPT;
2393 rfc.mode = pi->mode;
2395 if (pi->num_conf_rsp == 1)
2396 return -ECONNREFUSED;
2398 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2399 sizeof(rfc), (unsigned long) &rfc);
2403 if (result == L2CAP_CONF_SUCCESS) {
2404 /* Configure output options and let the other side know
2405 * which ones we don't like. */
2407 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2408 result = L2CAP_CONF_UNACCEPT;
2411 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2413 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2416 case L2CAP_MODE_BASIC:
2417 pi->fcs = L2CAP_FCS_NONE;
2418 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2421 case L2CAP_MODE_ERTM:
2422 pi->remote_tx_win = rfc.txwin_size;
2423 pi->remote_max_tx = rfc.max_transmit;
2424 pi->max_pdu_size = rfc.max_pdu_size;
/* As acceptor, we dictate the retransmission/monitor timeouts. */
2426 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
2427 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2429 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2431 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2432 sizeof(rfc), (unsigned long) &rfc);
2436 case L2CAP_MODE_STREAMING:
2437 pi->remote_tx_win = rfc.txwin_size;
2438 pi->max_pdu_size = rfc.max_pdu_size;
2440 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2442 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2443 sizeof(rfc), (unsigned long) &rfc);
2448 result = L2CAP_CONF_UNACCEPT;
2450 memset(&rfc, 0, sizeof(rfc));
2451 rfc.mode = pi->mode;
2454 if (result == L2CAP_CONF_SUCCESS)
2455 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2457 rsp->scid = cpu_to_le16(pi->dcid);
2458 rsp->result = cpu_to_le16(result);
2459 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse the peer's CONFIG_RSP (@rsp, @len) and build a new CONFIG_REQ
 * into @data for renegotiation: adjust MTU/flush-timeout, adopt the
 * RFC parameters (refusing a mode switch if our mode is pinned), and
 * on success record the negotiated ERTM/streaming values.
 * Returns the request length (ptr - data; return not visible) or
 * -ECONNREFUSED.
 * NOTE(review): truncated — type/olen/val declarations, switch
 * headers, break statements, "else pi->omtu = val" style branches and
 * the final return are not visible.
 */
2464 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2466 struct l2cap_pinfo *pi = l2cap_pi(sk);
2467 struct l2cap_conf_req *req = data;
2468 void *ptr = req->data;
2471 struct l2cap_conf_rfc rfc;
2473 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2475 while (len >= L2CAP_CONF_OPT_SIZE) {
2476 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2479 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: counter with the
 * minimum and mark the response unacceptable. */
2480 if (val < L2CAP_DEFAULT_MIN_MTU) {
2481 *result = L2CAP_CONF_UNACCEPT;
2482 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2485 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2488 case L2CAP_CONF_FLUSH_TO:
2490 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2494 case L2CAP_CONF_RFC:
2495 if (olen == sizeof(rfc))
2496 memcpy(&rfc, (void *)val, olen);
/* If our mode was pinned at selection time, a differing peer mode
 * is a hard failure. */
2498 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2499 rfc.mode != pi->mode)
2500 return -ECONNREFUSED;
2502 pi->mode = rfc.mode;
2505 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2506 sizeof(rfc), (unsigned long) &rfc);
2511 if (*result == L2CAP_CONF_SUCCESS) {
2513 case L2CAP_MODE_ERTM:
2514 pi->remote_tx_win = rfc.txwin_size;
2515 pi->retrans_timeout = rfc.retrans_timeout;
2516 pi->monitor_timeout = rfc.monitor_timeout;
2517 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2519 case L2CAP_MODE_STREAMING:
2520 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2525 req->dcid = cpu_to_le16(pi->dcid);
2526 req->flags = cpu_to_le16(0x0000);
/*
 * Fill in a minimal CONFIG_RSP (scid = peer's channel id, result,
 * continuation flags) with no options.
 * NOTE(review): truncated — the "return ptr - data;" (response length)
 * is not visible here.
 */
2531 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2533 struct l2cap_conf_rsp *rsp = data;
2534 void *ptr = rsp->data;
2536 BT_DBG("sk %p", sk);
2538 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2539 rsp->result = cpu_to_le16(result);
2540 rsp->flags = cpu_to_le16(flags);
/*
 * Handle a COMMAND_REJ on the signalling channel. If it rejects our
 * outstanding information request (matching ident), treat the feature
 * exchange as done and start pending channels anyway.
 * NOTE(review): truncated — the "return 0;" and closing brace are not
 * visible.
 */
2545 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2547 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* Only "command not understood" (reason 0) is handled here. */
2549 if (rej->reason != 0x0000)
2552 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2553 cmd->ident == conn->info_ident) {
2554 del_timer(&conn->info_timer);
2556 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2557 conn->info_ident = 0;
2559 l2cap_conn_start(conn);
/*
 * Handle an incoming CONNECT_REQ: find a listener for the PSM, check
 * link security (SDP excepted), allocate and initialise a child
 * socket, add it to the channel list, and answer with success /
 * pending / refusal depending on security and defer_setup. If we have
 * not yet learned the peer's feature mask, a pending result is sent
 * and an INFO_REQ is kicked off.
 * NOTE(review): truncated — bh_lock_sock(parent), several goto labels
 * (sendresp/response), "goto" statements, the !sk allocation check and
 * the final "return 0;" are not visible; the control flow between the
 * labelled sections cannot be fully reconstructed from this chunk.
 */
2565 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2567 struct l2cap_chan_list *list = &conn->chan_list;
2568 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2569 struct l2cap_conn_rsp rsp;
2570 struct sock *sk, *parent;
2571 int result, status = L2CAP_CS_NO_INFO;
2573 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2574 __le16 psm = req->psm;
2576 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2578 /* Check if we have socket listening on psm */
2579 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2581 result = L2CAP_CR_BAD_PSM;
2585 /* Check if the ACL is secure enough (if not SDP) */
2586 if (psm != cpu_to_le16(0x0001) &&
2587 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05: authentication failure — reported on later disconnect. */
2588 conn->disc_reason = 0x05;
2589 result = L2CAP_CR_SEC_BLOCK;
2593 result = L2CAP_CR_NO_MEM;
2595 /* Check for backlog size */
2596 if (sk_acceptq_is_full(parent)) {
2597 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2601 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2605 write_lock_bh(&list->lock);
2607 /* Check if we already have channel with that dcid */
2608 if (__l2cap_get_chan_by_dcid(list, scid)) {
2609 write_unlock_bh(&list->lock);
2610 sock_set_flag(sk, SOCK_ZAPPED);
2611 l2cap_sock_kill(sk);
2615 hci_conn_hold(conn->hcon);
2617 l2cap_sock_init(sk, parent);
2618 bacpy(&bt_sk(sk)->src, conn->src);
2619 bacpy(&bt_sk(sk)->dst, conn->dst);
2620 l2cap_pi(sk)->psm = psm;
2621 l2cap_pi(sk)->dcid = scid;
2623 __l2cap_chan_add(conn, sk, parent);
2624 dcid = l2cap_pi(sk)->scid;
2626 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2628 l2cap_pi(sk)->ident = cmd->ident;
/* Only proceed past "pending" once the peer's features are known. */
2630 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2631 if (l2cap_check_security(sk)) {
2632 if (bt_sk(sk)->defer_setup) {
2633 sk->sk_state = BT_CONNECT2;
2634 result = L2CAP_CR_PEND;
2635 status = L2CAP_CS_AUTHOR_PEND;
2636 parent->sk_data_ready(parent, 0);
2638 sk->sk_state = BT_CONFIG;
2639 result = L2CAP_CR_SUCCESS;
2640 status = L2CAP_CS_NO_INFO;
2643 sk->sk_state = BT_CONNECT2;
2644 result = L2CAP_CR_PEND;
2645 status = L2CAP_CS_AUTHEN_PEND;
2648 sk->sk_state = BT_CONNECT2;
2649 result = L2CAP_CR_PEND;
2650 status = L2CAP_CS_NO_INFO;
2653 write_unlock_bh(&list->lock);
2656 bh_unlock_sock(parent);
2659 rsp.scid = cpu_to_le16(scid);
2660 rsp.dcid = cpu_to_le16(dcid);
2661 rsp.result = cpu_to_le16(result);
2662 rsp.status = cpu_to_le16(status);
2663 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Feature mask still unknown: start the information exchange now. */
2665 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2666 struct l2cap_info_req info;
2667 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2669 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2670 conn->info_ident = l2cap_get_ident(conn);
2672 mod_timer(&conn->info_timer, jiffies +
2673 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2675 l2cap_send_cmd(conn, conn->info_ident,
2676 L2CAP_INFO_REQ, sizeof(info), &info);
/*
 * Handle a CONNECT_RSP: locate our channel by scid (or by the command
 * ident for a pending response), then on success record the remote
 * cid, enter BT_CONFIG and fire the first CONFIG_REQ; on "pending"
 * just mark the connect as pending; any other result tears the channel
 * down with ECONNREFUSED.
 * NOTE(review): truncated — the sk/req declarations, the switch(result)
 * header, break statements, case L2CAP_CR_PEND label, bh_unlock_sock
 * and "return 0;" are not visible.
 */
2682 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2684 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2685 u16 scid, dcid, result, status;
2689 scid = __le16_to_cpu(rsp->scid);
2690 dcid = __le16_to_cpu(rsp->dcid);
2691 result = __le16_to_cpu(rsp->result);
2692 status = __le16_to_cpu(rsp->status);
2694 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* A pending response may arrive before the peer assigned us a cid,
 * so fall back to a lookup by command ident. */
2697 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2701 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2707 case L2CAP_CR_SUCCESS:
2708 sk->sk_state = BT_CONFIG;
2709 l2cap_pi(sk)->ident = 0;
2710 l2cap_pi(sk)->dcid = dcid;
2711 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2713 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2715 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2716 l2cap_build_conf_req(sk, req), req);
2717 l2cap_pi(sk)->num_conf_req++;
2721 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2725 l2cap_chan_del(sk, ECONNREFUSED);
/*
 * Handle a CONFIG_REQ: accumulate option data into the per-channel
 * conf_req buffer (rejecting overflow), answer segments with an empty
 * continuation response, and once complete parse the options, send the
 * response, and — when both config directions are done — bring the
 * channel up (FCS decision, sequence reset, ERTM init). Also fires our
 * own first CONFIG_REQ if we have not sent one yet.
 * NOTE(review): truncated — u16 dcid/flags and rsp/buf declarations,
 * bh_lock/unlock_sock, several goto labels (unlock), the len < 0
 * check after parsing, and "return 0;" are not visible.
 */
2733 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2735 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2741 dcid = __le16_to_cpu(req->dcid);
2742 flags = __le16_to_cpu(req->flags);
2744 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2746 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2750 if (sk->sk_state == BT_DISCONN)
2753 /* Reject if config buffer is too small. */
2754 len = cmd_len - sizeof(*req);
2755 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2756 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2757 l2cap_build_conf_rsp(sk, rsp,
2758 L2CAP_CONF_REJECT, flags), rsp);
/* Append this segment to the accumulated request. */
2763 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2764 l2cap_pi(sk)->conf_len += len;
2766 if (flags & 0x0001) {
2767 /* Incomplete config. Send empty response. */
2768 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2769 l2cap_build_conf_rsp(sk, rsp,
2770 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2774 /* Complete config. */
2775 len = l2cap_parse_conf_req(sk, rsp);
2777 l2cap_send_disconn_req(conn, sk);
2781 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2782 l2cap_pi(sk)->num_conf_rsp++;
2784 /* Reset config buffer. */
2785 l2cap_pi(sk)->conf_len = 0;
2787 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: decide FCS and go operational. */
2790 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2791 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2792 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2793 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2795 sk->sk_state = BT_CONNECTED;
2797 l2cap_pi(sk)->next_tx_seq = 0;
2798 l2cap_pi(sk)->expected_tx_seq = 0;
2799 __skb_queue_head_init(TX_QUEUE(sk));
2800 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2801 l2cap_ertm_init(sk);
2803 l2cap_chan_ready(sk);
2807 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2809 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2810 l2cap_build_conf_req(sk, buf), buf);
2811 l2cap_pi(sk)->num_conf_req++;
/*
 * Handle a CONFIG_RSP: on "unacceptable parameters" renegotiate (up to
 * L2CAP_CONF_MAX_CONF_RSP rounds) by reparsing the peer's counter-
 * proposal and sending a fresh CONFIG_REQ; on any other failure set a
 * timed disconnect. On overall success mark input config done and, if
 * output config also finished, bring the channel up exactly as in
 * l2cap_config_req().
 * NOTE(review): truncated — the sk declaration, char req[64] buffer,
 * switch(result) header, goto done/unlock labels, bh_lock/unlock and
 * "return 0;" are not visible. Also note L2847 uses cmd->len (a
 * little-endian wire field) without le16_to_cpu — TODO confirm against
 * the full source whether this is the known endian bug fixed upstream.
 */
2819 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2821 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2822 u16 scid, flags, result;
2825 scid = __le16_to_cpu(rsp->scid);
2826 flags = __le16_to_cpu(rsp->flags);
2827 result = __le16_to_cpu(rsp->result);
2829 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2830 scid, flags, result);
2832 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2837 case L2CAP_CONF_SUCCESS:
2840 case L2CAP_CONF_UNACCEPT:
2841 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2842 int len = cmd->len - sizeof(*rsp);
2845 /* throw out any old stored conf requests */
2846 result = L2CAP_CONF_SUCCESS;
2847 len = l2cap_parse_conf_rsp(sk, rsp->data,
2850 l2cap_send_disconn_req(conn, sk);
2854 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2855 L2CAP_CONF_REQ, len, req);
2856 l2cap_pi(sk)->num_conf_req++;
2857 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable config failure: schedule disconnect with a 5s
 * guard timer. */
2863 sk->sk_state = BT_DISCONN;
2864 sk->sk_err = ECONNRESET;
2865 l2cap_sock_set_timer(sk, HZ * 5);
2866 l2cap_send_disconn_req(conn, sk);
2873 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2875 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2876 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2877 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2878 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2880 sk->sk_state = BT_CONNECTED;
2881 l2cap_pi(sk)->next_tx_seq = 0;
2882 l2cap_pi(sk)->expected_tx_seq = 0;
2883 __skb_queue_head_init(TX_QUEUE(sk));
2884 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2885 l2cap_ertm_init(sk);
2887 l2cap_chan_ready(sk);
/*
 * Handle a DISCONNECT_REQ: acknowledge with DISCONN_RSP, shut the
 * socket down, purge queued frames (plus SREJ queue and timers for
 * ERTM), and remove/kill the channel with ECONNRESET.
 * NOTE(review): truncated — u16 scid/dcid and sk declarations,
 * bh_lock/unlock_sock and "return 0;" are not visible.
 */
2895 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2897 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2898 struct l2cap_disconn_rsp rsp;
2902 scid = __le16_to_cpu(req->scid);
2903 dcid = __le16_to_cpu(req->dcid);
2905 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer addresses us by our scid, carried in req->dcid. */
2907 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2911 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2912 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2913 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2915 sk->sk_shutdown = SHUTDOWN_MASK;
2917 skb_queue_purge(TX_QUEUE(sk));
2919 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
2920 skb_queue_purge(SREJ_QUEUE(sk));
2921 del_timer(&l2cap_pi(sk)->retrans_timer);
2922 del_timer(&l2cap_pi(sk)->monitor_timer);
2925 l2cap_chan_del(sk, ECONNRESET);
2928 l2cap_sock_kill(sk);
/*
 * Handle a DISCONNECT_RSP to our own disconnect request: purge queued
 * frames (plus ERTM SREJ queue and timers) and finish tearing down the
 * channel with no error.
 * NOTE(review): truncated — u16 dcid/scid and sk declarations,
 * bh_lock/unlock_sock and "return 0;" are not visible.
 */
2932 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2934 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2938 scid = __le16_to_cpu(rsp->scid);
2939 dcid = __le16_to_cpu(rsp->dcid);
2941 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2943 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2947 skb_queue_purge(TX_QUEUE(sk));
2949 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
2950 skb_queue_purge(SREJ_QUEUE(sk));
2951 del_timer(&l2cap_pi(sk)->retrans_timer);
2952 del_timer(&l2cap_pi(sk)->monitor_timer);
2955 l2cap_chan_del(sk, 0);
2958 l2cap_sock_kill(sk);
/*
 * Handle an INFO_REQ: answer feature-mask queries with our (possibly
 * ERTM-extended) feature mask, fixed-channel queries with the static
 * l2cap_fixed_chan table, and anything else with NOTSUPP.
 * NOTE(review): truncated — the u16 type declaration, the buf[]
 * declarations inside each branch, the "if (enable_ertm)" guard before
 * the ERTM feature bits, and "return 0;" are not visible.
 */
2962 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2964 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2967 type = __le16_to_cpu(req->type);
2969 BT_DBG("type 0x%4.4x", type);
2971 if (type == L2CAP_IT_FEAT_MASK) {
2973 u32 feat_mask = l2cap_feat_mask;
2974 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2975 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2976 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2978 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2980 put_unaligned_le32(feat_mask, rsp->data);
2981 l2cap_send_cmd(conn, cmd->ident,
2982 L2CAP_INFO_RSP, sizeof(buf), buf);
2983 } else if (type == L2CAP_IT_FIXED_CHAN) {
2985 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2986 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2987 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Payload starts after the 4-byte type+result response header. */
2988 memcpy(buf + 4, l2cap_fixed_chan, 8);
2989 l2cap_send_cmd(conn, cmd->ident,
2990 L2CAP_INFO_RSP, sizeof(buf), buf);
2992 struct l2cap_info_rsp rsp;
2993 rsp.type = cpu_to_le16(type);
2994 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2995 l2cap_send_cmd(conn, cmd->ident,
2996 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an INFO_RSP: record the peer's feature mask; if it announces
 * fixed channels, chain a fixed-channel query, otherwise mark the
 * exchange complete and start pending channels. A fixed-channel reply
 * also completes the exchange.
 * NOTE(review): truncated — the u16 type/result declarations and
 * "return 0;" are not visible.
 */
3002 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3004 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3007 type = __le16_to_cpu(rsp->type);
3008 result = __le16_to_cpu(rsp->result);
3010 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3012 del_timer(&conn->info_timer);
3014 if (type == L2CAP_IT_FEAT_MASK) {
3015 conn->feat_mask = get_unaligned_le32(rsp->data);
3017 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3018 struct l2cap_info_req req;
3019 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3021 conn->info_ident = l2cap_get_ident(conn);
3023 l2cap_send_cmd(conn, conn->info_ident,
3024 L2CAP_INFO_REQ, sizeof(req), &req);
3026 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3027 conn->info_ident = 0;
3029 l2cap_conn_start(conn);
3031 } else if (type == L2CAP_IT_FIXED_CHAN) {
3032 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3033 conn->info_ident = 0;
3035 l2cap_conn_start(conn);
/*
 * Dispatcher for the signalling channel (CID 0x0001): copy the frame
 * to raw sockets, then iterate over the packed commands in the skb,
 * validating each header and routing by opcode. Unknown or failed
 * commands are answered with COMMAND_REJ.
 * NOTE(review): truncated — the len/cmd_len/err declarations, the
 * "break" after a corrupted command, the switch(cmd.code) header,
 * break statements, the data/len advance at loop bottom, the
 * "if (err)" guard around the reject block and the trailing
 * kfree_skb(skb) are not visible.
 */
3041 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3043 u8 *data = skb->data;
3045 struct l2cap_cmd_hdr cmd;
3048 l2cap_raw_recv(conn, skb);
3050 while (len >= L2CAP_CMD_HDR_SIZE) {
3052 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3053 data += L2CAP_CMD_HDR_SIZE;
3054 len -= L2CAP_CMD_HDR_SIZE;
3056 cmd_len = le16_to_cpu(cmd.len);
3058 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Claimed length must fit the remaining data; ident 0 is invalid. */
3060 if (cmd_len > len || !cmd.ident) {
3061 BT_DBG("corrupted command");
3066 case L2CAP_COMMAND_REJ:
3067 l2cap_command_rej(conn, &cmd, data);
3070 case L2CAP_CONN_REQ:
3071 err = l2cap_connect_req(conn, &cmd, data);
3074 case L2CAP_CONN_RSP:
3075 err = l2cap_connect_rsp(conn, &cmd, data);
3078 case L2CAP_CONF_REQ:
3079 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3082 case L2CAP_CONF_RSP:
3083 err = l2cap_config_rsp(conn, &cmd, data);
3086 case L2CAP_DISCONN_REQ:
3087 err = l2cap_disconnect_req(conn, &cmd, data);
3090 case L2CAP_DISCONN_RSP:
3091 err = l2cap_disconnect_rsp(conn, &cmd, data);
3094 case L2CAP_ECHO_REQ:
/* Echo the request payload straight back. */
3095 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3098 case L2CAP_ECHO_RSP:
3101 case L2CAP_INFO_REQ:
3102 err = l2cap_information_req(conn, &cmd, data);
3105 case L2CAP_INFO_RSP:
3106 err = l2cap_information_rsp(conn, &cmd, data);
3110 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3116 struct l2cap_cmd_rej rej;
3117 BT_DBG("error %d", err);
3119 /* FIXME: Map err to a valid reason */
3120 rej.reason = cpu_to_le16(0);
3121 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/*
 * Verify the trailing CRC16 FCS of a received ERTM/streaming frame
 * when FCS is negotiated: trim the 2 FCS bytes off the skb (they
 * remain readable just past the new tail), then compare against a
 * CRC16 computed over the L2CAP header + control field + payload.
 * Returns -EINVAL on mismatch; the success "return 0;" is not visible
 * in this chunk.
 */
3131 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3133 u16 our_fcs, rcv_fcs;
/* Header size includes the 2-byte control field preceding the data. */
3134 int hdr_size = L2CAP_HDR_SIZE + 2;
3136 if (pi->fcs == L2CAP_FCS_CRC16) {
3137 skb_trim(skb, skb->len - 2);
3138 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3139 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3141 if (our_fcs != rcv_fcs)
/*
 * Insert an out-of-order I-frame into the SREJ queue, keeping the
 * queue sorted by tx_seq: walk existing entries and queue before the
 * first one with a greater sequence number, otherwise append at the
 * tail.
 * NOTE(review): truncated — the "if (!next_skb)" guard, the do-loop
 * header, "return" statements after the two early insertions and the
 * closing brace are not visible. Also note the tx_seq comparison does
 * not account for sequence-number wraparound (mod-64 space) — TODO
 * confirm against the full source.
 */
3147 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3149 struct sk_buff *next_skb;
3151 bt_cb(skb)->tx_seq = tx_seq;
3152 bt_cb(skb)->sar = sar;
3154 next_skb = skb_peek(SREJ_QUEUE(sk));
3156 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3161 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3162 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3166 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3169 } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3171 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/*
 * Reassemble a (possibly segmented) SDU from ERTM/streaming I-frames
 * according to the SAR bits of the control field, and deliver complete
 * SDUs to the socket receive queue.
 *
 * State used: pi->sdu (partial SDU buffer), pi->sdu_len (total expected
 * length from the START frame), pi->partial_sdu_len (bytes gathered so
 * far), and the L2CAP_CONN_SAR_SDU flag marking "reassembly in progress".
 *
 * NOTE(review): bt_skb_alloc() and skb_clone() results are used without
 * a visible NULL check in this excerpt — confirm the elided lines handle
 * GFP_ATOMIC allocation failure.
 */
3174 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3176 struct l2cap_pinfo *pi = l2cap_pi(sk);
3177 struct sk_buff *_skb;
3180 switch (control & L2CAP_CTRL_SAR) {
3181 case L2CAP_SDU_UNSEGMENTED:
/* an unsegmented SDU must not arrive mid-reassembly */
3182 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3187 err = sock_queue_rcv_skb(sk, skb);
3193 case L2CAP_SDU_START:
/* a new START while already reassembling is a protocol error */
3194 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* first two payload bytes of a START frame carry the SDU length */
3199 pi->sdu_len = get_unaligned_le16(skb->data);
3202 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3208 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3210 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3211 pi->partial_sdu_len = skb->len;
3215 case L2CAP_SDU_CONTINUE:
/* CONTINUE is only valid while a reassembly is in progress */
3216 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3219 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3221 pi->partial_sdu_len += skb->len;
/* more data than the START frame announced: drop the SDU */
3222 if (pi->partial_sdu_len > pi->sdu_len)
/* (L2CAP_SDU_END case — label elided in this excerpt) */
3230 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3233 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3235 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3236 pi->partial_sdu_len += skb->len;
3238 if (pi->partial_sdu_len == pi->sdu_len) {
/* hand a clone to the socket so pi->sdu can be freed separately */
3239 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3240 err = sock_queue_rcv_skb(sk, _skb);
/*
 * Drain the SREJ queue: deliver every buffered frame that is now in
 * sequence, starting at tx_seq, to the SAR reassembly layer, and advance
 * buffer_seq_srej (mod 64) for each frame released.
 */
3254 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3256 struct sk_buff *skb;
3259 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* stop at the first frame that is still out of sequence */
3260 if (bt_cb(skb)->tx_seq != tx_seq)
3263 skb = skb_dequeue(SREJ_QUEUE(sk));
/* rebuild the SAR bits saved when the frame was queued */
3264 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3265 l2cap_sar_reassembly_sdu(sk, skb, control);
3266 l2cap_pi(sk)->buffer_seq_srej =
3267 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
/*
 * A duplicate of an already-SREJ'd frame arrived: re-send the SREJ
 * S-frames for every sequence number still outstanding.  Entries up to
 * and including tx_seq are dropped from SREJ_LIST (the elided lines
 * presumably free them); the rest are re-requested and re-queued at the
 * tail so the list stays in request order.
 */
3272 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3274 struct l2cap_pinfo *pi = l2cap_pi(sk);
3275 struct srej_list *l, *tmp;
3278 list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
/* reached the frame that just arrived — stop resending here */
3279 if (l->tx_seq == tx_seq) {
3284 control = L2CAP_SUPER_SELECT_REJECT;
3285 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3286 l2cap_send_sframe(pi, control);
/* keep the entry pending: move it to the tail of the list */
3288 list_add_tail(&l->list, SREJ_LIST(sk));
/*
 * Send one SREJ S-frame for every sequence number between the expected
 * tx_seq and the tx_seq actually received, recording each requested
 * number in SREJ_LIST so the reply can be matched later.  The first
 * SREJ carries the Poll bit when L2CAP_CONN_SEND_PBIT is set.
 *
 * NOTE(review): kzalloc(GFP_ATOMIC) is dereferenced without a visible
 * NULL check — an allocation failure here would oops; confirm whether
 * the full source guards this.
 */
3292 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3294 struct l2cap_pinfo *pi = l2cap_pi(sk);
3295 struct srej_list *new;
3298 while (tx_seq != pi->expected_tx_seq) {
3299 control = L2CAP_SUPER_SELECT_REJECT;
3300 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3301 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
/* only the first SREJ of the burst polls the peer */
3302 control |= L2CAP_CTRL_POLL;
3303 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
3305 l2cap_send_sframe(pi, control);
3307 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3308 new->tx_seq = pi->expected_tx_seq++;
3309 list_add_tail(&new->list, SREJ_LIST(sk));
/* account for the frame that did arrive (line outside the loop) */
3311 pi->expected_tx_seq++;
/*
 * ERTM receive path for an I-frame (information frame).
 *
 * Acknowledges peer-confirmed frames via the ReqSeq field, then handles
 * the TxSeq in three cases: in-sequence (deliver / reassemble),
 * out-of-sequence while SREJ recovery is active (queue or re-request),
 * and out-of-sequence with no recovery active (start SREJ recovery).
 * An RR acknowledgement is sent every L2CAP_DEFAULT_NUM_TO_ACK frames.
 *
 * NOTE(review): many control-flow lines (gotos/returns/braces) are
 * elided in this excerpt; comments describe the visible statements only.
 */
3314 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3316 struct l2cap_pinfo *pi = l2cap_pi(sk);
3317 u8 tx_seq = __get_txseq(rx_control);
3318 u8 req_seq = __get_reqseq(rx_control);
3320 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3323 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* ReqSeq acknowledges our transmitted frames up to req_seq */
3325 pi->expected_ack_seq = req_seq;
3326 l2cap_drop_acked_frames(sk);
3328 if (tx_seq == pi->expected_tx_seq)
/* out-of-sequence frame while SREJ recovery is already running */
3331 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3332 struct srej_list *first;
3334 first = list_first_entry(SREJ_LIST(sk),
3335 struct srej_list, list);
/* this is the oldest missing frame: buffer it and try to drain */
3336 if (tx_seq == first->tx_seq) {
3337 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3338 l2cap_check_srej_gap(sk, tx_seq);
3340 list_del(&first->list);
/* all gaps filled: leave SREJ recovery */
3343 if (list_empty(SREJ_LIST(sk))) {
3344 pi->buffer_seq = pi->buffer_seq_srej;
3345 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3348 struct srej_list *l;
3349 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
/* duplicate of a frame we already SREJ'd? re-request the rest */
3351 list_for_each_entry(l, SREJ_LIST(sk), list) {
3352 if (l->tx_seq == tx_seq) {
3353 l2cap_resend_srejframe(sk, tx_seq);
3357 l2cap_send_srejframe(sk, tx_seq);
/* first out-of-sequence frame: start SREJ recovery */
3360 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3362 INIT_LIST_HEAD(SREJ_LIST(sk));
3363 pi->buffer_seq_srej = pi->buffer_seq;
3365 __skb_queue_head_init(SREJ_QUEUE(sk));
3366 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3368 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3370 l2cap_send_srejframe(sk, tx_seq);
/* in-sequence frame (expected_tx_seq label target, elided) */
3375 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3377 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3378 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
/* F-bit set: retransmission point acknowledged, resume sending */
3382 if (rx_control & L2CAP_CTRL_FINAL) {
3383 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3384 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3386 sk->sk_send_head = TX_QUEUE(sk)->next;
3387 pi->next_tx_seq = pi->expected_ack_seq;
3388 l2cap_ertm_send(sk);
3392 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3394 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
/* batch acknowledgements: send RR every NUM_TO_ACK frames */
3398 pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
3399 if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
3400 tx_control |= L2CAP_SUPER_RCV_READY;
3401 tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3402 l2cap_send_sframe(pi, tx_control);
/*
 * ERTM receive path for an S-frame (supervisory frame): RR, REJ, SREJ
 * and RNR.  Each case acknowledges frames via the ReqSeq field and then
 * drives retransmission and the retrans/monitor timers according to the
 * Poll (P) and Final (F) bits.
 *
 * NOTE(review): the local 'tx_seq' here is actually the frame's ReqSeq
 * (see __get_reqseq on the next line) — the name is historical; case
 * terminators (break/return) are elided in this excerpt.
 */
3407 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3409 struct l2cap_pinfo *pi = l2cap_pi(sk);
3410 u8 tx_seq = __get_reqseq(rx_control);
3412 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3414 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3415 case L2CAP_SUPER_RCV_READY:
/* peer polls us: answer with an RR carrying the F-bit */
3416 if (rx_control & L2CAP_CTRL_POLL) {
3417 u16 control = L2CAP_CTRL_FINAL;
3418 control |= L2CAP_SUPER_RCV_READY |
3419 (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT);
3420 l2cap_send_sframe(l2cap_pi(sk), control);
3421 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* peer answered our poll: ack frames and restart transmission */
3423 } else if (rx_control & L2CAP_CTRL_FINAL) {
3424 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3425 pi->expected_ack_seq = tx_seq;
3426 l2cap_drop_acked_frames(sk);
3428 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3429 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3431 sk->sk_send_head = TX_QUEUE(sk)->next;
3432 pi->next_tx_seq = pi->expected_ack_seq;
3433 l2cap_ertm_send(sk);
3436 if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
/* F-bit answers our poll: stop the monitor timer */
3439 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3440 del_timer(&pi->monitor_timer);
3442 if (pi->unacked_frames > 0)
3443 __mod_retrans_timer();
/* plain RR: acknowledge and keep sending */
3445 pi->expected_ack_seq = tx_seq;
3446 l2cap_drop_acked_frames(sk);
3448 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3449 (pi->unacked_frames > 0))
3450 __mod_retrans_timer();
3452 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3453 l2cap_ertm_send(sk);
3457 case L2CAP_SUPER_REJECT:
3458 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3460 pi->expected_ack_seq = __get_reqseq(rx_control);
3461 l2cap_drop_acked_frames(sk);
3463 if (rx_control & L2CAP_CTRL_FINAL) {
3464 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3465 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
/* retransmit from the rejected sequence number */
3467 sk->sk_send_head = TX_QUEUE(sk)->next;
3468 pi->next_tx_seq = pi->expected_ack_seq;
3469 l2cap_ertm_send(sk);
3472 sk->sk_send_head = TX_QUEUE(sk)->next;
3473 pi->next_tx_seq = pi->expected_ack_seq;
3474 l2cap_ertm_send(sk);
3476 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3477 pi->srej_save_reqseq = tx_seq;
3478 pi->conn_state |= L2CAP_CONN_REJ_ACT;
3484 case L2CAP_SUPER_SELECT_REJECT:
3485 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* SREJ with P-bit: ack, retransmit the one frame, resume */
3487 if (rx_control & L2CAP_CTRL_POLL) {
3488 pi->expected_ack_seq = tx_seq;
3489 l2cap_drop_acked_frames(sk);
3490 l2cap_retransmit_frame(sk, tx_seq);
3491 l2cap_ertm_send(sk);
3492 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3493 pi->srej_save_reqseq = tx_seq;
3494 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* SREJ with F-bit: retransmit unless it answers our own poll */
3496 } else if (rx_control & L2CAP_CTRL_FINAL) {
3497 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3498 pi->srej_save_reqseq == tx_seq)
3499 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3501 l2cap_retransmit_frame(sk, tx_seq);
3504 l2cap_retransmit_frame(sk, tx_seq);
3505 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3506 pi->srej_save_reqseq = tx_seq;
3507 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3512 case L2CAP_SUPER_RCV_NOT_READY:
/* peer is busy: stop retransmitting until it recovers */
3513 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3514 pi->expected_ack_seq = tx_seq;
3515 l2cap_drop_acked_frames(sk);
3517 del_timer(&l2cap_pi(sk)->retrans_timer);
3518 if (rx_control & L2CAP_CTRL_POLL) {
3519 u16 control = L2CAP_CTRL_FINAL;
3520 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/*
 * Dispatch an inbound data frame on a connection-oriented channel,
 * looked up by source CID.  Behavior depends on the channel mode:
 * basic (queue directly), ERTM (control field + optional FCS, then
 * I-frame/S-frame handling), or streaming (in-order delivery only,
 * missing frames are skipped).
 *
 * NOTE(review): drop/unlock paths and the FCS/control-byte skb_pull
 * lines are elided in this excerpt.
 */
3528 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3531 struct l2cap_pinfo *pi;
3535 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3537 BT_DBG("unknown cid 0x%4.4x", cid);
3543 BT_DBG("sk %p, len %d", sk, skb->len);
3545 if (sk->sk_state != BT_CONNECTED)
3549 case L2CAP_MODE_BASIC:
3550 /* If socket recv buffers overflows we drop data here
3551 * which is *bad* because L2CAP has to be reliable.
3552 * But we don't have any other choice. L2CAP doesn't
3553 * provide flow control mechanism. */
3555 if (pi->imtu < skb->len)
3558 if (!sock_queue_rcv_skb(sk, skb))
3562 case L2CAP_MODE_ERTM:
/* first two bytes of the payload are the ERTM control field */
3563 control = get_unaligned_le16(skb->data);
3567 if (__is_sar_start(control))
3570 if (pi->fcs == L2CAP_FCS_CRC16)
3574 * We can just drop the corrupted I-frame here.
3575 * Receiver will miss it and start proper recovery
3576 * procedures and ask retransmission.
3578 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
3581 if (l2cap_check_fcs(pi, skb))
3584 if (__is_iframe(control))
3585 l2cap_data_channel_iframe(sk, control, skb);
3587 l2cap_data_channel_sframe(sk, control, skb);
3591 case L2CAP_MODE_STREAMING:
3592 control = get_unaligned_le16(skb->data);
3596 if (__is_sar_start(control))
3599 if (pi->fcs == L2CAP_FCS_CRC16)
/* streaming mode never carries S-frames */
3602 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
3605 if (l2cap_check_fcs(pi, skb))
3608 tx_seq = __get_txseq(control);
/* accept in-order frames; on a gap just resync to tx_seq + 1 */
3610 if (pi->expected_tx_seq == tx_seq)
3611 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3613 pi->expected_tx_seq = tx_seq + 1;
3615 l2cap_sar_reassembly_sdu(sk, skb, control);
3620 BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
/*
 * Deliver a connectionless (CID 0x0002) datagram to the socket bound to
 * the given PSM, dropping it if no socket matches, the socket is in the
 * wrong state, or the payload exceeds the socket's incoming MTU.
 */
3634 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3638 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3642 BT_DBG("sk %p, len %d", sk, skb->len);
3644 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3647 if (l2cap_pi(sk)->imtu < skb->len)
3650 if (!sock_queue_rcv_skb(sk, skb))
/*
 * Top-level demultiplexer for a complete L2CAP frame: parse the basic
 * header, sanity-check the advertised length against the skb, and route
 * by CID to the signaling channel, the connectionless channel, or a
 * connection-oriented data channel.
 */
3662 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3664 struct l2cap_hdr *lh = (void *) skb->data;
3668 skb_pull(skb, L2CAP_HDR_SIZE);
3669 cid = __le16_to_cpu(lh->cid);
3670 len = __le16_to_cpu(lh->len);
/* header length must match the actual payload (drop path elided) */
3672 if (len != skb->len) {
3677 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3680 case L2CAP_CID_SIGNALING:
3681 l2cap_sig_channel(conn, skb);
3684 case L2CAP_CID_CONN_LESS:
/* connectionless payload starts with the 2-byte PSM */
3685 psm = get_unaligned_le16(skb->data);
3687 l2cap_conless_channel(conn, psm, skb);
3691 l2cap_data_channel(conn, cid, skb);
3696 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * HCI callback: should we accept an incoming ACL connection from
 * 'bdaddr'?  Scans listening L2CAP sockets; an exact local-address
 * match (lm1) takes precedence over a wildcard BDADDR_ANY match (lm2).
 * Returns an HCI_LM_* mask (ACCEPT, optionally MASTER for role switch),
 * or 0 to reject / for non-ACL link types.
 *
 * NOTE(review): the 'register' storage class is obsolete and ignored by
 * modern compilers; the line setting 'exact' for a local-address match
 * is elided in this excerpt.
 */
3698 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3700 int exact = 0, lm1 = 0, lm2 = 0;
3701 register struct sock *sk;
3702 struct hlist_node *node;
3704 if (type != ACL_LINK)
3707 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3709 /* Find listening sockets and check their link_mode */
3710 read_lock(&l2cap_sk_list.lock);
3711 sk_for_each(sk, node, &l2cap_sk_list.head) {
3712 if (sk->sk_state != BT_LISTEN)
3715 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3716 lm1 |= HCI_LM_ACCEPT;
3717 if (l2cap_pi(sk)->role_switch)
3718 lm1 |= HCI_LM_MASTER;
3720 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3721 lm2 |= HCI_LM_ACCEPT;
3722 if (l2cap_pi(sk)->role_switch)
3723 lm2 |= HCI_LM_MASTER;
3726 read_unlock(&l2cap_sk_list.lock);
3728 return exact ? lm1 : lm2;
/*
 * HCI callback: an outgoing/incoming ACL connection completed.  On
 * success, create the L2CAP connection object and kick pending channel
 * setup; on failure, tear down any state with the mapped errno.
 */
3731 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3733 struct l2cap_conn *conn;
3735 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3737 if (hcon->type != ACL_LINK)
3741 conn = l2cap_conn_add(hcon, status);
3743 l2cap_conn_ready(conn);
3745 l2cap_conn_del(hcon, bt_err(status));
/*
 * HCI callback: report the reason code to use when disconnecting this
 * ACL link (stored earlier in conn->disc_reason); the elided branch
 * presumably returns a default reason for non-ACL or missing conn.
 */
3750 static int l2cap_disconn_ind(struct hci_conn *hcon)
3752 struct l2cap_conn *conn = hcon->l2cap_data;
3754 BT_DBG("hcon %p", hcon);
3756 if (hcon->type != ACL_LINK || !conn)
3759 return conn->disc_reason;
/*
 * HCI callback: the ACL link went down — tear down the L2CAP connection
 * and notify all channels with the errno mapped from the HCI reason.
 */
3762 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3764 BT_DBG("hcon %p reason %d", hcon, reason);
3766 if (hcon->type != ACL_LINK)
3769 l2cap_conn_del(hcon, bt_err(reason));
/*
 * React to an encryption change on a connected SEQPACKET channel.
 * Encryption dropped: MEDIUM security gets a 5-second grace timer,
 * HIGH security closes the channel immediately.  Encryption restored:
 * clear the grace timer for MEDIUM-security channels.
 */
3774 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3776 if (sk->sk_type != SOCK_SEQPACKET)
3779 if (encrypt == 0x00) {
3780 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3781 l2cap_sock_clear_timer(sk);
3782 l2cap_sock_set_timer(sk, HZ * 5);
3783 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3784 __l2cap_sock_close(sk, ECONNREFUSED);
3786 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3787 l2cap_sock_clear_timer(sk);
/*
 * HCI callback: authentication/encryption procedure finished for this
 * ACL link.  Walks every channel on the connection: established
 * channels just re-check encryption requirements; channels waiting in
 * BT_CONNECT now send their Connection Request; channels in BT_CONNECT2
 * send the deferred Connection Response (success or security block).
 *
 * NOTE(review): per-socket locking lines and some branch terminators
 * are elided in this excerpt.
 */
3791 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3793 struct l2cap_chan_list *l;
3794 struct l2cap_conn *conn = hcon->l2cap_data;
3800 l = &conn->chan_list;
3802 BT_DBG("conn %p", conn);
3804 read_lock(&l->lock);
3806 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* skip channels whose connect is still pending elsewhere */
3809 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3814 if (!status && (sk->sk_state == BT_CONNECTED ||
3815 sk->sk_state == BT_CONFIG)) {
3816 l2cap_check_encryption(sk, encrypt);
/* security now satisfied: fire the queued Connection Request */
3821 if (sk->sk_state == BT_CONNECT) {
3823 struct l2cap_conn_req req;
3824 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3825 req.psm = l2cap_pi(sk)->psm;
3827 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
3829 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3830 L2CAP_CONN_REQ, sizeof(req), &req);
3832 l2cap_sock_clear_timer(sk);
3833 l2cap_sock_set_timer(sk, HZ / 10);
/* deferred accept: answer the peer's Connection Request now */
3835 } else if (sk->sk_state == BT_CONNECT2) {
3836 struct l2cap_conn_rsp rsp;
3840 sk->sk_state = BT_CONFIG;
3841 result = L2CAP_CR_SUCCESS;
3843 sk->sk_state = BT_DISCONN;
3844 l2cap_sock_set_timer(sk, HZ / 10);
3845 result = L2CAP_CR_SEC_BLOCK;
3848 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3849 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3850 rsp.result = cpu_to_le16(result);
3851 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3852 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3853 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3859 read_unlock(&l->lock);
/*
 * HCI callback: reassemble L2CAP frames from ACL fragments.
 *
 * ACL_START fragments carry the L2CAP header: if the whole frame fits
 * in one fragment it is processed immediately, otherwise conn->rx_skb
 * is allocated to collect it.  Continuation fragments are appended
 * until conn->rx_len reaches zero, then the frame is dispatched.  Any
 * inconsistency (unexpected start/continuation, over-long data) marks
 * the connection unreliable with ECOMM and drops the partial frame.
 *
 * NOTE(review): several drop/goto lines and allocation-failure branches
 * are elided in this excerpt.
 */
3864 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3866 struct l2cap_conn *conn = hcon->l2cap_data;
3868 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
3871 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3873 if (flags & ACL_START) {
3874 struct l2cap_hdr *hdr;
/* a new start while a frame is pending: discard the stale partial */
3878 BT_ERR("Unexpected start frame (len %d)", skb->len);
3879 kfree_skb(conn->rx_skb);
3880 conn->rx_skb = NULL;
3882 l2cap_conn_unreliable(conn, ECOMM);
3886 BT_ERR("Frame is too short (len %d)", skb->len);
3887 l2cap_conn_unreliable(conn, ECOMM);
3891 hdr = (struct l2cap_hdr *) skb->data;
3892 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3894 if (len == skb->len) {
3895 /* Complete frame received */
3896 l2cap_recv_frame(conn, skb);
3900 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3902 if (skb->len > len) {
3903 BT_ERR("Frame is too long (len %d, expected len %d)",
3905 l2cap_conn_unreliable(conn, ECOMM);
3909 /* Allocate skb for the complete frame (with header) */
3910 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
3914 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3916 conn->rx_len = len - skb->len;
3918 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* continuation with nothing pending is a protocol violation */
3920 if (!conn->rx_len) {
3921 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
3922 l2cap_conn_unreliable(conn, ECOMM);
3926 if (skb->len > conn->rx_len) {
3927 BT_ERR("Fragment is too long (len %d, expected %d)",
3928 skb->len, conn->rx_len);
3929 kfree_skb(conn->rx_skb);
3930 conn->rx_skb = NULL;
3932 l2cap_conn_unreliable(conn, ECOMM);
3936 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3938 conn->rx_len -= skb->len;
3940 if (!conn->rx_len) {
3941 /* Complete frame received */
3942 l2cap_recv_frame(conn, conn->rx_skb);
3943 conn->rx_skb = NULL;
/*
 * sysfs show handler for /sys/class/bluetooth/l2cap: dump one line per
 * L2CAP socket (addresses, state, PSM, CIDs, MTUs, security level).
 *
 * NOTE(review): writes with unbounded sprintf into the sysfs buffer —
 * a PAGE_SIZE page; could overflow with very many sockets.  Later
 * kernels moved this to seq_file/debugfs.
 */
3952 static ssize_t l2cap_sysfs_show(struct class *dev,
3953 struct class_attribute *attr,
3957 struct hlist_node *node;
3960 read_lock_bh(&l2cap_sk_list.lock);
3962 sk_for_each(sk, node, &l2cap_sk_list.head) {
3963 struct l2cap_pinfo *pi = l2cap_pi(sk);
3965 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3966 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
3967 sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
3968 pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
3971 read_unlock_bh(&l2cap_sk_list.lock);
/* Read-only class attribute backing /sys/class/bluetooth/l2cap. */
3976 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/* Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets.
 * Unsupported operations are stubbed with the sock_no_* helpers. */
3978 static const struct proto_ops l2cap_sock_ops = {
3979 .family = PF_BLUETOOTH,
3980 .owner = THIS_MODULE,
3981 .release = l2cap_sock_release,
3982 .bind = l2cap_sock_bind,
3983 .connect = l2cap_sock_connect,
3984 .listen = l2cap_sock_listen,
3985 .accept = l2cap_sock_accept,
3986 .getname = l2cap_sock_getname,
3987 .sendmsg = l2cap_sock_sendmsg,
3988 .recvmsg = l2cap_sock_recvmsg,
3989 .poll = bt_sock_poll,
3990 .ioctl = bt_sock_ioctl,
3991 .mmap = sock_no_mmap,
3992 .socketpair = sock_no_socketpair,
3993 .shutdown = l2cap_sock_shutdown,
3994 .setsockopt = l2cap_sock_setsockopt,
3995 .getsockopt = l2cap_sock_getsockopt
/* Registered with bt_sock_register() to create L2CAP sockets. */
3998 static const struct net_proto_family l2cap_sock_family_ops = {
3999 .family = PF_BLUETOOTH,
4000 .owner = THIS_MODULE,
4001 .create = l2cap_sock_create,
/* Hooks registered with the HCI core: connection lifecycle, security
 * confirmation, and inbound ACL data (see callbacks above). */
4004 static struct hci_proto l2cap_hci_proto = {
4006 .id = HCI_PROTO_L2CAP,
4007 .connect_ind = l2cap_connect_ind,
4008 .connect_cfm = l2cap_connect_cfm,
4009 .disconn_ind = l2cap_disconn_ind,
4010 .disconn_cfm = l2cap_disconn_cfm,
4011 .security_cfm = l2cap_security_cfm,
4012 .recv_acldata = l2cap_recv_acldata
/*
 * Module init: register the proto, the Bluetooth socket family, and the
 * HCI protocol hooks, unwinding in reverse order on failure (the error
 * labels are elided in this excerpt; proto_unregister below is the
 * final unwind step).  The sysfs file is best-effort only.
 */
4015 static int __init l2cap_init(void)
4019 err = proto_register(&l2cap_proto, 0);
4023 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4025 BT_ERR("L2CAP socket registration failed");
4029 err = hci_register_proto(&l2cap_hci_proto);
4031 BT_ERR("L2CAP protocol registration failed");
4032 bt_sock_unregister(BTPROTO_L2CAP);
/* non-fatal: module still works without the sysfs info file */
4036 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
4037 BT_ERR("Failed to create L2CAP info file");
4039 BT_INFO("L2CAP ver %s", VERSION);
4040 BT_INFO("L2CAP socket layer initialized");
4045 proto_unregister(&l2cap_proto);
/* Module exit: undo l2cap_init() registrations in reverse order. */
4049 static void __exit l2cap_exit(void)
4051 class_remove_file(bt_class, &class_attr_l2cap);
4053 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4054 BT_ERR("L2CAP socket unregistration failed");
4056 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4057 BT_ERR("L2CAP protocol unregistration failed");
4059 proto_unregister(&l2cap_proto);
/* Intentionally empty; exists only so dependent modules can link
 * against it and pull this module in via symbol resolution. */
4062 void l2cap_load(void)
4064 /* Dummy function to trigger automatic L2CAP module loading by
4065 * other modules that use L2CAP sockets but don't use any other
4066 * symbols from it. */
4069 EXPORT_SYMBOL(l2cap_load);
4071 module_init(l2cap_init);
4072 module_exit(l2cap_exit);
/* NOTE(review): enable_ertm is declared 'static int' (see file head)
 * but exposed as a bool parameter — later kernels require the variable
 * itself to be bool; likewise max_transmit is 'int' but exposed as
 * uint.  Harmless here but worth aligning the types. */
4074 module_param(enable_ertm, bool, 0644);
4075 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4077 module_param(max_transmit, uint, 0644);
4078 MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
4080 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4081 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4082 MODULE_VERSION(VERSION);
4083 MODULE_LICENSE("GPL");
/* allows auto-loading via the bt-proto-0 (BTPROTO_L2CAP) alias */
4084 MODULE_ALIAS("bt-proto-0");