2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
44 #include <linux/crc16.h>
47 #include <asm/system.h>
48 #include <asm/unaligned.h>
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
52 #include <net/bluetooth/l2cap.h>
54 #define VERSION "2.14"
/* Module parameter: enable Enhanced Retransmission / Streaming modes
 * (off by default; only Basic mode is negotiated otherwise). */
56 static int enable_ertm = 0;
/* Feature mask returned in L2CAP Information Responses; fixed channel
 * bitmap advertises channel support (bit 1 set in byte 0). */
58 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
59 static u8 l2cap_fixed_chan[8] = { 0x02, };
61 static const struct proto_ops l2cap_sock_ops;
/* Global list of every L2CAP socket, guarded by its rwlock. */
63 static struct bt_sock_list l2cap_sk_list = {
64 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for the teardown path and the command builder. */
67 static void __l2cap_sock_close(struct sock *sk, int reason);
68 static void l2cap_sock_close(struct sock *sk);
69 static void l2cap_sock_kill(struct sock *sk);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
74 /* ---- L2CAP timers ---- */
75 static void l2cap_sock_timeout(unsigned long arg)
/* sk_timer expiry handler: choose an errno describing the failed
 * connection and close the channel. */
77 struct sock *sk = (struct sock *) arg;
80 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* Connected/configuring channels, or a connect stalled past the SDP
 * security phase, report ECONNREFUSED.  NOTE(review): the fallback
 * assignment (presumably ETIMEDOUT) is on a line elided from this
 * listing -- confirm against the full source. */
84 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
85 reason = ECONNREFUSED;
86 else if (sk->sk_state == BT_CONNECT &&
87 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
88 reason = ECONNREFUSED;
92 __l2cap_sock_close(sk, reason);
/* Arm (or re-arm) the socket timer to fire `timeout` jiffies from now. */
100 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
102 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
103 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timer, dropping its reference if it was armed. */
106 static void l2cap_sock_clear_timer(struct sock *sk)
108 BT_DBG("sock %p state %d", sk, sk->sk_state);
109 sk_stop_timer(sk, &sk->sk_timer);
112 /* ---- L2CAP channels ---- */
/* Linear scan of a connection's channel list for a destination CID.
 * Caller must hold l->lock. */
113 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
116 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
117 if (l2cap_pi(s)->dcid == cid)
/* Linear scan of a connection's channel list for a source CID.
 * Caller must hold l->lock. */
123 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
126 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
127 if (l2cap_pi(s)->scid == cid)
133 /* Find channel with given SCID.
134 * Returns locked socket */
/* Locked wrapper around __l2cap_get_chan_by_scid(): takes l->lock for
 * the lookup and (per the comment above) returns the socket locked. */
135 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
139 s = __l2cap_get_chan_by_scid(l, cid);
142 read_unlock(&l->lock);
/* Find the channel whose outstanding signalling request used `ident`.
 * Caller must hold l->lock. */
146 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
149 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
150 if (l2cap_pi(s)->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
156 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
160 s = __l2cap_get_chan_by_ident(l, ident);
163 read_unlock(&l->lock);
/* Allocate the first free dynamic CID in [L2CAP_CID_DYN_START,
 * L2CAP_CID_DYN_END) by probing the channel list.  Caller must hold
 * l->lock (uses the unlocked lookup helper). */
167 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
169 u16 cid = L2CAP_CID_DYN_START;
171 for (; cid < L2CAP_CID_DYN_END; cid++) {
172 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push `sk` onto the head of the doubly linked channel list.
 * Caller must hold l->lock for writing. */
179 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
184 l2cap_pi(l->head)->prev_c = sk;
186 l2cap_pi(sk)->next_c = l->head;
187 l2cap_pi(sk)->prev_c = NULL;
/* Remove `sk` from the connection's channel list, fixing up neighbour
 * links under the list write lock (BH-safe variant). */
191 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
193 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
195 write_lock_bh(&l->lock);
200 l2cap_pi(next)->prev_c = prev;
202 l2cap_pi(prev)->next_c = next;
203 write_unlock_bh(&l->lock);
/* Attach socket `sk` to connection `conn`, assigning CIDs according to
 * socket type, and queue it on `parent`'s accept queue when given.
 * Caller must hold conn->chan_list.lock for writing. */
208 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
210 struct l2cap_chan_list *l = &conn->chan_list;
212 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
213 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 = "remote user terminated connection" default disconnect reason. */
215 conn->disc_reason = 0x13;
217 l2cap_pi(sk)->conn = conn;
219 if (sk->sk_type == SOCK_SEQPACKET) {
220 /* Alloc CID for connection-oriented socket */
221 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
222 } else if (sk->sk_type == SOCK_DGRAM) {
223 /* Connectionless socket */
224 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
225 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
226 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
228 /* Raw socket can send/recv signalling messages only */
229 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
230 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
231 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
234 __l2cap_chan_link(l, sk);
237 bt_accept_enqueue(parent, sk);
241 * Must be called on the locked socket. */
/* Detach the channel from its connection, mark the socket closed/zapped
 * and wake anyone waiting on it.  Must be called with `sk` locked. */
242 static void l2cap_chan_del(struct sock *sk, int err)
244 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
245 struct sock *parent = bt_sk(sk)->parent;
247 l2cap_sock_clear_timer(sk);
249 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
252 /* Unlink from channel list */
253 l2cap_chan_unlink(&conn->chan_list, sk);
254 l2cap_pi(sk)->conn = NULL;
/* Drop the HCI connection reference taken when the channel was added. */
255 hci_conn_put(conn->hcon);
258 sk->sk_state = BT_CLOSED;
259 sock_set_flag(sk, SOCK_ZAPPED);
/* If still on a listening parent's accept queue, unlink and notify it. */
265 bt_accept_unlink(sk);
266 parent->sk_data_ready(parent, 0);
268 sk->sk_state_change(sk);
271 /* Service level security */
/* Map the channel's PSM and requested security level onto an HCI
 * authentication type and ask the HCI layer to enforce it.  PSM 0x0001
 * (SDP) never triggers bonding; other PSMs use general bonding. */
272 static inline int l2cap_check_security(struct sock *sk)
274 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
277 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
278 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
279 auth_type = HCI_AT_NO_BONDING_MITM;
281 auth_type = HCI_AT_NO_BONDING;
/* SDP demotes LOW to the dedicated SDP security level. */
283 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
284 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
286 switch (l2cap_pi(sk)->sec_level) {
287 case BT_SECURITY_HIGH:
288 auth_type = HCI_AT_GENERAL_BONDING_MITM;
290 case BT_SECURITY_MEDIUM:
291 auth_type = HCI_AT_GENERAL_BONDING;
294 auth_type = HCI_AT_NO_BONDING;
299 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Return the next signalling identifier for this connection, cycling
 * within the kernel-reserved 1..128 range under conn->lock. */
303 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
307 /* Get next available identificator.
308 * 1 - 128 are used by kernel.
309 * 129 - 199 are reserved.
310 * 200 - 254 are used by utilities like l2ping, etc.
313 spin_lock_bh(&conn->lock);
315 if (++conn->tx_ident > 128)
320 spin_unlock_bh(&conn->lock);
/* Build a signalling command PDU and hand it to the ACL layer. */
325 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
327 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
329 BT_DBG("code 0x%2.2x", code);
334 return hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM supervisory (S-)frame carrying `control`,
 * appending a CRC16 FCS when the channel negotiated one. */
337 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
340 struct l2cap_hdr *lh;
341 struct l2cap_conn *conn = pi->conn;
/* Header + 2-byte control field; FCS adds 2 more below. */
342 int count, hlen = L2CAP_HDR_SIZE + 2;
344 if (pi->fcs == L2CAP_FCS_CRC16)
347 BT_DBG("pi %p, control 0x%2.2x", pi, control);
349 count = min_t(unsigned int, conn->mtu, hlen);
350 control |= L2CAP_CTRL_FRAME_TYPE;
352 skb = bt_skb_alloc(count, GFP_ATOMIC);
356 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
357 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
358 lh->cid = cpu_to_le16(pi->dcid);
359 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything up to (but not including) itself. */
361 if (pi->fcs == L2CAP_FCS_CRC16) {
362 u16 fcs = crc16(0, (u8 *)lh, count - 2);
363 put_unaligned_le16(fcs, skb_put(skb, 2));
366 return hci_send_acl(pi->conn->hcon, skb, 0);
/* Kick off channel establishment: if the remote feature mask is known
 * (or being fetched), send a Connection Request once security allows;
 * otherwise issue an Information Request first and wait for it. */
369 static void l2cap_do_start(struct sock *sk)
371 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
373 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight: the CONN_REQ is sent later
 * from the info response path. */
374 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
377 if (l2cap_check_security(sk)) {
378 struct l2cap_conn_req req;
379 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
380 req.psm = l2cap_pi(sk)->psm;
382 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
384 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
385 L2CAP_CONN_REQ, sizeof(req), &req);
388 struct l2cap_info_req req;
389 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
391 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
392 conn->info_ident = l2cap_get_ident(conn);
/* Bound the wait for the info response. */
394 mod_timer(&conn->info_timer, jiffies +
395 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
397 l2cap_send_cmd(conn, conn->info_ident,
398 L2CAP_INFO_REQ, sizeof(req), &req);
/* Send an L2CAP Disconnection Request for this channel's CID pair. */
402 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
404 struct l2cap_disconn_req req;
406 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
407 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
408 l2cap_send_cmd(conn, l2cap_get_ident(conn),
409 L2CAP_DISCONN_REQ, sizeof(req), &req);
412 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection after the feature exchange
 * completes: issue Connection Requests for outgoing channels and
 * Connection Responses (success/pending) for incoming ones. */
413 static void l2cap_conn_start(struct l2cap_conn *conn)
415 struct l2cap_chan_list *l = &conn->chan_list;
418 BT_DBG("conn %p", conn);
422 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented channels take part in signalling. */
425 if (sk->sk_type != SOCK_SEQPACKET) {
430 if (sk->sk_state == BT_CONNECT) {
431 if (l2cap_check_security(sk)) {
432 struct l2cap_conn_req req;
433 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
434 req.psm = l2cap_pi(sk)->psm;
436 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
438 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
439 L2CAP_CONN_REQ, sizeof(req), &req);
441 } else if (sk->sk_state == BT_CONNECT2) {
442 struct l2cap_conn_rsp rsp;
443 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
444 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
446 if (l2cap_check_security(sk)) {
/* Deferred setup: keep the peer pending until
 * userspace accepts; wake the listener. */
447 if (bt_sk(sk)->defer_setup) {
448 struct sock *parent = bt_sk(sk)->parent;
449 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
450 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
451 parent->sk_data_ready(parent, 0);
454 sk->sk_state = BT_CONFIG;
455 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
456 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not satisfied yet: report authentication pending. */
459 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
460 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
463 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
464 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
470 read_unlock(&l->lock);
/* ACL link came up: raw/dgram sockets become connected immediately;
 * SEQPACKET channels in BT_CONNECT proceed with channel setup. */
473 static void l2cap_conn_ready(struct l2cap_conn *conn)
475 struct l2cap_chan_list *l = &conn->chan_list;
478 BT_DBG("conn %p", conn);
482 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
485 if (sk->sk_type != SOCK_SEQPACKET) {
486 l2cap_sock_clear_timer(sk);
487 sk->sk_state = BT_CONNECTED;
488 sk->sk_state_change(sk);
489 } else if (sk->sk_state == BT_CONNECT)
495 read_unlock(&l->lock);
498 /* Notify sockets that we cannot guaranty reliability anymore */
/* Notify sockets that we cannot guaranty reliability anymore: flag an
 * error on every channel that requested force_reliable delivery. */
499 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
501 struct l2cap_chan_list *l = &conn->chan_list;
504 BT_DBG("conn %p", conn);
508 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
509 if (l2cap_pi(sk)->force_reliable)
513 read_unlock(&l->lock);
/* Information Request timed out: pretend the exchange finished so
 * pending channels can proceed with default feature assumptions. */
516 static void l2cap_info_timeout(unsigned long arg)
518 struct l2cap_conn *conn = (void *) arg;
520 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
521 conn->info_ident = 0;
523 l2cap_conn_start(conn);
/* Get-or-create the L2CAP connection object hanging off an HCI
 * connection.  Returns the existing conn when already present. */
526 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
528 struct l2cap_conn *conn = hcon->l2cap_data;
533 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
537 hcon->l2cap_data = conn;
540 BT_DBG("hcon %p conn %p", hcon, conn);
542 conn->mtu = hcon->hdev->acl_mtu;
543 conn->src = &hcon->hdev->bdaddr;
544 conn->dst = &hcon->dst;
548 setup_timer(&conn->info_timer, l2cap_info_timeout,
549 (unsigned long) conn);
551 spin_lock_init(&conn->lock);
552 rwlock_init(&conn->chan_list.lock);
/* Default disconnect reason: remote user terminated connection. */
554 conn->disc_reason = 0x13;
/* Tear down an L2CAP connection: free any partial reassembly skb,
 * close every attached channel with `err`, stop the info timer and
 * detach from the HCI connection. */
559 static void l2cap_conn_del(struct hci_conn *hcon, int err)
561 struct l2cap_conn *conn = hcon->l2cap_data;
567 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
569 kfree_skb(conn->rx_skb);
/* Each l2cap_chan_del() pops the list head, so this drains it. */
572 while ((sk = conn->chan_list.head)) {
574 l2cap_chan_del(sk, err);
579 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
580 del_timer_sync(&conn->info_timer);
582 hcon->l2cap_data = NULL;
/* Locked wrapper: add a channel under the chan_list write lock. */
586 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
588 struct l2cap_chan_list *l = &conn->chan_list;
589 write_lock_bh(&l->lock);
590 __l2cap_chan_add(conn, sk, parent);
591 write_unlock_bh(&l->lock);
594 /* ---- Socket interface ---- */
/* Exact-match lookup by bound source PSM + source bdaddr; used to
 * detect bind conflicts.  Caller must hold l2cap_sk_list.lock. */
595 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
598 struct hlist_node *node;
599 sk_for_each(sk, node, &l2cap_sk_list.head)
600 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
607 /* Find socket with psm and source bdaddr.
608 * Returns closest match.
/* Find a socket matching `state` and `psm`: prefer an exact source
 * address match, else remember a BDADDR_ANY wildcard as `sk1`.
 * Caller must hold l2cap_sk_list.lock. */
610 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
612 struct sock *sk = NULL, *sk1 = NULL;
613 struct hlist_node *node;
615 sk_for_each(sk, node, &l2cap_sk_list.head) {
616 if (state && sk->sk_state != state)
619 if (l2cap_pi(sk)->psm == psm) {
621 if (!bacmp(&bt_sk(sk)->src, src))
625 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node non-NULL means the loop broke on an exact match. */
629 return node ? sk : sk1;
632 /* Find socket with given address (psm, src).
633 * Returns locked socket */
/* Locked wrapper around __l2cap_get_sock_by_psm(); per the comment
 * above, the returned socket is locked. */
634 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
637 read_lock(&l2cap_sk_list.lock);
638 s = __l2cap_get_sock_by_psm(state, psm, src);
641 read_unlock(&l2cap_sk_list.lock);
/* sk->sk_destruct callback: drop any queued rx/tx skbs. */
645 static void l2cap_sock_destruct(struct sock *sk)
649 skb_queue_purge(&sk->sk_receive_queue);
650 skb_queue_purge(&sk->sk_write_queue);
/* Shut down a listening socket: close every not-yet-accepted child,
 * then mark the parent closed and zapped. */
653 static void l2cap_sock_cleanup_listen(struct sock *parent)
657 BT_DBG("parent %p", parent);
659 /* Close not yet accepted channels */
660 while ((sk = bt_accept_dequeue(parent, NULL)))
661 l2cap_sock_close(sk);
663 parent->sk_state = BT_CLOSED;
664 sock_set_flag(parent, SOCK_ZAPPED);
667 /* Kill socket (only if zapped and orphan)
668 * Must be called on unlocked socket.
/* Free a socket only if it is both zapped and orphaned (no struct
 * socket attached); otherwise leave it for its owner to release.
 * Must be called on an unlocked socket. */
670 static void l2cap_sock_kill(struct sock *sk)
672 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
675 BT_DBG("sk %p state %d", sk, sk->sk_state);
677 /* Kill poor orphan */
678 bt_sock_unlink(&l2cap_sk_list, sk);
679 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close: listeners clean their accept queue; connected
 * SEQPACKET channels send a Disconnection Request and wait in
 * BT_DISCONN; half-open incoming channels answer with a negative
 * Connection Response; everything else is detached immediately. */
683 static void __l2cap_sock_close(struct sock *sk, int reason)
685 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
687 switch (sk->sk_state) {
689 l2cap_sock_cleanup_listen(sk);
694 if (sk->sk_type == SOCK_SEQPACKET) {
695 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
697 sk->sk_state = BT_DISCONN;
698 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
699 l2cap_send_disconn_req(conn, sk);
701 l2cap_chan_del(sk, reason);
/* BT_CONNECT2 with a pending peer request: reject it explicitly. */
705 if (sk->sk_type == SOCK_SEQPACKET) {
706 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
707 struct l2cap_conn_rsp rsp;
710 if (bt_sk(sk)->defer_setup)
711 result = L2CAP_CR_SEC_BLOCK;
713 result = L2CAP_CR_BAD_PSM;
715 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
716 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
717 rsp.result = cpu_to_le16(result);
718 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
719 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
720 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
722 l2cap_chan_del(sk, reason);
727 l2cap_chan_del(sk, reason);
731 sock_set_flag(sk, SOCK_ZAPPED);
736 /* Must be called on unlocked socket. */
/* Close helper for unlocked sockets: stop the timer and close with
 * ECONNRESET. */
737 static void l2cap_sock_close(struct sock *sk)
739 l2cap_sock_clear_timer(sk);
741 __l2cap_sock_close(sk, ECONNRESET);
/* Initialise per-socket L2CAP state: children inherit configuration
 * from their listening parent, fresh sockets get protocol defaults. */
746 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
748 struct l2cap_pinfo *pi = l2cap_pi(sk);
753 sk->sk_type = parent->sk_type;
754 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
756 pi->imtu = l2cap_pi(parent)->imtu;
757 pi->omtu = l2cap_pi(parent)->omtu;
758 pi->mode = l2cap_pi(parent)->mode;
759 pi->fcs = l2cap_pi(parent)->fcs;
760 pi->sec_level = l2cap_pi(parent)->sec_level;
761 pi->role_switch = l2cap_pi(parent)->role_switch;
762 pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* No parent: protocol defaults. */
764 pi->imtu = L2CAP_DEFAULT_MTU;
766 pi->mode = L2CAP_MODE_BASIC;
767 pi->fcs = L2CAP_FCS_CRC16;
768 pi->sec_level = BT_SECURITY_LOW;
770 pi->force_reliable = 0;
773 /* Default config options */
775 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Protocol descriptor registered with the socket layer; obj_size makes
 * sk_alloc() reserve room for struct l2cap_pinfo. */
778 static struct proto l2cap_proto = {
780 .owner = THIS_MODULE,
781 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and pre-initialise an L2CAP socket: destructor, send
 * timeout, state, sk_timer, and registration in the global list. */
784 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
788 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
792 sock_init_data(sock, sk);
793 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
795 sk->sk_destruct = l2cap_sock_destruct;
796 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
798 sock_reset_flag(sk, SOCK_ZAPPED);
800 sk->sk_protocol = proto;
801 sk->sk_state = BT_OPEN;
803 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
805 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate the socket type (SEQPACKET/DGRAM/RAW,
 * RAW requiring CAP_NET_RAW), then allocate and initialise the sock. */
809 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
813 BT_DBG("sock %p", sock);
815 sock->state = SS_UNCONNECTED;
817 if (sock->type != SOCK_SEQPACKET &&
818 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
819 return -ESOCKTNOSUPPORT;
821 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
824 sock->ops = &l2cap_sock_ops;
826 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
830 l2cap_sock_init(sk, NULL);
/* bind(2) backend: copy in the (possibly short) sockaddr_l2, enforce
 * the privileged-PSM rule, reject duplicate (psm, bdaddr) bindings and
 * record the source address/PSM. */
834 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
836 struct sock *sk = sock->sk;
837 struct sockaddr_l2 la;
842 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Tolerate truncated sockaddr from userspace: zero-fill then copy. */
845 memset(&la, 0, sizeof(la));
846 len = min_t(unsigned int, sizeof(la), alen);
847 memcpy(&la, addr, len);
854 if (sk->sk_state != BT_OPEN) {
/* PSMs below 0x1001 are reserved and need CAP_NET_BIND_SERVICE. */
859 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
860 !capable(CAP_NET_BIND_SERVICE)) {
865 write_lock_bh(&l2cap_sk_list.lock);
867 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
870 /* Save source address */
871 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
872 l2cap_pi(sk)->psm = la.l2_psm;
873 l2cap_pi(sk)->sport = la.l2_psm;
874 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) start at SDP security level. */
876 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
877 __le16_to_cpu(la.l2_psm) == 0x0003)
878 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
881 write_unlock_bh(&l2cap_sk_list.lock);
/* Establish the outgoing path for a connecting socket: route to an
 * HCI device, derive the HCI auth type from socket type/PSM/security
 * level, create (or reuse) the ACL link and attach the channel. */
888 static int l2cap_do_connect(struct sock *sk)
890 bdaddr_t *src = &bt_sk(sk)->src;
891 bdaddr_t *dst = &bt_sk(sk)->dst;
892 struct l2cap_conn *conn;
893 struct hci_conn *hcon;
894 struct hci_dev *hdev;
898 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
901 hdev = hci_get_route(dst, src);
903 return -EHOSTUNREACH;
905 hci_dev_lock_bh(hdev);
/* RAW sockets map security level to dedicated bonding types. */
909 if (sk->sk_type == SOCK_RAW) {
910 switch (l2cap_pi(sk)->sec_level) {
911 case BT_SECURITY_HIGH:
912 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
914 case BT_SECURITY_MEDIUM:
915 auth_type = HCI_AT_DEDICATED_BONDING;
918 auth_type = HCI_AT_NO_BONDING;
/* SDP PSM never bonds; see also l2cap_check_security(). */
921 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
922 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
923 auth_type = HCI_AT_NO_BONDING_MITM;
925 auth_type = HCI_AT_NO_BONDING;
927 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
928 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
930 switch (l2cap_pi(sk)->sec_level) {
931 case BT_SECURITY_HIGH:
932 auth_type = HCI_AT_GENERAL_BONDING_MITM;
934 case BT_SECURITY_MEDIUM:
935 auth_type = HCI_AT_GENERAL_BONDING;
938 auth_type = HCI_AT_NO_BONDING;
943 hcon = hci_connect(hdev, ACL_LINK, dst,
944 l2cap_pi(sk)->sec_level, auth_type);
948 conn = l2cap_conn_add(hcon, 0);
956 /* Update source addr of the socket */
957 bacpy(src, conn->src);
959 l2cap_chan_add(conn, sk, NULL);
961 sk->sk_state = BT_CONNECT;
962 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* ACL already up: raw/dgram sockets are done immediately. */
964 if (hcon->state == BT_CONNECTED) {
965 if (sk->sk_type != SOCK_SEQPACKET) {
966 l2cap_sock_clear_timer(sk);
967 sk->sk_state = BT_CONNECTED;
973 hci_dev_unlock_bh(hdev);
/* connect(2) backend: validate the address and channel mode (ERTM and
 * streaming only when enable_ertm is set), record the destination,
 * start the connection and optionally wait for BT_CONNECTED. */
978 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
980 struct sock *sk = sock->sk;
981 struct sockaddr_l2 la;
986 if (!addr || addr->sa_family != AF_BLUETOOTH)
989 memset(&la, 0, sizeof(la));
990 len = min_t(unsigned int, sizeof(la), alen);
991 memcpy(&la, addr, len);
/* Connection-oriented sockets need a PSM to connect to. */
998 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
1003 switch (l2cap_pi(sk)->mode) {
1004 case L2CAP_MODE_BASIC:
1006 case L2CAP_MODE_ERTM:
1007 case L2CAP_MODE_STREAMING:
1016 switch (sk->sk_state) {
1020 /* Already connecting */
1024 /* Already connected */
1037 /* Set destination address and psm */
1038 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1039 l2cap_pi(sk)->psm = la.l2_psm;
1041 err = l2cap_do_connect(sk);
1046 err = bt_sock_wait_state(sk, BT_CONNECTED,
1047 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2) backend: only bound SEQPACKET sockets may listen; a socket
 * with no PSM gets one auto-assigned from the odd dynamic range. */
1053 static int l2cap_sock_listen(struct socket *sock, int backlog)
1055 struct sock *sk = sock->sk;
1058 BT_DBG("sk %p backlog %d", sk, backlog);
1062 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
/* ERTM/streaming listeners are only valid when enable_ertm is set. */
1067 switch (l2cap_pi(sk)->mode) {
1068 case L2CAP_MODE_BASIC:
1070 case L2CAP_MODE_ERTM:
1071 case L2CAP_MODE_STREAMING:
1080 if (!l2cap_pi(sk)->psm) {
1081 bdaddr_t *src = &bt_sk(sk)->src;
1086 write_lock_bh(&l2cap_sk_list.lock);
/* Valid dynamic PSMs are odd; step by 2 from 0x1001. */
1088 for (psm = 0x1001; psm < 0x1100; psm += 2)
1089 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1090 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1091 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1096 write_unlock_bh(&l2cap_sk_list.lock);
1102 sk->sk_max_ack_backlog = backlog;
1103 sk->sk_ack_backlog = 0;
1104 sk->sk_state = BT_LISTEN;
/* accept(2) backend: classic wake-one sleep loop on the listener's
 * wait queue until a child socket can be dequeued, honouring
 * non-blocking mode, signals and listener state changes. */
1111 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1113 DECLARE_WAITQUEUE(wait, current);
1114 struct sock *sk = sock->sk, *nsk;
1118 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1120 if (sk->sk_state != BT_LISTEN) {
1125 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1127 BT_DBG("sk %p timeo %ld", sk, timeo);
1129 /* Wait for an incoming connection. (wake-one). */
1130 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1131 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1132 set_current_state(TASK_INTERRUPTIBLE);
/* Drop the sock lock while sleeping, retake before re-checking. */
1139 timeo = schedule_timeout(timeo);
1140 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1142 if (sk->sk_state != BT_LISTEN) {
1147 if (signal_pending(current)) {
1148 err = sock_intr_errno(timeo);
1152 set_current_state(TASK_RUNNING);
1153 remove_wait_queue(sk->sk_sleep, &wait);
1158 newsock->state = SS_CONNECTED;
1160 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername backend: fill a sockaddr_l2 with the peer
 * (psm, dst, dcid) or local (sport, src, scid) identity. */
1167 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1169 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1170 struct sock *sk = sock->sk;
1172 BT_DBG("sock %p, sk %p", sock, sk);
1174 addr->sa_family = AF_BLUETOOTH;
1175 *len = sizeof(struct sockaddr_l2);
1178 la->l2_psm = l2cap_pi(sk)->psm;
1179 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1180 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1182 la->l2_psm = l2cap_pi(sk)->sport;
1183 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1184 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* ERTM monitor timer: give up and disconnect once retry_count reaches
 * the peer's max transmit; otherwise poll the peer with RR|P=1. */
1190 static void l2cap_monitor_timeout(unsigned long arg)
1192 struct sock *sk = (void *) arg;
1195 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1196 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1200 l2cap_pi(sk)->retry_count++;
1201 __mod_monitor_timer();
1203 control = L2CAP_CTRL_POLL;
1204 control |= L2CAP_SUPER_RCV_READY;
1205 l2cap_send_sframe(l2cap_pi(sk), control);
/* ERTM retransmission timer: switch to the monitor phase (WAIT_F) and
 * poll the peer with an RR|P=1 supervisory frame. */
1208 static void l2cap_retrans_timeout(unsigned long arg)
1210 struct sock *sk = (void *) arg;
1213 l2cap_pi(sk)->retry_count = 1;
1214 __mod_monitor_timer();
1216 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1218 control = L2CAP_CTRL_POLL;
1219 control |= L2CAP_SUPER_RCV_READY;
1220 l2cap_send_sframe(l2cap_pi(sk), control);
/* Pop frames off the ERTM tx queue up to (not including) the expected
 * ack sequence; stop the retrans timer once nothing is unacked. */
1223 static void l2cap_drop_acked_frames(struct sock *sk)
1225 struct sk_buff *skb;
1227 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1228 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1231 skb = skb_dequeue(TX_QUEUE(sk));
1234 l2cap_pi(sk)->unacked_frames--;
1237 if (!l2cap_pi(sk)->unacked_frames)
1238 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand one fully built skb to the ACL layer on this channel's link. */
1243 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1245 struct l2cap_pinfo *pi = l2cap_pi(sk);
1248 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1250 err = hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode transmit: clone each queued frame, stamp the TxSeq
 * into the control field, recompute the FCS over the clone, send it
 * and drop the original (streaming never retransmits). */
1257 static int l2cap_streaming_send(struct sock *sk)
1259 struct sk_buff *skb, *tx_skb;
1260 struct l2cap_pinfo *pi = l2cap_pi(sk);
1264 while ((skb = sk->sk_send_head)) {
1265 tx_skb = skb_clone(skb, GFP_ATOMIC);
1267 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1268 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1269 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1271 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1272 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1273 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1276 err = l2cap_do_send(sk, tx_skb);
1278 l2cap_send_disconn_req(pi->conn, sk);
/* TxSeq is modulo-64 per the ERTM/streaming control field. */
1282 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1284 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1285 sk->sk_send_head = NULL;
1287 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1289 skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the queued I-frame carrying `tx_seq` (SREJ/REJ recovery):
 * walk the tx queue to find it, enforce the peer's max-transmit limit,
 * clone it with refreshed ReqSeq/TxSeq and FCS, and resend. */
1295 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1297 struct l2cap_pinfo *pi = l2cap_pi(sk);
1298 struct sk_buff *skb, *tx_skb;
1302 skb = skb_peek(TX_QUEUE(sk));
1304 if (bt_cb(skb)->tx_seq != tx_seq) {
1305 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1307 skb = skb_queue_next(TX_QUEUE(sk), skb);
/* Peer's MaxTx exhausted for this frame: drop the link. */
1311 if (pi->remote_max_tx &&
1312 bt_cb(skb)->retries == pi->remote_max_tx) {
1313 l2cap_send_disconn_req(pi->conn, sk);
1317 tx_skb = skb_clone(skb, GFP_ATOMIC);
1318 bt_cb(skb)->retries++;
1319 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1320 control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1321 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1322 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1324 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1325 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1326 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1329 err = l2cap_do_send(sk, tx_skb);
1331 l2cap_send_disconn_req(pi->conn, sk);
/* ERTM transmit path: while the tx window has room, clone the next
 * queued I-frame, stamp ReqSeq/TxSeq into its control field, refresh
 * the FCS, send it and keep the original queued for retransmission.
 * Suppressed entirely while waiting for an F-bit response (WAIT_F). */
1339 static int l2cap_ertm_send(struct sock *sk)
1341 struct sk_buff *skb, *tx_skb;
1342 struct l2cap_pinfo *pi = l2cap_pi(sk);
1346 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1349 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1350 tx_skb = skb_clone(skb, GFP_ATOMIC);
/* Frame already sent MaxTx times: the link is dead, disconnect. */
1352 if (pi->remote_max_tx &&
1353 bt_cb(skb)->retries == pi->remote_max_tx) {
1354 l2cap_send_disconn_req(pi->conn, sk);
1358 bt_cb(skb)->retries++;
1360 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1361 control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1362 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1363 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FIX: compute and store the FCS on the clone being transmitted
 * (tx_skb), not the retained original.  Using skb here hashed the
 * original's stale control field and scribbled the checksum into
 * the copy kept for retransmission. */
1366 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1367 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1368 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1371 err = l2cap_do_send(sk, tx_skb);
1373 l2cap_send_disconn_req(pi->conn, sk);
1376 __mod_retrans_timer();
/* Record the sequence used so acks/retransmits can find this frame. */
1378 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1379 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1381 pi->unacked_frames++;
1383 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1384 sk->sk_send_head = NULL;
1386 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Copy a user iovec into `skb`, spilling anything beyond `count` into
 * a chain of MTU-sized fragment skbs on skb's frag_list. */
1392 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1394 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1395 struct sk_buff **frag;
1398 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1405 /* Continuation fragments (no L2CAP header) */
1406 frag = &skb_shinfo(skb)->frag_list;
1408 count = min_t(unsigned int, conn->mtu, len);
1410 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1413 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1419 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM,
 * then the user payload copied in via l2cap_skbuff_fromiovec().
 * Returns the skb or an ERR_PTR on allocation/copy failure. */
1425 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1427 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1428 struct sk_buff *skb;
1429 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1430 struct l2cap_hdr *lh;
1432 BT_DBG("sk %p len %d", sk, (int)len);
1434 count = min_t(unsigned int, (conn->mtu - hlen), len);
1435 skb = bt_skb_send_alloc(sk, count + hlen,
1436 msg->msg_flags & MSG_DONTWAIT, &err);
1438 return ERR_PTR(-ENOMEM);
1440 /* Create L2CAP header */
1441 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1442 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1443 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1444 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1446 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1447 if (unlikely(err < 0)) {
1449 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by
 * the user payload.  Returns the skb or an ERR_PTR. */
1454 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1456 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1457 struct sk_buff *skb;
1458 int err, count, hlen = L2CAP_HDR_SIZE;
1459 struct l2cap_hdr *lh;
1461 BT_DBG("sk %p len %d", sk, (int)len);
1463 count = min_t(unsigned int, (conn->mtu - hlen), len);
1464 skb = bt_skb_send_alloc(sk, count + hlen,
1465 msg->msg_flags & MSG_DONTWAIT, &err);
1467 return ERR_PTR(-ENOMEM);
1469 /* Create L2CAP header */
1470 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1471 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1472 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1474 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1475 if (unlikely(err < 0)) {
1477 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: header + 16-bit control, an
 * optional SDU-length field (SAR start frames), the payload, and a
 * zero FCS placeholder filled in at transmit time. */
1482 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1484 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1485 struct sk_buff *skb;
1486 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1487 struct l2cap_hdr *lh;
1489 BT_DBG("sk %p len %d", sk, (int)len);
1494 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1497 count = min_t(unsigned int, (conn->mtu - hlen), len);
1498 skb = bt_skb_send_alloc(sk, count + hlen,
1499 msg->msg_flags & MSG_DONTWAIT, &err);
1501 return ERR_PTR(-ENOMEM);
1503 /* Create L2CAP header */
1504 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1505 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1506 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1507 put_unaligned_le16(control, skb_put(skb, 2));
/* Only SAR "start" frames carry the total SDU length. */
1509 put_unaligned_le16(sdulen, skb_put(skb, 2));
1511 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1512 if (unlikely(err < 0)) {
1514 return ERR_PTR(err);
/* FCS placeholder; real value is computed when the frame is sent. */
1517 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1518 put_unaligned_le16(0, skb_put(skb, 2));
1520 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than max_pdu_size into a START frame carrying
 * the total SDU length, CONTINUE frames, and a final END frame; the
 * segments are built on a private queue and spliced onto the tx queue
 * only once all allocations succeed (all-or-nothing). */
1524 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1526 struct l2cap_pinfo *pi = l2cap_pi(sk);
1527 struct sk_buff *skb;
1528 struct sk_buff_head sar_queue;
1532 __skb_queue_head_init(&sar_queue);
1533 control = L2CAP_SDU_START;
/* START frame: sdulen argument carries the full SDU length. */
1534 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
1536 return PTR_ERR(skb);
1538 __skb_queue_tail(&sar_queue, skb);
1539 len -= pi->max_pdu_size;
/* FIX: checkpatch style — space required after "+=". */
1540 size += pi->max_pdu_size;
1546 if (len > pi->max_pdu_size) {
1547 control |= L2CAP_SDU_CONTINUE;
1548 buflen = pi->max_pdu_size;
1550 control |= L2CAP_SDU_END;
/* Failure mid-SDU: discard every segment built so far. */
1554 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1556 skb_queue_purge(&sar_queue);
1557 return PTR_ERR(skb);
1560 __skb_queue_tail(&sar_queue, skb);
1565 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1566 if (sk->sk_send_head == NULL)
1567 sk->sk_send_head = sar_queue.next;
/* sendmsg(2) backend: dispatch on socket type and channel mode —
 * DGRAM builds a connectionless PDU, basic mode a single B-frame, and
 * ERTM/streaming queue I-frames (segmenting oversized SDUs) before
 * driving the mode-specific transmit engine. */
1572 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1574 struct sock *sk = sock->sk;
1575 struct l2cap_pinfo *pi = l2cap_pi(sk);
1576 struct sk_buff *skb;
1580 BT_DBG("sock %p, sk %p", sock, sk);
1582 err = sock_error(sk);
1586 if (msg->msg_flags & MSG_OOB)
1589 /* Check outgoing MTU */
1590 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC
1596 if (sk->sk_state != BT_CONNECTED) {
1601 /* Connectionless channel */
1602 if (sk->sk_type == SOCK_DGRAM) {
1603 skb = l2cap_create_connless_pdu(sk, msg, len);
1604 err = l2cap_do_send(sk, skb);
1609 case L2CAP_MODE_BASIC:
1610 /* Create a basic PDU */
1611 skb = l2cap_create_basic_pdu(sk, msg, len);
1617 err = l2cap_do_send(sk, skb);
1622 case L2CAP_MODE_ERTM:
1623 case L2CAP_MODE_STREAMING:
1624 /* Entire SDU fits into one PDU */
1625 if (len <= pi->max_pdu_size) {
1626 control = L2CAP_SDU_UNSEGMENTED;
1627 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1632 __skb_queue_tail(TX_QUEUE(sk), skb);
1633 if (sk->sk_send_head == NULL)
1634 sk->sk_send_head = skb;
1636 /* Segment SDU into multiples PDUs */
1637 err = l2cap_sar_segment_sdu(sk, msg, len);
1642 if (pi->mode == L2CAP_MODE_STREAMING)
1643 err = l2cap_streaming_send(sk);
1645 err = l2cap_ertm_send(sk);
1652 BT_DBG("bad state %1.1x", pi->mode);
1661 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1663 struct sock *sk = sock->sk;
1667 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1668 struct l2cap_conn_rsp rsp;
1670 sk->sk_state = BT_CONFIG;
1672 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1673 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1674 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1675 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1676 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1677 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1685 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1688 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
1690 struct sock *sk = sock->sk;
1691 struct l2cap_options opts;
1695 BT_DBG("sk %p", sk);
1701 opts.imtu = l2cap_pi(sk)->imtu;
1702 opts.omtu = l2cap_pi(sk)->omtu;
1703 opts.flush_to = l2cap_pi(sk)->flush_to;
1704 opts.mode = l2cap_pi(sk)->mode;
1705 opts.fcs = l2cap_pi(sk)->fcs;
1707 len = min_t(unsigned int, sizeof(opts), optlen);
1708 if (copy_from_user((char *) &opts, optval, len)) {
1713 l2cap_pi(sk)->imtu = opts.imtu;
1714 l2cap_pi(sk)->omtu = opts.omtu;
1715 l2cap_pi(sk)->mode = opts.mode;
1716 l2cap_pi(sk)->fcs = opts.fcs;
1720 if (get_user(opt, (u32 __user *) optval)) {
1725 if (opt & L2CAP_LM_AUTH)
1726 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1727 if (opt & L2CAP_LM_ENCRYPT)
1728 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1729 if (opt & L2CAP_LM_SECURE)
1730 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1732 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1733 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
1745 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1747 struct sock *sk = sock->sk;
1748 struct bt_security sec;
1752 BT_DBG("sk %p", sk);
1754 if (level == SOL_L2CAP)
1755 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1757 if (level != SOL_BLUETOOTH)
1758 return -ENOPROTOOPT;
1764 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1769 sec.level = BT_SECURITY_LOW;
1771 len = min_t(unsigned int, sizeof(sec), optlen);
1772 if (copy_from_user((char *) &sec, optval, len)) {
1777 if (sec.level < BT_SECURITY_LOW ||
1778 sec.level > BT_SECURITY_HIGH) {
1783 l2cap_pi(sk)->sec_level = sec.level;
1786 case BT_DEFER_SETUP:
1787 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1792 if (get_user(opt, (u32 __user *) optval)) {
1797 bt_sk(sk)->defer_setup = opt;
1809 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1811 struct sock *sk = sock->sk;
1812 struct l2cap_options opts;
1813 struct l2cap_conninfo cinfo;
1817 BT_DBG("sk %p", sk);
1819 if (get_user(len, optlen))
1826 opts.imtu = l2cap_pi(sk)->imtu;
1827 opts.omtu = l2cap_pi(sk)->omtu;
1828 opts.flush_to = l2cap_pi(sk)->flush_to;
1829 opts.mode = l2cap_pi(sk)->mode;
1830 opts.fcs = l2cap_pi(sk)->fcs;
1832 len = min_t(unsigned int, len, sizeof(opts));
1833 if (copy_to_user(optval, (char *) &opts, len))
1839 switch (l2cap_pi(sk)->sec_level) {
1840 case BT_SECURITY_LOW:
1841 opt = L2CAP_LM_AUTH;
1843 case BT_SECURITY_MEDIUM:
1844 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1846 case BT_SECURITY_HIGH:
1847 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1855 if (l2cap_pi(sk)->role_switch)
1856 opt |= L2CAP_LM_MASTER;
1858 if (l2cap_pi(sk)->force_reliable)
1859 opt |= L2CAP_LM_RELIABLE;
1861 if (put_user(opt, (u32 __user *) optval))
1865 case L2CAP_CONNINFO:
1866 if (sk->sk_state != BT_CONNECTED &&
1867 !(sk->sk_state == BT_CONNECT2 &&
1868 bt_sk(sk)->defer_setup)) {
1873 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1874 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1876 len = min_t(unsigned int, len, sizeof(cinfo));
1877 if (copy_to_user(optval, (char *) &cinfo, len))
1891 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1893 struct sock *sk = sock->sk;
1894 struct bt_security sec;
1897 BT_DBG("sk %p", sk);
1899 if (level == SOL_L2CAP)
1900 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1902 if (level != SOL_BLUETOOTH)
1903 return -ENOPROTOOPT;
1905 if (get_user(len, optlen))
1912 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1917 sec.level = l2cap_pi(sk)->sec_level;
1919 len = min_t(unsigned int, len, sizeof(sec));
1920 if (copy_to_user(optval, (char *) &sec, len))
1925 case BT_DEFER_SETUP:
1926 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1931 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
1945 static int l2cap_sock_shutdown(struct socket *sock, int how)
1947 struct sock *sk = sock->sk;
1950 BT_DBG("sock %p, sk %p", sock, sk);
1956 if (!sk->sk_shutdown) {
1957 sk->sk_shutdown = SHUTDOWN_MASK;
1958 l2cap_sock_clear_timer(sk);
1959 __l2cap_sock_close(sk, 0);
1961 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1962 err = bt_sock_wait_state(sk, BT_CLOSED,
1969 static int l2cap_sock_release(struct socket *sock)
1971 struct sock *sk = sock->sk;
1974 BT_DBG("sock %p, sk %p", sock, sk);
1979 err = l2cap_sock_shutdown(sock, 2);
1982 l2cap_sock_kill(sk);
1986 static void l2cap_chan_ready(struct sock *sk)
1988 struct sock *parent = bt_sk(sk)->parent;
1990 BT_DBG("sk %p, parent %p", sk, parent);
1992 l2cap_pi(sk)->conf_state = 0;
1993 l2cap_sock_clear_timer(sk);
1996 /* Outgoing channel.
1997 * Wake up socket sleeping on connect.
1999 sk->sk_state = BT_CONNECTED;
2000 sk->sk_state_change(sk);
2002 /* Incoming channel.
2003 * Wake up socket sleeping on accept.
2005 parent->sk_data_ready(parent, 0);
2009 /* Copy frame to all raw sockets on that connection */
2010 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2012 struct l2cap_chan_list *l = &conn->chan_list;
2013 struct sk_buff *nskb;
2016 BT_DBG("conn %p", conn);
2018 read_lock(&l->lock);
2019 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2020 if (sk->sk_type != SOCK_RAW)
2023 /* Don't send frame to the socket it came from */
2026 nskb = skb_clone(skb, GFP_ATOMIC);
2030 if (sock_queue_rcv_skb(sk, nskb))
2033 read_unlock(&l->lock);
2036 /* ---- L2CAP signalling commands ---- */
2037 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2038 u8 code, u8 ident, u16 dlen, void *data)
2040 struct sk_buff *skb, **frag;
2041 struct l2cap_cmd_hdr *cmd;
2042 struct l2cap_hdr *lh;
2045 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2046 conn, code, ident, dlen);
2048 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2049 count = min_t(unsigned int, conn->mtu, len);
2051 skb = bt_skb_alloc(count, GFP_ATOMIC);
2055 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2056 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2057 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2059 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2062 cmd->len = cpu_to_le16(dlen);
2065 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2066 memcpy(skb_put(skb, count), data, count);
2072 /* Continuation fragments (no L2CAP header) */
2073 frag = &skb_shinfo(skb)->frag_list;
2075 count = min_t(unsigned int, conn->mtu, len);
2077 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2081 memcpy(skb_put(*frag, count), data, count);
2086 frag = &(*frag)->next;
2096 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2098 struct l2cap_conf_opt *opt = *ptr;
2101 len = L2CAP_CONF_OPT_SIZE + opt->len;
2109 *val = *((u8 *) opt->val);
2113 *val = __le16_to_cpu(*((__le16 *) opt->val));
2117 *val = __le32_to_cpu(*((__le32 *) opt->val));
2121 *val = (unsigned long) opt->val;
2125 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2129 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2131 struct l2cap_conf_opt *opt = *ptr;
2133 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2140 *((u8 *) opt->val) = val;
2144 *((__le16 *) opt->val) = cpu_to_le16(val);
2148 *((__le32 *) opt->val) = cpu_to_le32(val);
2152 memcpy(opt->val, (void *) val, len);
2156 *ptr += L2CAP_CONF_OPT_SIZE + len;
2159 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2161 u32 local_feat_mask = l2cap_feat_mask;
2163 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2166 case L2CAP_MODE_ERTM:
2167 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2168 case L2CAP_MODE_STREAMING:
2169 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2175 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2178 case L2CAP_MODE_STREAMING:
2179 case L2CAP_MODE_ERTM:
2180 if (l2cap_mode_supported(mode, remote_feat_mask))
2184 return L2CAP_MODE_BASIC;
2188 static int l2cap_build_conf_req(struct sock *sk, void *data)
2190 struct l2cap_pinfo *pi = l2cap_pi(sk);
2191 struct l2cap_conf_req *req = data;
2192 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_ERTM };
2193 void *ptr = req->data;
2195 BT_DBG("sk %p", sk);
2197 if (pi->num_conf_req || pi->num_conf_rsp)
2201 case L2CAP_MODE_STREAMING:
2202 case L2CAP_MODE_ERTM:
2203 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2204 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2205 l2cap_send_disconn_req(pi->conn, sk);
2208 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2214 case L2CAP_MODE_BASIC:
2215 if (pi->imtu != L2CAP_DEFAULT_MTU)
2216 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2219 case L2CAP_MODE_ERTM:
2220 rfc.mode = L2CAP_MODE_ERTM;
2221 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2222 rfc.max_transmit = L2CAP_DEFAULT_MAX_TX;
2223 rfc.retrans_timeout = 0;
2224 rfc.monitor_timeout = 0;
2225 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2227 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2228 sizeof(rfc), (unsigned long) &rfc);
2230 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2233 if (pi->fcs == L2CAP_FCS_NONE ||
2234 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2235 pi->fcs = L2CAP_FCS_NONE;
2236 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2240 case L2CAP_MODE_STREAMING:
2241 rfc.mode = L2CAP_MODE_STREAMING;
2243 rfc.max_transmit = 0;
2244 rfc.retrans_timeout = 0;
2245 rfc.monitor_timeout = 0;
2246 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2248 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2249 sizeof(rfc), (unsigned long) &rfc);
2251 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2254 if (pi->fcs == L2CAP_FCS_NONE ||
2255 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2256 pi->fcs = L2CAP_FCS_NONE;
2257 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2262 /* FIXME: Need actual value of the flush timeout */
2263 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2264 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2266 req->dcid = cpu_to_le16(pi->dcid);
2267 req->flags = cpu_to_le16(0);
2272 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2274 struct l2cap_pinfo *pi = l2cap_pi(sk);
2275 struct l2cap_conf_rsp *rsp = data;
2276 void *ptr = rsp->data;
2277 void *req = pi->conf_req;
2278 int len = pi->conf_len;
2279 int type, hint, olen;
2281 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2282 u16 mtu = L2CAP_DEFAULT_MTU;
2283 u16 result = L2CAP_CONF_SUCCESS;
2285 BT_DBG("sk %p", sk);
2287 while (len >= L2CAP_CONF_OPT_SIZE) {
2288 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2290 hint = type & L2CAP_CONF_HINT;
2291 type &= L2CAP_CONF_MASK;
2294 case L2CAP_CONF_MTU:
2298 case L2CAP_CONF_FLUSH_TO:
2302 case L2CAP_CONF_QOS:
2305 case L2CAP_CONF_RFC:
2306 if (olen == sizeof(rfc))
2307 memcpy(&rfc, (void *) val, olen);
2310 case L2CAP_CONF_FCS:
2311 if (val == L2CAP_FCS_NONE)
2312 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2320 result = L2CAP_CONF_UNKNOWN;
2321 *((u8 *) ptr++) = type;
2326 if (pi->num_conf_rsp || pi->num_conf_req)
2330 case L2CAP_MODE_STREAMING:
2331 case L2CAP_MODE_ERTM:
2332 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2333 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2334 return -ECONNREFUSED;
2337 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2342 if (pi->mode != rfc.mode) {
2343 result = L2CAP_CONF_UNACCEPT;
2344 rfc.mode = pi->mode;
2346 if (pi->num_conf_rsp == 1)
2347 return -ECONNREFUSED;
2349 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2350 sizeof(rfc), (unsigned long) &rfc);
2354 if (result == L2CAP_CONF_SUCCESS) {
2355 /* Configure output options and let the other side know
2356 * which ones we don't like. */
2358 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2359 result = L2CAP_CONF_UNACCEPT;
2362 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2364 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2367 case L2CAP_MODE_BASIC:
2368 pi->fcs = L2CAP_FCS_NONE;
2369 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2372 case L2CAP_MODE_ERTM:
2373 pi->remote_tx_win = rfc.txwin_size;
2374 pi->remote_max_tx = rfc.max_transmit;
2375 pi->max_pdu_size = rfc.max_pdu_size;
2377 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
2378 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2380 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2383 case L2CAP_MODE_STREAMING:
2384 pi->remote_tx_win = rfc.txwin_size;
2385 pi->max_pdu_size = rfc.max_pdu_size;
2387 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2391 result = L2CAP_CONF_UNACCEPT;
2393 memset(&rfc, 0, sizeof(rfc));
2394 rfc.mode = pi->mode;
2397 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2398 sizeof(rfc), (unsigned long) &rfc);
2400 if (result == L2CAP_CONF_SUCCESS)
2401 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2403 rsp->scid = cpu_to_le16(pi->dcid);
2404 rsp->result = cpu_to_le16(result);
2405 rsp->flags = cpu_to_le16(0x0000);
2410 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2412 struct l2cap_pinfo *pi = l2cap_pi(sk);
2413 struct l2cap_conf_req *req = data;
2414 void *ptr = req->data;
2417 struct l2cap_conf_rfc rfc;
2419 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2421 while (len >= L2CAP_CONF_OPT_SIZE) {
2422 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2425 case L2CAP_CONF_MTU:
2426 if (val < L2CAP_DEFAULT_MIN_MTU) {
2427 *result = L2CAP_CONF_UNACCEPT;
2428 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2431 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2434 case L2CAP_CONF_FLUSH_TO:
2436 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2440 case L2CAP_CONF_RFC:
2441 if (olen == sizeof(rfc))
2442 memcpy(&rfc, (void *)val, olen);
2444 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2445 rfc.mode != pi->mode)
2446 return -ECONNREFUSED;
2448 pi->mode = rfc.mode;
2451 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2452 sizeof(rfc), (unsigned long) &rfc);
2457 if (*result == L2CAP_CONF_SUCCESS) {
2459 case L2CAP_MODE_ERTM:
2460 pi->remote_tx_win = rfc.txwin_size;
2461 pi->retrans_timeout = rfc.retrans_timeout;
2462 pi->monitor_timeout = rfc.monitor_timeout;
2463 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2465 case L2CAP_MODE_STREAMING:
2466 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2471 req->dcid = cpu_to_le16(pi->dcid);
2472 req->flags = cpu_to_le16(0x0000);
2477 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2479 struct l2cap_conf_rsp *rsp = data;
2480 void *ptr = rsp->data;
2482 BT_DBG("sk %p", sk);
2484 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2485 rsp->result = cpu_to_le16(result);
2486 rsp->flags = cpu_to_le16(flags);
2491 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2493 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2495 if (rej->reason != 0x0000)
2498 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2499 cmd->ident == conn->info_ident) {
2500 del_timer(&conn->info_timer);
2502 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2503 conn->info_ident = 0;
2505 l2cap_conn_start(conn);
2511 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2513 struct l2cap_chan_list *list = &conn->chan_list;
2514 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2515 struct l2cap_conn_rsp rsp;
2516 struct sock *sk, *parent;
2517 int result, status = L2CAP_CS_NO_INFO;
2519 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2520 __le16 psm = req->psm;
2522 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2524 /* Check if we have socket listening on psm */
2525 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2527 result = L2CAP_CR_BAD_PSM;
2531 /* Check if the ACL is secure enough (if not SDP) */
2532 if (psm != cpu_to_le16(0x0001) &&
2533 !hci_conn_check_link_mode(conn->hcon)) {
2534 conn->disc_reason = 0x05;
2535 result = L2CAP_CR_SEC_BLOCK;
2539 result = L2CAP_CR_NO_MEM;
2541 /* Check for backlog size */
2542 if (sk_acceptq_is_full(parent)) {
2543 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2547 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2551 write_lock_bh(&list->lock);
2553 /* Check if we already have channel with that dcid */
2554 if (__l2cap_get_chan_by_dcid(list, scid)) {
2555 write_unlock_bh(&list->lock);
2556 sock_set_flag(sk, SOCK_ZAPPED);
2557 l2cap_sock_kill(sk);
2561 hci_conn_hold(conn->hcon);
2563 l2cap_sock_init(sk, parent);
2564 bacpy(&bt_sk(sk)->src, conn->src);
2565 bacpy(&bt_sk(sk)->dst, conn->dst);
2566 l2cap_pi(sk)->psm = psm;
2567 l2cap_pi(sk)->dcid = scid;
2569 __l2cap_chan_add(conn, sk, parent);
2570 dcid = l2cap_pi(sk)->scid;
2572 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2574 l2cap_pi(sk)->ident = cmd->ident;
2576 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2577 if (l2cap_check_security(sk)) {
2578 if (bt_sk(sk)->defer_setup) {
2579 sk->sk_state = BT_CONNECT2;
2580 result = L2CAP_CR_PEND;
2581 status = L2CAP_CS_AUTHOR_PEND;
2582 parent->sk_data_ready(parent, 0);
2584 sk->sk_state = BT_CONFIG;
2585 result = L2CAP_CR_SUCCESS;
2586 status = L2CAP_CS_NO_INFO;
2589 sk->sk_state = BT_CONNECT2;
2590 result = L2CAP_CR_PEND;
2591 status = L2CAP_CS_AUTHEN_PEND;
2594 sk->sk_state = BT_CONNECT2;
2595 result = L2CAP_CR_PEND;
2596 status = L2CAP_CS_NO_INFO;
2599 write_unlock_bh(&list->lock);
2602 bh_unlock_sock(parent);
2605 rsp.scid = cpu_to_le16(scid);
2606 rsp.dcid = cpu_to_le16(dcid);
2607 rsp.result = cpu_to_le16(result);
2608 rsp.status = cpu_to_le16(status);
2609 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2611 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2612 struct l2cap_info_req info;
2613 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2615 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2616 conn->info_ident = l2cap_get_ident(conn);
2618 mod_timer(&conn->info_timer, jiffies +
2619 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2621 l2cap_send_cmd(conn, conn->info_ident,
2622 L2CAP_INFO_REQ, sizeof(info), &info);
2628 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2630 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2631 u16 scid, dcid, result, status;
2635 scid = __le16_to_cpu(rsp->scid);
2636 dcid = __le16_to_cpu(rsp->dcid);
2637 result = __le16_to_cpu(rsp->result);
2638 status = __le16_to_cpu(rsp->status);
2640 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2643 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2647 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2653 case L2CAP_CR_SUCCESS:
2654 sk->sk_state = BT_CONFIG;
2655 l2cap_pi(sk)->ident = 0;
2656 l2cap_pi(sk)->dcid = dcid;
2657 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2659 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2661 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2662 l2cap_build_conf_req(sk, req), req);
2663 l2cap_pi(sk)->num_conf_req++;
2667 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2671 l2cap_chan_del(sk, ECONNREFUSED);
2679 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2681 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2687 dcid = __le16_to_cpu(req->dcid);
2688 flags = __le16_to_cpu(req->flags);
2690 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2692 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2696 if (sk->sk_state == BT_DISCONN)
2699 /* Reject if config buffer is too small. */
2700 len = cmd_len - sizeof(*req);
2701 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2702 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2703 l2cap_build_conf_rsp(sk, rsp,
2704 L2CAP_CONF_REJECT, flags), rsp);
2709 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2710 l2cap_pi(sk)->conf_len += len;
2712 if (flags & 0x0001) {
2713 /* Incomplete config. Send empty response. */
2714 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2715 l2cap_build_conf_rsp(sk, rsp,
2716 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2720 /* Complete config. */
2721 len = l2cap_parse_conf_req(sk, rsp);
2723 l2cap_send_disconn_req(conn, sk);
2727 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2728 l2cap_pi(sk)->num_conf_rsp++;
2730 /* Reset config buffer. */
2731 l2cap_pi(sk)->conf_len = 0;
2733 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2736 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2737 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
2738 || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2739 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2741 sk->sk_state = BT_CONNECTED;
2742 l2cap_pi(sk)->next_tx_seq = 0;
2743 l2cap_pi(sk)->expected_ack_seq = 0;
2744 l2cap_pi(sk)->unacked_frames = 0;
2746 setup_timer(&l2cap_pi(sk)->retrans_timer,
2747 l2cap_retrans_timeout, (unsigned long) sk);
2748 setup_timer(&l2cap_pi(sk)->monitor_timer,
2749 l2cap_monitor_timeout, (unsigned long) sk);
2751 __skb_queue_head_init(TX_QUEUE(sk));
2752 __skb_queue_head_init(SREJ_QUEUE(sk));
2753 l2cap_chan_ready(sk);
2757 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2759 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2760 l2cap_build_conf_req(sk, buf), buf);
2761 l2cap_pi(sk)->num_conf_req++;
2769 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2771 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2772 u16 scid, flags, result;
2775 scid = __le16_to_cpu(rsp->scid);
2776 flags = __le16_to_cpu(rsp->flags);
2777 result = __le16_to_cpu(rsp->result);
2779 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2780 scid, flags, result);
2782 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2787 case L2CAP_CONF_SUCCESS:
2790 case L2CAP_CONF_UNACCEPT:
2791 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2792 int len = cmd->len - sizeof(*rsp);
2795 /* throw out any old stored conf requests */
2796 result = L2CAP_CONF_SUCCESS;
2797 len = l2cap_parse_conf_rsp(sk, rsp->data,
2800 l2cap_send_disconn_req(conn, sk);
2804 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2805 L2CAP_CONF_REQ, len, req);
2806 l2cap_pi(sk)->num_conf_req++;
2807 if (result != L2CAP_CONF_SUCCESS)
2813 sk->sk_state = BT_DISCONN;
2814 sk->sk_err = ECONNRESET;
2815 l2cap_sock_set_timer(sk, HZ * 5);
2816 l2cap_send_disconn_req(conn, sk);
2823 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2825 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2826 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
2827 || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2828 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2830 sk->sk_state = BT_CONNECTED;
2831 l2cap_pi(sk)->expected_tx_seq = 0;
2832 l2cap_pi(sk)->buffer_seq = 0;
2833 l2cap_pi(sk)->num_to_ack = 0;
2834 __skb_queue_head_init(TX_QUEUE(sk));
2835 __skb_queue_head_init(SREJ_QUEUE(sk));
2836 l2cap_chan_ready(sk);
2844 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2846 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2847 struct l2cap_disconn_rsp rsp;
2851 scid = __le16_to_cpu(req->scid);
2852 dcid = __le16_to_cpu(req->dcid);
2854 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2856 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2860 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2861 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2862 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2864 sk->sk_shutdown = SHUTDOWN_MASK;
2866 skb_queue_purge(TX_QUEUE(sk));
2867 skb_queue_purge(SREJ_QUEUE(sk));
2868 del_timer(&l2cap_pi(sk)->retrans_timer);
2869 del_timer(&l2cap_pi(sk)->monitor_timer);
2871 l2cap_chan_del(sk, ECONNRESET);
2874 l2cap_sock_kill(sk);
2878 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2880 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2884 scid = __le16_to_cpu(rsp->scid);
2885 dcid = __le16_to_cpu(rsp->dcid);
2887 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2889 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2893 skb_queue_purge(TX_QUEUE(sk));
2894 skb_queue_purge(SREJ_QUEUE(sk));
2895 del_timer(&l2cap_pi(sk)->retrans_timer);
2896 del_timer(&l2cap_pi(sk)->monitor_timer);
2898 l2cap_chan_del(sk, 0);
2901 l2cap_sock_kill(sk);
2905 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2907 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2910 type = __le16_to_cpu(req->type);
2912 BT_DBG("type 0x%4.4x", type);
2914 if (type == L2CAP_IT_FEAT_MASK) {
2916 u32 feat_mask = l2cap_feat_mask;
2917 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2918 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2919 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2921 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2923 put_unaligned(cpu_to_le32(feat_mask), (__le32 *) rsp->data);
2924 l2cap_send_cmd(conn, cmd->ident,
2925 L2CAP_INFO_RSP, sizeof(buf), buf);
2926 } else if (type == L2CAP_IT_FIXED_CHAN) {
2928 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2929 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2930 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2931 memcpy(buf + 4, l2cap_fixed_chan, 8);
2932 l2cap_send_cmd(conn, cmd->ident,
2933 L2CAP_INFO_RSP, sizeof(buf), buf);
2935 struct l2cap_info_rsp rsp;
2936 rsp.type = cpu_to_le16(type);
2937 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2938 l2cap_send_cmd(conn, cmd->ident,
2939 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2945 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2947 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2950 type = __le16_to_cpu(rsp->type);
2951 result = __le16_to_cpu(rsp->result);
2953 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2955 del_timer(&conn->info_timer);
2957 if (type == L2CAP_IT_FEAT_MASK) {
2958 conn->feat_mask = get_unaligned_le32(rsp->data);
2960 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2961 struct l2cap_info_req req;
2962 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2964 conn->info_ident = l2cap_get_ident(conn);
2966 l2cap_send_cmd(conn, conn->info_ident,
2967 L2CAP_INFO_REQ, sizeof(req), &req);
2969 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2970 conn->info_ident = 0;
2972 l2cap_conn_start(conn);
2974 } else if (type == L2CAP_IT_FIXED_CHAN) {
2975 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2976 conn->info_ident = 0;
2978 l2cap_conn_start(conn);
2984 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
2986 u8 *data = skb->data;
2988 struct l2cap_cmd_hdr cmd;
2991 l2cap_raw_recv(conn, skb);
2993 while (len >= L2CAP_CMD_HDR_SIZE) {
2995 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2996 data += L2CAP_CMD_HDR_SIZE;
2997 len -= L2CAP_CMD_HDR_SIZE;
2999 cmd_len = le16_to_cpu(cmd.len);
3001 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3003 if (cmd_len > len || !cmd.ident) {
3004 BT_DBG("corrupted command");
3009 case L2CAP_COMMAND_REJ:
3010 l2cap_command_rej(conn, &cmd, data);
3013 case L2CAP_CONN_REQ:
3014 err = l2cap_connect_req(conn, &cmd, data);
3017 case L2CAP_CONN_RSP:
3018 err = l2cap_connect_rsp(conn, &cmd, data);
3021 case L2CAP_CONF_REQ:
3022 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3025 case L2CAP_CONF_RSP:
3026 err = l2cap_config_rsp(conn, &cmd, data);
3029 case L2CAP_DISCONN_REQ:
3030 err = l2cap_disconnect_req(conn, &cmd, data);
3033 case L2CAP_DISCONN_RSP:
3034 err = l2cap_disconnect_rsp(conn, &cmd, data);
3037 case L2CAP_ECHO_REQ:
3038 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3041 case L2CAP_ECHO_RSP:
3044 case L2CAP_INFO_REQ:
3045 err = l2cap_information_req(conn, &cmd, data);
3048 case L2CAP_INFO_RSP:
3049 err = l2cap_information_rsp(conn, &cmd, data);
3053 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3059 struct l2cap_cmd_rej rej;
3060 BT_DBG("error %d", err);
3062 /* FIXME: Map err to a valid reason */
3063 rej.reason = cpu_to_le16(0);
3064 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3074 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3076 u16 our_fcs, rcv_fcs;
3077 int hdr_size = L2CAP_HDR_SIZE + 2;
3079 if (pi->fcs == L2CAP_FCS_CRC16) {
3080 skb_trim(skb, skb->len - 2);
3081 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3082 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3084 if (our_fcs != rcv_fcs)
3090 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3092 struct sk_buff *next_skb;
3094 bt_cb(skb)->tx_seq = tx_seq;
3095 bt_cb(skb)->sar = sar;
3097 next_skb = skb_peek(SREJ_QUEUE(sk));
3099 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3104 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3105 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3109 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3112 } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3114 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/*
 * Reassemble an SDU from (possibly segmented) ERTM/streaming I-frames
 * and deliver it to the socket receive queue.
 *
 * The SAR bits of 'control' select the branch:
 *  - UNSEGMENTED: whole SDU in one frame, queue it directly;
 *  - START: read the 16-bit total SDU length, allocate pi->sdu and
 *    copy the first fragment in;
 *  - CONTINUE: append a middle fragment, bound-checking the running
 *    length against the announced total;
 *  - END (the trailing branch here): append the last fragment and,
 *    when the accumulated length matches sdu_len, clone and queue
 *    the completed SDU.
 * Returns 0 or a negative errno; several error paths are elided in
 * this excerpt.
 */
3117 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3119 struct l2cap_pinfo *pi = l2cap_pi(sk);
3120 struct sk_buff *_skb;
3123 switch (control & L2CAP_CTRL_SAR) {
3124 case L2CAP_SDU_UNSEGMENTED:
/* a new unsegmented SDU while reassembly is in progress is a
 * protocol violation (handling elided) */
3125 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3130 err = sock_queue_rcv_skb(sk, skb);
3136 case L2CAP_SDU_START:
3137 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* first two payload bytes carry the total SDU length */
3142 pi->sdu_len = get_unaligned_le16(skb->data);
3145 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3151 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3153 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3154 pi->partial_sdu_len = skb->len;
3158 case L2CAP_SDU_CONTINUE:
/* CONTINUE without a preceding START is invalid */
3159 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3162 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3164 pi->partial_sdu_len += skb->len;
/* peer sent more data than the announced SDU length */
3165 if (pi->partial_sdu_len > pi->sdu_len)
3173 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3176 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* reassembly complete — presumably the SDU_END case (label elided) */
3178 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3179 pi->partial_sdu_len += skb->len;
3181 if (pi->partial_sdu_len == pi->sdu_len) {
3182 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3183 err = sock_queue_rcv_skb(sk, _skb);
/*
 * Release frames from the SREJ queue once the gap has been filled.
 * Starting at tx_seq, dequeue every consecutively-numbered frame,
 * push it through SAR reassembly, and advance buffer_seq_srej
 * (mod 64) for each frame delivered.
 */
3197 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3199 struct sk_buff *skb;
3202 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* stop at the first hole in the sequence */
3203 if (bt_cb(skb)->tx_seq != tx_seq)
3206 skb = skb_dequeue(SREJ_QUEUE(sk));
/* rebuild the SAR bits of the control field from the saved cb */
3207 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3208 l2cap_sar_reassembly_sdu(sk, skb, control);
3209 l2cap_pi(sk)->buffer_seq_srej =
3210 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
/*
 * Re-send pending SREJ S-frames.  Walks the outstanding SREJ list:
 * the entry matching tx_seq is apparently retired (its handling is
 * elided here — TODO confirm it is deleted and freed), while every
 * other entry has its SREJ frame retransmitted and is rotated to the
 * tail of the list.
 */
3215 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3217 struct l2cap_pinfo *pi = l2cap_pi(sk);
3218 struct srej_list *l, *tmp;
3221 list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
3222 if (l->tx_seq == tx_seq) {
3227 control = L2CAP_SUPER_SELECT_REJECT;
3228 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3229 l2cap_send_sframe(pi, control);
/* keep the entry pending: move it to the back of the list */
3231 list_add_tail(&l->list, SREJ_LIST(sk));
/*
 * Send one SREJ S-frame for every sequence number missing between the
 * expected_tx_seq and the newly received tx_seq, recording each
 * requested frame in SREJ_LIST so the retransmissions can be matched
 * on arrival.  The first SREJ also carries the Poll bit when
 * L2CAP_CONN_SEND_PBIT is set.
 */
3235 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3237 struct l2cap_pinfo *pi = l2cap_pi(sk);
3238 struct srej_list *new;
3241 while (tx_seq != pi->expected_tx_seq) {
3242 control = L2CAP_SUPER_SELECT_REJECT;
3243 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3244 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
3245 control |= L2CAP_CTRL_POLL;
/* only the first SREJ of the burst carries the P-bit */
3246 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
3248 l2cap_send_sframe(pi, control);
/* NOTE(review): kzalloc(GFP_ATOMIC) can fail; the result is
 * dereferenced without a NULL check — potential oops under
 * memory pressure. */
3250 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3251 new->tx_seq = pi->expected_tx_seq++;
3252 list_add_tail(&new->list, SREJ_LIST(sk));
/* account for the frame that actually arrived (tx_seq itself) */
3254 pi->expected_tx_seq++;
/*
 * ERTM receive path for I-frames (information frames).
 *
 * If the frame is the expected in-sequence one, it is delivered (or
 * queued on the SREJ queue while a recovery is in progress) and the
 * expected/buffer sequence counters advance mod 64.  An out-of-sequence
 * frame either satisfies an outstanding SREJ request, or — when no
 * SREJ recovery is active — starts one: the SREJ list/queue are
 * initialised, the frame is buffered, and SREJ frames are sent for the
 * missing sequence numbers.  Every L2CAP_DEFAULT_NUM_TO_ACK delivered
 * frames an RR acknowledgement S-frame is sent back.
 * Several branch terminators (goto/return) are elided in this excerpt.
 */
3257 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3259 struct l2cap_pinfo *pi = l2cap_pi(sk);
3260 u8 tx_seq = __get_txseq(rx_control);
3262 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3265 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* fast path: in-sequence frame (target label elided) */
3267 if (tx_seq == pi->expected_tx_seq)
/* out-of-sequence frame while a SREJ recovery is already running */
3270 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3271 struct srej_list *first;
3273 first = list_first_entry(SREJ_LIST(sk),
3274 struct srej_list, list);
/* this is the frame our oldest SREJ asked for */
3275 if (tx_seq == first->tx_seq) {
3276 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3277 l2cap_check_srej_gap(sk, tx_seq);
3279 list_del(&first->list);
/* all requested frames received: leave SREJ recovery */
3282 if (list_empty(SREJ_LIST(sk))) {
3283 pi->buffer_seq = pi->buffer_seq_srej;
3284 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3287 struct srej_list *l;
3288 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
/* duplicate of an already-requested frame: resend its SREJ */
3290 list_for_each_entry(l, SREJ_LIST(sk), list) {
3291 if (l->tx_seq == tx_seq) {
3292 l2cap_resend_srejframe(sk, tx_seq);
/* a new gap opened beyond the ones already requested */
3296 l2cap_send_srejframe(sk, tx_seq);
/* no recovery active: start SREJ recovery for the new gap */
3299 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3301 INIT_LIST_HEAD(SREJ_LIST(sk));
3302 pi->buffer_seq_srej = pi->buffer_seq;
3304 __skb_queue_head_init(SREJ_QUEUE(sk));
3305 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3307 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3309 l2cap_send_srejframe(sk, tx_seq);
/* expected-frame path: advance the receive window */
3314 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3316 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3317 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3321 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3323 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
/* periodic RR ack, once per NUM_TO_ACK delivered frames */
3327 pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
3328 if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
3329 tx_control |= L2CAP_SUPER_RCV_READY;
3330 tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3331 l2cap_send_sframe(pi, tx_control);
/*
 * ERTM receive path for S-frames (supervisory frames).
 *
 * Dispatches on the supervise bits:
 *  - RCV_READY (RR): answer a Poll with a Final RR; on a Final RR
 *    stop the monitor timer, ack frames up to ReqSeq and resume
 *    transmission;
 *  - REJECT: ack up to ReqSeq and retransmit everything from there;
 *  - SELECT_REJECT (SREJ): retransmit the single requested frame,
 *    tracking Poll/Final handshakes via srej_save_reqseq and the
 *    SREJ_ACT state bit;
 *  - RCV_NOT_READY (RNR): handling elided in this excerpt.
 * Note: despite the local name, tx_seq here holds the *ReqSeq* field.
 */
3336 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3338 struct l2cap_pinfo *pi = l2cap_pi(sk);
3339 u8 tx_seq = __get_reqseq(rx_control);
3341 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3343 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3344 case L2CAP_SUPER_RCV_READY:
/* peer polls us: reply immediately with RR + Final bit */
3345 if (rx_control & L2CAP_CTRL_POLL) {
3346 u16 control = L2CAP_CTRL_FINAL;
3347 control |= L2CAP_SUPER_RCV_READY;
3348 l2cap_send_sframe(l2cap_pi(sk), control);
3349 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* Final without an outstanding Poll is ignored */
3350 if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
3353 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3354 del_timer(&pi->monitor_timer);
3356 if (pi->unacked_frames > 0)
3357 __mod_retrans_timer();
/* plain RR: ack acknowledged frames and keep sending */
3359 pi->expected_ack_seq = tx_seq;
3360 l2cap_drop_acked_frames(sk);
3361 if (pi->unacked_frames > 0)
3362 __mod_retrans_timer();
3363 l2cap_ertm_send(sk);
3367 case L2CAP_SUPER_REJECT:
3368 pi->expected_ack_seq = __get_reqseq(rx_control);
3369 l2cap_drop_acked_frames(sk);
/* rewind the send head so everything from ReqSeq goes out again */
3371 sk->sk_send_head = TX_QUEUE(sk)->next;
3372 pi->next_tx_seq = pi->expected_ack_seq;
3374 l2cap_ertm_send(sk);
3378 case L2CAP_SUPER_SELECT_REJECT:
3379 if (rx_control & L2CAP_CTRL_POLL) {
3380 l2cap_retransmit_frame(sk, tx_seq);
3381 pi->expected_ack_seq = tx_seq;
3382 l2cap_drop_acked_frames(sk);
3383 l2cap_ertm_send(sk);
3384 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3385 pi->srej_save_reqseq = tx_seq;
3386 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3388 } else if (rx_control & L2CAP_CTRL_FINAL) {
3389 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3390 pi->srej_save_reqseq == tx_seq)
/* NOTE(review): suspected bug — this clears the SREJ_ACT bit
 * from srej_save_reqseq, but the flag lives in conn_state.
 * Likely should be: pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
 * (this was fixed upstream). */
3391 pi->srej_save_reqseq &= ~L2CAP_CONN_SREJ_ACT;
3393 l2cap_retransmit_frame(sk, tx_seq);
3396 l2cap_retransmit_frame(sk, tx_seq);
3397 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3398 pi->srej_save_reqseq = tx_seq;
3399 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3404 case L2CAP_SUPER_RCV_NOT_READY:
/*
 * Deliver an incoming data frame to the channel identified by 'cid'.
 *
 * Looks up the (locked) socket for the channel and dispatches on the
 * channel mode:
 *  - BASIC: queue the skb directly, dropping oversized frames;
 *  - ERTM: strip the control field and optional FCS, sanity-check the
 *    length, then hand off to the I-frame or S-frame handler;
 *  - STREAMING: like ERTM but with no retransmission — the expected
 *    sequence number simply resynchronises on mismatch.
 * Unlock/drop paths are elided in this excerpt.
 */
3411 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3414 struct l2cap_pinfo *pi;
3419 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3421 BT_DBG("unknown cid 0x%4.4x", cid);
3427 BT_DBG("sk %p, len %d", sk, skb->len);
3429 if (sk->sk_state != BT_CONNECTED)
3433 case L2CAP_MODE_BASIC:
3434 /* If socket recv buffers overflows we drop data here
3435 * which is *bad* because L2CAP has to be reliable.
3436 * But we don't have any other choice. L2CAP doesn't
3437 * provide flow control mechanism. */
3439 if (pi->imtu < skb->len)
3442 if (!sock_queue_rcv_skb(sk, skb))
3446 case L2CAP_MODE_ERTM:
/* first two payload bytes are the ERTM control field */
3447 control = get_unaligned_le16(skb->data)
3451 if (__is_sar_start(control))
3454 if (pi->fcs == L2CAP_FCS_CRC16)
3458 * We can just drop the corrupted I-frame here.
3459 * Receiver will miss it and start proper recovery
3460 * procedures and ask retransmission.
3462 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
3465 if (l2cap_check_fcs(pi, skb))
3468 if (__is_iframe(control))
3469 err = l2cap_data_channel_iframe(sk, control, skb);
3471 err = l2cap_data_channel_sframe(sk, control, skb);
3477 case L2CAP_MODE_STREAMING:
3478 control = get_unaligned_le16(skb->data);
3482 if (__is_sar_start(control))
3485 if (pi->fcs == L2CAP_FCS_CRC16)
/* streaming mode carries no S-frames at all */
3488 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
3491 if (l2cap_check_fcs(pi, skb))
3494 tx_seq = __get_txseq(control);
3496 if (pi->expected_tx_seq == tx_seq)
3497 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* lost frames are simply skipped — resync on the received seq */
3499 pi->expected_tx_seq = tx_seq + 1;
3501 err = l2cap_sar_reassembly_sdu(sk, skb, control);
3506 BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
/*
 * Deliver a connectionless (CID 0x0002) frame to the socket bound to
 * the given PSM on this adapter; drops the frame if no suitable
 * socket exists, the socket state is wrong, or the frame exceeds the
 * socket's incoming MTU.
 */
3520 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3524 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3528 BT_DBG("sk %p, len %d", sk, skb->len);
3530 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3533 if (l2cap_pi(sk)->imtu < skb->len)
3536 if (!sock_queue_rcv_skb(sk, skb))
/*
 * Demultiplex one complete L2CAP frame: parse the basic header,
 * validate the length, then route by CID to the signalling channel,
 * the connectionless channel, or a connection-oriented data channel.
 */
3548 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3550 struct l2cap_hdr *lh = (void *) skb->data;
3554 skb_pull(skb, L2CAP_HDR_SIZE);
3555 cid = __le16_to_cpu(lh->cid);
3556 len = __le16_to_cpu(lh->len);
/* header length must match what actually arrived */
3558 if (len != skb->len) {
3563 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3566 case L2CAP_CID_SIGNALING:
3567 l2cap_sig_channel(conn, skb);
3570 case L2CAP_CID_CONN_LESS:
/* connectionless payload starts with the 16-bit PSM */
3571 psm = get_unaligned((__le16 *) skb->data);
3573 l2cap_conless_channel(conn, psm, skb);
3577 l2cap_data_channel(conn, cid, skb);
3582 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * HCI callback: an incoming ACL connection request.  Scan the
 * listening L2CAP sockets and build the link-mode mask: lm1 collects
 * flags from sockets bound exactly to this adapter's address
 * (presumably setting 'exact' on the elided line — TODO confirm),
 * lm2 from wildcard (BDADDR_ANY) listeners.  Returns lm1 when an
 * exact match existed, otherwise lm2; 0 rejects non-ACL links.
 */
3584 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3586 int exact = 0, lm1 = 0, lm2 = 0;
3587 register struct sock *sk;
3588 struct hlist_node *node;
/* only ACL links can carry L2CAP */
3590 if (type != ACL_LINK)
3593 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3595 /* Find listening sockets and check their link_mode */
3596 read_lock(&l2cap_sk_list.lock);
3597 sk_for_each(sk, node, &l2cap_sk_list.head) {
3598 if (sk->sk_state != BT_LISTEN)
/* exact match on the local adapter address */
3601 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3602 lm1 |= HCI_LM_ACCEPT;
3603 if (l2cap_pi(sk)->role_switch)
3604 lm1 |= HCI_LM_MASTER;
3606 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3607 lm2 |= HCI_LM_ACCEPT;
3608 if (l2cap_pi(sk)->role_switch)
3609 lm2 |= HCI_LM_MASTER;
3612 read_unlock(&l2cap_sk_list.lock);
3614 return exact ? lm1 : lm2;
/*
 * HCI callback: ACL connection attempt completed.  On success attach
 * an l2cap_conn to the hci_conn and kick off pending channels; on
 * failure tear the connection state down with the mapped errno.
 */
3617 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3619 struct l2cap_conn *conn;
3621 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3623 if (hcon->type != ACL_LINK)
3627 conn = l2cap_conn_add(hcon, status);
3629 l2cap_conn_ready(conn);
3631 l2cap_conn_del(hcon, bt_err(status));
/*
 * HCI callback: report the HCI reason code to use when this ACL link
 * is disconnected, as recorded earlier in conn->disc_reason.
 */
3638 static int l2cap_disconn_ind(struct hci_conn *hcon)
3640 struct l2cap_conn *conn = hcon->l2cap_data;
3642 BT_DBG("hcon %p", hcon);
/* non-ACL links or links without L2CAP state (fallback elided) */
3644 if (hcon->type != ACL_LINK || !conn)
3645 return conn->disc_reason;
/*
 * HCI callback: ACL link went down — tear down the associated L2CAP
 * connection and all of its channels with the mapped errno.
 */
3650 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3652 BT_DBG("hcon %p reason %d", hcon, reason);
3654 if (hcon->type != ACL_LINK)
3655 l2cap_conn_del(hcon, bt_err(reason));
/*
 * React to a change of the link's encryption state for one channel.
 * When encryption drops: MEDIUM-security channels get a 5 s grace
 * timer before teardown, HIGH-security channels are closed at once.
 * When encryption comes back, any pending MEDIUM-security grace timer
 * is cancelled.  Only SOCK_SEQPACKET (connection-oriented) sockets
 * are subject to this policy.
 */
3660 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3662 if (sk->sk_type != SOCK_SEQPACKET)
3665 if (encrypt == 0x00) {
3666 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3667 l2cap_sock_clear_timer(sk);
3668 l2cap_sock_set_timer(sk, HZ * 5);
3669 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3670 __l2cap_sock_close(sk, ECONNREFUSED);
3672 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3673 l2cap_sock_clear_timer(sk);
/*
 * HCI callback: an authentication/encryption request finished.
 * Walk every channel on the connection and advance its state machine:
 *  - established channels get their encryption policy re-checked;
 *  - BT_CONNECT channels send the deferred Connect Request on
 *    success (failure path elided);
 *  - BT_CONNECT2 channels answer the peer's pending Connect Request
 *    with success or a security block.
 * The chan_list read lock is held for the whole walk.
 */
3677 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3679 struct l2cap_chan_list *l;
3680 struct l2cap_conn *conn = hcon->l2cap_data;
3686 l = &conn->chan_list;
3688 BT_DBG("conn %p", conn);
3690 read_lock(&l->lock);
3692 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* channel still waiting for the link-level connect to finish */
3695 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3700 if (!status && (sk->sk_state == BT_CONNECTED ||
3701 sk->sk_state == BT_CONFIG)) {
3702 l2cap_check_encryption(sk, encrypt);
3707 if (sk->sk_state == BT_CONNECT) {
3709 struct l2cap_conn_req req;
3710 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3711 req.psm = l2cap_pi(sk)->psm;
3713 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
3715 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3716 L2CAP_CONN_REQ, sizeof(req), &req);
3718 l2cap_sock_clear_timer(sk);
3719 l2cap_sock_set_timer(sk, HZ / 10);
3721 } else if (sk->sk_state == BT_CONNECT2) {
3722 struct l2cap_conn_rsp rsp;
/* security OK: accept and move to configuration */
3726 sk->sk_state = BT_CONFIG;
3727 result = L2CAP_CR_SUCCESS;
/* security failed: reject and schedule disconnect */
3729 sk->sk_state = BT_DISCONN;
3730 l2cap_sock_set_timer(sk, HZ / 10);
3731 result = L2CAP_CR_SEC_BLOCK;
3734 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3735 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3736 rsp.result = cpu_to_le16(result);
3737 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3738 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3739 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3745 read_unlock(&l->lock);
/*
 * HCI callback: reassemble L2CAP frames from incoming ACL fragments.
 *
 * ACL_START fragments begin a new frame: the L2CAP header is parsed
 * to learn the total length; a complete single-fragment frame is
 * dispatched immediately, otherwise conn->rx_skb is allocated to
 * collect the pieces.  Continuation fragments are appended until
 * rx_len reaches zero, at which point the full frame is handed to
 * l2cap_recv_frame().  All malformed sequences (start-within-start,
 * short header, over-long frame or fragment) mark the connection
 * unreliable with ECOMM and drop the data.
 */
3750 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3752 struct l2cap_conn *conn = hcon->l2cap_data;
/* lazily create the l2cap_conn on first data */
3754 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
3757 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3759 if (flags & ACL_START) {
3760 struct l2cap_hdr *hdr;
/* a start fragment while reassembly is pending: discard the
 * partial frame (the rx_skb test itself is elided) */
3764 BT_ERR("Unexpected start frame (len %d)", skb->len);
3765 kfree_skb(conn->rx_skb);
3766 conn->rx_skb = NULL;
3768 l2cap_conn_unreliable(conn, ECOMM);
3772 BT_ERR("Frame is too short (len %d)", skb->len);
3773 l2cap_conn_unreliable(conn, ECOMM);
3777 hdr = (struct l2cap_hdr *) skb->data;
3778 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3780 if (len == skb->len) {
3781 /* Complete frame received */
3782 l2cap_recv_frame(conn, skb);
3786 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3788 if (skb->len > len) {
3789 BT_ERR("Frame is too long (len %d, expected len %d)",
3791 l2cap_conn_unreliable(conn, ECOMM);
3795 /* Allocate skb for the complete frame (with header) */
3796 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
3800 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3802 conn->rx_len = len - skb->len;
/* continuation fragment path */
3804 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
3806 if (!conn->rx_len) {
3807 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
3808 l2cap_conn_unreliable(conn, ECOMM);
3812 if (skb->len > conn->rx_len) {
3813 BT_ERR("Fragment is too long (len %d, expected %d)",
3814 skb->len, conn->rx_len);
3815 kfree_skb(conn->rx_skb);
3816 conn->rx_skb = NULL;
3818 l2cap_conn_unreliable(conn, ECOMM);
3822 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3824 conn->rx_len -= skb->len;
3826 if (!conn->rx_len) {
3827 /* Complete frame received */
3828 l2cap_recv_frame(conn, conn->rx_skb);
3829 conn->rx_skb = NULL;
/*
 * sysfs 'l2cap' class attribute: dump one line per L2CAP socket with
 * addresses, state, PSM, CIDs, MTUs and security level.
 * NOTE(review): 'str' is used without a visible initialisation here
 * (presumably str = buf on an elided line), and sprintf writes into
 * the single sysfs page with no bound check — could overflow with
 * very many sockets.  Confirm against the full source.
 */
3838 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
3841 struct hlist_node *node;
3844 read_lock_bh(&l2cap_sk_list.lock);
3846 sk_for_each(sk, node, &l2cap_sk_list.head) {
3847 struct l2cap_pinfo *pi = l2cap_pi(sk);
3849 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3850 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
3851 sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
3852 pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
3855 read_unlock_bh(&l2cap_sk_list.lock);
/* read-only attribute: /sys/class/bluetooth/l2cap */
3860 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/* Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets;
 * poll and ioctl are shared with the generic Bluetooth socket core. */
3862 static const struct proto_ops l2cap_sock_ops = {
3863 .family = PF_BLUETOOTH,
3864 .owner = THIS_MODULE,
3865 .release = l2cap_sock_release,
3866 .bind = l2cap_sock_bind,
3867 .connect = l2cap_sock_connect,
3868 .listen = l2cap_sock_listen,
3869 .accept = l2cap_sock_accept,
3870 .getname = l2cap_sock_getname,
3871 .sendmsg = l2cap_sock_sendmsg,
3872 .recvmsg = l2cap_sock_recvmsg,
3873 .poll = bt_sock_poll,
3874 .ioctl = bt_sock_ioctl,
3875 .mmap = sock_no_mmap,
3876 .socketpair = sock_no_socketpair,
3877 .shutdown = l2cap_sock_shutdown,
3878 .setsockopt = l2cap_sock_setsockopt,
3879 .getsockopt = l2cap_sock_getsockopt
/* Registered with bt_sock_register() so socket(PF_BLUETOOTH,
 * BTPROTO_L2CAP) reaches l2cap_sock_create(). */
3882 static struct net_proto_family l2cap_sock_family_ops = {
3883 .family = PF_BLUETOOTH,
3884 .owner = THIS_MODULE,
3885 .create = l2cap_sock_create,
/* Hooks registered with the HCI core: connection lifecycle, security
 * results, and inbound ACL data for the L2CAP layer. */
3888 static struct hci_proto l2cap_hci_proto = {
3890 .id = HCI_PROTO_L2CAP,
3891 .connect_ind = l2cap_connect_ind,
3892 .connect_cfm = l2cap_connect_cfm,
3893 .disconn_ind = l2cap_disconn_ind,
3894 .disconn_cfm = l2cap_disconn_cfm,
3895 .security_cfm = l2cap_security_cfm,
3896 .recv_acldata = l2cap_recv_acldata
/*
 * Module init: register the L2CAP proto, the Bluetooth socket family
 * and the HCI protocol hooks (unwinding in reverse order on failure),
 * then create the optional sysfs info file — its failure is only
 * logged, not fatal.
 */
3899 static int __init l2cap_init(void)
3903 err = proto_register(&l2cap_proto, 0);
3907 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
3909 BT_ERR("L2CAP socket registration failed");
3913 err = hci_register_proto(&l2cap_hci_proto);
3915 BT_ERR("L2CAP protocol registration failed");
/* roll back the socket-family registration */
3916 bt_sock_unregister(BTPROTO_L2CAP);
3920 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
3921 BT_ERR("Failed to create L2CAP info file");
3923 BT_INFO("L2CAP ver %s", VERSION);
3924 BT_INFO("L2CAP socket layer initialized");
/* error-path label (elided): undo proto_register */
3929 proto_unregister(&l2cap_proto);
/*
 * Module exit: tear down everything l2cap_init() registered, in
 * reverse order, logging (but not propagating) unregister failures.
 */
3933 static void __exit l2cap_exit(void)
3935 class_remove_file(bt_class, &class_attr_l2cap);
3937 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
3938 BT_ERR("L2CAP socket unregistration failed");
3940 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
3941 BT_ERR("L2CAP protocol unregistration failed");
3943 proto_unregister(&l2cap_proto);
/* Exported no-op: referencing this symbol from RFCOMM/BNEP/etc. makes
 * the module loader pull in l2cap automatically. */
3946 void l2cap_load(void)
3948 /* Dummy function to trigger automatic L2CAP module loading by
3949 * other modules that use L2CAP sockets but don't use any other
3950 * symbols from it. */
3953 EXPORT_SYMBOL(l2cap_load);
3955 module_init(l2cap_init);
3956 module_exit(l2cap_exit);
/* NOTE(review): enable_ertm is declared 'static int' near the top of
 * the file but exposed here as a bool parameter — newer kernels
 * require the variable to actually be bool; confirm. */
3958 module_param(enable_ertm, bool, 0644);
3959 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
3961 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
3962 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
3963 MODULE_VERSION(VERSION);
3964 MODULE_LICENSE("GPL");
/* proto number 0 = BTPROTO_L2CAP, for request_module("bt-proto-0") */
3965 MODULE_ALIAS("bt-proto-0");