2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
46 #include <asm/system.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #define VERSION "2.14"
55 static int enable_ertm = 0;
57 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
58 static u8 l2cap_fixed_chan[8] = { 0x02, };
60 static const struct proto_ops l2cap_sock_ops;
62 static struct bt_sock_list l2cap_sk_list = {
63 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
66 static void __l2cap_sock_close(struct sock *sk, int reason);
67 static void l2cap_sock_close(struct sock *sk);
68 static void l2cap_sock_kill(struct sock *sk);
70 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
71 u8 code, u8 ident, u16 dlen, void *data);
73 /* ---- L2CAP timers ---- */
74 static void l2cap_sock_timeout(unsigned long arg)
76 struct sock *sk = (struct sock *) arg;
79 BT_DBG("sock %p state %d", sk, sk->sk_state);
83 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
84 reason = ECONNREFUSED;
85 else if (sk->sk_state == BT_CONNECT &&
86 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
87 reason = ECONNREFUSED;
91 __l2cap_sock_close(sk, reason);
99 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
101 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
102 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
105 static void l2cap_sock_clear_timer(struct sock *sk)
107 BT_DBG("sock %p state %d", sk, sk->sk_state);
108 sk_stop_timer(sk, &sk->sk_timer);
111 /* ---- L2CAP channels ---- */
112 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
115 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
116 if (l2cap_pi(s)->dcid == cid)
122 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
125 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
126 if (l2cap_pi(s)->scid == cid)
132 /* Find channel with given SCID.
133 * Returns locked socket */
134 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
138 s = __l2cap_get_chan_by_scid(l, cid);
141 read_unlock(&l->lock);
145 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
148 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
149 if (l2cap_pi(s)->ident == ident)
155 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
159 s = __l2cap_get_chan_by_ident(l, ident);
162 read_unlock(&l->lock);
166 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
168 u16 cid = L2CAP_CID_DYN_START;
170 for (; cid < L2CAP_CID_DYN_END; cid++) {
171 if (!__l2cap_get_chan_by_scid(l, cid))
178 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
183 l2cap_pi(l->head)->prev_c = sk;
185 l2cap_pi(sk)->next_c = l->head;
186 l2cap_pi(sk)->prev_c = NULL;
190 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
192 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
194 write_lock_bh(&l->lock);
199 l2cap_pi(next)->prev_c = prev;
201 l2cap_pi(prev)->next_c = next;
202 write_unlock_bh(&l->lock);
207 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
209 struct l2cap_chan_list *l = &conn->chan_list;
211 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
212 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
214 conn->disc_reason = 0x13;
216 l2cap_pi(sk)->conn = conn;
218 if (sk->sk_type == SOCK_SEQPACKET) {
219 /* Alloc CID for connection-oriented socket */
220 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
221 } else if (sk->sk_type == SOCK_DGRAM) {
222 /* Connectionless socket */
223 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
224 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
225 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
227 /* Raw socket can send/recv signalling messages only */
228 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
229 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
230 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
233 __l2cap_chan_link(l, sk);
236 bt_accept_enqueue(parent, sk);
240 * Must be called on the locked socket. */
241 static void l2cap_chan_del(struct sock *sk, int err)
243 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
244 struct sock *parent = bt_sk(sk)->parent;
246 l2cap_sock_clear_timer(sk);
248 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
251 /* Unlink from channel list */
252 l2cap_chan_unlink(&conn->chan_list, sk);
253 l2cap_pi(sk)->conn = NULL;
254 hci_conn_put(conn->hcon);
257 sk->sk_state = BT_CLOSED;
258 sock_set_flag(sk, SOCK_ZAPPED);
264 bt_accept_unlink(sk);
265 parent->sk_data_ready(parent, 0);
267 sk->sk_state_change(sk);
270 /* Service level security */
271 static inline int l2cap_check_security(struct sock *sk)
273 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
276 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
277 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
278 auth_type = HCI_AT_NO_BONDING_MITM;
280 auth_type = HCI_AT_NO_BONDING;
282 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
283 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
285 switch (l2cap_pi(sk)->sec_level) {
286 case BT_SECURITY_HIGH:
287 auth_type = HCI_AT_GENERAL_BONDING_MITM;
289 case BT_SECURITY_MEDIUM:
290 auth_type = HCI_AT_GENERAL_BONDING;
293 auth_type = HCI_AT_NO_BONDING;
298 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
302 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
306 /* Get next available identifier.
307 * 1 - 128 are used by kernel.
308 * 129 - 199 are reserved.
309 * 200 - 254 are used by utilities like l2ping, etc.
312 spin_lock_bh(&conn->lock);
314 if (++conn->tx_ident > 128)
319 spin_unlock_bh(&conn->lock);
324 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
326 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
328 BT_DBG("code 0x%2.2x", code);
333 return hci_send_acl(conn->hcon, skb, 0);
336 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
339 struct l2cap_hdr *lh;
340 struct l2cap_conn *conn = pi->conn;
343 BT_DBG("pi %p, control 0x%2.2x", pi, control);
345 count = min_t(unsigned int, conn->mtu, L2CAP_HDR_SIZE + 2);
346 control |= L2CAP_CTRL_FRAME_TYPE;
348 skb = bt_skb_alloc(count, GFP_ATOMIC);
352 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
353 lh->len = cpu_to_le16(2);
354 lh->cid = cpu_to_le16(pi->dcid);
355 put_unaligned_le16(control, skb_put(skb, 2));
357 return hci_send_acl(pi->conn->hcon, skb, 0);
360 static void l2cap_do_start(struct sock *sk)
362 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
364 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
365 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
368 if (l2cap_check_security(sk)) {
369 struct l2cap_conn_req req;
370 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
371 req.psm = l2cap_pi(sk)->psm;
373 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
375 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
376 L2CAP_CONN_REQ, sizeof(req), &req);
379 struct l2cap_info_req req;
380 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
382 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
383 conn->info_ident = l2cap_get_ident(conn);
385 mod_timer(&conn->info_timer, jiffies +
386 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
388 l2cap_send_cmd(conn, conn->info_ident,
389 L2CAP_INFO_REQ, sizeof(req), &req);
393 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
395 struct l2cap_disconn_req req;
397 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
398 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
399 l2cap_send_cmd(conn, l2cap_get_ident(conn),
400 L2CAP_DISCONN_REQ, sizeof(req), &req);
403 /* ---- L2CAP connections ---- */
404 static void l2cap_conn_start(struct l2cap_conn *conn)
406 struct l2cap_chan_list *l = &conn->chan_list;
409 BT_DBG("conn %p", conn);
413 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
416 if (sk->sk_type != SOCK_SEQPACKET) {
421 if (sk->sk_state == BT_CONNECT) {
422 if (l2cap_check_security(sk)) {
423 struct l2cap_conn_req req;
424 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
425 req.psm = l2cap_pi(sk)->psm;
427 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
429 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
430 L2CAP_CONN_REQ, sizeof(req), &req);
432 } else if (sk->sk_state == BT_CONNECT2) {
433 struct l2cap_conn_rsp rsp;
434 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
435 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
437 if (l2cap_check_security(sk)) {
438 if (bt_sk(sk)->defer_setup) {
439 struct sock *parent = bt_sk(sk)->parent;
440 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
441 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
442 parent->sk_data_ready(parent, 0);
445 sk->sk_state = BT_CONFIG;
446 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
447 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
450 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
451 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
454 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
455 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
461 read_unlock(&l->lock);
464 static void l2cap_conn_ready(struct l2cap_conn *conn)
466 struct l2cap_chan_list *l = &conn->chan_list;
469 BT_DBG("conn %p", conn);
473 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
476 if (sk->sk_type != SOCK_SEQPACKET) {
477 l2cap_sock_clear_timer(sk);
478 sk->sk_state = BT_CONNECTED;
479 sk->sk_state_change(sk);
480 } else if (sk->sk_state == BT_CONNECT)
486 read_unlock(&l->lock);
489 /* Notify sockets that we cannot guarantee reliability anymore */
490 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
492 struct l2cap_chan_list *l = &conn->chan_list;
495 BT_DBG("conn %p", conn);
499 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
500 if (l2cap_pi(sk)->force_reliable)
504 read_unlock(&l->lock);
507 static void l2cap_info_timeout(unsigned long arg)
509 struct l2cap_conn *conn = (void *) arg;
511 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
512 conn->info_ident = 0;
514 l2cap_conn_start(conn);
517 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
519 struct l2cap_conn *conn = hcon->l2cap_data;
524 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
528 hcon->l2cap_data = conn;
531 BT_DBG("hcon %p conn %p", hcon, conn);
533 conn->mtu = hcon->hdev->acl_mtu;
534 conn->src = &hcon->hdev->bdaddr;
535 conn->dst = &hcon->dst;
539 setup_timer(&conn->info_timer, l2cap_info_timeout,
540 (unsigned long) conn);
542 spin_lock_init(&conn->lock);
543 rwlock_init(&conn->chan_list.lock);
545 conn->disc_reason = 0x13;
550 static void l2cap_conn_del(struct hci_conn *hcon, int err)
552 struct l2cap_conn *conn = hcon->l2cap_data;
558 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
560 kfree_skb(conn->rx_skb);
563 while ((sk = conn->chan_list.head)) {
565 l2cap_chan_del(sk, err);
570 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
571 del_timer_sync(&conn->info_timer);
573 hcon->l2cap_data = NULL;
577 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
579 struct l2cap_chan_list *l = &conn->chan_list;
580 write_lock_bh(&l->lock);
581 __l2cap_chan_add(conn, sk, parent);
582 write_unlock_bh(&l->lock);
585 /* ---- Socket interface ---- */
586 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
589 struct hlist_node *node;
590 sk_for_each(sk, node, &l2cap_sk_list.head)
591 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
598 /* Find socket with psm and source bdaddr.
599 * Returns closest match.
601 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
603 struct sock *sk = NULL, *sk1 = NULL;
604 struct hlist_node *node;
606 sk_for_each(sk, node, &l2cap_sk_list.head) {
607 if (state && sk->sk_state != state)
610 if (l2cap_pi(sk)->psm == psm) {
612 if (!bacmp(&bt_sk(sk)->src, src))
616 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
620 return node ? sk : sk1;
623 /* Find socket with given address (psm, src).
624 * Returns locked socket */
625 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
628 read_lock(&l2cap_sk_list.lock);
629 s = __l2cap_get_sock_by_psm(state, psm, src);
632 read_unlock(&l2cap_sk_list.lock);
636 static void l2cap_sock_destruct(struct sock *sk)
640 skb_queue_purge(&sk->sk_receive_queue);
641 skb_queue_purge(&sk->sk_write_queue);
644 static void l2cap_sock_cleanup_listen(struct sock *parent)
648 BT_DBG("parent %p", parent);
650 /* Close not yet accepted channels */
651 while ((sk = bt_accept_dequeue(parent, NULL)))
652 l2cap_sock_close(sk);
654 parent->sk_state = BT_CLOSED;
655 sock_set_flag(parent, SOCK_ZAPPED);
658 /* Kill socket (only if zapped and orphan)
659 * Must be called on unlocked socket.
661 static void l2cap_sock_kill(struct sock *sk)
663 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
666 BT_DBG("sk %p state %d", sk, sk->sk_state);
668 /* Kill poor orphan */
669 bt_sock_unlink(&l2cap_sk_list, sk);
670 sock_set_flag(sk, SOCK_DEAD);
674 static void __l2cap_sock_close(struct sock *sk, int reason)
676 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
678 switch (sk->sk_state) {
680 l2cap_sock_cleanup_listen(sk);
685 if (sk->sk_type == SOCK_SEQPACKET) {
686 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
688 sk->sk_state = BT_DISCONN;
689 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
690 l2cap_send_disconn_req(conn, sk);
692 l2cap_chan_del(sk, reason);
696 if (sk->sk_type == SOCK_SEQPACKET) {
697 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
698 struct l2cap_conn_rsp rsp;
701 if (bt_sk(sk)->defer_setup)
702 result = L2CAP_CR_SEC_BLOCK;
704 result = L2CAP_CR_BAD_PSM;
706 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
707 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
708 rsp.result = cpu_to_le16(result);
709 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
710 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
711 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
713 l2cap_chan_del(sk, reason);
718 l2cap_chan_del(sk, reason);
722 sock_set_flag(sk, SOCK_ZAPPED);
727 /* Must be called on unlocked socket. */
728 static void l2cap_sock_close(struct sock *sk)
730 l2cap_sock_clear_timer(sk);
732 __l2cap_sock_close(sk, ECONNRESET);
737 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
739 struct l2cap_pinfo *pi = l2cap_pi(sk);
744 sk->sk_type = parent->sk_type;
745 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
747 pi->imtu = l2cap_pi(parent)->imtu;
748 pi->omtu = l2cap_pi(parent)->omtu;
749 pi->mode = l2cap_pi(parent)->mode;
750 pi->fcs = l2cap_pi(parent)->fcs;
751 pi->sec_level = l2cap_pi(parent)->sec_level;
752 pi->role_switch = l2cap_pi(parent)->role_switch;
753 pi->force_reliable = l2cap_pi(parent)->force_reliable;
755 pi->imtu = L2CAP_DEFAULT_MTU;
757 pi->mode = L2CAP_MODE_BASIC;
758 pi->fcs = L2CAP_FCS_CRC16;
759 pi->sec_level = BT_SECURITY_LOW;
761 pi->force_reliable = 0;
764 /* Default config options */
766 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
769 static struct proto l2cap_proto = {
771 .owner = THIS_MODULE,
772 .obj_size = sizeof(struct l2cap_pinfo)
775 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
779 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
783 sock_init_data(sock, sk);
784 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
786 sk->sk_destruct = l2cap_sock_destruct;
787 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
789 sock_reset_flag(sk, SOCK_ZAPPED);
791 sk->sk_protocol = proto;
792 sk->sk_state = BT_OPEN;
794 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
796 bt_sock_link(&l2cap_sk_list, sk);
800 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
804 BT_DBG("sock %p", sock);
806 sock->state = SS_UNCONNECTED;
808 if (sock->type != SOCK_SEQPACKET &&
809 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
810 return -ESOCKTNOSUPPORT;
812 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
815 sock->ops = &l2cap_sock_ops;
817 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
821 l2cap_sock_init(sk, NULL);
825 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
827 struct sock *sk = sock->sk;
828 struct sockaddr_l2 la;
833 if (!addr || addr->sa_family != AF_BLUETOOTH)
836 memset(&la, 0, sizeof(la));
837 len = min_t(unsigned int, sizeof(la), alen);
838 memcpy(&la, addr, len);
845 if (sk->sk_state != BT_OPEN) {
850 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
851 !capable(CAP_NET_BIND_SERVICE)) {
856 write_lock_bh(&l2cap_sk_list.lock);
858 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
861 /* Save source address */
862 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
863 l2cap_pi(sk)->psm = la.l2_psm;
864 l2cap_pi(sk)->sport = la.l2_psm;
865 sk->sk_state = BT_BOUND;
867 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
868 __le16_to_cpu(la.l2_psm) == 0x0003)
869 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
872 write_unlock_bh(&l2cap_sk_list.lock);
879 static int l2cap_do_connect(struct sock *sk)
881 bdaddr_t *src = &bt_sk(sk)->src;
882 bdaddr_t *dst = &bt_sk(sk)->dst;
883 struct l2cap_conn *conn;
884 struct hci_conn *hcon;
885 struct hci_dev *hdev;
889 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
892 hdev = hci_get_route(dst, src);
894 return -EHOSTUNREACH;
896 hci_dev_lock_bh(hdev);
900 if (sk->sk_type == SOCK_RAW) {
901 switch (l2cap_pi(sk)->sec_level) {
902 case BT_SECURITY_HIGH:
903 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
905 case BT_SECURITY_MEDIUM:
906 auth_type = HCI_AT_DEDICATED_BONDING;
909 auth_type = HCI_AT_NO_BONDING;
912 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
913 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
914 auth_type = HCI_AT_NO_BONDING_MITM;
916 auth_type = HCI_AT_NO_BONDING;
918 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
919 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
921 switch (l2cap_pi(sk)->sec_level) {
922 case BT_SECURITY_HIGH:
923 auth_type = HCI_AT_GENERAL_BONDING_MITM;
925 case BT_SECURITY_MEDIUM:
926 auth_type = HCI_AT_GENERAL_BONDING;
929 auth_type = HCI_AT_NO_BONDING;
934 hcon = hci_connect(hdev, ACL_LINK, dst,
935 l2cap_pi(sk)->sec_level, auth_type);
939 conn = l2cap_conn_add(hcon, 0);
947 /* Update source addr of the socket */
948 bacpy(src, conn->src);
950 l2cap_chan_add(conn, sk, NULL);
952 sk->sk_state = BT_CONNECT;
953 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
955 if (hcon->state == BT_CONNECTED) {
956 if (sk->sk_type != SOCK_SEQPACKET) {
957 l2cap_sock_clear_timer(sk);
958 sk->sk_state = BT_CONNECTED;
964 hci_dev_unlock_bh(hdev);
969 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
971 struct sock *sk = sock->sk;
972 struct sockaddr_l2 la;
977 if (!addr || addr->sa_family != AF_BLUETOOTH)
980 memset(&la, 0, sizeof(la));
981 len = min_t(unsigned int, sizeof(la), alen);
982 memcpy(&la, addr, len);
989 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
994 switch (l2cap_pi(sk)->mode) {
995 case L2CAP_MODE_BASIC:
997 case L2CAP_MODE_ERTM:
998 case L2CAP_MODE_STREAMING:
1007 switch (sk->sk_state) {
1011 /* Already connecting */
1015 /* Already connected */
1028 /* Set destination address and psm */
1029 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1030 l2cap_pi(sk)->psm = la.l2_psm;
1032 err = l2cap_do_connect(sk);
1037 err = bt_sock_wait_state(sk, BT_CONNECTED,
1038 sock_sndtimeo(sk, flags & O_NONBLOCK));
1044 static int l2cap_sock_listen(struct socket *sock, int backlog)
1046 struct sock *sk = sock->sk;
1049 BT_DBG("sk %p backlog %d", sk, backlog);
1053 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1058 switch (l2cap_pi(sk)->mode) {
1059 case L2CAP_MODE_BASIC:
1061 case L2CAP_MODE_ERTM:
1062 case L2CAP_MODE_STREAMING:
1071 if (!l2cap_pi(sk)->psm) {
1072 bdaddr_t *src = &bt_sk(sk)->src;
1077 write_lock_bh(&l2cap_sk_list.lock);
1079 for (psm = 0x1001; psm < 0x1100; psm += 2)
1080 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1081 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1082 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1087 write_unlock_bh(&l2cap_sk_list.lock);
1093 sk->sk_max_ack_backlog = backlog;
1094 sk->sk_ack_backlog = 0;
1095 sk->sk_state = BT_LISTEN;
1102 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1104 DECLARE_WAITQUEUE(wait, current);
1105 struct sock *sk = sock->sk, *nsk;
1109 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1111 if (sk->sk_state != BT_LISTEN) {
1116 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1118 BT_DBG("sk %p timeo %ld", sk, timeo);
1120 /* Wait for an incoming connection. (wake-one). */
1121 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1122 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1123 set_current_state(TASK_INTERRUPTIBLE);
1130 timeo = schedule_timeout(timeo);
1131 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1133 if (sk->sk_state != BT_LISTEN) {
1138 if (signal_pending(current)) {
1139 err = sock_intr_errno(timeo);
1143 set_current_state(TASK_RUNNING);
1144 remove_wait_queue(sk->sk_sleep, &wait);
1149 newsock->state = SS_CONNECTED;
1151 BT_DBG("new socket %p", nsk);
1158 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1160 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1161 struct sock *sk = sock->sk;
1163 BT_DBG("sock %p, sk %p", sock, sk);
1165 addr->sa_family = AF_BLUETOOTH;
1166 *len = sizeof(struct sockaddr_l2);
1169 la->l2_psm = l2cap_pi(sk)->psm;
1170 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1171 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1173 la->l2_psm = l2cap_pi(sk)->sport;
1174 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1175 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1181 static void l2cap_monitor_timeout(unsigned long arg)
1183 struct sock *sk = (void *) arg;
1186 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1187 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1191 l2cap_pi(sk)->retry_count++;
1192 __mod_monitor_timer();
1194 control = L2CAP_CTRL_POLL;
1195 control |= L2CAP_SUPER_RCV_READY;
1196 l2cap_send_sframe(l2cap_pi(sk), control);
1199 static void l2cap_retrans_timeout(unsigned long arg)
1201 struct sock *sk = (void *) arg;
1204 l2cap_pi(sk)->retry_count = 1;
1205 __mod_monitor_timer();
1207 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1209 control = L2CAP_CTRL_POLL;
1210 control |= L2CAP_SUPER_RCV_READY;
1211 l2cap_send_sframe(l2cap_pi(sk), control);
1214 static void l2cap_drop_acked_frames(struct sock *sk)
1216 struct sk_buff *skb;
1218 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1219 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1222 skb = skb_dequeue(TX_QUEUE(sk));
1225 l2cap_pi(sk)->unacked_frames--;
1228 if (!l2cap_pi(sk)->unacked_frames)
1229 del_timer(&l2cap_pi(sk)->retrans_timer);
1234 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1236 struct l2cap_pinfo *pi = l2cap_pi(sk);
1239 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1241 err = hci_send_acl(pi->conn->hcon, skb, 0);
1248 static int l2cap_ertm_send(struct sock *sk)
1250 struct sk_buff *skb, *tx_skb;
1251 struct l2cap_pinfo *pi = l2cap_pi(sk);
1255 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1258 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1259 tx_skb = skb_clone(skb, GFP_ATOMIC);
1261 if (pi->remote_max_tx &&
1262 bt_cb(skb)->retries == pi->remote_max_tx) {
1263 l2cap_send_disconn_req(pi->conn, sk);
1267 bt_cb(skb)->retries++;
1269 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1270 control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1271 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1272 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1275 err = l2cap_do_send(sk, tx_skb);
1277 l2cap_send_disconn_req(pi->conn, sk);
1280 __mod_retrans_timer();
1282 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1283 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1285 pi->unacked_frames++;
1287 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1288 sk->sk_send_head = NULL;
1290 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1296 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1298 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1299 struct sk_buff **frag;
1302 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1309 /* Continuation fragments (no L2CAP header) */
1310 frag = &skb_shinfo(skb)->frag_list;
1312 count = min_t(unsigned int, conn->mtu, len);
1314 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1317 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1323 frag = &(*frag)->next;
1329 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1331 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1332 struct sk_buff *skb;
1333 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1334 struct l2cap_hdr *lh;
1336 BT_DBG("sk %p len %d", sk, (int)len);
1338 count = min_t(unsigned int, (conn->mtu - hlen), len);
1339 skb = bt_skb_send_alloc(sk, count + hlen,
1340 msg->msg_flags & MSG_DONTWAIT, &err);
1342 return ERR_PTR(-ENOMEM);
1344 /* Create L2CAP header */
1345 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1346 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1347 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1348 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1350 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1351 if (unlikely(err < 0)) {
1353 return ERR_PTR(err);
1358 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1360 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1361 struct sk_buff *skb;
1362 int err, count, hlen = L2CAP_HDR_SIZE;
1363 struct l2cap_hdr *lh;
1365 BT_DBG("sk %p len %d", sk, (int)len);
1367 count = min_t(unsigned int, (conn->mtu - hlen), len);
1368 skb = bt_skb_send_alloc(sk, count + hlen,
1369 msg->msg_flags & MSG_DONTWAIT, &err);
1371 return ERR_PTR(-ENOMEM);
1373 /* Create L2CAP header */
1374 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1375 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1376 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1378 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1379 if (unlikely(err < 0)) {
1381 return ERR_PTR(err);
1386 static struct sk_buff *l2cap_create_ertm_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1388 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1389 struct sk_buff *skb;
1390 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1391 struct l2cap_hdr *lh;
1393 BT_DBG("sk %p len %d", sk, (int)len);
1398 count = min_t(unsigned int, (conn->mtu - hlen), len);
1399 skb = bt_skb_send_alloc(sk, count + hlen,
1400 msg->msg_flags & MSG_DONTWAIT, &err);
1402 return ERR_PTR(-ENOMEM);
1404 /* Create L2CAP header */
1405 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1406 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1407 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1408 put_unaligned_le16(control, skb_put(skb, 2));
1410 put_unaligned_le16(sdulen, skb_put(skb, 2));
1412 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1413 if (unlikely(err < 0)) {
1415 return ERR_PTR(err);
1418 bt_cb(skb)->retries = 0;
1422 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1424 struct l2cap_pinfo *pi = l2cap_pi(sk);
1425 struct sk_buff *skb;
1426 struct sk_buff_head sar_queue;
1430 __skb_queue_head_init(&sar_queue);
1431 control = L2CAP_SDU_START;
1432 skb = l2cap_create_ertm_pdu(sk, msg, pi->max_pdu_size, control, len);
1434 return PTR_ERR(skb);
1436 __skb_queue_tail(&sar_queue, skb);
1437 len -= pi->max_pdu_size;
1438 size +=pi->max_pdu_size;
1444 if (len > pi->max_pdu_size) {
1445 control |= L2CAP_SDU_CONTINUE;
1446 buflen = pi->max_pdu_size;
1448 control |= L2CAP_SDU_END;
1452 skb = l2cap_create_ertm_pdu(sk, msg, buflen, control, 0);
1454 skb_queue_purge(&sar_queue);
1455 return PTR_ERR(skb);
1458 __skb_queue_tail(&sar_queue, skb);
1463 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1464 if (sk->sk_send_head == NULL)
1465 sk->sk_send_head = sar_queue.next;
1470 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1472 struct sock *sk = sock->sk;
1473 struct l2cap_pinfo *pi = l2cap_pi(sk);
1474 struct sk_buff *skb;
1478 BT_DBG("sock %p, sk %p", sock, sk);
1480 err = sock_error(sk);
1484 if (msg->msg_flags & MSG_OOB)
1487 /* Check outgoing MTU */
1488 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC
1494 if (sk->sk_state != BT_CONNECTED) {
1499 /* Connectionless channel */
1500 if (sk->sk_type == SOCK_DGRAM) {
1501 skb = l2cap_create_connless_pdu(sk, msg, len);
1502 err = l2cap_do_send(sk, skb);
1507 case L2CAP_MODE_BASIC:
1508 /* Create a basic PDU */
1509 skb = l2cap_create_basic_pdu(sk, msg, len);
1515 err = l2cap_do_send(sk, skb);
1520 case L2CAP_MODE_ERTM:
1521 /* Entire SDU fits into one PDU */
1522 if (len <= pi->max_pdu_size) {
1523 control = L2CAP_SDU_UNSEGMENTED;
1524 skb = l2cap_create_ertm_pdu(sk, msg, len, control, 0);
1529 __skb_queue_tail(TX_QUEUE(sk), skb);
1530 if (sk->sk_send_head == NULL)
1531 sk->sk_send_head = skb;
1533 /* Segment SDU into multiples PDUs */
1534 err = l2cap_sar_segment_sdu(sk, msg, len);
1539 err = l2cap_ertm_send(sk);
1545 BT_DBG("bad state %1.1x", pi->mode);
1554 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1556 struct sock *sk = sock->sk;
1560 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1561 struct l2cap_conn_rsp rsp;
1563 sk->sk_state = BT_CONFIG;
1565 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1566 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1567 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1568 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1569 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1570 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1578 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/*
 * Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS, L2CAP_LM).
 *
 * L2CAP_OPTIONS: opts is pre-filled with the current channel values so a
 * short user buffer (len < sizeof(opts)) only overwrites a prefix and the
 * remaining fields keep their current settings.
 *
 * L2CAP_LM: the legacy link-mode bits are mapped onto the new sec_level
 * scale (AUTH -> LOW, ENCRYPT -> MEDIUM, SECURE -> HIGH; later tests
 * override earlier ones), plus role_switch / force_reliable flags.
 */
1581 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
1583 struct sock *sk = sock->sk;
1584 struct l2cap_options opts;
1588 BT_DBG("sk %p", sk);
/* seed opts with current values; partial copy_from_user below keeps the rest */
1594 opts.imtu = l2cap_pi(sk)->imtu;
1595 opts.omtu = l2cap_pi(sk)->omtu;
1596 opts.flush_to = l2cap_pi(sk)->flush_to;
1597 opts.mode = l2cap_pi(sk)->mode;
1599 len = min_t(unsigned int, sizeof(opts), optlen);
1600 if (copy_from_user((char *) &opts, optval, len)) {
1605 l2cap_pi(sk)->imtu = opts.imtu;
1606 l2cap_pi(sk)->omtu = opts.omtu;
1607 l2cap_pi(sk)->mode = opts.mode;
/* L2CAP_LM: read a u32 bitmask of legacy link-mode flags */
1611 if (get_user(opt, (u32 __user *) optval)) {
1616 if (opt & L2CAP_LM_AUTH)
1617 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1618 if (opt & L2CAP_LM_ENCRYPT)
1619 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1620 if (opt & L2CAP_LM_SECURE)
1621 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1623 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1624 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/*
 * New-style setsockopt. SOL_L2CAP is routed to the legacy handler for
 * backward compatibility; everything else must be SOL_BLUETOOTH.
 *
 * BT_SECURITY: only valid on SEQPACKET/RAW sockets; sec is pre-seeded with
 * BT_SECURITY_LOW so a short user buffer still yields a defined level,
 * and the result is range-checked before being applied.
 * BT_DEFER_SETUP: only allowed before the socket is connected
 * (BT_BOUND or BT_LISTEN).
 */
1636 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1638 struct sock *sk = sock->sk;
1639 struct bt_security sec;
1643 BT_DBG("sk %p", sk);
1645 if (level == SOL_L2CAP)
1646 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1648 if (level != SOL_BLUETOOTH)
1649 return -ENOPROTOOPT;
1655 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1660 sec.level = BT_SECURITY_LOW;
1662 len = min_t(unsigned int, sizeof(sec), optlen);
1663 if (copy_from_user((char *) &sec, optval, len)) {
1668 if (sec.level < BT_SECURITY_LOW ||
1669 sec.level > BT_SECURITY_HIGH) {
1674 l2cap_pi(sk)->sec_level = sec.level;
1677 case BT_DEFER_SETUP:
1678 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1683 if (get_user(opt, (u32 __user *) optval)) {
1688 bt_sk(sk)->defer_setup = opt;
/*
 * Legacy SOL_L2CAP getsockopt handler.
 *
 * L2CAP_OPTIONS: returns current imtu/omtu/flush_to/mode, truncated to
 * the caller-supplied length.
 * L2CAP_LM: maps sec_level back to the legacy bitmask (the switch cases
 * visibly build cumulative AUTH/ENCRYPT/SECURE sets; fallthrough lines
 * are not visible in this chunk), plus role_switch / force_reliable bits.
 * L2CAP_CONNINFO: only valid once connected (or deferred in BT_CONNECT2);
 * reports the HCI handle and remote device class.
 */
1700 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1702 struct sock *sk = sock->sk;
1703 struct l2cap_options opts;
1704 struct l2cap_conninfo cinfo;
1708 BT_DBG("sk %p", sk);
1710 if (get_user(len, optlen))
1717 opts.imtu = l2cap_pi(sk)->imtu;
1718 opts.omtu = l2cap_pi(sk)->omtu;
1719 opts.flush_to = l2cap_pi(sk)->flush_to;
1720 opts.mode = l2cap_pi(sk)->mode;
1722 len = min_t(unsigned int, len, sizeof(opts));
1723 if (copy_to_user(optval, (char *) &opts, len))
/* translate sec_level back into the legacy L2CAP_LM_* bitmask */
1729 switch (l2cap_pi(sk)->sec_level) {
1730 case BT_SECURITY_LOW:
1731 opt = L2CAP_LM_AUTH;
1733 case BT_SECURITY_MEDIUM:
1734 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1736 case BT_SECURITY_HIGH:
1737 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1745 if (l2cap_pi(sk)->role_switch)
1746 opt |= L2CAP_LM_MASTER;
1748 if (l2cap_pi(sk)->force_reliable)
1749 opt |= L2CAP_LM_RELIABLE;
1751 if (put_user(opt, (u32 __user *) optval))
1755 case L2CAP_CONNINFO:
/* connection info is meaningful only when connected, or when an
 * incoming connection is parked in BT_CONNECT2 by defer_setup */
1756 if (sk->sk_state != BT_CONNECTED &&
1757 !(sk->sk_state == BT_CONNECT2 &&
1758 bt_sk(sk)->defer_setup)) {
1763 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1764 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1766 len = min_t(unsigned int, len, sizeof(cinfo));
1767 if (copy_to_user(optval, (char *) &cinfo, len))
/*
 * New-style getsockopt. SOL_L2CAP falls back to the legacy handler;
 * everything else must be SOL_BLUETOOTH.
 *
 * BT_SECURITY: SEQPACKET/RAW only; returns the current sec_level.
 * BT_DEFER_SETUP: only before connection (BT_BOUND / BT_LISTEN);
 * returns the defer_setup flag as a u32.
 */
1781 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1783 struct sock *sk = sock->sk;
1784 struct bt_security sec;
1787 BT_DBG("sk %p", sk);
1789 if (level == SOL_L2CAP)
1790 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1792 if (level != SOL_BLUETOOTH)
1793 return -ENOPROTOOPT;
1795 if (get_user(len, optlen))
1802 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1807 sec.level = l2cap_pi(sk)->sec_level;
1809 len = min_t(unsigned int, len, sizeof(sec));
1810 if (copy_to_user(optval, (char *) &sec, len))
1815 case BT_DEFER_SETUP:
1816 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1821 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/*
 * Shut down the socket: mark both directions closed, cancel the channel
 * timer and start the L2CAP disconnect sequence. If SO_LINGER is set with
 * a non-zero timeout, block until the socket reaches BT_CLOSED (or the
 * linger time expires).
 */
1835 static int l2cap_sock_shutdown(struct socket *sock, int how)
1837 struct sock *sk = sock->sk;
1840 BT_DBG("sock %p, sk %p", sock, sk);
1846 if (!sk->sk_shutdown) {
1847 sk->sk_shutdown = SHUTDOWN_MASK;
1848 l2cap_sock_clear_timer(sk);
1849 __l2cap_sock_close(sk, 0);
1851 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1852 err = bt_sock_wait_state(sk, BT_CLOSED,
/*
 * Release (close) the socket: perform a full shutdown of both directions,
 * then kill the sock (frees it once it is zapped and unreferenced).
 */
1859 static int l2cap_sock_release(struct socket *sock)
1861 struct sock *sk = sock->sk;
1864 BT_DBG("sock %p, sk %p", sock, sk);
1869 err = l2cap_sock_shutdown(sock, 2);
1872 l2cap_sock_kill(sk);
/*
 * Channel configuration completed — move the channel to BT_CONNECTED and
 * wake whoever is waiting on it: for an outgoing channel the connecting
 * socket itself, for an incoming channel the listening parent socket
 * (so accept() can pick it up). Also clears conf_state and the setup timer.
 */
1876 static void l2cap_chan_ready(struct sock *sk)
1878 struct sock *parent = bt_sk(sk)->parent;
1880 BT_DBG("sk %p, parent %p", sk, parent);
1882 l2cap_pi(sk)->conf_state = 0;
1883 l2cap_sock_clear_timer(sk);
1886 /* Outgoing channel.
1887 * Wake up socket sleeping on connect.
1889 sk->sk_state = BT_CONNECTED;
1890 sk->sk_state_change(sk);
1892 /* Incoming channel.
1893 * Wake up socket sleeping on accept.
1895 parent->sk_data_ready(parent, 0);
1899 /* Copy frame to all raw sockets on that connection */
/*
 * Walk the connection's channel list under the read lock and deliver a
 * clone of the skb to every SOCK_RAW socket (sniffers). GFP_ATOMIC is
 * required — this runs in the receive path. A clone failure or a full
 * receive queue simply skips that socket (lines not visible in this chunk).
 */
1900 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1902 struct l2cap_chan_list *l = &conn->chan_list;
1903 struct sk_buff *nskb;
1906 BT_DBG("conn %p", conn);
1908 read_lock(&l->lock);
1909 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1910 if (sk->sk_type != SOCK_RAW)
1913 /* Don't send frame to the socket it came from */
1916 nskb = skb_clone(skb, GFP_ATOMIC);
1920 if (sock_queue_rcv_skb(sk, nskb))
1923 read_unlock(&l->lock);
1926 /* ---- L2CAP signalling commands ---- */
/*
 * Build an L2CAP signalling command skb: L2CAP header (CID 0x0001,
 * signalling channel) + command header (code/ident/len) + payload.
 * If the total exceeds the connection MTU, the remainder of the payload
 * is attached as continuation fragments on the frag_list (fragments carry
 * no L2CAP header). Returns the skb, or NULL on allocation failure
 * (error-path lines are not visible in this chunk).
 */
1927 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1928 u8 code, u8 ident, u16 dlen, void *data)
1930 struct sk_buff *skb, **frag;
1931 struct l2cap_cmd_hdr *cmd;
1932 struct l2cap_hdr *lh;
1935 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1936 conn, code, ident, dlen);
1938 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
/* first fragment is capped at the connection MTU */
1939 count = min_t(unsigned int, conn->mtu, len);
1941 skb = bt_skb_alloc(count, GFP_ATOMIC);
1945 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1946 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1947 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1949 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1952 cmd->len = cpu_to_le16(dlen);
/* payload space left in the first fragment after the two headers */
1955 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1956 memcpy(skb_put(skb, count), data, count);
1962 /* Continuation fragments (no L2CAP header) */
1963 frag = &skb_shinfo(skb)->frag_list;
1965 count = min_t(unsigned int, conn->mtu, len);
1967 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1971 memcpy(skb_put(*frag, count), data, count);
1976 frag = &(*frag)->next;
/*
 * Decode one configuration option at *ptr: report its type and length,
 * and return its value — 1/2/4-byte options are read and converted from
 * little endian into *val; any other length returns a pointer to the raw
 * option bytes cast into *val. Returns the total bytes consumed
 * (header + value) so the caller can advance through the option list.
 */
1986 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1988 struct l2cap_conf_opt *opt = *ptr;
1991 len = L2CAP_CONF_OPT_SIZE + opt->len;
1999 *val = *((u8 *) opt->val);
2003 *val = __le16_to_cpu(*((__le16 *) opt->val));
2007 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* variable-length option: hand back a pointer to the raw bytes */
2011 *val = (unsigned long) opt->val;
2015 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option to the buffer at *ptr and advance the
 * pointer past it. 1/2/4-byte values are stored little endian; any other
 * length treats val as a pointer to raw bytes and memcpy's them.
 * The caller must ensure the buffer has room — no bounds check here.
 */
2019 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2021 struct l2cap_conf_opt *opt = *ptr;
2023 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2030 *((u8 *) opt->val) = val;
2034 *((__le16 *) opt->val) = cpu_to_le16(val);
2038 *((__le32 *) opt->val) = cpu_to_le32(val);
2042 memcpy(opt->val, (void *) val, len);
2046 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * Check whether a channel mode is usable: the mode's feature bit must be
 * set both in the remote feature mask and in our local mask. ERTM support
 * is added to the local mask conditionally (the enable_ertm guard line is
 * not visible in this chunk). Returns nonzero when supported.
 */
2049 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2051 u32 local_feat_mask = l2cap_feat_mask;
2053 local_feat_mask |= L2CAP_FEAT_ERTM;
2056 case L2CAP_MODE_ERTM:
2057 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2058 case L2CAP_MODE_STREAMING:
2059 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/*
 * Pick the actual channel mode: keep the requested ERTM/Streaming mode
 * when the remote feature mask supports it, otherwise fall back to
 * Basic mode (the fallthrough/return path for the supported case is not
 * visible in this chunk).
 */
2065 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2068 case L2CAP_MODE_STREAMING:
2069 case L2CAP_MODE_ERTM:
2070 if (l2cap_mode_supported(mode, remote_feat_mask))
2074 return L2CAP_MODE_BASIC;
/*
 * Build an outgoing L2CAP Configure Request for this channel into 'data'.
 *
 * On the first request (no previous conf req/rsp exchanged) the desired
 * mode is validated against the peer's feature mask: ERTM/Streaming are
 * kept only if mutually supported (STATE2_DEVICE marks that we insisted
 * on the mode), otherwise l2cap_select_mode() downgrades to Basic.
 * Basic mode adds an MTU option only when imtu differs from the default;
 * ERTM/Streaming add an RFC option with window/retransmit/PDU parameters
 * (timeouts 0 = "use receiver's values" in the request).
 * Returns the total request length (return line not visible in chunk).
 */
2078 static int l2cap_build_conf_req(struct sock *sk, void *data)
2080 struct l2cap_pinfo *pi = l2cap_pi(sk);
2081 struct l2cap_conf_req *req = data;
2082 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_ERTM };
2083 void *ptr = req->data;
2085 BT_DBG("sk %p", sk);
/* only negotiate mode on the very first configure exchange */
2087 if (pi->num_conf_req || pi->num_conf_rsp)
2091 case L2CAP_MODE_STREAMING:
2092 case L2CAP_MODE_ERTM:
2093 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2094 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2095 l2cap_send_disconn_req(pi->conn, sk);
2098 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2104 case L2CAP_MODE_BASIC:
2105 if (pi->imtu != L2CAP_DEFAULT_MTU)
2106 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2109 case L2CAP_MODE_ERTM:
2110 rfc.mode = L2CAP_MODE_ERTM;
2111 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2112 rfc.max_transmit = L2CAP_DEFAULT_MAX_TX;
/* 0 timeouts in a request mean "peer's values apply" */
2113 rfc.retrans_timeout = 0;
2114 rfc.monitor_timeout = 0;
2115 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2117 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2118 sizeof(rfc), (unsigned long) &rfc);
2121 case L2CAP_MODE_STREAMING:
2122 rfc.mode = L2CAP_MODE_STREAMING;
2124 rfc.max_transmit = 0;
2125 rfc.retrans_timeout = 0;
2126 rfc.monitor_timeout = 0;
2127 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2129 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2130 sizeof(rfc), (unsigned long) &rfc);
2134 /* FIXME: Need actual value of the flush timeout */
2135 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2136 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2138 req->dcid = cpu_to_le16(pi->dcid);
2139 req->flags = cpu_to_le16(0);
/*
 * Parse the accumulated Configure Request (pi->conf_req/conf_len) and
 * build our Configure Response into 'data'.
 *
 * Pass 1: walk the option list, recording MTU and RFC values; unknown
 * non-hint options are echoed back with result CONF_UNKNOWN.
 * Pass 2: resolve the mode (first exchange only — same STATE2_DEVICE
 * logic as l2cap_build_conf_req; an unsupported insisted-on mode refuses
 * the connection), then, if still successful, validate the MTU against
 * the minimum and apply per-mode settings (remote window/max_tx/PDU size,
 * default retrans/monitor timeouts for ERTM). A mode mismatch yields
 * CONF_UNACCEPT with our mode echoed in an RFC option; a second
 * unacceptable round gives up with -ECONNREFUSED.
 * Returns the response length (return line not visible in this chunk).
 */
2144 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2146 struct l2cap_pinfo *pi = l2cap_pi(sk);
2147 struct l2cap_conf_rsp *rsp = data;
2148 void *ptr = rsp->data;
2149 void *req = pi->conf_req;
2150 int len = pi->conf_len;
2151 int type, hint, olen;
2153 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2154 u16 mtu = L2CAP_DEFAULT_MTU;
2155 u16 result = L2CAP_CONF_SUCCESS;
2157 BT_DBG("sk %p", sk);
2159 while (len >= L2CAP_CONF_OPT_SIZE) {
2160 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* hint bit set = option may be ignored; otherwise unknown is an error */
2162 hint = type & L2CAP_CONF_HINT;
2163 type &= L2CAP_CONF_MASK;
2166 case L2CAP_CONF_MTU:
2170 case L2CAP_CONF_FLUSH_TO:
2174 case L2CAP_CONF_QOS:
2177 case L2CAP_CONF_RFC:
2178 if (olen == sizeof(rfc))
2179 memcpy(&rfc, (void *) val, olen);
2186 result = L2CAP_CONF_UNKNOWN;
2187 *((u8 *) ptr++) = type;
2192 if (pi->num_conf_rsp || pi->num_conf_req)
2196 case L2CAP_MODE_STREAMING:
2197 case L2CAP_MODE_ERTM:
2198 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2199 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2200 return -ECONNREFUSED;
2203 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2208 if (pi->mode != rfc.mode) {
2209 result = L2CAP_CONF_UNACCEPT;
2210 rfc.mode = pi->mode;
/* one failed round of negotiation already happened — give up */
2212 if (pi->num_conf_rsp == 1)
2213 return -ECONNREFUSED;
2215 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2216 sizeof(rfc), (unsigned long) &rfc);
2220 if (result == L2CAP_CONF_SUCCESS) {
2221 /* Configure output options and let the other side know
2222 * which ones we don't like. */
2224 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2225 result = L2CAP_CONF_UNACCEPT;
2228 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2230 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2233 case L2CAP_MODE_BASIC:
2234 pi->fcs = L2CAP_FCS_NONE;
2235 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2238 case L2CAP_MODE_ERTM:
2239 pi->remote_tx_win = rfc.txwin_size;
2240 pi->remote_max_tx = rfc.max_transmit;
/* NOTE(review): rfc.max_pdu_size is __le16 on the wire; stored here
 * without le16_to_cpu, unlike l2cap_parse_conf_rsp — verify on BE */
2241 pi->max_pdu_size = rfc.max_pdu_size;
2243 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
2244 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2246 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2249 case L2CAP_MODE_STREAMING:
2250 pi->remote_tx_win = rfc.txwin_size;
2251 pi->max_pdu_size = rfc.max_pdu_size;
2253 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2257 result = L2CAP_CONF_UNACCEPT;
2259 memset(&rfc, 0, sizeof(rfc));
2260 rfc.mode = pi->mode;
2263 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2264 sizeof(rfc), (unsigned long) &rfc);
2266 if (result == L2CAP_CONF_SUCCESS)
2267 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2269 rsp->scid = cpu_to_le16(pi->dcid);
2270 rsp->result = cpu_to_le16(result);
2271 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse a peer's Configure Response and build our follow-up Configure
 * Request into 'data' (used when the peer answered CONF_UNACCEPT and we
 * retry with adjusted options).
 *
 * MTU below the minimum is clamped and flagged unacceptable; flush
 * timeout and RFC options are echoed/renegotiated. If we insisted on a
 * mode (STATE2_DEVICE) and the peer proposes a different one, refuse.
 * On overall success, ERTM/Streaming parameters from the peer's RFC are
 * committed to the channel. Returns the request length (return line not
 * visible in this chunk).
 *
 * NOTE(review): 'rfc' is declared without an initializer; if the response
 * carries no RFC option, the success-path reads of rfc.txwin_size /
 * timeouts / max_pdu_size use uninitialized stack data — confirm against
 * upstream fix that seeds rfc with defaults.
 */
2276 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2278 struct l2cap_pinfo *pi = l2cap_pi(sk);
2279 struct l2cap_conf_req *req = data;
2280 void *ptr = req->data;
2283 struct l2cap_conf_rfc rfc;
2285 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2287 while (len >= L2CAP_CONF_OPT_SIZE) {
2288 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2291 case L2CAP_CONF_MTU:
2292 if (val < L2CAP_DEFAULT_MIN_MTU) {
2293 *result = L2CAP_CONF_UNACCEPT;
2294 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2297 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2300 case L2CAP_CONF_FLUSH_TO:
2302 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2306 case L2CAP_CONF_RFC:
2307 if (olen == sizeof(rfc))
2308 memcpy(&rfc, (void *)val, olen);
/* we insisted on this mode earlier — a different peer mode is fatal */
2310 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2311 rfc.mode != pi->mode)
2312 return -ECONNREFUSED;
2314 pi->mode = rfc.mode;
2317 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2318 sizeof(rfc), (unsigned long) &rfc);
2323 if (*result == L2CAP_CONF_SUCCESS) {
2325 case L2CAP_MODE_ERTM:
2326 pi->remote_tx_win = rfc.txwin_size;
2327 pi->retrans_timeout = rfc.retrans_timeout;
2328 pi->monitor_timeout = rfc.monitor_timeout;
2329 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2331 case L2CAP_MODE_STREAMING:
2332 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2337 req->dcid = cpu_to_le16(pi->dcid);
2338 req->flags = cpu_to_le16(0x0000);
/*
 * Build a minimal (option-free) Configure Response with the given result
 * and flags — used for rejects and "incomplete config" acknowledgements.
 * Returns the response length (return line not visible in this chunk).
 */
2343 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2345 struct l2cap_conf_rsp *rsp = data;
2346 void *ptr = rsp->data;
2348 BT_DBG("sk %p", sk);
2350 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2351 rsp->result = cpu_to_le16(result);
2352 rsp->flags = cpu_to_le16(flags);
/*
 * Handle an incoming Command Reject. The only case acted on is reason
 * 0x0000 (command not understood) matching our outstanding Information
 * Request ident: the peer doesn't speak the info request, so stop the
 * info timer, mark feature discovery done and start pending channels.
 */
2357 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2359 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2361 if (rej->reason != 0x0000)
2364 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2365 cmd->ident == conn->info_ident) {
2366 del_timer(&conn->info_timer);
2368 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2369 conn->info_ident = 0;
2371 l2cap_conn_start(conn);
/*
 * Handle an incoming L2CAP Connect Request.
 *
 * Flow: find a listening socket for the PSM on our source address;
 * enforce link security for non-SDP PSMs; check the accept backlog;
 * allocate and initialise a child socket; reject a duplicate remote CID;
 * then answer according to security/defer state:
 *   - security OK + defer_setup  -> BT_CONNECT2, result PEND/AUTHOR_PEND
 *   - security OK                -> BT_CONFIG, result SUCCESS
 *   - security pending           -> BT_CONNECT2, result PEND/AUTHEN_PEND
 *   - feature exchange not done  -> BT_CONNECT2, result PEND/NO_INFO
 * In the last case an Information Request (feature mask) is kicked off
 * after the response is sent.
 */
2377 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2379 struct l2cap_chan_list *list = &conn->chan_list;
2380 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2381 struct l2cap_conn_rsp rsp;
2382 struct sock *sk, *parent;
2383 int result, status = L2CAP_CS_NO_INFO;
2385 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2386 __le16 psm = req->psm;
2388 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2390 /* Check if we have socket listening on psm */
2391 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2393 result = L2CAP_CR_BAD_PSM;
2397 /* Check if the ACL is secure enough (if not SDP) */
2398 if (psm != cpu_to_le16(0x0001) &&
2399 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05 = authentication failure disconnect reason */
2400 conn->disc_reason = 0x05;
2401 result = L2CAP_CR_SEC_BLOCK;
2405 result = L2CAP_CR_NO_MEM;
2407 /* Check for backlog size */
2408 if (sk_acceptq_is_full(parent)) {
2409 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2413 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2417 write_lock_bh(&list->lock);
2419 /* Check if we already have channel with that dcid */
2420 if (__l2cap_get_chan_by_dcid(list, scid)) {
2421 write_unlock_bh(&list->lock);
2422 sock_set_flag(sk, SOCK_ZAPPED);
2423 l2cap_sock_kill(sk);
2427 hci_conn_hold(conn->hcon);
2429 l2cap_sock_init(sk, parent);
2430 bacpy(&bt_sk(sk)->src, conn->src);
2431 bacpy(&bt_sk(sk)->dst, conn->dst);
2432 l2cap_pi(sk)->psm = psm;
/* peer's source CID becomes our destination CID */
2433 l2cap_pi(sk)->dcid = scid;
2435 __l2cap_chan_add(conn, sk, parent);
2436 dcid = l2cap_pi(sk)->scid;
2438 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* remember the request ident for the (possibly deferred) response */
2440 l2cap_pi(sk)->ident = cmd->ident;
2442 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2443 if (l2cap_check_security(sk)) {
2444 if (bt_sk(sk)->defer_setup) {
2445 sk->sk_state = BT_CONNECT2;
2446 result = L2CAP_CR_PEND;
2447 status = L2CAP_CS_AUTHOR_PEND;
2448 parent->sk_data_ready(parent, 0);
2450 sk->sk_state = BT_CONFIG;
2451 result = L2CAP_CR_SUCCESS;
2452 status = L2CAP_CS_NO_INFO;
2455 sk->sk_state = BT_CONNECT2;
2456 result = L2CAP_CR_PEND;
2457 status = L2CAP_CS_AUTHEN_PEND;
2460 sk->sk_state = BT_CONNECT2;
2461 result = L2CAP_CR_PEND;
2462 status = L2CAP_CS_NO_INFO;
2465 write_unlock_bh(&list->lock);
2468 bh_unlock_sock(parent);
2471 rsp.scid = cpu_to_le16(scid);
2472 rsp.dcid = cpu_to_le16(dcid);
2473 rsp.result = cpu_to_le16(result);
2474 rsp.status = cpu_to_le16(status);
2475 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2477 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2478 struct l2cap_info_req info;
2479 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2481 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2482 conn->info_ident = l2cap_get_ident(conn);
2484 mod_timer(&conn->info_timer, jiffies +
2485 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2487 l2cap_send_cmd(conn, conn->info_ident,
2488 L2CAP_INFO_REQ, sizeof(info), &info);
/*
 * Handle an incoming Connect Response to our earlier Connect Request.
 * The channel is located by scid, falling back to the pending ident.
 * SUCCESS -> record the peer's dcid, enter BT_CONFIG and immediately
 * send our first Configure Request. PEND -> just mark CONNECT_PEND.
 * Any other result tears the channel down with ECONNREFUSED.
 */
2494 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2496 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2497 u16 scid, dcid, result, status;
2501 scid = __le16_to_cpu(rsp->scid);
2502 dcid = __le16_to_cpu(rsp->dcid);
2503 result = __le16_to_cpu(rsp->result);
2504 status = __le16_to_cpu(rsp->status);
2506 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2509 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
/* a pending (deferred) response carries scid 0 — match on ident instead */
2513 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2519 case L2CAP_CR_SUCCESS:
2520 sk->sk_state = BT_CONFIG;
2521 l2cap_pi(sk)->ident = 0;
2522 l2cap_pi(sk)->dcid = dcid;
2523 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2525 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2527 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2528 l2cap_build_conf_req(sk, req), req);
2529 l2cap_pi(sk)->num_conf_req++;
2533 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2537 l2cap_chan_del(sk, ECONNREFUSED);
/*
 * Handle an incoming Configure Request.
 *
 * Option data may arrive split across several requests (continuation
 * flag 0x0001): each fragment is appended to pi->conf_req, bounded by
 * the buffer size (overflow is answered with CONF_REJECT). Only when the
 * final fragment arrives is the whole set parsed and a real response
 * sent. Once both directions are configured (OUTPUT_DONE + INPUT_DONE)
 * the channel goes BT_CONNECTED, ERTM sequence state is reset and the
 * retransmission/monitor timers are armed. If we have not yet sent our
 * own Configure Request, one is sent now.
 */
2545 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2547 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2553 dcid = __le16_to_cpu(req->dcid);
2554 flags = __le16_to_cpu(req->flags);
2556 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2558 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2562 if (sk->sk_state == BT_DISCONN)
2565 /* Reject if config buffer is too small. */
2566 len = cmd_len - sizeof(*req);
2567 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2568 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2569 l2cap_build_conf_rsp(sk, rsp,
2570 L2CAP_CONF_REJECT, flags), rsp);
/* accumulate this fragment's options into the per-channel buffer */
2575 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2576 l2cap_pi(sk)->conf_len += len;
2578 if (flags & 0x0001) {
2579 /* Incomplete config. Send empty response. */
2580 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2581 l2cap_build_conf_rsp(sk, rsp,
2582 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2586 /* Complete config. */
2587 len = l2cap_parse_conf_req(sk, rsp);
/* parse failure (e.g. unsupported insisted mode) aborts the channel */
2589 l2cap_send_disconn_req(conn, sk);
2593 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2594 l2cap_pi(sk)->num_conf_rsp++;
2596 /* Reset config buffer. */
2597 l2cap_pi(sk)->conf_len = 0;
2599 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2602 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2603 sk->sk_state = BT_CONNECTED;
2604 l2cap_pi(sk)->next_tx_seq = 0;
2605 l2cap_pi(sk)->expected_ack_seq = 0;
2606 l2cap_pi(sk)->unacked_frames = 0;
2608 setup_timer(&l2cap_pi(sk)->retrans_timer,
2609 l2cap_retrans_timeout, (unsigned long) sk);
2610 setup_timer(&l2cap_pi(sk)->monitor_timer,
2611 l2cap_monitor_timeout, (unsigned long) sk);
2613 __skb_queue_head_init(TX_QUEUE(sk));
2614 l2cap_chan_ready(sk);
2618 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2620 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2621 l2cap_build_conf_req(sk, buf), buf);
2622 l2cap_pi(sk)->num_conf_req++;
/*
 * Handle an incoming Configure Response.
 * SUCCESS -> fall through to mark our outgoing config done.
 * UNACCEPT -> while under the retry limit, re-parse the peer's counter-
 * proposal into a fresh Configure Request and send it; otherwise (or on
 * any other result) put the channel in BT_DISCONN and disconnect.
 * When both directions are done the channel becomes BT_CONNECTED.
 */
2630 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2632 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2633 u16 scid, flags, result;
2636 scid = __le16_to_cpu(rsp->scid);
2637 flags = __le16_to_cpu(rsp->flags);
2638 result = __le16_to_cpu(rsp->result);
2640 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2641 scid, flags, result);
2643 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2648 case L2CAP_CONF_SUCCESS:
2651 case L2CAP_CONF_UNACCEPT:
2652 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* NOTE(review): cmd->len is __le16 but is used raw here (no
 * le16_to_cpu), unlike every other field above — suspect
 * big-endian bug; confirm against upstream fix */
2653 int len = cmd->len - sizeof(*rsp);
2656 /* throw out any old stored conf requests */
2657 result = L2CAP_CONF_SUCCESS;
2658 len = l2cap_parse_conf_rsp(sk, rsp->data,
2661 l2cap_send_disconn_req(conn, sk);
2665 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2666 L2CAP_CONF_REQ, len, req);
2667 l2cap_pi(sk)->num_conf_req++;
2668 if (result != L2CAP_CONF_SUCCESS)
2674 sk->sk_state = BT_DISCONN;
2675 sk->sk_err = ECONNRESET;
/* give the disconnect handshake 5 seconds before the timer fires */
2676 l2cap_sock_set_timer(sk, HZ * 5);
2677 l2cap_send_disconn_req(conn, sk);
2684 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2686 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2687 sk->sk_state = BT_CONNECTED;
2688 l2cap_pi(sk)->expected_tx_seq = 0;
2689 l2cap_pi(sk)->num_to_ack = 0;
2690 __skb_queue_head_init(TX_QUEUE(sk));
2691 l2cap_chan_ready(sk);
/*
 * Handle an incoming Disconnect Request: acknowledge with a Disconnect
 * Response, mark the socket fully shut down, purge the ERTM transmit
 * queue and stop its timers, then remove the channel (ECONNRESET) and
 * kill the socket.
 */
2699 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2701 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2702 struct l2cap_disconn_rsp rsp;
2706 scid = __le16_to_cpu(req->scid);
2707 dcid = __le16_to_cpu(req->dcid);
2709 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* the request's dcid is our local (source) CID */
2711 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2715 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2716 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2717 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2719 sk->sk_shutdown = SHUTDOWN_MASK;
2721 skb_queue_purge(TX_QUEUE(sk));
2722 del_timer(&l2cap_pi(sk)->retrans_timer);
2723 del_timer(&l2cap_pi(sk)->monitor_timer);
2725 l2cap_chan_del(sk, ECONNRESET);
2728 l2cap_sock_kill(sk);
/*
 * Handle the Disconnect Response to our own Disconnect Request: purge
 * the transmit queue, stop ERTM timers, remove the channel (no error —
 * this was a local, clean disconnect) and kill the socket.
 */
2732 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2734 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2738 scid = __le16_to_cpu(rsp->scid);
2739 dcid = __le16_to_cpu(rsp->dcid);
2741 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2743 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2747 skb_queue_purge(TX_QUEUE(sk));
2748 del_timer(&l2cap_pi(sk)->retrans_timer);
2749 del_timer(&l2cap_pi(sk)->monitor_timer);
2751 l2cap_chan_del(sk, 0);
2754 l2cap_sock_kill(sk);
/*
 * Handle an incoming Information Request.
 * FEAT_MASK -> reply with our feature mask (ERTM bit added conditionally;
 * the enable_ertm guard line is not visible in this chunk), written
 * unaligned little endian into the response payload.
 * FIXED_CHAN -> reply with our fixed-channel bitmap (8 bytes at offset 4,
 * after the type/result header).
 * Anything else -> NOTSUPP.
 */
2758 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2760 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2763 type = __le16_to_cpu(req->type);
2765 BT_DBG("type 0x%4.4x", type);
2767 if (type == L2CAP_IT_FEAT_MASK) {
2769 u32 feat_mask = l2cap_feat_mask;
2770 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2771 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2772 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2774 feat_mask |= L2CAP_FEAT_ERTM;
2775 put_unaligned(cpu_to_le32(feat_mask), (__le32 *) rsp->data);
2776 l2cap_send_cmd(conn, cmd->ident,
2777 L2CAP_INFO_RSP, sizeof(buf), buf);
2778 } else if (type == L2CAP_IT_FIXED_CHAN) {
2780 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2781 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2782 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2783 memcpy(buf + 4, l2cap_fixed_chan, 8);
2784 l2cap_send_cmd(conn, cmd->ident,
2785 L2CAP_INFO_RSP, sizeof(buf), buf);
2787 struct l2cap_info_rsp rsp;
2788 rsp.type = cpu_to_le16(type);
2789 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2790 l2cap_send_cmd(conn, cmd->ident,
2791 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an incoming Information Response (answers to our requests).
 * FEAT_MASK -> store the peer's features; if it advertises fixed
 * channels, chain a FIXED_CHAN request, otherwise discovery is complete
 * and pending channels are started. FIXED_CHAN -> discovery complete,
 * start pending channels. The info timer is always cancelled first.
 */
2797 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2799 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2802 type = __le16_to_cpu(rsp->type);
2803 result = __le16_to_cpu(rsp->result);
2805 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2807 del_timer(&conn->info_timer);
2809 if (type == L2CAP_IT_FEAT_MASK) {
2810 conn->feat_mask = get_unaligned_le32(rsp->data);
2812 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2813 struct l2cap_info_req req;
2814 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2816 conn->info_ident = l2cap_get_ident(conn);
2818 l2cap_send_cmd(conn, conn->info_ident,
2819 L2CAP_INFO_REQ, sizeof(req), &req);
2821 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2822 conn->info_ident = 0;
2824 l2cap_conn_start(conn);
2826 } else if (type == L2CAP_IT_FIXED_CHAN) {
2827 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2828 conn->info_ident = 0;
2830 l2cap_conn_start(conn);
/*
 * Process the signalling channel (CID 0x0001) of a received frame:
 * copy the frame to raw sockets, then iterate the concatenated command
 * list, dispatching each command by code. A command whose declared
 * length exceeds the remaining data, or with ident 0, aborts the loop
 * as corrupted. Any handler error is answered with a Command Reject.
 */
2836 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
2838 u8 *data = skb->data;
2840 struct l2cap_cmd_hdr cmd;
2843 l2cap_raw_recv(conn, skb);
2845 while (len >= L2CAP_CMD_HDR_SIZE) {
2847 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2848 data += L2CAP_CMD_HDR_SIZE;
2849 len -= L2CAP_CMD_HDR_SIZE;
2851 cmd_len = le16_to_cpu(cmd.len);
2853 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
2855 if (cmd_len > len || !cmd.ident) {
2856 BT_DBG("corrupted command");
2861 case L2CAP_COMMAND_REJ:
2862 l2cap_command_rej(conn, &cmd, data);
2865 case L2CAP_CONN_REQ:
2866 err = l2cap_connect_req(conn, &cmd, data);
2869 case L2CAP_CONN_RSP:
2870 err = l2cap_connect_rsp(conn, &cmd, data);
2873 case L2CAP_CONF_REQ:
2874 err = l2cap_config_req(conn, &cmd, cmd_len, data);
2877 case L2CAP_CONF_RSP:
2878 err = l2cap_config_rsp(conn, &cmd, data);
2881 case L2CAP_DISCONN_REQ:
2882 err = l2cap_disconnect_req(conn, &cmd, data);
2885 case L2CAP_DISCONN_RSP:
2886 err = l2cap_disconnect_rsp(conn, &cmd, data);
2889 case L2CAP_ECHO_REQ:
/* echo the request payload straight back */
2890 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
2893 case L2CAP_ECHO_RSP:
2896 case L2CAP_INFO_REQ:
2897 err = l2cap_information_req(conn, &cmd, data);
2900 case L2CAP_INFO_RSP:
2901 err = l2cap_information_rsp(conn, &cmd, data);
2905 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
2911 struct l2cap_cmd_rej rej;
2912 BT_DBG("error %d", err);
2914 /* FIXME: Map err to a valid reason */
2915 rej.reason = cpu_to_le16(0);
2916 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/*
 * ERTM receive-side SAR reassembly, driven by the control field's SAR
 * bits.
 * UNSEGMENTED: must not arrive mid-reassembly; queued directly.
 * START: must not arrive mid-reassembly; first two payload bytes carry
 *   the total SDU length, a reassembly buffer of that size is allocated
 *   and the fragment copied in.
 * CONTINUE: only valid mid-reassembly; appended, and overrunning the
 *   announced SDU length is an error.
 * END (default case): appended; when the accumulated length matches the
 *   announced SDU length, a clone of the full SDU is queued to the
 *   socket. Error-return lines are not visible in this chunk.
 */
2926 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
2928 struct l2cap_pinfo *pi = l2cap_pi(sk);
2929 struct sk_buff *_skb;
2932 switch (control & L2CAP_CTRL_SAR) {
2933 case L2CAP_SDU_UNSEGMENTED:
2934 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
2939 err = sock_queue_rcv_skb(sk, skb);
2945 case L2CAP_SDU_START:
2946 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* SDU total length travels in the first 2 bytes of a START frame */
2951 pi->sdu_len = get_unaligned_le16(skb->data);
2954 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
2960 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2962 pi->conn_state |= L2CAP_CONN_SAR_SDU;
2963 pi->partial_sdu_len = skb->len;
2967 case L2CAP_SDU_CONTINUE:
2968 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
2971 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2973 pi->partial_sdu_len += skb->len;
2974 if (pi->partial_sdu_len > pi->sdu_len)
2982 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
2985 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2987 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
2988 pi->partial_sdu_len += skb->len;
2990 if (pi->partial_sdu_len == pi->sdu_len) {
2991 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
2992 err = sock_queue_rcv_skb(sk, _skb);
/*
 * Handle an ERTM I-frame.
 * In-sequence tx_seq: clear any active REJ condition, reassemble via
 * SAR, advance expected_tx_seq (mod 64) and, after every
 * L2CAP_DEFAULT_NUM_TO_ACK frames, acknowledge with an RR S-frame
 * carrying the new ReqSeq.
 * Out-of-sequence tx_seq: if no REJ is already outstanding, send a REJ
 * S-frame for expected_tx_seq and set the UNDER_REJ condition.
 */
3006 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3008 struct l2cap_pinfo *pi = l2cap_pi(sk);
3009 u8 tx_seq = __get_txseq(rx_control);
3013 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3015 if (tx_seq == pi->expected_tx_seq) {
3016 if (pi->conn_state & L2CAP_CONN_UNDER_REJ)
3017 pi->conn_state &= ~L2CAP_CONN_UNDER_REJ;
3019 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
/* sequence numbers are modulo-64 per the ERTM control field */
3023 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3024 pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
3025 if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
3026 tx_control |= L2CAP_SUPER_RCV_READY;
3027 tx_control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3031 /* Unexpected txSeq. Send a REJ S-frame */
3033 if (!(pi->conn_state & L2CAP_CONN_UNDER_REJ)) {
3034 tx_control |= L2CAP_SUPER_REJECT;
3035 tx_control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3036 pi->conn_state |= L2CAP_CONN_UNDER_REJ;
3044 return l2cap_send_sframe(pi, tx_control);
/*
 * Handle an ERTM S-frame (supervisory).
 * RR with Poll: answer with an RR carrying the Final bit.
 * RR with Final: end the WAIT_F state (monitor timer poll answered),
 *   restart the retransmission timer if frames are still unacked.
 * Plain RR: acknowledge frames up to ReqSeq, restart the retransmission
 *   timer if needed and resume sending.
 * REJ: acknowledge up to ReqSeq, rewind sk_send_head to the front of the
 *   TX queue and retransmit from the rejected sequence number.
 * RNR / SREJ: not handled here (cases visible but empty in this chunk).
 */
3047 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3049 struct l2cap_pinfo *pi = l2cap_pi(sk);
3051 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3053 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3054 case L2CAP_SUPER_RCV_READY:
3055 if (rx_control & L2CAP_CTRL_POLL) {
3056 u16 control = L2CAP_CTRL_FINAL;
3057 control |= L2CAP_SUPER_RCV_READY;
3058 l2cap_send_sframe(l2cap_pi(sk), control);
3059 } else if (rx_control & L2CAP_CTRL_FINAL) {
3060 if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
3063 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3064 del_timer(&pi->monitor_timer);
3066 if (pi->unacked_frames > 0)
3067 __mod_retrans_timer();
3069 pi->expected_ack_seq = __get_reqseq(rx_control);
3070 l2cap_drop_acked_frames(sk);
3071 if (pi->unacked_frames > 0)
3072 __mod_retrans_timer();
3073 l2cap_ertm_send(sk);
3077 case L2CAP_SUPER_REJECT:
3078 pi->expected_ack_seq = __get_reqseq(rx_control);
3079 l2cap_drop_acked_frames(sk);
/* retransmit: rewind the send head and next_tx_seq to the first
 * unacknowledged frame */
3081 sk->sk_send_head = TX_QUEUE(sk)->next;
3082 pi->next_tx_seq = pi->expected_ack_seq;
3084 l2cap_ertm_send(sk);
3088 case L2CAP_SUPER_RCV_NOT_READY:
3089 case L2CAP_SUPER_SELECT_REJECT:
/*
 * Deliver a data frame to the channel identified by CID.
 * Basic mode: drop if larger than our MTU, otherwise queue to the
 * socket (overflow drops — Basic mode has no flow control).
 * ERTM mode: pull the 16-bit control field, drop oversized PDUs
 * (receiver-side recovery will request retransmission), then dispatch
 * to the I-frame or S-frame handler.
 */
3096 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3102 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid)
3104 BT_DBG("unknown cid 0x%4.4x", cid);
3108 BT_DBG("sk %p, len %d", sk, skb->len);
3110 if (sk->sk_state != BT_CONNECTED)
3113 switch (l2cap_pi(sk)->mode) {
3114 case L2CAP_MODE_BASIC:
3115 /* If socket recv buffers overflows we drop data here
3116 * which is *bad* because L2CAP has to be reliable.
3117 * But we don't have any other choice. L2CAP doesn't
3118 * provide flow control mechanism. */
3120 if (l2cap_pi(sk)->imtu < skb->len)
3123 if (!sock_queue_rcv_skb(sk, skb))
3127 case L2CAP_MODE_ERTM:
3128 control = get_unaligned_le16(skb->data);
3132 if (__is_sar_start(control))
3136 * We can just drop the corrupted I-frame here.
3137 * Receiver will miss it and start proper recovery
3138 * procedures and ask retransmission.
3140 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
3143 if (__is_iframe(control))
3144 err = l2cap_data_channel_iframe(sk, control, skb);
3146 err = l2cap_data_channel_sframe(sk, control, skb);
3153 BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
/*
 * Deliver a connectionless (CID 0x0002) frame: find any socket bound to
 * the PSM, require it to be BT_BOUND or BT_CONNECTED, enforce the MTU
 * and queue the skb.
 */
3167 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3171 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3175 BT_DBG("sk %p, len %d", sk, skb->len);
3177 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3180 if (l2cap_pi(sk)->imtu < skb->len)
3183 if (!sock_queue_rcv_skb(sk, skb))
/*
 * Top-level demultiplexer for a reassembled L2CAP frame: strip the basic
 * header, validate the declared length against the skb, then route by
 * CID — signalling channel, connectionless channel (PSM prefix in the
 * payload), or a connection-oriented data channel.
 */
3195 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3197 struct l2cap_hdr *lh = (void *) skb->data;
3201 skb_pull(skb, L2CAP_HDR_SIZE);
3202 cid = __le16_to_cpu(lh->cid);
3203 len = __le16_to_cpu(lh->len);
/* header length must match the actual payload; otherwise the frame is dropped */
3205 if (len != skb->len) {
3210 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3213 case L2CAP_CID_SIGNALING:
3214 l2cap_sig_channel(conn, skb);
3217 case L2CAP_CID_CONN_LESS:
3218 psm = get_unaligned((__le16 *) skb->data);
3220 l2cap_conless_channel(conn, psm, skb);
3224 l2cap_data_channel(conn, cid, skb);
3229 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * HCI callback: should we accept an incoming ACL connection from bdaddr?
 * Scan listening L2CAP sockets: lm1 collects flags from sockets bound
 * exactly to this adapter's address (exact match), lm2 from wildcard
 * (BDADDR_ANY) listeners; exact matches take precedence. Each match
 * contributes HCI_LM_ACCEPT plus HCI_LM_MASTER if role switching is
 * requested. Non-ACL links are ignored.
 */
3231 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3233 int exact = 0, lm1 = 0, lm2 = 0;
3234 register struct sock *sk;
3235 struct hlist_node *node;
3237 if (type != ACL_LINK)
3240 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3242 /* Find listening sockets and check their link_mode */
3243 read_lock(&l2cap_sk_list.lock);
3244 sk_for_each(sk, node, &l2cap_sk_list.head) {
3245 if (sk->sk_state != BT_LISTEN)
3248 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3249 lm1 |= HCI_LM_ACCEPT;
3250 if (l2cap_pi(sk)->role_switch)
3251 lm1 |= HCI_LM_MASTER;
3253 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3254 lm2 |= HCI_LM_ACCEPT;
3255 if (l2cap_pi(sk)->role_switch)
3256 lm2 |= HCI_LM_MASTER;
3259 read_unlock(&l2cap_sk_list.lock);
3261 return exact ? lm1 : lm2;
/*
 * l2cap_connect_cfm - HCI callback: result of an ACL connection attempt.
 * On success, obtain/create the L2CAP connection object and start
 * channel bring-up; on failure, tear the connection down with the HCI
 * status mapped to an errno.
 *
 * NOTE(review): lines are missing in this extract; the success/failure
 * branch structure and return values are not fully visible.
 */
3264 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3266 struct l2cap_conn *conn;
3268 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
/* L2CAP only runs over ACL links. */
3270 if (hcon->type != ACL_LINK)
3274 conn = l2cap_conn_add(hcon, status);
3276 l2cap_conn_ready(conn);
/* Failure path: map HCI status to errno and drop the connection. */
3278 l2cap_conn_del(hcon, bt_err(status));
/*
 * l2cap_disconn_ind - HCI callback: report the disconnect reason to use
 * for this link, taken from the L2CAP connection object.
 *
 * NOTE(review): the default return for a non-ACL link or missing conn is
 * not visible in this extract.
 */
3283 static int l2cap_disconn_ind(struct hci_conn *hcon)
3285 struct l2cap_conn *conn = hcon->l2cap_data;
3287 BT_DBG("hcon %p", hcon);
3289 if (hcon->type != ACL_LINK || !conn)
3292 return conn->disc_reason;
/*
 * l2cap_disconn_cfm - HCI callback: the ACL link was disconnected.
 * Destroys the L2CAP connection, propagating the mapped errno to all
 * channels on it.
 *
 * NOTE(review): the early-return and final return lines are missing from
 * this extract; code unchanged.
 */
3295 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3297 BT_DBG("hcon %p reason %d", hcon, reason);
3299 if (hcon->type != ACL_LINK)
3302 l2cap_conn_del(hcon, bt_err(reason));
/*
 * l2cap_check_encryption - react to an encryption state change.
 * Only connection-oriented (SOCK_SEQPACKET) sockets are policed.
 * When encryption is dropped: MEDIUM-security channels get a 5 second
 * grace timer; HIGH-security channels are closed with ECONNREFUSED.
 * When encryption is (re)enabled: the pending timer on MEDIUM-security
 * channels is cleared.
 *
 * NOTE(review): the else-branch brace structure is not fully visible in
 * this extract; code unchanged.
 */
3307 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3309 if (sk->sk_type != SOCK_SEQPACKET)
3312 if (encrypt == 0x00) {
3313 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
/* Give the link 5 seconds to re-encrypt before acting. */
3314 l2cap_sock_clear_timer(sk);
3315 l2cap_sock_set_timer(sk, HZ * 5);
3316 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3317 __l2cap_sock_close(sk, ECONNREFUSED);
/* Encryption restored: stop the grace timer. */
3319 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3320 l2cap_sock_clear_timer(sk);
/*
 * l2cap_security_cfm - HCI callback: authentication/encryption finished.
 * Walks every channel on the connection and advances its state machine:
 *  - established channels (BT_CONNECTED/BT_CONFIG, status 0) re-check
 *    encryption policy via l2cap_check_encryption();
 *  - BT_CONNECT channels send the deferred L2CAP Connect Request;
 *  - BT_CONNECT2 channels answer the peer's Connect Request with
 *    CR_SUCCESS or CR_SEC_BLOCK depending on the security outcome.
 *
 * NOTE(review): this extract is missing lines (per-socket bh_lock/unlock,
 * 'continue' paths, the status test selecting success vs. sec-block);
 * code is unchanged, comments only.
 */
3324 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3326 struct l2cap_chan_list *l;
3327 struct l2cap_conn *conn = hcon->l2cap_data;
3333 l = &conn->chan_list;
3335 BT_DBG("conn %p", conn);
3337 read_lock(&l->lock);
/* Iterate all channels hanging off this ACL connection. */
3339 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Channel is still waiting for link-level security to complete. */
3342 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3347 if (!status && (sk->sk_state == BT_CONNECTED ||
3348 sk->sk_state == BT_CONFIG)) {
3349 l2cap_check_encryption(sk, encrypt);
/* Security done: emit the Connect Request we were holding back. */
3354 if (sk->sk_state == BT_CONNECT) {
3356 struct l2cap_conn_req req;
3357 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3358 req.psm = l2cap_pi(sk)->psm;
3360 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
3362 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3363 L2CAP_CONN_REQ, sizeof(req), &req);
3365 l2cap_sock_clear_timer(sk);
3366 l2cap_sock_set_timer(sk, HZ / 10);
/* Incoming connection parked in BT_CONNECT2: send the verdict now. */
3368 } else if (sk->sk_state == BT_CONNECT2) {
3369 struct l2cap_conn_rsp rsp;
3373 sk->sk_state = BT_CONFIG;
3374 result = L2CAP_CR_SUCCESS;
/* Security failed: schedule disconnect and report SEC_BLOCK. */
3376 sk->sk_state = BT_DISCONN;
3377 l2cap_sock_set_timer(sk, HZ / 10);
3378 result = L2CAP_CR_SEC_BLOCK;
/* Note scid/dcid are swapped relative to ours: peer's view. */
3381 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3382 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3383 rsp.result = cpu_to_le16(result);
3384 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3385 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3386 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3392 read_unlock(&l->lock);
/*
 * l2cap_recv_acldata - HCI callback: one ACL fragment has arrived.
 * Reassembles L2CAP frames that span multiple ACL packets.  An ACL_START
 * fragment carries the L2CAP basic header, from which the total frame
 * length is computed; continuation fragments are appended to
 * conn->rx_skb until conn->rx_len hits zero, then the complete frame is
 * handed to l2cap_recv_frame().  Any inconsistency (short/long/
 * unexpected fragment) marks the connection unreliable with ECOMM.
 *
 * NOTE(review): several lines are missing from this extract (drop/return
 * paths, the allocation-failure check after bt_skb_alloc, the second
 * argument of the skb_copy_from_linear_data calls); code unchanged.
 */
3397 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3399 struct l2cap_conn *conn = hcon->l2cap_data;
/* Lazily create the connection object on first data. */
3401 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
3404 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3406 if (flags & ACL_START) {
3407 struct l2cap_hdr *hdr;
/* A new start while reassembly is pending: discard the stale frame. */
3411 BT_ERR("Unexpected start frame (len %d)", skb->len);
3412 kfree_skb(conn->rx_skb);
3413 conn->rx_skb = NULL;
3415 l2cap_conn_unreliable(conn, ECOMM);
/* Start fragment must at least contain the L2CAP header. */
3419 BT_ERR("Frame is too short (len %d)", skb->len);
3420 l2cap_conn_unreliable(conn, ECOMM);
3424 hdr = (struct l2cap_hdr *) skb->data;
/* Total frame size = advertised payload + header. */
3425 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3427 if (len == skb->len) {
3428 /* Complete frame received */
3429 l2cap_recv_frame(conn, skb);
3433 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
/* First fragment claims more data than the whole frame holds. */
3435 if (skb->len > len) {
3436 BT_ERR("Frame is too long (len %d, expected len %d)",
3438 l2cap_conn_unreliable(conn, ECOMM);
3442 /* Allocate skb for the complete frame (with header) */
3443 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
3447 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* Bytes still outstanding for this frame. */
3449 conn->rx_len = len - skb->len;
3451 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation fragment with no reassembly in progress. */
3453 if (!conn->rx_len) {
3454 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
3455 l2cap_conn_unreliable(conn, ECOMM);
/* Fragment would overflow the frame being reassembled. */
3459 if (skb->len > conn->rx_len) {
3460 BT_ERR("Fragment is too long (len %d, expected %d)",
3461 skb->len, conn->rx_len);
3462 kfree_skb(conn->rx_skb);
3463 conn->rx_skb = NULL;
3465 l2cap_conn_unreliable(conn, ECOMM);
/* Append this fragment to the frame under reassembly. */
3469 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3471 conn->rx_len -= skb->len;
3473 if (!conn->rx_len) {
3474 /* Complete frame received */
3475 l2cap_recv_frame(conn, conn->rx_skb);
3476 conn->rx_skb = NULL;
/*
 * l2cap_sysfs_show - dump every L2CAP socket into the 'l2cap' class file.
 * One line per socket: src dst state psm scid dcid imtu omtu sec_level.
 *
 * NOTE(review): the 'str' initialisation and return lines are missing in
 * this extract; no explicit bound on 'buf' is visible (sysfs show
 * buffers are one page) — code unchanged.
 */
3485 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
3488 struct hlist_node *node;
3491 read_lock_bh(&l2cap_sk_list.lock);
3493 sk_for_each(sk, node, &l2cap_sk_list.head) {
3494 struct l2cap_pinfo *pi = l2cap_pi(sk);
3496 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3497 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
3498 sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
3499 pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
3502 read_unlock_bh(&l2cap_sk_list.lock);
3507 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/* Socket-layer entry points for BTPROTO_L2CAP sockets.  poll/ioctl use
 * the shared Bluetooth helpers; mmap/socketpair are unsupported.
 * NOTE(review): the closing "};" line is missing from this extract. */
3509 static const struct proto_ops l2cap_sock_ops = {
3510 .family = PF_BLUETOOTH,
3511 .owner = THIS_MODULE,
3512 .release = l2cap_sock_release,
3513 .bind = l2cap_sock_bind,
3514 .connect = l2cap_sock_connect,
3515 .listen = l2cap_sock_listen,
3516 .accept = l2cap_sock_accept,
3517 .getname = l2cap_sock_getname,
3518 .sendmsg = l2cap_sock_sendmsg,
3519 .recvmsg = l2cap_sock_recvmsg,
3520 .poll = bt_sock_poll,
3521 .ioctl = bt_sock_ioctl,
3522 .mmap = sock_no_mmap,
3523 .socketpair = sock_no_socketpair,
3524 .shutdown = l2cap_sock_shutdown,
3525 .setsockopt = l2cap_sock_setsockopt,
3526 .getsockopt = l2cap_sock_getsockopt
/* PF_BLUETOOTH family hook: creates L2CAP sockets via l2cap_sock_create.
 * NOTE(review): closing "};" is missing from this extract. */
3529 static struct net_proto_family l2cap_sock_family_ops = {
3530 .family = PF_BLUETOOTH,
3531 .owner = THIS_MODULE,
3532 .create = l2cap_sock_create,
/* Registration record hooking L2CAP into the HCI core: connection
 * lifecycle, security confirmation and ACL data delivery callbacks.
 * NOTE(review): the .name initializer line and closing "};" are missing
 * from this extract. */
3535 static struct hci_proto l2cap_hci_proto = {
3537 .id = HCI_PROTO_L2CAP,
3538 .connect_ind = l2cap_connect_ind,
3539 .connect_cfm = l2cap_connect_cfm,
3540 .disconn_ind = l2cap_disconn_ind,
3541 .disconn_cfm = l2cap_disconn_cfm,
3542 .security_cfm = l2cap_security_cfm,
3543 .recv_acldata = l2cap_recv_acldata
/*
 * l2cap_init - module init: register the protocol with the socket core,
 * register the PF_BLUETOOTH/L2CAP family, hook into HCI, and create the
 * sysfs info file (best-effort: failure only logs an error).
 *
 * NOTE(review): error-path labels/returns are missing from this extract;
 * the visible unwind order is bt_sock_unregister then proto_unregister.
 */
3546 static int __init l2cap_init(void)
3550 err = proto_register(&l2cap_proto, 0);
3554 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
3556 BT_ERR("L2CAP socket registration failed");
3560 err = hci_register_proto(&l2cap_hci_proto);
3562 BT_ERR("L2CAP protocol registration failed");
/* Unwind the socket-family registration on HCI hook failure. */
3563 bt_sock_unregister(BTPROTO_L2CAP);
/* sysfs file is optional: log and continue on failure. */
3567 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
3568 BT_ERR("Failed to create L2CAP info file");
3570 BT_INFO("L2CAP ver %s", VERSION);
3571 BT_INFO("L2CAP socket layer initialized");
/* Error path: undo the proto registration. */
3576 proto_unregister(&l2cap_proto);
/*
 * l2cap_exit - module teardown: remove the sysfs file, then unregister
 * in reverse order of l2cap_init (socket family, HCI hooks, proto).
 */
3580 static void __exit l2cap_exit(void)
3582 class_remove_file(bt_class, &class_attr_l2cap);
3584 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
3585 BT_ERR("L2CAP socket unregistration failed");
3587 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
3588 BT_ERR("L2CAP protocol unregistration failed");
3590 proto_unregister(&l2cap_proto);
/*
 * l2cap_load - intentionally empty exported symbol: referencing it from
 * another module makes the module loader pull in l2cap automatically.
 */
3593 void l2cap_load(void)
3595 /* Dummy function to trigger automatic L2CAP module loading by
3596 * other modules that use L2CAP sockets but don't use any other
3597 * symbols from it. */
3600 EXPORT_SYMBOL(l2cap_load);
/* Module plumbing: init/exit hooks, the runtime-writable ERTM knob
 * (0644: root-writable via /sys/module/.../parameters) and metadata.
 * "bt-proto-0" aliases BTPROTO_L2CAP for request_module autoloading. */
3602 module_init(l2cap_init);
3603 module_exit(l2cap_exit);
3605 module_param(enable_ertm, bool, 0644);
3606 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
3608 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
3609 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
3610 MODULE_VERSION(VERSION);
3611 MODULE_LICENSE("GPL");
3612 MODULE_ALIAS("bt-proto-0");