2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm = 0;
59 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
64 static const struct proto_ops l2cap_sock_ops;
66 static struct bt_sock_list l2cap_sk_list = {
67 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
70 static void __l2cap_sock_close(struct sock *sk, int reason);
71 static void l2cap_sock_close(struct sock *sk);
72 static void l2cap_sock_kill(struct sock *sk);
74 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
75 u8 code, u8 ident, u16 dlen, void *data);
77 /* ---- L2CAP timers ---- */
/* Socket timer callback: pick an error reason from the socket state and
 * tear the channel down.  Runs in timer (softirq) context, hence the
 * bh_lock_sock/bh_unlock_sock pair. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	/* A timeout while connected/configuring, or while connecting with a
	 * real (non-SDP) security level, is reported as a refused connection;
	 * anything else is a plain timeout. */
	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* Drop the reference the timer held; kill if zapped and orphan. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
103 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
105 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
106 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
109 static void l2cap_sock_clear_timer(struct sock *sk)
111 BT_DBG("sock %p state %d", sk, sk->sk_state);
112 sk_stop_timer(sk, &sk->sk_timer);
115 /* ---- L2CAP channels ---- */
116 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
119 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
120 if (l2cap_pi(s)->dcid == cid)
126 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
129 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
130 if (l2cap_pi(s)->scid == cid)
136 /* Find channel with given SCID.
137 * Returns locked socket */
138 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
142 s = __l2cap_get_chan_by_scid(l, cid);
145 read_unlock(&l->lock);
149 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
152 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
153 if (l2cap_pi(s)->ident == ident)
159 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
163 s = __l2cap_get_chan_by_ident(l, ident);
166 read_unlock(&l->lock);
170 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
172 u16 cid = L2CAP_CID_DYN_START;
174 for (; cid < L2CAP_CID_DYN_END; cid++) {
175 if (!__l2cap_get_chan_by_scid(l, cid))
182 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
187 l2cap_pi(l->head)->prev_c = sk;
189 l2cap_pi(sk)->next_c = l->head;
190 l2cap_pi(sk)->prev_c = NULL;
194 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
196 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
198 write_lock_bh(&l->lock);
203 l2cap_pi(next)->prev_c = prev;
205 l2cap_pi(prev)->next_c = next;
206 write_unlock_bh(&l->lock);
211 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
213 struct l2cap_chan_list *l = &conn->chan_list;
215 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
216 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
218 conn->disc_reason = 0x13;
220 l2cap_pi(sk)->conn = conn;
222 if (sk->sk_type == SOCK_SEQPACKET) {
223 /* Alloc CID for connection-oriented socket */
224 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
225 } else if (sk->sk_type == SOCK_DGRAM) {
226 /* Connectionless socket */
227 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
228 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
229 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
231 /* Raw socket can send/recv signalling messages only */
232 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
233 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
234 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
237 __l2cap_chan_link(l, sk);
240 bt_accept_enqueue(parent, sk);
244 * Must be called on the locked socket. */
245 static void l2cap_chan_del(struct sock *sk, int err)
247 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
248 struct sock *parent = bt_sk(sk)->parent;
250 l2cap_sock_clear_timer(sk);
252 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
255 /* Unlink from channel list */
256 l2cap_chan_unlink(&conn->chan_list, sk);
257 l2cap_pi(sk)->conn = NULL;
258 hci_conn_put(conn->hcon);
261 sk->sk_state = BT_CLOSED;
262 sock_set_flag(sk, SOCK_ZAPPED);
268 bt_accept_unlink(sk);
269 parent->sk_data_ready(parent, 0);
271 sk->sk_state_change(sk);
274 /* Service level security */
275 static inline int l2cap_check_security(struct sock *sk)
277 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
280 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
281 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
282 auth_type = HCI_AT_NO_BONDING_MITM;
284 auth_type = HCI_AT_NO_BONDING;
286 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
287 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
289 switch (l2cap_pi(sk)->sec_level) {
290 case BT_SECURITY_HIGH:
291 auth_type = HCI_AT_GENERAL_BONDING_MITM;
293 case BT_SECURITY_MEDIUM:
294 auth_type = HCI_AT_GENERAL_BONDING;
297 auth_type = HCI_AT_NO_BONDING;
302 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
306 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
310 /* Get next available identificator.
311 * 1 - 128 are used by kernel.
312 * 129 - 199 are reserved.
313 * 200 - 254 are used by utilities like l2ping, etc.
316 spin_lock_bh(&conn->lock);
318 if (++conn->tx_ident > 128)
323 spin_unlock_bh(&conn->lock);
328 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
330 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
332 BT_DBG("code 0x%2.2x", code);
337 return hci_send_acl(conn->hcon, skb, 0);
340 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
343 struct l2cap_hdr *lh;
344 struct l2cap_conn *conn = pi->conn;
345 int count, hlen = L2CAP_HDR_SIZE + 2;
347 if (pi->fcs == L2CAP_FCS_CRC16)
350 BT_DBG("pi %p, control 0x%2.2x", pi, control);
352 count = min_t(unsigned int, conn->mtu, hlen);
353 control |= L2CAP_CTRL_FRAME_TYPE;
355 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
356 control |= L2CAP_CTRL_FINAL;
357 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
360 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
361 control |= L2CAP_CTRL_POLL;
362 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
365 skb = bt_skb_alloc(count, GFP_ATOMIC);
369 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
370 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
371 lh->cid = cpu_to_le16(pi->dcid);
372 put_unaligned_le16(control, skb_put(skb, 2));
374 if (pi->fcs == L2CAP_FCS_CRC16) {
375 u16 fcs = crc16(0, (u8 *)lh, count - 2);
376 put_unaligned_le16(fcs, skb_put(skb, 2));
379 return hci_send_acl(pi->conn->hcon, skb, 0);
382 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
384 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
385 control |= L2CAP_SUPER_RCV_NOT_READY;
387 control |= L2CAP_SUPER_RCV_READY;
389 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
391 return l2cap_send_sframe(pi, control);
394 static void l2cap_do_start(struct sock *sk)
396 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
398 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
399 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
402 if (l2cap_check_security(sk)) {
403 struct l2cap_conn_req req;
404 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
405 req.psm = l2cap_pi(sk)->psm;
407 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
409 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
410 L2CAP_CONN_REQ, sizeof(req), &req);
413 struct l2cap_info_req req;
414 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
416 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
417 conn->info_ident = l2cap_get_ident(conn);
419 mod_timer(&conn->info_timer, jiffies +
420 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
422 l2cap_send_cmd(conn, conn->info_ident,
423 L2CAP_INFO_REQ, sizeof(req), &req);
427 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
429 struct l2cap_disconn_req req;
431 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
432 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
433 l2cap_send_cmd(conn, l2cap_get_ident(conn),
434 L2CAP_DISCONN_REQ, sizeof(req), &req);
437 /* ---- L2CAP connections ---- */
438 static void l2cap_conn_start(struct l2cap_conn *conn)
440 struct l2cap_chan_list *l = &conn->chan_list;
443 BT_DBG("conn %p", conn);
447 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
450 if (sk->sk_type != SOCK_SEQPACKET) {
455 if (sk->sk_state == BT_CONNECT) {
456 if (l2cap_check_security(sk)) {
457 struct l2cap_conn_req req;
458 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
459 req.psm = l2cap_pi(sk)->psm;
461 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
463 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
464 L2CAP_CONN_REQ, sizeof(req), &req);
466 } else if (sk->sk_state == BT_CONNECT2) {
467 struct l2cap_conn_rsp rsp;
468 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
469 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
471 if (l2cap_check_security(sk)) {
472 if (bt_sk(sk)->defer_setup) {
473 struct sock *parent = bt_sk(sk)->parent;
474 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
475 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
476 parent->sk_data_ready(parent, 0);
479 sk->sk_state = BT_CONFIG;
480 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
481 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
484 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
485 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
488 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
489 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
495 read_unlock(&l->lock);
498 static void l2cap_conn_ready(struct l2cap_conn *conn)
500 struct l2cap_chan_list *l = &conn->chan_list;
503 BT_DBG("conn %p", conn);
507 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
510 if (sk->sk_type != SOCK_SEQPACKET) {
511 l2cap_sock_clear_timer(sk);
512 sk->sk_state = BT_CONNECTED;
513 sk->sk_state_change(sk);
514 } else if (sk->sk_state == BT_CONNECT)
520 read_unlock(&l->lock);
523 /* Notify sockets that we cannot guaranty reliability anymore */
524 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
526 struct l2cap_chan_list *l = &conn->chan_list;
529 BT_DBG("conn %p", conn);
533 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
534 if (l2cap_pi(sk)->force_reliable)
538 read_unlock(&l->lock);
541 static void l2cap_info_timeout(unsigned long arg)
543 struct l2cap_conn *conn = (void *) arg;
545 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
546 conn->info_ident = 0;
548 l2cap_conn_start(conn);
551 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
553 struct l2cap_conn *conn = hcon->l2cap_data;
558 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
562 hcon->l2cap_data = conn;
565 BT_DBG("hcon %p conn %p", hcon, conn);
567 conn->mtu = hcon->hdev->acl_mtu;
568 conn->src = &hcon->hdev->bdaddr;
569 conn->dst = &hcon->dst;
573 spin_lock_init(&conn->lock);
574 rwlock_init(&conn->chan_list.lock);
576 setup_timer(&conn->info_timer, l2cap_info_timeout,
577 (unsigned long) conn);
579 conn->disc_reason = 0x13;
584 static void l2cap_conn_del(struct hci_conn *hcon, int err)
586 struct l2cap_conn *conn = hcon->l2cap_data;
592 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
594 kfree_skb(conn->rx_skb);
597 while ((sk = conn->chan_list.head)) {
599 l2cap_chan_del(sk, err);
604 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
605 del_timer_sync(&conn->info_timer);
607 hcon->l2cap_data = NULL;
611 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
613 struct l2cap_chan_list *l = &conn->chan_list;
614 write_lock_bh(&l->lock);
615 __l2cap_chan_add(conn, sk, parent);
616 write_unlock_bh(&l->lock);
619 /* ---- Socket interface ---- */
620 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
623 struct hlist_node *node;
624 sk_for_each(sk, node, &l2cap_sk_list.head)
625 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
632 /* Find socket with psm and source bdaddr.
633 * Returns closest match.
635 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
637 struct sock *sk = NULL, *sk1 = NULL;
638 struct hlist_node *node;
640 sk_for_each(sk, node, &l2cap_sk_list.head) {
641 if (state && sk->sk_state != state)
644 if (l2cap_pi(sk)->psm == psm) {
646 if (!bacmp(&bt_sk(sk)->src, src))
650 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
654 return node ? sk : sk1;
657 /* Find socket with given address (psm, src).
658 * Returns locked socket */
659 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
662 read_lock(&l2cap_sk_list.lock);
663 s = __l2cap_get_sock_by_psm(state, psm, src);
666 read_unlock(&l2cap_sk_list.lock);
670 static void l2cap_sock_destruct(struct sock *sk)
674 skb_queue_purge(&sk->sk_receive_queue);
675 skb_queue_purge(&sk->sk_write_queue);
678 static void l2cap_sock_cleanup_listen(struct sock *parent)
682 BT_DBG("parent %p", parent);
684 /* Close not yet accepted channels */
685 while ((sk = bt_accept_dequeue(parent, NULL)))
686 l2cap_sock_close(sk);
688 parent->sk_state = BT_CLOSED;
689 sock_set_flag(parent, SOCK_ZAPPED);
692 /* Kill socket (only if zapped and orphan)
693 * Must be called on unlocked socket.
695 static void l2cap_sock_kill(struct sock *sk)
697 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
700 BT_DBG("sk %p state %d", sk, sk->sk_state);
702 /* Kill poor orphan */
703 bt_sock_unlink(&l2cap_sk_list, sk);
704 sock_set_flag(sk, SOCK_DEAD);
708 static void __l2cap_sock_close(struct sock *sk, int reason)
710 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
712 switch (sk->sk_state) {
714 l2cap_sock_cleanup_listen(sk);
719 if (sk->sk_type == SOCK_SEQPACKET) {
720 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
722 sk->sk_state = BT_DISCONN;
723 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
724 l2cap_send_disconn_req(conn, sk);
726 l2cap_chan_del(sk, reason);
730 if (sk->sk_type == SOCK_SEQPACKET) {
731 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
732 struct l2cap_conn_rsp rsp;
735 if (bt_sk(sk)->defer_setup)
736 result = L2CAP_CR_SEC_BLOCK;
738 result = L2CAP_CR_BAD_PSM;
740 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
741 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
742 rsp.result = cpu_to_le16(result);
743 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
744 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
745 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
747 l2cap_chan_del(sk, reason);
752 l2cap_chan_del(sk, reason);
756 sock_set_flag(sk, SOCK_ZAPPED);
761 /* Must be called on unlocked socket. */
762 static void l2cap_sock_close(struct sock *sk)
764 l2cap_sock_clear_timer(sk);
766 __l2cap_sock_close(sk, ECONNRESET);
771 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
773 struct l2cap_pinfo *pi = l2cap_pi(sk);
778 sk->sk_type = parent->sk_type;
779 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
781 pi->imtu = l2cap_pi(parent)->imtu;
782 pi->omtu = l2cap_pi(parent)->omtu;
783 pi->mode = l2cap_pi(parent)->mode;
784 pi->fcs = l2cap_pi(parent)->fcs;
785 pi->sec_level = l2cap_pi(parent)->sec_level;
786 pi->role_switch = l2cap_pi(parent)->role_switch;
787 pi->force_reliable = l2cap_pi(parent)->force_reliable;
789 pi->imtu = L2CAP_DEFAULT_MTU;
791 pi->mode = L2CAP_MODE_BASIC;
792 pi->fcs = L2CAP_FCS_CRC16;
793 pi->sec_level = BT_SECURITY_LOW;
795 pi->force_reliable = 0;
798 /* Default config options */
800 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
801 skb_queue_head_init(TX_QUEUE(sk));
802 skb_queue_head_init(SREJ_QUEUE(sk));
803 INIT_LIST_HEAD(SREJ_LIST(sk));
806 static struct proto l2cap_proto = {
808 .owner = THIS_MODULE,
809 .obj_size = sizeof(struct l2cap_pinfo)
812 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
816 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
820 sock_init_data(sock, sk);
821 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
823 sk->sk_destruct = l2cap_sock_destruct;
824 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
826 sock_reset_flag(sk, SOCK_ZAPPED);
828 sk->sk_protocol = proto;
829 sk->sk_state = BT_OPEN;
831 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
833 bt_sock_link(&l2cap_sk_list, sk);
837 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
842 BT_DBG("sock %p", sock);
844 sock->state = SS_UNCONNECTED;
846 if (sock->type != SOCK_SEQPACKET &&
847 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
848 return -ESOCKTNOSUPPORT;
850 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
853 sock->ops = &l2cap_sock_ops;
855 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
859 l2cap_sock_init(sk, NULL);
863 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
865 struct sock *sk = sock->sk;
866 struct sockaddr_l2 la;
871 if (!addr || addr->sa_family != AF_BLUETOOTH)
874 memset(&la, 0, sizeof(la));
875 len = min_t(unsigned int, sizeof(la), alen);
876 memcpy(&la, addr, len);
883 if (sk->sk_state != BT_OPEN) {
888 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
889 !capable(CAP_NET_BIND_SERVICE)) {
894 write_lock_bh(&l2cap_sk_list.lock);
896 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
899 /* Save source address */
900 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
901 l2cap_pi(sk)->psm = la.l2_psm;
902 l2cap_pi(sk)->sport = la.l2_psm;
903 sk->sk_state = BT_BOUND;
905 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
906 __le16_to_cpu(la.l2_psm) == 0x0003)
907 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
910 write_unlock_bh(&l2cap_sk_list.lock);
917 static int l2cap_do_connect(struct sock *sk)
919 bdaddr_t *src = &bt_sk(sk)->src;
920 bdaddr_t *dst = &bt_sk(sk)->dst;
921 struct l2cap_conn *conn;
922 struct hci_conn *hcon;
923 struct hci_dev *hdev;
927 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
930 hdev = hci_get_route(dst, src);
932 return -EHOSTUNREACH;
934 hci_dev_lock_bh(hdev);
938 if (sk->sk_type == SOCK_RAW) {
939 switch (l2cap_pi(sk)->sec_level) {
940 case BT_SECURITY_HIGH:
941 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
943 case BT_SECURITY_MEDIUM:
944 auth_type = HCI_AT_DEDICATED_BONDING;
947 auth_type = HCI_AT_NO_BONDING;
950 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
951 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
952 auth_type = HCI_AT_NO_BONDING_MITM;
954 auth_type = HCI_AT_NO_BONDING;
956 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
957 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
959 switch (l2cap_pi(sk)->sec_level) {
960 case BT_SECURITY_HIGH:
961 auth_type = HCI_AT_GENERAL_BONDING_MITM;
963 case BT_SECURITY_MEDIUM:
964 auth_type = HCI_AT_GENERAL_BONDING;
967 auth_type = HCI_AT_NO_BONDING;
972 hcon = hci_connect(hdev, ACL_LINK, dst,
973 l2cap_pi(sk)->sec_level, auth_type);
977 conn = l2cap_conn_add(hcon, 0);
985 /* Update source addr of the socket */
986 bacpy(src, conn->src);
988 l2cap_chan_add(conn, sk, NULL);
990 sk->sk_state = BT_CONNECT;
991 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
993 if (hcon->state == BT_CONNECTED) {
994 if (sk->sk_type != SOCK_SEQPACKET) {
995 l2cap_sock_clear_timer(sk);
996 sk->sk_state = BT_CONNECTED;
1002 hci_dev_unlock_bh(hdev);
1007 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1009 struct sock *sk = sock->sk;
1010 struct sockaddr_l2 la;
1013 BT_DBG("sk %p", sk);
1015 if (!addr || alen < sizeof(addr->sa_family) ||
1016 addr->sa_family != AF_BLUETOOTH)
1019 memset(&la, 0, sizeof(la));
1020 len = min_t(unsigned int, sizeof(la), alen);
1021 memcpy(&la, addr, len);
1028 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
1033 switch (l2cap_pi(sk)->mode) {
1034 case L2CAP_MODE_BASIC:
1036 case L2CAP_MODE_ERTM:
1037 case L2CAP_MODE_STREAMING:
1046 switch (sk->sk_state) {
1050 /* Already connecting */
1054 /* Already connected */
1067 /* Set destination address and psm */
1068 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1069 l2cap_pi(sk)->psm = la.l2_psm;
1071 err = l2cap_do_connect(sk);
1076 err = bt_sock_wait_state(sk, BT_CONNECTED,
1077 sock_sndtimeo(sk, flags & O_NONBLOCK));
1083 static int l2cap_sock_listen(struct socket *sock, int backlog)
1085 struct sock *sk = sock->sk;
1088 BT_DBG("sk %p backlog %d", sk, backlog);
1092 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1097 switch (l2cap_pi(sk)->mode) {
1098 case L2CAP_MODE_BASIC:
1100 case L2CAP_MODE_ERTM:
1101 case L2CAP_MODE_STREAMING:
1110 if (!l2cap_pi(sk)->psm) {
1111 bdaddr_t *src = &bt_sk(sk)->src;
1116 write_lock_bh(&l2cap_sk_list.lock);
1118 for (psm = 0x1001; psm < 0x1100; psm += 2)
1119 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1120 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1121 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1126 write_unlock_bh(&l2cap_sk_list.lock);
1132 sk->sk_max_ack_backlog = backlog;
1133 sk->sk_ack_backlog = 0;
1134 sk->sk_state = BT_LISTEN;
1141 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1143 DECLARE_WAITQUEUE(wait, current);
1144 struct sock *sk = sock->sk, *nsk;
1148 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1150 if (sk->sk_state != BT_LISTEN) {
1155 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1157 BT_DBG("sk %p timeo %ld", sk, timeo);
1159 /* Wait for an incoming connection. (wake-one). */
1160 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1161 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1162 set_current_state(TASK_INTERRUPTIBLE);
1169 timeo = schedule_timeout(timeo);
1170 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1172 if (sk->sk_state != BT_LISTEN) {
1177 if (signal_pending(current)) {
1178 err = sock_intr_errno(timeo);
1182 set_current_state(TASK_RUNNING);
1183 remove_wait_queue(sk_sleep(sk), &wait);
1188 newsock->state = SS_CONNECTED;
1190 BT_DBG("new socket %p", nsk);
1197 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1199 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1200 struct sock *sk = sock->sk;
1202 BT_DBG("sock %p, sk %p", sock, sk);
1204 addr->sa_family = AF_BLUETOOTH;
1205 *len = sizeof(struct sockaddr_l2);
1208 la->l2_psm = l2cap_pi(sk)->psm;
1209 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1210 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1212 la->l2_psm = l2cap_pi(sk)->sport;
1213 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1214 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1220 static void l2cap_monitor_timeout(unsigned long arg)
1222 struct sock *sk = (void *) arg;
1226 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1227 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1232 l2cap_pi(sk)->retry_count++;
1233 __mod_monitor_timer();
1235 control = L2CAP_CTRL_POLL;
1236 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1240 static void l2cap_retrans_timeout(unsigned long arg)
1242 struct sock *sk = (void *) arg;
1246 l2cap_pi(sk)->retry_count = 1;
1247 __mod_monitor_timer();
1249 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1251 control = L2CAP_CTRL_POLL;
1252 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1256 static void l2cap_drop_acked_frames(struct sock *sk)
1258 struct sk_buff *skb;
1260 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1261 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1264 skb = skb_dequeue(TX_QUEUE(sk));
1267 l2cap_pi(sk)->unacked_frames--;
1270 if (!l2cap_pi(sk)->unacked_frames)
1271 del_timer(&l2cap_pi(sk)->retrans_timer);
1276 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1278 struct l2cap_pinfo *pi = l2cap_pi(sk);
1281 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1283 err = hci_send_acl(pi->conn->hcon, skb, 0);
1290 static int l2cap_streaming_send(struct sock *sk)
1292 struct sk_buff *skb, *tx_skb;
1293 struct l2cap_pinfo *pi = l2cap_pi(sk);
1297 while ((skb = sk->sk_send_head)) {
1298 tx_skb = skb_clone(skb, GFP_ATOMIC);
1300 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1301 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1302 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1304 if (pi->fcs == L2CAP_FCS_CRC16) {
1305 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1306 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1309 err = l2cap_do_send(sk, tx_skb);
1311 l2cap_send_disconn_req(pi->conn, sk);
1315 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1317 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1318 sk->sk_send_head = NULL;
1320 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1322 skb = skb_dequeue(TX_QUEUE(sk));
1328 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1330 struct l2cap_pinfo *pi = l2cap_pi(sk);
1331 struct sk_buff *skb, *tx_skb;
1335 skb = skb_peek(TX_QUEUE(sk));
1337 if (bt_cb(skb)->tx_seq != tx_seq) {
1338 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1340 skb = skb_queue_next(TX_QUEUE(sk), skb);
1344 if (pi->remote_max_tx &&
1345 bt_cb(skb)->retries == pi->remote_max_tx) {
1346 l2cap_send_disconn_req(pi->conn, sk);
1350 tx_skb = skb_clone(skb, GFP_ATOMIC);
1351 bt_cb(skb)->retries++;
1352 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1353 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1354 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1355 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1357 if (pi->fcs == L2CAP_FCS_CRC16) {
1358 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1359 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1362 err = l2cap_do_send(sk, tx_skb);
1364 l2cap_send_disconn_req(pi->conn, sk);
1372 static int l2cap_ertm_send(struct sock *sk)
1374 struct sk_buff *skb, *tx_skb;
1375 struct l2cap_pinfo *pi = l2cap_pi(sk);
1379 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1382 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1383 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1385 if (pi->remote_max_tx &&
1386 bt_cb(skb)->retries == pi->remote_max_tx) {
1387 l2cap_send_disconn_req(pi->conn, sk);
1391 tx_skb = skb_clone(skb, GFP_ATOMIC);
1393 bt_cb(skb)->retries++;
1395 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1396 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1397 control |= L2CAP_CTRL_FINAL;
1398 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1400 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1401 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1402 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1405 if (pi->fcs == L2CAP_FCS_CRC16) {
1406 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1407 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1410 err = l2cap_do_send(sk, tx_skb);
1412 l2cap_send_disconn_req(pi->conn, sk);
1415 __mod_retrans_timer();
1417 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1418 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1420 pi->unacked_frames++;
1423 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1424 sk->sk_send_head = NULL;
1426 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1434 static int l2cap_send_ack(struct l2cap_pinfo *pi)
1436 struct sock *sk = (struct sock *)pi;
1439 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1441 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1442 control |= L2CAP_SUPER_RCV_NOT_READY;
1443 return l2cap_send_sframe(pi, control);
1444 } else if (l2cap_ertm_send(sk) == 0) {
1445 control |= L2CAP_SUPER_RCV_READY;
1446 return l2cap_send_sframe(pi, control);
1451 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1453 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1454 struct sk_buff **frag;
1457 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1464 /* Continuation fragments (no L2CAP header) */
1465 frag = &skb_shinfo(skb)->frag_list;
1467 count = min_t(unsigned int, conn->mtu, len);
1469 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1472 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1478 frag = &(*frag)->next;
1484 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1486 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1487 struct sk_buff *skb;
1488 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1489 struct l2cap_hdr *lh;
1491 BT_DBG("sk %p len %d", sk, (int)len);
1493 count = min_t(unsigned int, (conn->mtu - hlen), len);
1494 skb = bt_skb_send_alloc(sk, count + hlen,
1495 msg->msg_flags & MSG_DONTWAIT, &err);
1497 return ERR_PTR(-ENOMEM);
1499 /* Create L2CAP header */
1500 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1501 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1502 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1503 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1505 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1506 if (unlikely(err < 0)) {
1508 return ERR_PTR(err);
1513 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1515 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1516 struct sk_buff *skb;
1517 int err, count, hlen = L2CAP_HDR_SIZE;
1518 struct l2cap_hdr *lh;
1520 BT_DBG("sk %p len %d", sk, (int)len);
1522 count = min_t(unsigned int, (conn->mtu - hlen), len);
1523 skb = bt_skb_send_alloc(sk, count + hlen,
1524 msg->msg_flags & MSG_DONTWAIT, &err);
1526 return ERR_PTR(-ENOMEM);
1528 /* Create L2CAP header */
1529 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1530 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1531 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1533 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1534 if (unlikely(err < 0)) {
1536 return ERR_PTR(err);
/*
 * Build one ERTM/streaming I-frame PDU.  Layout: L2CAP header, 16-bit
 * control field, optional 16-bit SDU length (only for SAR "start"
 * frames, when @sdulen != 0), payload, and an FCS placeholder when
 * CRC16 checking is configured.  @control carries the SAR bits/seq;
 * the real FCS value is filled in later at transmit time.
 * NOTE(review): excerpt — skb NULL check, the hlen adjustments for
 * sdulen/FCS, and the final "return skb" are elided from this view.
 */
1541 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1543 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1544 struct sk_buff *skb;
/* +2 for the control field; FCS/sdulen add more below (elided). */
1545 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1546 struct l2cap_hdr *lh;
1548 BT_DBG("sk %p len %d", sk, (int)len);
1553 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1556 count = min_t(unsigned int, (conn->mtu - hlen), len);
1557 skb = bt_skb_send_alloc(sk, count + hlen,
1558 msg->msg_flags & MSG_DONTWAIT, &err);
1560 return ERR_PTR(-ENOMEM);
1562 /* Create L2CAP header */
1563 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1564 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
/* lh->len covers control + (sdulen) + payload, i.e. everything past
 * the basic header. */
1565 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1566 put_unaligned_le16(control, skb_put(skb, 2));
/* SDU length field is only present on SAR start frames. */
1568 put_unaligned_le16(sdulen, skb_put(skb, 2));
1570 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1571 if (unlikely(err < 0)) {
1573 return ERR_PTR(err);
/* Reserve space for the CRC16 FCS; computed just before sending. */
1576 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1577 put_unaligned_le16(0, skb_put(skb, 2));
/* Fresh frame: no retransmissions yet. */
1579 bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than max_pdu_size into a chain of I-frames
 * (SAR start / continue / end) on a temporary queue, then splice the
 * whole chain onto the socket TX queue in one step so a partially
 * built SDU is never visible to the transmit path.
 * Returns the total size queued (presumably — the final return is
 * elided from this excerpt) or a negative errno.
 */
1583 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1585 struct l2cap_pinfo *pi = l2cap_pi(sk);
1586 struct sk_buff *skb;
1587 struct sk_buff_head sar_queue;
1591 __skb_queue_head_init(&sar_queue);
/* First segment: SAR "start", carries the total SDU length. */
1592 control = L2CAP_SDU_START;
1593 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
1595 return PTR_ERR(skb);
1597 __skb_queue_tail(&sar_queue, skb);
1598 len -= pi->max_pdu_size;
1599 size +=pi->max_pdu_size;
/* Loop (elided): emit CONTINUE segments while more than one PDU of
 * data remains, then a final END segment. */
1605 if (len > pi->max_pdu_size) {
1606 control |= L2CAP_SDU_CONTINUE;
1607 buflen = pi->max_pdu_size;
1609 control |= L2CAP_SDU_END;
/* Non-start segments never carry an SDU length (sdulen == 0). */
1613 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* On failure drop everything queued so far — all or nothing. */
1615 skb_queue_purge(&sar_queue);
1616 return PTR_ERR(skb);
1619 __skb_queue_tail(&sar_queue, skb);
1624 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
/* If nothing was pending, start sending from the first new frame. */
1625 if (sk->sk_send_head == NULL)
1626 sk->sk_send_head = sar_queue.next;
/*
 * sendmsg() entry point for L2CAP sockets.  Dispatches on socket type
 * and channel mode: connectionless (SOCK_DGRAM) PDUs, basic-mode
 * B-frames, or ERTM/streaming I-frames (segmenting the SDU when it
 * exceeds max_pdu_size).  Rejects MSG_OOB and requires BT_CONNECTED.
 * NOTE(review): excerpt — lock_sock/release_sock, error branches and
 * several `break`/`goto done` lines are elided from this view.
 */
1631 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1633 struct sock *sk = sock->sk;
1634 struct l2cap_pinfo *pi = l2cap_pi(sk);
1635 struct sk_buff *skb;
1639 BT_DBG("sock %p, sk %p", sock, sk);
1641 err = sock_error(sk);
/* L2CAP has no out-of-band data. */
1645 if (msg->msg_flags & MSG_OOB)
1650 if (sk->sk_state != BT_CONNECTED) {
1655 /* Connectionless channel */
1656 if (sk->sk_type == SOCK_DGRAM) {
1657 skb = l2cap_create_connless_pdu(sk, msg, len)
1661 err = l2cap_do_send(sk, skb);
1666 case L2CAP_MODE_BASIC:
1667 /* Check outgoing MTU */
1668 if (len > pi->omtu) {
1673 /* Create a basic PDU */
1674 skb = l2cap_create_basic_pdu(sk, msg, len);
1680 err = l2cap_do_send(sk, skb);
1685 case L2CAP_MODE_ERTM:
1686 case L2CAP_MODE_STREAMING:
1687 /* Entire SDU fits into one PDU */
1688 if (len <= pi->max_pdu_size) {
1689 control = L2CAP_SDU_UNSEGMENTED;
1690 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1695 __skb_queue_tail(TX_QUEUE(sk), skb);
1696 if (sk->sk_send_head == NULL)
1697 sk->sk_send_head = skb;
1699 /* Segment SDU into multiples PDUs */
1700 err = l2cap_sar_segment_sdu(sk, msg, len);
/* Streaming mode sends immediately; ERTM goes through the
 * retransmission state machine. */
1705 if (pi->mode == L2CAP_MODE_STREAMING)
1706 err = l2cap_streaming_send(sk);
1708 err = l2cap_ertm_send(sk);
1715 BT_DBG("bad state %1.1x", pi->mode);
/*
 * recvmsg() entry point.  For a deferred-setup connection still in
 * BT_CONNECT2, the first read acts as the "accept": it moves the
 * channel to BT_CONFIG and sends the pending L2CAP connect response.
 * Otherwise it simply delegates to the generic bt_sock_recvmsg().
 */
1724 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1726 struct sock *sk = sock->sk;
1730 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1731 struct l2cap_conn_rsp rsp;
1733 sk->sk_state = BT_CONFIG;
/* In the response, our dcid is the remote's source CID and vice
 * versa — fields are swapped relative to local naming. */
1735 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1736 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1737 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1738 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* ident was saved from the original CONN_REQ. */
1739 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1740 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1748 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/*
 * Legacy SOL_L2CAP setsockopt handler: L2CAP_OPTIONS (imtu/omtu/mode/
 * fcs via struct l2cap_options) and L2CAP_LM link-mode flags, which
 * map onto the newer sec_level / role_switch / force_reliable fields.
 * NOTE(review): excerpt — the switch(optname) lines, lock handling
 * and the error assignments are elided from this view.
 */
1751 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1753 struct sock *sk = sock->sk;
1754 struct l2cap_options opts;
1758 BT_DBG("sk %p", sk);
/* Pre-fill with current values so a short copy_from_user leaves
 * unspecified fields unchanged. */
1764 opts.imtu = l2cap_pi(sk)->imtu;
1765 opts.omtu = l2cap_pi(sk)->omtu;
1766 opts.flush_to = l2cap_pi(sk)->flush_to;
1767 opts.mode = l2cap_pi(sk)->mode;
1768 opts.fcs = l2cap_pi(sk)->fcs;
1770 len = min_t(unsigned int, sizeof(opts), optlen);
1771 if (copy_from_user((char *) &opts, optval, len)) {
1776 l2cap_pi(sk)->imtu = opts.imtu;
1777 l2cap_pi(sk)->omtu = opts.omtu;
1778 l2cap_pi(sk)->mode = opts.mode;
1779 l2cap_pi(sk)->fcs = opts.fcs;
1783 if (get_user(opt, (u32 __user *) optval)) {
/* Map legacy link-mode bits to security levels; later checks win
 * (SECURE overrides ENCRYPT overrides AUTH). */
1788 if (opt & L2CAP_LM_AUTH)
1789 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1790 if (opt & L2CAP_LM_ENCRYPT)
1791 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1792 if (opt & L2CAP_LM_SECURE)
1793 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1795 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1796 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/*
 * setsockopt() dispatcher.  SOL_L2CAP goes to the legacy handler;
 * SOL_BLUETOOTH handles BT_SECURITY (struct bt_security, restricted
 * to SEQPACKET/RAW sockets) and BT_DEFER_SETUP (only valid before the
 * socket is connected, i.e. BOUND or LISTEN state).
 * NOTE(review): excerpt — switch(optname) lines, error assignments
 * and lock handling are elided from this view.
 */
1808 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1810 struct sock *sk = sock->sk;
1811 struct bt_security sec;
1815 BT_DBG("sk %p", sk);
1817 if (level == SOL_L2CAP)
1818 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1820 if (level != SOL_BLUETOOTH)
1821 return -ENOPROTOOPT;
/* Security level only makes sense for connection-oriented or raw
 * sockets. */
1827 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1832 sec.level = BT_SECURITY_LOW;
1834 len = min_t(unsigned int, sizeof(sec), optlen);
1835 if (copy_from_user((char *) &sec, optval, len)) {
1840 if (sec.level < BT_SECURITY_LOW ||
1841 sec.level > BT_SECURITY_HIGH) {
1846 l2cap_pi(sk)->sec_level = sec.level;
1849 case BT_DEFER_SETUP:
1850 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1855 if (get_user(opt, (u32 __user *) optval)) {
1860 bt_sk(sk)->defer_setup = opt;
/*
 * Legacy SOL_L2CAP getsockopt handler: returns L2CAP_OPTIONS, a
 * synthesized L2CAP_LM bitmask derived from sec_level/role_switch/
 * force_reliable, or L2CAP_CONNINFO (HCI handle + device class) for
 * connected channels.
 * NOTE(review): excerpt — switch(optname), `break` statements and
 * error returns are elided from this view.
 */
1872 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1874 struct sock *sk = sock->sk;
1875 struct l2cap_options opts;
1876 struct l2cap_conninfo cinfo;
1880 BT_DBG("sk %p", sk);
1882 if (get_user(len, optlen))
1889 opts.imtu = l2cap_pi(sk)->imtu;
1890 opts.omtu = l2cap_pi(sk)->omtu;
1891 opts.flush_to = l2cap_pi(sk)->flush_to;
1892 opts.mode = l2cap_pi(sk)->mode;
1893 opts.fcs = l2cap_pi(sk)->fcs;
/* Copy at most what the caller asked for. */
1895 len = min_t(unsigned int, len, sizeof(opts));
1896 if (copy_to_user(optval, (char *) &opts, len))
/* Reverse mapping of sec_level back to legacy L2CAP_LM bits. */
1902 switch (l2cap_pi(sk)->sec_level) {
1903 case BT_SECURITY_LOW:
1904 opt = L2CAP_LM_AUTH;
1906 case BT_SECURITY_MEDIUM:
1907 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1909 case BT_SECURITY_HIGH:
1910 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1918 if (l2cap_pi(sk)->role_switch)
1919 opt |= L2CAP_LM_MASTER;
1921 if (l2cap_pi(sk)->force_reliable)
1922 opt |= L2CAP_LM_RELIABLE;
1924 if (put_user(opt, (u32 __user *) optval))
1928 case L2CAP_CONNINFO:
/* CONNINFO is valid while connected, or in BT_CONNECT2 when setup
 * was deferred (the ACL already exists). */
1929 if (sk->sk_state != BT_CONNECTED &&
1930 !(sk->sk_state == BT_CONNECT2 &&
1931 bt_sk(sk)->defer_setup)) {
1936 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1937 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1939 len = min_t(unsigned int, len, sizeof(cinfo));
1940 if (copy_to_user(optval, (char *) &cinfo, len))
/*
 * getsockopt() dispatcher, mirror of l2cap_sock_setsockopt():
 * SOL_L2CAP → legacy handler; SOL_BLUETOOTH → BT_SECURITY and
 * BT_DEFER_SETUP readers with the same type/state restrictions.
 * NOTE(review): excerpt — switch(optname) lines and error paths are
 * elided from this view.
 */
1954 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1956 struct sock *sk = sock->sk;
1957 struct bt_security sec;
1960 BT_DBG("sk %p", sk);
1962 if (level == SOL_L2CAP)
1963 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1965 if (level != SOL_BLUETOOTH)
1966 return -ENOPROTOOPT;
1968 if (get_user(len, optlen))
1975 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1980 sec.level = l2cap_pi(sk)->sec_level;
1982 len = min_t(unsigned int, len, sizeof(sec));
1983 if (copy_to_user(optval, (char *) &sec, len))
1988 case BT_DEFER_SETUP:
1989 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1994 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/*
 * shutdown() handler.  Idempotent: only acts the first time, marking
 * both directions shut, cancelling the channel timer and starting the
 * L2CAP disconnect sequence.  With SO_LINGER set, blocks until the
 * channel reaches BT_CLOSED (up to sk_lingertime).
 */
2008 static int l2cap_sock_shutdown(struct socket *sock, int how)
2010 struct sock *sk = sock->sk;
2013 BT_DBG("sock %p, sk %p", sock, sk);
2019 if (!sk->sk_shutdown) {
2020 sk->sk_shutdown = SHUTDOWN_MASK;
2021 l2cap_sock_clear_timer(sk);
2022 __l2cap_sock_close(sk, 0);
2024 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2025 err = bt_sock_wait_state(sk, BT_CLOSED,
/*
 * Final release of the socket: perform a full shutdown (both
 * directions) and then kill the sock, dropping the last reference.
 */
2032 static int l2cap_sock_release(struct socket *sock)
2034 struct sock *sk = sock->sk;
2037 BT_DBG("sock %p, sk %p", sock, sk);
2042 err = l2cap_sock_shutdown(sock, 2);
2045 l2cap_sock_kill(sk);
/*
 * Called once configuration completes.  Clears config state and the
 * channel timer, then wakes whoever is waiting: the connect()er for
 * an outgoing channel, or the accept()er (parent listener) for an
 * incoming one.  The two paths are selected on `parent` (branching
 * lines elided in this excerpt).
 */
2049 static void l2cap_chan_ready(struct sock *sk)
2051 struct sock *parent = bt_sk(sk)->parent;
2053 BT_DBG("sk %p, parent %p", sk, parent);
2055 l2cap_pi(sk)->conf_state = 0;
2056 l2cap_sock_clear_timer(sk);
2059 /* Outgoing channel.
2060 * Wake up socket sleeping on connect.
2062 sk->sk_state = BT_CONNECTED;
2063 sk->sk_state_change(sk);
2065 /* Incoming channel.
2066 * Wake up socket sleeping on accept.
2068 parent->sk_data_ready(parent, 0);
/* Copy frame to all raw sockets on that connection */
/*
 * Walks the connection's channel list under the read lock and clones
 * the skb into every SOCK_RAW socket's receive queue (skipping the
 * originating socket — check elided in this excerpt).  GFP_ATOMIC is
 * required: this runs in the receive path, not process context.
 */
2073 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2075 struct l2cap_chan_list *l = &conn->chan_list;
2076 struct sk_buff *nskb;
2079 BT_DBG("conn %p", conn);
2081 read_lock(&l->lock);
2082 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2083 if (sk->sk_type != SOCK_RAW)
2086 /* Don't send frame to the socket it came from */
2089 nskb = skb_clone(skb, GFP_ATOMIC);
/* On queue failure the clone must be freed (line elided). */
2093 if (sock_queue_rcv_skb(sk, nskb))
2096 read_unlock(&l->lock);
/* ---- L2CAP signalling commands ---- */
/*
 * Allocate and fill an skb for a signalling-channel command: L2CAP
 * header (CID 0x0001), command header (code/ident/dlen), then the
 * payload.  Payloads larger than the connection MTU are split into
 * header-less continuation fragments chained on frag_list.
 * NOTE(review): excerpt — allocation NULL checks, the fragment loop
 * bounds and the final "return skb" are elided from this view.
 */
2100 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2101 u8 code, u8 ident, u16 dlen, void *data)
2103 struct sk_buff *skb, **frag;
2104 struct l2cap_cmd_hdr *cmd;
2105 struct l2cap_hdr *lh;
2108 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2109 conn, code, ident, dlen);
2111 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2112 count = min_t(unsigned int, conn->mtu, len);
2114 skb = bt_skb_alloc(count, GFP_ATOMIC);
2118 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2119 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2120 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2122 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2125 cmd->len = cpu_to_le16(dlen);
/* First fragment: whatever payload fits after both headers. */
2128 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2129 memcpy(skb_put(skb, count), data, count);
2135 /* Continuation fragments (no L2CAP header) */
2136 frag = &skb_shinfo(skb)->frag_list;
2138 count = min_t(unsigned int, conn->mtu, len);
2140 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2144 memcpy(skb_put(*frag, count), data, count);
2149 frag = &(*frag)->next;
/*
 * Decode one configuration option at *ptr (type/len header followed
 * by a 1/2/4-byte little-endian value, or an opaque blob returned by
 * pointer for other lengths).  Advances *ptr (elided here) and
 * returns the total bytes consumed so the caller can walk the list.
 */
2159 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2161 struct l2cap_conf_opt *opt = *ptr;
2164 len = L2CAP_CONF_OPT_SIZE + opt->len;
/* switch on opt->len (elided): pick the right-width load. */
2172 *val = *((u8 *) opt->val);
2176 *val = __le16_to_cpu(*((__le16 *) opt->val));
2180 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Other sizes: hand back a pointer to the raw option payload. */
2184 *val = (unsigned long) opt->val;
2188 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option to the buffer at *ptr, encoding the
 * value little-endian at 1/2/4-byte widths or memcpy'ing a blob (for
 * other lengths `val` is interpreted as a pointer).  Advances *ptr
 * past the option so calls can be chained.
 */
2192 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2194 struct l2cap_conf_opt *opt = *ptr;
2196 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
/* switch on len (elided): store at the matching width. */
2203 *((u8 *) opt->val) = val;
2207 *((__le16 *) opt->val) = cpu_to_le16(val);
2211 *((__le32 *) opt->val) = cpu_to_le32(val);
/* Other sizes: val is really a pointer to the payload to copy. */
2215 memcpy(opt->val, (void *) val, len);
2219 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * Reset the per-channel ERTM state machine: zero all sequence/ack
 * counters, arm (but don't start) the retransmission and monitor
 * timers, and initialise the selective-reject reassembly queue.
 * Called when an ERTM channel transitions to connected.
 */
2222 static inline void l2cap_ertm_init(struct sock *sk)
2224 l2cap_pi(sk)->expected_ack_seq = 0;
2225 l2cap_pi(sk)->unacked_frames = 0;
2226 l2cap_pi(sk)->buffer_seq = 0;
2227 l2cap_pi(sk)->num_to_ack = 0;
2228 l2cap_pi(sk)->frames_sent = 0;
/* Timers fire with the socket pointer as context. */
2230 setup_timer(&l2cap_pi(sk)->retrans_timer,
2231 l2cap_retrans_timeout, (unsigned long) sk);
2232 setup_timer(&l2cap_pi(sk)->monitor_timer,
2233 l2cap_monitor_timeout, (unsigned long) sk);
2235 __skb_queue_head_init(SREJ_QUEUE(sk));
/*
 * Return non-zero when @mode (ERTM or streaming) is supported by both
 * sides: the remote feature mask AND our local mask.  The local mask
 * only advertises ERTM/streaming when the enable_ertm module param is
 * set (the guard line is elided in this excerpt).
 */
2238 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2240 u32 local_feat_mask = l2cap_feat_mask;
2242 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2245 case L2CAP_MODE_ERTM:
2246 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2247 case L2CAP_MODE_STREAMING:
2248 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/*
 * Pick the channel mode to actually negotiate: keep the requested
 * ERTM/streaming mode if the peer supports it, otherwise fall back
 * to basic mode (the "return mode" success line is elided here).
 */
2254 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2257 case L2CAP_MODE_STREAMING:
2258 case L2CAP_MODE_ERTM:
2259 if (l2cap_mode_supported(mode, remote_feat_mask))
2263 return L2CAP_MODE_BASIC;
/*
 * Build the outgoing configuration request for this channel into
 * @data: MTU option in basic mode, or an RFC (retransmission & flow
 * control) option plus optional FCS option for ERTM/streaming.  On
 * the first request the mode may be downgraded via
 * l2cap_select_mode() if the peer lacks ERTM/streaming support.
 * Returns the request length (final return elided in this excerpt).
 */
2267 static int l2cap_build_conf_req(struct sock *sk, void *data)
2269 struct l2cap_pinfo *pi = l2cap_pi(sk);
2270 struct l2cap_conf_req *req = data;
2271 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2272 void *ptr = req->data;
2274 BT_DBG("sk %p", sk);
/* Mode selection only happens on the very first config exchange. */
2276 if (pi->num_conf_req || pi->num_conf_rsp)
2280 case L2CAP_MODE_STREAMING:
2281 case L2CAP_MODE_ERTM:
/* Mode was set explicitly by the user: it is mandatory, so
 * disconnect rather than fall back if unsupported. */
2282 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2283 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2284 l2cap_send_disconn_req(pi->conn, sk);
2287 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2293 case L2CAP_MODE_BASIC:
/* Only send an MTU option when differing from the default. */
2294 if (pi->imtu != L2CAP_DEFAULT_MTU)
2295 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2298 case L2CAP_MODE_ERTM:
2299 rfc.mode = L2CAP_MODE_ERTM;
2300 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2301 rfc.max_transmit = max_transmit;
/* Timeouts are filled in by the acceptor per the spec. */
2302 rfc.retrans_timeout = 0;
2303 rfc.monitor_timeout = 0;
2304 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Cap the PDU size so header+control+FCS still fit in the ACL MTU
 * (10 = basic hdr + control + sdulen + FCS overhead). */
2305 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2306 rfc.max_pdu_size = pi->conn->mtu - 10;
2308 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2309 sizeof(rfc), (unsigned long) &rfc);
2311 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
/* Request "no FCS" only when we want it off and the peer already
 * asked for no FCS too. */
2314 if (pi->fcs == L2CAP_FCS_NONE ||
2315 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2316 pi->fcs = L2CAP_FCS_NONE;
2317 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2321 case L2CAP_MODE_STREAMING:
2322 rfc.mode = L2CAP_MODE_STREAMING;
2324 rfc.max_transmit = 0;
2325 rfc.retrans_timeout = 0;
2326 rfc.monitor_timeout = 0;
2327 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2328 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2329 rfc.max_pdu_size = pi->conn->mtu - 10;
2331 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2332 sizeof(rfc), (unsigned long) &rfc);
2334 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2337 if (pi->fcs == L2CAP_FCS_NONE ||
2338 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2339 pi->fcs = L2CAP_FCS_NONE;
2340 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2345 /* FIXME: Need actual value of the flush timeout */
2346 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2347 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2349 req->dcid = cpu_to_le16(pi->dcid);
2350 req->flags = cpu_to_le16(0);
/*
 * Parse the peer's accumulated configuration request (pi->conf_req /
 * conf_len) and build our response into @data.  Unknown non-hint
 * options yield L2CAP_CONF_UNKNOWN with their types echoed back;
 * otherwise the requested MTU/RFC/FCS options are validated and the
 * accepted values are written into pi->* plus the response body.
 * Returns the response length, or -ECONNREFUSED when a mandatory
 * mode cannot be honoured.  (Final return elided in this excerpt.)
 */
2355 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2357 struct l2cap_pinfo *pi = l2cap_pi(sk);
2358 struct l2cap_conf_rsp *rsp = data;
2359 void *ptr = rsp->data;
2360 void *req = pi->conf_req;
2361 int len = pi->conf_len;
2362 int type, hint, olen;
2364 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2365 u16 mtu = L2CAP_DEFAULT_MTU;
2366 u16 result = L2CAP_CONF_SUCCESS;
2368 BT_DBG("sk %p", sk);
/* Walk every option in the request buffer. */
2370 while (len >= L2CAP_CONF_OPT_SIZE) {
2371 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint bit set = "may be ignored"; unknown hints are skipped. */
2373 hint = type & L2CAP_CONF_HINT;
2374 type &= L2CAP_CONF_MASK;
2377 case L2CAP_CONF_MTU:
2381 case L2CAP_CONF_FLUSH_TO:
2385 case L2CAP_CONF_QOS:
2388 case L2CAP_CONF_RFC:
2389 if (olen == sizeof(rfc))
2390 memcpy(&rfc, (void *) val, olen);
2393 case L2CAP_CONF_FCS:
2394 if (val == L2CAP_FCS_NONE)
2395 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown mandatory option: reject and list its type. */
2403 result = L2CAP_CONF_UNKNOWN;
2404 *((u8 *) ptr++) = type;
/* Mode resolution, same first-exchange-only rule as build_conf_req. */
2409 if (pi->num_conf_rsp || pi->num_conf_req)
2413 case L2CAP_MODE_STREAMING:
2414 case L2CAP_MODE_ERTM:
2415 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2416 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2417 return -ECONNREFUSED;
2420 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2425 if (pi->mode != rfc.mode) {
/* Propose our mode back; refuse outright after the second
 * failed negotiation round. */
2426 result = L2CAP_CONF_UNACCEPT;
2427 rfc.mode = pi->mode;
2429 if (pi->num_conf_rsp == 1)
2430 return -ECONNREFUSED;
2432 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2433 sizeof(rfc), (unsigned long) &rfc);
2437 if (result == L2CAP_CONF_SUCCESS) {
2438 /* Configure output options and let the other side know
2439 * which ones we don't like. */
/* Spec minimum MTU (48 for connection-oriented channels). */
2441 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2442 result = L2CAP_CONF_UNACCEPT;
2445 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2447 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2450 case L2CAP_MODE_BASIC:
2451 pi->fcs = L2CAP_FCS_NONE;
2452 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2455 case L2CAP_MODE_ERTM:
/* Adopt the peer's window/retry limits; we pick the timeouts. */
2456 pi->remote_tx_win = rfc.txwin_size;
2457 pi->remote_max_tx = rfc.max_transmit;
2458 pi->max_pdu_size = rfc.max_pdu_size;
2460 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
2461 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2463 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2465 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2466 sizeof(rfc), (unsigned long) &rfc);
2470 case L2CAP_MODE_STREAMING:
2471 pi->remote_tx_win = rfc.txwin_size;
2472 pi->max_pdu_size = rfc.max_pdu_size;
2474 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2476 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2477 sizeof(rfc), (unsigned long) &rfc);
/* Any other mode: unacceptable, answer with our own. */
2482 result = L2CAP_CONF_UNACCEPT;
2484 memset(&rfc, 0, sizeof(rfc));
2485 rfc.mode = pi->mode;
2488 if (result == L2CAP_CONF_SUCCESS)
2489 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2491 rsp->scid = cpu_to_le16(pi->dcid);
2492 rsp->result = cpu_to_le16(result);
2493 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse the peer's configuration *response* and build the follow-up
 * request into @data, adjusting MTU/flush-timeout/RFC to what the
 * peer will accept.  On success commits the negotiated ERTM or
 * streaming parameters into pi->*.  Returns the new request length
 * via the caller (final return elided); -ECONNREFUSED if the peer
 * tries to change a mode we declared mandatory.
 */
2498 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2500 struct l2cap_pinfo *pi = l2cap_pi(sk);
2501 struct l2cap_conf_req *req = data;
2502 void *ptr = req->data;
2505 struct l2cap_conf_rfc rfc;
2507 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2509 while (len >= L2CAP_CONF_OPT_SIZE) {
2510 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2513 case L2CAP_CONF_MTU:
/* Peer offered too small an MTU: counter with the spec minimum. */
2514 if (val < L2CAP_DEFAULT_MIN_MTU) {
2515 *result = L2CAP_CONF_UNACCEPT;
2516 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2519 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2522 case L2CAP_CONF_FLUSH_TO:
2524 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2528 case L2CAP_CONF_RFC:
2529 if (olen == sizeof(rfc))
2530 memcpy(&rfc, (void *)val, olen);
/* Mode is locked once STATE2_DEVICE is set — peer may not
 * renegotiate it. */
2532 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2533 rfc.mode != pi->mode)
2534 return -ECONNREFUSED;
2536 pi->mode = rfc.mode;
2539 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2540 sizeof(rfc), (unsigned long) &rfc);
2545 if (*result == L2CAP_CONF_SUCCESS) {
/* Commit the negotiated ERTM/streaming parameters. */
2547 case L2CAP_MODE_ERTM:
2548 pi->remote_tx_win = rfc.txwin_size;
2549 pi->retrans_timeout = rfc.retrans_timeout;
2550 pi->monitor_timeout = rfc.monitor_timeout;
2551 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2553 case L2CAP_MODE_STREAMING:
2554 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2559 req->dcid = cpu_to_le16(pi->dcid);
2560 req->flags = cpu_to_le16(0x0000);
/*
 * Fill a minimal (option-less) configuration response header with the
 * given result and flags; used for reject/empty-continuation replies.
 * Returns the response length (return line elided in this excerpt).
 */
2565 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2567 struct l2cap_conf_rsp *rsp = data;
2568 void *ptr = rsp->data;
2570 BT_DBG("sk %p", sk);
/* scid in the response is the peer's CID, i.e. our dcid. */
2572 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2573 rsp->result = cpu_to_le16(result);
2574 rsp->flags = cpu_to_le16(flags);
/*
 * Handle an incoming Command Reject.  Only "command not understood"
 * (reason 0) matters here: if it rejects our outstanding information
 * request, treat the feature-mask exchange as done and kick off any
 * channels waiting on it.
 */
2579 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2581 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2583 if (rej->reason != 0x0000)
/* Match against the ident of our pending INFO_REQ. */
2586 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2587 cmd->ident == conn->info_ident) {
2588 del_timer(&conn->info_timer);
2590 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2591 conn->info_ident = 0;
2593 l2cap_conn_start(conn);
/*
 * Handle an incoming Connection Request.  Finds a listener for the
 * PSM, enforces link security (except for SDP, PSM 0x0001), checks
 * the accept backlog and duplicate remote CIDs, then allocates and
 * initialises a child socket for the channel.  The response result/
 * status depends on security state and defer_setup; if the peer's
 * feature mask is still unknown an INFO_REQ is fired off afterwards.
 * NOTE(review): excerpt — lock_sock(parent), error `goto` targets,
 * `response:`/`sendresp:` labels and the final return are elided.
 */
2599 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2601 struct l2cap_chan_list *list = &conn->chan_list;
2602 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2603 struct l2cap_conn_rsp rsp;
2604 struct sock *sk, *parent;
2605 int result, status = L2CAP_CS_NO_INFO;
2607 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2608 __le16 psm = req->psm;
2610 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2612 /* Check if we have socket listening on psm */
2613 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2615 result = L2CAP_CR_BAD_PSM;
2619 /* Check if the ACL is secure enough (if not SDP) */
2620 if (psm != cpu_to_le16(0x0001) &&
2621 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05 = authentication failure, reported at disconnect time. */
2622 conn->disc_reason = 0x05;
2623 result = L2CAP_CR_SEC_BLOCK;
2627 result = L2CAP_CR_NO_MEM;
2629 /* Check for backlog size */
2630 if (sk_acceptq_is_full(parent)) {
2631 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2635 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2639 write_lock_bh(&list->lock);
2641 /* Check if we already have channel with that dcid */
2642 if (__l2cap_get_chan_by_dcid(list, scid)) {
2643 write_unlock_bh(&list->lock);
2644 sock_set_flag(sk, SOCK_ZAPPED);
2645 l2cap_sock_kill(sk);
/* The channel pins the ACL connection until it is deleted. */
2649 hci_conn_hold(conn->hcon);
2651 l2cap_sock_init(sk, parent);
2652 bacpy(&bt_sk(sk)->src, conn->src);
2653 bacpy(&bt_sk(sk)->dst, conn->dst);
2654 l2cap_pi(sk)->psm = psm;
2655 l2cap_pi(sk)->dcid = scid;
2657 __l2cap_chan_add(conn, sk, parent);
2658 dcid = l2cap_pi(sk)->scid;
2660 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Remember the request ident so a deferred response can reuse it. */
2662 l2cap_pi(sk)->ident = cmd->ident;
2664 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2665 if (l2cap_check_security(sk)) {
2666 if (bt_sk(sk)->defer_setup) {
2667 sk->sk_state = BT_CONNECT2;
2668 result = L2CAP_CR_PEND;
2669 status = L2CAP_CS_AUTHOR_PEND;
2670 parent->sk_data_ready(parent, 0);
2672 sk->sk_state = BT_CONFIG;
2673 result = L2CAP_CR_SUCCESS;
2674 status = L2CAP_CS_NO_INFO;
2677 sk->sk_state = BT_CONNECT2;
2678 result = L2CAP_CR_PEND;
2679 status = L2CAP_CS_AUTHEN_PEND;
/* Feature mask still unknown: answer "pending" and query it. */
2682 sk->sk_state = BT_CONNECT2;
2683 result = L2CAP_CR_PEND;
2684 status = L2CAP_CS_NO_INFO;
2687 write_unlock_bh(&list->lock);
2690 bh_unlock_sock(parent);
2693 rsp.scid = cpu_to_le16(scid);
2694 rsp.dcid = cpu_to_le16(dcid);
2695 rsp.result = cpu_to_le16(result);
2696 rsp.status = cpu_to_le16(status);
2697 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2699 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2700 struct l2cap_info_req info;
2701 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2703 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2704 conn->info_ident = l2cap_get_ident(conn);
2706 mod_timer(&conn->info_timer, jiffies +
2707 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2709 l2cap_send_cmd(conn, conn->info_ident,
2710 L2CAP_INFO_REQ, sizeof(info), &info);
/*
 * Handle a Connection Response.  Looks the channel up by our scid
 * (or, while still pending, by the command ident), then on SUCCESS
 * records the peer's dcid, moves to BT_CONFIG and immediately sends
 * our configuration request; PEND just marks the channel as pending;
 * anything else tears the channel down with ECONNREFUSED.
 * NOTE(review): excerpt — switch(result), `break`s, the PEND case
 * label and bh_unlock/return lines are elided from this view.
 */
2716 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2718 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2719 u16 scid, dcid, result, status;
2723 scid = __le16_to_cpu(rsp->scid);
2724 dcid = __le16_to_cpu(rsp->dcid);
2725 result = __le16_to_cpu(rsp->result);
2726 status = __le16_to_cpu(rsp->status);
2728 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* scid == 0 means the peer didn't allocate one yet (pending):
 * fall back to matching on the request ident. */
2731 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2735 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2741 case L2CAP_CR_SUCCESS:
2742 sk->sk_state = BT_CONFIG;
2743 l2cap_pi(sk)->ident = 0;
2744 l2cap_pi(sk)->dcid = dcid;
2745 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2747 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2749 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2750 l2cap_build_conf_req(sk, req), req);
2751 l2cap_pi(sk)->num_conf_req++;
/* L2CAP_CR_PEND: just note we're still waiting. */
2755 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
/* Any failure result: drop the channel. */
2759 l2cap_chan_del(sk, ECONNREFUSED);
/*
 * Handle a Configuration Request.  Config data may span multiple
 * requests (continuation flag 0x0001): fragments are accumulated in
 * pi->conf_req up to its buffer size, and only parsed once complete.
 * A successful parse sends our response; when both directions are
 * configured the channel becomes connected (with ERTM init if that
 * mode was negotiated).  We also send our own config request here if
 * we haven't yet.
 * NOTE(review): excerpt — bh_lock/unlock, `goto unlock` targets and
 * several returns are elided from this view.
 */
2767 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2769 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2775 dcid = __le16_to_cpu(req->dcid);
2776 flags = __le16_to_cpu(req->flags);
2778 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2780 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
/* Ignore config during disconnect. */
2784 if (sk->sk_state == BT_DISCONN)
2787 /* Reject if config buffer is too small. */
2788 len = cmd_len - sizeof(*req);
2789 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2790 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2791 l2cap_build_conf_rsp(sk, rsp,
2792 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment. */
2797 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2798 l2cap_pi(sk)->conf_len += len;
2800 if (flags & 0x0001) {
2801 /* Incomplete config. Send empty response. */
2802 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2803 l2cap_build_conf_rsp(sk, rsp,
2804 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2808 /* Complete config. */
2809 len = l2cap_parse_conf_req(sk, rsp);
/* Negative length = unrecoverable (e.g. mode refused): disconnect. */
2811 l2cap_send_disconn_req(conn, sk);
2815 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2816 l2cap_pi(sk)->num_conf_rsp++;
2818 /* Reset config buffer. */
2819 l2cap_pi(sk)->conf_len = 0;
2821 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2824 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
/* FCS defaults to CRC16 unless both sides asked for none. */
2825 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2826 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2827 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2829 sk->sk_state = BT_CONNECTED;
2831 l2cap_pi(sk)->next_tx_seq = 0;
2832 l2cap_pi(sk)->expected_tx_seq = 0;
2833 __skb_queue_head_init(TX_QUEUE(sk));
2834 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2835 l2cap_ertm_init(sk);
2837 l2cap_chan_ready(sk);
2841 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2843 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2844 l2cap_build_conf_req(sk, buf), buf);
2845 l2cap_pi(sk)->num_conf_req++;
/*
 * Handle a Configuration Response.  SUCCESS just proceeds; UNACCEPT
 * triggers up to L2CAP_CONF_MAX_CONF_RSP renegotiation rounds via
 * l2cap_parse_conf_rsp(); any other result (or exhausted retries)
 * disconnects.  Once our input is accepted and output config is also
 * done, the channel transitions to connected.
 * NOTE(review): excerpt — `goto done`/`break` lines, the 0x0001
 * continuation-flag check and unlock/return are elided.
 */
2853 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2855 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2856 u16 scid, flags, result;
2859 scid = __le16_to_cpu(rsp->scid);
2860 flags = __le16_to_cpu(rsp->flags);
2861 result = __le16_to_cpu(rsp->result);
2863 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2864 scid, flags, result);
2866 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2871 case L2CAP_CONF_SUCCESS:
2874 case L2CAP_CONF_UNACCEPT:
2875 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* NOTE(review): cmd->len is __le16; this arithmetic looks like it
 * needs le16_to_cpu() on big-endian — verify against mainline. */
2876 int len = cmd->len - sizeof(*rsp);
/* Cap the option data to what fits in our request buffer. */
2879 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2880 l2cap_send_disconn_req(conn, sk);
2884 /* throw out any old stored conf requests */
2885 result = L2CAP_CONF_SUCCESS;
2886 len = l2cap_parse_conf_rsp(sk, rsp->data,
2889 l2cap_send_disconn_req(conn, sk);
2893 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2894 L2CAP_CONF_REQ, len, req);
2895 l2cap_pi(sk)->num_conf_req++;
2896 if (result != L2CAP_CONF_SUCCESS)
/* Fall-through/default: give up and disconnect. */
2902 sk->sk_state = BT_DISCONN;
2903 sk->sk_err = ECONNRESET;
2904 l2cap_sock_set_timer(sk, HZ * 5);
2905 l2cap_send_disconn_req(conn, sk);
2912 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2914 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2915 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2916 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2917 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2919 sk->sk_state = BT_CONNECTED;
2920 l2cap_pi(sk)->next_tx_seq = 0;
2921 l2cap_pi(sk)->expected_tx_seq = 0;
2922 __skb_queue_head_init(TX_QUEUE(sk));
2923 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2924 l2cap_ertm_init(sk);
2926 l2cap_chan_ready(sk);
/*
 * Handle a Disconnection Request.  Looks up the channel by the dcid
 * the peer is disconnecting, acks with a Disconnection Response,
 * flushes pending TX (and ERTM SREJ queue/timers), then deletes the
 * channel with ECONNRESET and kills the socket.
 */
2934 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2936 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2937 struct l2cap_disconn_rsp rsp;
2941 scid = __le16_to_cpu(req->scid);
2942 dcid = __le16_to_cpu(req->dcid);
2944 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid, so look up by dcid. */
2946 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2950 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2951 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2952 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2954 sk->sk_shutdown = SHUTDOWN_MASK;
2956 skb_queue_purge(TX_QUEUE(sk));
/* ERTM channels also carry a reassembly queue and two timers. */
2958 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
2959 skb_queue_purge(SREJ_QUEUE(sk));
2960 del_timer(&l2cap_pi(sk)->retrans_timer);
2961 del_timer(&l2cap_pi(sk)->monitor_timer);
2964 l2cap_chan_del(sk, ECONNRESET);
2967 l2cap_sock_kill(sk);
/*
 * Handle a Disconnection Response (completion of a disconnect we
 * initiated).  Same teardown as the request path, but the channel is
 * deleted with err 0 since the close was local and expected.
 */
2971 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2973 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2977 scid = __le16_to_cpu(rsp->scid);
2978 dcid = __le16_to_cpu(rsp->dcid);
2980 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2982 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2986 skb_queue_purge(TX_QUEUE(sk));
2988 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
2989 skb_queue_purge(SREJ_QUEUE(sk));
2990 del_timer(&l2cap_pi(sk)->retrans_timer);
2991 del_timer(&l2cap_pi(sk)->monitor_timer);
2994 l2cap_chan_del(sk, 0);
2997 l2cap_sock_kill(sk);
/*
 * Handle an Information Request.  Answers FEAT_MASK with our feature
 * mask (extended with ERTM/streaming/FCS bits when enabled — guard
 * line elided) and FIXED_CHAN with the fixed-channel bitmap; any
 * other type is answered NOTSUPP.
 */
3001 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3003 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3006 type = __le16_to_cpu(req->type);
3008 BT_DBG("type 0x%4.4x", type);
3010 if (type == L2CAP_IT_FEAT_MASK) {
/* buf holds rsp header + 4-byte feature mask payload. */
3012 u32 feat_mask = l2cap_feat_mask;
3013 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3014 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3015 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3017 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3019 put_unaligned_le32(feat_mask, rsp->data);
3020 l2cap_send_cmd(conn, cmd->ident,
3021 L2CAP_INFO_RSP, sizeof(buf), buf);
3022 } else if (type == L2CAP_IT_FIXED_CHAN) {
3024 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3025 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3026 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel map follows the 4-byte rsp header. */
3027 memcpy(buf + 4, l2cap_fixed_chan, 8);
3028 l2cap_send_cmd(conn, cmd->ident,
3029 L2CAP_INFO_RSP, sizeof(buf), buf);
3031 struct l2cap_info_rsp rsp;
3032 rsp.type = cpu_to_le16(type);
3033 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3034 l2cap_send_cmd(conn, cmd->ident,
3035 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an Information Response to our own query.  Stores the peer's
 * feature mask; if it advertises fixed channels, chains a FIXED_CHAN
 * query, otherwise (and after the fixed-channel reply) marks the
 * exchange complete and starts any channels that were waiting on it.
 */
3041 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3043 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3046 type = __le16_to_cpu(rsp->type);
3047 result = __le16_to_cpu(rsp->result);
3049 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* Reply arrived: stop the info-request timeout. */
3051 del_timer(&conn->info_timer);
3053 if (type == L2CAP_IT_FEAT_MASK) {
3054 conn->feat_mask = get_unaligned_le32(rsp->data);
3056 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3057 struct l2cap_info_req req;
3058 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3060 conn->info_ident = l2cap_get_ident(conn);
3062 l2cap_send_cmd(conn, conn->info_ident,
3063 L2CAP_INFO_REQ, sizeof(req), &req);
3065 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3066 conn->info_ident = 0;
3068 l2cap_conn_start(conn);
3070 } else if (type == L2CAP_IT_FIXED_CHAN) {
3071 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3072 conn->info_ident = 0;
3074 l2cap_conn_start(conn);
/*
 * Demultiplex the signalling channel (CID 0x0001).  The skb may carry
 * several commands back to back: each iteration peels one command
 * header, validates it (length within the buffer, non-zero ident) and
 * dispatches on the opcode.  Unknown or failed commands are answered
 * with a Command Reject.  The frame is also mirrored to raw sockets.
 * NOTE(review): excerpt — `break`s, data/len advancement at loop end
 * and the final kfree_skb are elided from this view.
 */
3080 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3082 u8 *data = skb->data;
3084 struct l2cap_cmd_hdr cmd;
3087 l2cap_raw_recv(conn, skb);
3089 while (len >= L2CAP_CMD_HDR_SIZE) {
3091 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3092 data += L2CAP_CMD_HDR_SIZE;
3093 len -= L2CAP_CMD_HDR_SIZE;
3095 cmd_len = le16_to_cpu(cmd.len);
3097 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* ident 0 is illegal per the spec; oversize len = truncated frame. */
3099 if (cmd_len > len || !cmd.ident) {
3100 BT_DBG("corrupted command");
3105 case L2CAP_COMMAND_REJ:
3106 l2cap_command_rej(conn, &cmd, data);
3109 case L2CAP_CONN_REQ:
3110 err = l2cap_connect_req(conn, &cmd, data);
3113 case L2CAP_CONN_RSP:
3114 err = l2cap_connect_rsp(conn, &cmd, data);
3117 case L2CAP_CONF_REQ:
3118 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3121 case L2CAP_CONF_RSP:
3122 err = l2cap_config_rsp(conn, &cmd, data);
3125 case L2CAP_DISCONN_REQ:
3126 err = l2cap_disconnect_req(conn, &cmd, data);
3129 case L2CAP_DISCONN_RSP:
3130 err = l2cap_disconnect_rsp(conn, &cmd, data);
3133 case L2CAP_ECHO_REQ:
/* Echo: reflect the payload straight back. */
3134 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3137 case L2CAP_ECHO_RSP:
3140 case L2CAP_INFO_REQ:
3141 err = l2cap_information_req(conn, &cmd, data);
3144 case L2CAP_INFO_RSP:
3145 err = l2cap_information_rsp(conn, &cmd, data);
3149 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3155 struct l2cap_cmd_rej rej;
3156 BT_DBG("error %d", err);
3158 /* FIXME: Map err to a valid reason */
3159 rej.reason = cpu_to_le16(0);
3160 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the 2-byte FCS trailer of an ERTM/streaming PDU when CRC16 FCS is
 * negotiated.  The CRC covers the L2CAP basic header plus the 2-byte control
 * field (hdr_size) and the payload.  skb_trim() drops the FCS from skb->len,
 * but the bytes are still present in the buffer, so reading them at
 * skb->data + skb->len immediately afterwards is valid.
 * NOTE(review): the `return` lines of this function were lost in extraction;
 * presumably it returns non-zero on FCS mismatch -- confirm against callers. */
3170 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3172 u16 our_fcs, rcv_fcs;
3173 int hdr_size = L2CAP_HDR_SIZE + 2;
3175 if (pi->fcs == L2CAP_FCS_CRC16) {
3176 skb_trim(skb, skb->len - 2);
3177 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
/* skb->data points past the pulled header, hence the -hdr_size rewind. */
3178 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3180 if (our_fcs != rcv_fcs)
/* Answer a poll (P=1) from the peer: send pending I-frames with the F bit
 * set, or an RNR if we are locally busy, or a bare RR if nothing else got
 * transmitted -- the peer must always receive some frame carrying F=1. */
3186 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3188 struct l2cap_pinfo *pi = l2cap_pi(sk);
3191 pi->frames_sent = 0;
/* Request that the next outgoing frame carries the Final bit. */
3192 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3194 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
/* Locally busy: acknowledge the poll immediately with RNR + F. */
3196 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3197 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3198 l2cap_send_sframe(pi, control);
3199 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3202 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
3203 __mod_retrans_timer();
/* Flush queued I-frames; they will carry the F bit requested above. */
3205 l2cap_ertm_send(sk);
/* Nothing went out and we are not busy: fall back to a plain RR. */
3207 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3208 pi->frames_sent == 0) {
3209 control |= L2CAP_SUPER_RCV_READY;
3210 l2cap_send_sframe(pi, control);
/* Insert an out-of-order I-frame into SREJ_QUEUE(sk), keeping the queue
 * sorted by tx_seq so frames can later be delivered in sequence once the
 * gaps are filled.  tx_seq and sar are stashed in the skb control block. */
3214 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3216 struct sk_buff *next_skb;
3218 bt_cb(skb)->tx_seq = tx_seq;
3219 bt_cb(skb)->sar = sar;
/* Empty queue: just append. */
3221 next_skb = skb_peek(SREJ_QUEUE(sk));
3223 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Walk the queue and insert before the first entry with a larger tx_seq.
 * NOTE(review): plain > comparison; sequence numbers wrap mod 64, so
 * ordering across a wrap boundary depends on caller context -- verify. */
3228 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3229 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3233 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3236 } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
/* Largest sequence number seen so far: append at the tail. */
3238 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble a segmented SDU from I-frames according to the SAR bits in the
 * control field: UNSEGMENTED frames are queued directly; START allocates
 * pi->sdu (size from the 2-byte SDU-length prefix) and begins accumulation;
 * CONTINUE/END append, with END cloning the finished SDU onto the socket
 * receive queue.  L2CAP_CONN_SAR_SDU tracks "reassembly in progress". */
3241 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3243 struct l2cap_pinfo *pi = l2cap_pi(sk);
3244 struct sk_buff *_skb;
3247 switch (control & L2CAP_CTRL_SAR) {
3248 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame in the middle of a reassembly is a protocol error. */
3249 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3254 err = sock_queue_rcv_skb(sk, skb);
3260 case L2CAP_SDU_START:
3261 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* First two payload bytes of a START frame carry the total SDU length. */
3266 pi->sdu_len = get_unaligned_le16(skb->data);
3269 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3275 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3277 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3278 pi->partial_sdu_len = skb->len;
3282 case L2CAP_SDU_CONTINUE:
3283 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3286 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* Guard against the peer sending more bytes than the announced SDU length. */
3288 pi->partial_sdu_len += skb->len;
3289 if (pi->partial_sdu_len > pi->sdu_len)
/* (END case -- its `case` label line was lost in extraction.) */
3297 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3300 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3302 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3303 pi->partial_sdu_len += skb->len;
3305 if (pi->partial_sdu_len > pi->imtu)
/* Only deliver if the accumulated length matches the announced length. */
3308 if (pi->partial_sdu_len == pi->sdu_len) {
3309 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3310 err = sock_queue_rcv_skb(sk, _skb);
/* After a selectively-rejected frame arrives, drain SREJ_QUEUE(sk) of every
 * frame that is now in sequence (starting at tx_seq), pushing each through
 * SAR reassembly and advancing buffer_seq_srej mod 64. */
3325 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3327 struct sk_buff *skb;
3330 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* Stop at the first remaining gap in the sequence. */
3331 if (bt_cb(skb)->tx_seq != tx_seq)
3334 skb = skb_dequeue(SREJ_QUEUE(sk));
3335 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3336 l2cap_sar_reassembly_sdu(sk, skb, control);
3337 l2cap_pi(sk)->buffer_seq_srej =
3338 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
/* Re-issue SREJ S-frames for every still-outstanding sequence number on
 * SREJ_LIST(sk).  The entry matching tx_seq (now satisfied) is dropped;
 * the rest are re-sent and re-queued at the tail, preserving order. */
3343 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3345 struct l2cap_pinfo *pi = l2cap_pi(sk);
3346 struct srej_list *l, *tmp;
3349 list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
/* This SREJ has been answered -- remove it (free happens in missing lines). */
3350 if (l->tx_seq == tx_seq) {
3355 control = L2CAP_SUPER_SELECT_REJECT;
3356 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3357 l2cap_send_sframe(pi, control);
/* Move the entry to the list tail after resending its SREJ. */
3359 list_add_tail(&l->list, SREJ_LIST(sk));
3363 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3365 struct l2cap_pinfo *pi = l2cap_pi(sk);
3366 struct srej_list *new;
3369 while (tx_seq != pi->expected_tx_seq) {
3370 control = L2CAP_SUPER_SELECT_REJECT;
3371 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3372 l2cap_send_sframe(pi, control);
3374 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3375 new->tx_seq = pi->expected_tx_seq++;
3376 list_add_tail(&new->list, SREJ_LIST(sk));
3378 pi->expected_tx_seq++;
/* ERTM receive path for I-frames: handles the F bit, acknowledges via
 * req_seq, and sorts the frame into one of three cases -- expected in
 * sequence, out of sequence while an SREJ exchange is active, or the first
 * out-of-sequence frame (which starts a new SREJ exchange).
 * NOTE(review): several structural lines (if/else braces, `goto`/`return`
 * statements) are missing from this extraction; comments annotate only the
 * visible logic. */
3381 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3383 struct l2cap_pinfo *pi = l2cap_pi(sk);
3384 u8 tx_seq = __get_txseq(rx_control);
3385 u8 req_seq = __get_reqseq(rx_control);
3386 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3389 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* F=1 answers our poll: stop the monitor timer and leave the WAIT_F state. */
3391 if (L2CAP_CTRL_FINAL & rx_control) {
3392 del_timer(&pi->monitor_timer);
3393 if (pi->unacked_frames > 0)
3394 __mod_retrans_timer();
3395 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
/* The piggybacked req_seq acknowledges our transmitted frames. */
3398 pi->expected_ack_seq = req_seq;
3399 l2cap_drop_acked_frames(sk);
3401 if (tx_seq == pi->expected_tx_seq)
/* Out-of-sequence frame while SREJ recovery is already in progress. */
3404 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3405 struct srej_list *first;
3407 first = list_first_entry(SREJ_LIST(sk),
3408 struct srej_list, list);
/* This is the retransmission we asked for first: queue it and try to
 * deliver any now-contiguous run from the SREJ queue. */
3409 if (tx_seq == first->tx_seq) {
3410 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3411 l2cap_check_srej_gap(sk, tx_seq);
3413 list_del(&first->list);
/* All SREJs answered: resynchronize buffer_seq and end recovery. */
3416 if (list_empty(SREJ_LIST(sk))) {
3417 pi->buffer_seq = pi->buffer_seq_srej;
3418 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3421 struct srej_list *l;
3422 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
/* Frame answers a later outstanding SREJ: resend the earlier ones. */
3424 list_for_each_entry(l, SREJ_LIST(sk), list) {
3425 if (l->tx_seq == tx_seq) {
3426 l2cap_resend_srejframe(sk, tx_seq);
3430 l2cap_send_srejframe(sk, tx_seq);
/* First out-of-sequence frame: start a fresh SREJ exchange. */
3433 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3435 INIT_LIST_HEAD(SREJ_LIST(sk));
3436 pi->buffer_seq_srej = pi->buffer_seq;
3438 __skb_queue_head_init(SREJ_QUEUE(sk));
3439 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3441 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3443 l2cap_send_srejframe(sk, tx_seq);
/* Expected (in-sequence) frame path. */
3448 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3450 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3451 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
/* F=1 on an in-sequence frame may restart retransmission from the
 * acknowledged point (REJ recovery completion). */
3455 if (rx_control & L2CAP_CTRL_FINAL) {
3456 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3457 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3459 sk->sk_send_head = TX_QUEUE(sk)->next;
3460 pi->next_tx_seq = pi->expected_ack_seq;
3461 l2cap_ertm_send(sk);
3465 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3467 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
/* Acknowledge every L2CAP_DEFAULT_NUM_TO_ACK frames to bound peer's window. */
3471 pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
3472 if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1)
/* Handle a Receiver Ready (RR) S-frame: an RR with P=1 is a poll we must
 * answer; an RR with F=1 completes our poll (and possibly REJ recovery);
 * a plain RR simply acknowledges frames up to req_seq. */
3478 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3480 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* NOTE(review): named tx_seq but holds the frame's req_seq (ack number). */
3481 u8 tx_seq = __get_reqseq(rx_control);
3483 if (rx_control & L2CAP_CTRL_POLL) {
3484 l2cap_send_i_or_rr_or_rnr(sk);
3485 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3487 } else if (rx_control & L2CAP_CTRL_FINAL) {
3488 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3489 pi->expected_ack_seq = tx_seq;
3490 l2cap_drop_acked_frames(sk);
/* If REJ recovery was pending, restart transmission from the ack point. */
3492 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3493 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3495 sk->sk_send_head = TX_QUEUE(sk)->next;
3496 pi->next_tx_seq = pi->expected_ack_seq;
3497 l2cap_ertm_send(sk);
/* Plain acknowledgement. */
3501 pi->expected_ack_seq = tx_seq;
3502 l2cap_drop_acked_frames(sk);
3504 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3505 (pi->unacked_frames > 0))
3506 __mod_retrans_timer();
3508 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* Peer no longer busy: resume sending unless SREJ recovery is active. */
3509 if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
3512 l2cap_ertm_send(sk);
/* Handle a Reject (REJ) S-frame: the peer requests go-back-N retransmission
 * starting at req_seq.  Frames up to req_seq are acknowledged, then the
 * transmit queue is rewound and retransmitted. */
3516 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3518 struct l2cap_pinfo *pi = l2cap_pi(sk);
3519 u8 tx_seq = __get_reqseq(rx_control);
3521 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3523 pi->expected_ack_seq = __get_reqseq(rx_control);
3524 l2cap_drop_acked_frames(sk);
3526 if (rx_control & L2CAP_CTRL_FINAL) {
/* F=1: only retransmit if this REJ was not already acted upon. */
3527 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3528 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3530 sk->sk_send_head = TX_QUEUE(sk)->next;
3531 pi->next_tx_seq = pi->expected_ack_seq;
3532 l2cap_ertm_send(sk);
/* F=0: rewind and retransmit from the rejected sequence number. */
3535 sk->sk_send_head = TX_QUEUE(sk)->next;
3536 pi->next_tx_seq = pi->expected_ack_seq;
3537 l2cap_ertm_send(sk);
/* While waiting for an F bit, remember this REJ so the eventual F=1
 * response is not retransmitted twice. */
3539 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3540 pi->srej_save_reqseq = tx_seq;
3541 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective Reject (SREJ) S-frame: retransmit exactly the single
 * frame with sequence number req_seq.  P and F bit variants additionally
 * acknowledge and interact with the WAIT_F poll state. */
3545 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3547 struct l2cap_pinfo *pi = l2cap_pi(sk);
3548 u8 tx_seq = __get_reqseq(rx_control);
3550 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3552 if (rx_control & L2CAP_CTRL_POLL) {
/* P=1: ack up to tx_seq, retransmit the requested frame, keep sending. */
3553 pi->expected_ack_seq = tx_seq;
3554 l2cap_drop_acked_frames(sk);
3555 l2cap_retransmit_frame(sk, tx_seq);
3556 l2cap_ertm_send(sk);
3557 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3558 pi->srej_save_reqseq = tx_seq;
3559 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3561 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* F=1: skip the retransmission if this SREJ was already acted on. */
3562 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3563 pi->srej_save_reqseq == tx_seq)
3564 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3566 l2cap_retransmit_frame(sk, tx_seq);
/* Neither P nor F: plain selective retransmission. */
3568 l2cap_retransmit_frame(sk, tx_seq);
3569 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3570 pi->srej_save_reqseq = tx_seq;
3571 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver Not Ready (RNR) S-frame: mark the peer busy, take its
 * acknowledgement, stop the retransmission timer, and if P=1 answer the
 * poll with an RR/RNR carrying F=1. */
3576 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
3578 struct l2cap_pinfo *pi = l2cap_pi(sk);
3579 u8 tx_seq = __get_reqseq(rx_control);
3581 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3582 pi->expected_ack_seq = tx_seq;
3583 l2cap_drop_acked_frames(sk);
/* No point retransmitting into a busy receiver. */
3585 del_timer(&pi->retrans_timer);
3586 if (rx_control & L2CAP_CTRL_POLL) {
3587 u16 control = L2CAP_CTRL_FINAL;
3588 l2cap_send_rr_or_rnr(pi, control);
/* Dispatch a received S-frame to the RR/REJ/SREJ/RNR handler according to
 * its supervisory function bits.  An F bit answering our poll is processed
 * first: stop the monitor timer and leave the WAIT_F state. */
3592 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3594 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3596 if (L2CAP_CTRL_FINAL & rx_control) {
3597 del_timer(&l2cap_pi(sk)->monitor_timer);
3598 if (l2cap_pi(sk)->unacked_frames > 0)
3599 __mod_retrans_timer();
3600 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
3603 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3604 case L2CAP_SUPER_RCV_READY:
3605 l2cap_data_channel_rrframe(sk, rx_control);
3608 case L2CAP_SUPER_REJECT:
3609 l2cap_data_channel_rejframe(sk, rx_control);
3612 case L2CAP_SUPER_SELECT_REJECT:
3613 l2cap_data_channel_srejframe(sk, rx_control);
3616 case L2CAP_SUPER_RCV_NOT_READY:
3617 l2cap_data_channel_rnrframe(sk, rx_control);
/* Deliver a B-frame received on a dynamic CID to its channel, honouring the
 * channel mode: BASIC queues the payload directly, ERTM validates control
 * field + FCS and runs the I/S-frame state machine, STREAMING validates and
 * feeds SAR reassembly with no retransmission. */
3625 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3628 struct l2cap_pinfo *pi;
3632 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3634 BT_DBG("unknown cid 0x%4.4x", cid);
3640 BT_DBG("sk %p, len %d", sk, skb->len);
3642 if (sk->sk_state != BT_CONNECTED)
3646 case L2CAP_MODE_BASIC:
3647 /* If socket recv buffers overflows we drop data here
3648 * which is *bad* because L2CAP has to be reliable.
3649 * But we don't have any other choice. L2CAP doesn't
3650 * provide flow control mechanism. */
/* Drop frames larger than the negotiated incoming MTU. */
3652 if (pi->imtu < skb->len)
3655 if (!sock_queue_rcv_skb(sk, skb))
3659 case L2CAP_MODE_ERTM:
/* First two payload bytes are the ERTM control field. */
3660 control = get_unaligned_le16(skb->data)
3664 if (__is_sar_start(control))
3667 if (pi->fcs == L2CAP_FCS_CRC16)
3671 * We can just drop the corrupted I-frame here.
3672 * Receiver will miss it and start proper recovery
3673 * procedures and ask retransmission.
3675 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
3678 if (l2cap_check_fcs(pi, skb))
3681 if (__is_iframe(control)) {
3685 l2cap_data_channel_iframe(sk, control, skb);
3690 l2cap_data_channel_sframe(sk, control, skb);
3695 case L2CAP_MODE_STREAMING:
3696 control = get_unaligned_le16(skb->data)
3700 if (__is_sar_start(control))
3703 if (pi->fcs == L2CAP_FCS_CRC16)
/* Streaming mode tolerates no S-frames and no oversized/short PDUs. */
3706 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || len < 4
3707 || __is_sframe(control))
3710 if (l2cap_check_fcs(pi, skb))
3713 tx_seq = __get_txseq(control);
/* Streaming: out-of-order frames are accepted by resynchronizing
 * expected_tx_seq rather than requesting retransmission. */
3715 if (pi->expected_tx_seq == tx_seq)
3716 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3718 pi->expected_tx_seq = (tx_seq + 1) % 64;
3720 l2cap_sar_reassembly_sdu(sk, skb, control);
3725 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Deliver a connectionless (G-frame, CID 0x0002) payload to the socket
 * bound to the given PSM on the connection's source address. */
3739 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3743 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3747 BT_DBG("sk %p, len %d", sk, skb->len);
3749 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
/* Enforce the socket's incoming MTU before queueing. */
3752 if (l2cap_pi(sk)->imtu < skb->len)
3755 if (!sock_queue_rcv_skb(sk, skb))
/* Route one complete L2CAP frame by its destination CID: the signalling
 * channel, the connectionless channel (payload prefixed by a PSM), or a
 * dynamic data channel. */
3767 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3769 struct l2cap_hdr *lh = (void *) skb->data;
3773 skb_pull(skb, L2CAP_HDR_SIZE);
3774 cid = __le16_to_cpu(lh->cid);
3775 len = __le16_to_cpu(lh->len);
/* Header length must match the remaining payload exactly. */
3777 if (len != skb->len) {
3782 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3785 case L2CAP_CID_SIGNALING:
3786 l2cap_sig_channel(conn, skb);
3789 case L2CAP_CID_CONN_LESS:
3790 psm = get_unaligned_le16(skb->data);
3792 l2cap_conless_channel(conn, psm, skb);
3796 l2cap_data_channel(conn, cid, skb);
3801 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection request.  Scan the listening
 * L2CAP sockets and return the link-mode mask (accept / master role) --
 * an exact local-address match wins over a wildcard (BDADDR_ANY) match. */
3803 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3805 int exact = 0, lm1 = 0, lm2 = 0;
/* NOTE(review): `register` is an obsolete hint; harmless but dated. */
3806 register struct sock *sk;
3807 struct hlist_node *node;
/* L2CAP only runs over ACL links. */
3809 if (type != ACL_LINK)
3812 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3814 /* Find listening sockets and check their link_mode */
3815 read_lock(&l2cap_sk_list.lock);
3816 sk_for_each(sk, node, &l2cap_sk_list.head) {
3817 if (sk->sk_state != BT_LISTEN)
/* Socket bound to this adapter's own address: exact match. */
3820 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3821 lm1 |= HCI_LM_ACCEPT;
3822 if (l2cap_pi(sk)->role_switch)
3823 lm1 |= HCI_LM_MASTER;
3825 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3826 lm2 |= HCI_LM_ACCEPT;
3827 if (l2cap_pi(sk)->role_switch)
3828 lm2 |= HCI_LM_MASTER;
3831 read_unlock(&l2cap_sk_list.lock);
3833 return exact ? lm1 : lm2;
/* HCI callback: ACL connection establishment finished.  On success attach
 * an l2cap_conn and kick off pending channels; on failure tear down with
 * the mapped errno. */
3836 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3838 struct l2cap_conn *conn;
3840 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3842 if (hcon->type != ACL_LINK)
3846 conn = l2cap_conn_add(hcon, status);
3848 l2cap_conn_ready(conn);
3850 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: return the disconnect reason recorded for this ACL link's
 * L2CAP connection (consulted by HCI before tearing down the link). */
3855 static int l2cap_disconn_ind(struct hci_conn *hcon)
3857 struct l2cap_conn *conn = hcon->l2cap_data;
3859 BT_DBG("hcon %p", hcon);
3861 if (hcon->type != ACL_LINK || !conn)
3864 return conn->disc_reason;
/* HCI callback: the ACL link went down -- destroy the L2CAP connection and
 * all its channels with the mapped errno. */
3867 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3869 BT_DBG("hcon %p reason %d", hcon, reason);
3871 if (hcon->type != ACL_LINK)
3874 l2cap_conn_del(hcon, bt_err(reason));
/* React to a link-encryption change for one connection-oriented socket:
 * losing encryption gives MEDIUM-security channels a 5 s grace timer and
 * closes HIGH-security channels outright; regaining encryption cancels the
 * grace timer on MEDIUM-security channels. */
3879 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
/* Only connection-oriented (SEQPACKET) channels carry security levels. */
3881 if (sk->sk_type != SOCK_SEQPACKET)
3884 if (encrypt == 0x00) {
3885 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3886 l2cap_sock_clear_timer(sk);
3887 l2cap_sock_set_timer(sk, HZ * 5);
3888 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3889 __l2cap_sock_close(sk, ECONNREFUSED);
3891 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3892 l2cap_sock_clear_timer(sk);
/* HCI callback: authentication/encryption procedure completed for the ACL
 * link.  Walk every channel on the connection: re-check encryption on
 * established channels, and for channels whose connect was pending on
 * security, either send the deferred CONN_REQ (outgoing, BT_CONNECT) or
 * answer the peer's request with success/security-block (BT_CONNECT2). */
3896 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3898 struct l2cap_chan_list *l;
3899 struct l2cap_conn *conn = hcon->l2cap_data;
3905 l = &conn->chan_list;
3907 BT_DBG("conn %p", conn);
3909 read_lock(&l->lock);
3911 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Skip channels not waiting on this security procedure. */
3914 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3919 if (!status && (sk->sk_state == BT_CONNECTED ||
3920 sk->sk_state == BT_CONFIG)) {
3921 l2cap_check_encryption(sk, encrypt);
/* Outgoing connect was deferred until security completed. */
3926 if (sk->sk_state == BT_CONNECT) {
3928 struct l2cap_conn_req req;
3929 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3930 req.psm = l2cap_pi(sk)->psm;
3932 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
3934 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3935 L2CAP_CONN_REQ, sizeof(req), &req);
3937 l2cap_sock_clear_timer(sk);
3938 l2cap_sock_set_timer(sk, HZ / 10);
/* Incoming connect waiting on security: answer it now. */
3940 } else if (sk->sk_state == BT_CONNECT2) {
3941 struct l2cap_conn_rsp rsp;
3945 sk->sk_state = BT_CONFIG;
3946 result = L2CAP_CR_SUCCESS;
/* Security failed: refuse and schedule local teardown. */
3948 sk->sk_state = BT_DISCONN;
3949 l2cap_sock_set_timer(sk, HZ / 10);
3950 result = L2CAP_CR_SEC_BLOCK;
3953 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3954 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3955 rsp.result = cpu_to_le16(result);
3956 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3957 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3958 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3964 read_unlock(&l->lock);
/* HCI callback: one ACL data packet arrived.  An L2CAP frame may span
 * several ACL packets; ACL_START packets begin a frame (conn->rx_skb
 * accumulates it, conn->rx_len counts the bytes still expected) and
 * continuation packets append until the frame is complete, at which point
 * it is handed to l2cap_recv_frame().  Any length inconsistency marks the
 * connection unreliable (ECOMM). */
3969 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3971 struct l2cap_conn *conn = hcon->l2cap_data;
3973 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
3976 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3978 if (flags & ACL_START) {
3979 struct l2cap_hdr *hdr;
/* A new start while a previous frame is still partial: discard the
 * half-assembled frame and flag the connection. */
3983 BT_ERR("Unexpected start frame (len %d)", skb->len);
3984 kfree_skb(conn->rx_skb);
3985 conn->rx_skb = NULL;
3987 l2cap_conn_unreliable(conn, ECOMM);
3991 BT_ERR("Frame is too short (len %d)", skb->len);
3992 l2cap_conn_unreliable(conn, ECOMM);
3996 hdr = (struct l2cap_hdr *) skb->data;
3997 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3999 if (len == skb->len) {
4000 /* Complete frame received */
4001 l2cap_recv_frame(conn, skb);
4005 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4007 if (skb->len > len) {
4008 BT_ERR("Frame is too long (len %d, expected len %d)",
4010 l2cap_conn_unreliable(conn, ECOMM);
4014 /* Allocate skb for the complete frame (with header) */
4015 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4019 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4021 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4023 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4025 if (!conn->rx_len) {
4026 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4027 l2cap_conn_unreliable(conn, ECOMM);
4031 if (skb->len > conn->rx_len) {
4032 BT_ERR("Fragment is too long (len %d, expected %d)",
4033 skb->len, conn->rx_len);
4034 kfree_skb(conn->rx_skb);
4035 conn->rx_skb = NULL;
4037 l2cap_conn_unreliable(conn, ECOMM);
4041 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4043 conn->rx_len -= skb->len;
4045 if (!conn->rx_len) {
4046 /* Complete frame received */
4047 l2cap_recv_frame(conn, conn->rx_skb);
4048 conn->rx_skb = NULL;
/* debugfs seq_file show: print one line per L2CAP socket (addresses, state,
 * PSM, MTUs, security level) under l2cap_sk_list's read lock. */
4057 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4060 struct hlist_node *node;
4062 read_lock_bh(&l2cap_sk_list.lock);
4064 sk_for_each(sk, node, &l2cap_sk_list.head) {
4065 struct l2cap_pinfo *pi = l2cap_pi(sk);
4067 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4068 batostr(&bt_sk(sk)->src),
4069 batostr(&bt_sk(sk)->dst),
4070 sk->sk_state, __le16_to_cpu(pi->psm),
4072 pi->imtu, pi->omtu, pi->sec_level);
4075 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open: standard single_open() wrapper around l2cap_debugfs_show. */
4080 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4082 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for /sys/kernel/debug/bluetooth/l2cap (seq_file based). */
4085 static const struct file_operations l2cap_debugfs_fops = {
4086 .open = l2cap_debugfs_open,
4088 .llseek = seq_lseek,
4089 .release = single_release,
/* Dentry of the debugfs file, kept for removal in l2cap_exit(). */
4092 static struct dentry *l2cap_debugfs;
/* Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets; generic
 * Bluetooth helpers are used for poll and ioctl, mmap/socketpair are
 * unsupported. */
4094 static const struct proto_ops l2cap_sock_ops = {
4095 .family = PF_BLUETOOTH,
4096 .owner = THIS_MODULE,
4097 .release = l2cap_sock_release,
4098 .bind = l2cap_sock_bind,
4099 .connect = l2cap_sock_connect,
4100 .listen = l2cap_sock_listen,
4101 .accept = l2cap_sock_accept,
4102 .getname = l2cap_sock_getname,
4103 .sendmsg = l2cap_sock_sendmsg,
4104 .recvmsg = l2cap_sock_recvmsg,
4105 .poll = bt_sock_poll,
4106 .ioctl = bt_sock_ioctl,
4107 .mmap = sock_no_mmap,
4108 .socketpair = sock_no_socketpair,
4109 .shutdown = l2cap_sock_shutdown,
4110 .setsockopt = l2cap_sock_setsockopt,
4111 .getsockopt = l2cap_sock_getsockopt
/* Registration record for socket(PF_BLUETOOTH, ..., BTPROTO_L2CAP). */
4114 static const struct net_proto_family l2cap_sock_family_ops = {
4115 .family = PF_BLUETOOTH,
4116 .owner = THIS_MODULE,
4117 .create = l2cap_sock_create,
/* HCI protocol hooks: how the HCI core delivers link events and ACL data
 * to the L2CAP layer. */
4120 static struct hci_proto l2cap_hci_proto = {
4122 .id = HCI_PROTO_L2CAP,
4123 .connect_ind = l2cap_connect_ind,
4124 .connect_cfm = l2cap_connect_cfm,
4125 .disconn_ind = l2cap_disconn_ind,
4126 .disconn_cfm = l2cap_disconn_cfm,
4127 .security_cfm = l2cap_security_cfm,
4128 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, the Bluetooth socket family entry, and
 * the HCI protocol hooks (unwinding on failure), then create the debugfs
 * file.  A debugfs failure is logged but not fatal. */
4131 static int __init l2cap_init(void)
4135 err = proto_register(&l2cap_proto, 0);
4139 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4141 BT_ERR("L2CAP socket registration failed");
4145 err = hci_register_proto(&l2cap_hci_proto);
4147 BT_ERR("L2CAP protocol registration failed");
4148 bt_sock_unregister(BTPROTO_L2CAP);
4153 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4154 bt_debugfs, NULL, &l2cap_debugfs_fops);
4156 BT_ERR("Failed to create L2CAP debug file");
4159 BT_INFO("L2CAP ver %s", VERSION);
4160 BT_INFO("L2CAP socket layer initialized");
/* Error unwind target: undo proto_register(). */
4165 proto_unregister(&l2cap_proto);
/* Module exit: tear down in reverse order of l2cap_init(), logging (but not
 * failing on) unregistration errors. */
4169 static void __exit l2cap_exit(void)
4171 debugfs_remove(l2cap_debugfs);
4173 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4174 BT_ERR("L2CAP socket unregistration failed");
4176 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4177 BT_ERR("L2CAP protocol unregistration failed");
4179 proto_unregister(&l2cap_proto);
/* Exported no-op: other modules call this solely so that symbol resolution
 * pulls the L2CAP module in automatically. */
4182 void l2cap_load(void)
4184 /* Dummy function to trigger automatic L2CAP module loading by
4185 * other modules that use L2CAP sockets but don't use any other
4186 * symbols from it. */
4189 EXPORT_SYMBOL(l2cap_load);
/* Module entry points, parameters and metadata. */
4191 module_init(l2cap_init);
4192 module_exit(l2cap_exit);
/* NOTE(review): enable_ertm is declared `static int` at the top of the file
 * but registered with the `bool` param type -- acceptable on kernels of
 * this era, rejected by later module_param type checking; verify target. */
4194 module_param(enable_ertm, bool, 0644);
4195 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4197 module_param(max_transmit, uint, 0644);
4198 MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
4200 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4201 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4202 MODULE_VERSION(VERSION);
4203 MODULE_LICENSE("GPL");
4204 MODULE_ALIAS("bt-proto-0");