2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
#define VERSION "2.14"

/* ERTM is enabled by default only when the extended-features Kconfig
 * option is set (the #else/#endif of this conditional are elided in
 * this view). */
#ifdef CONFIG_BT_L2CAP_EXT_FEATURES
static int enable_ertm = 1;
static int enable_ertm = 0;

/* ERTM tuning defaults: retransmission limit and transmit window. */
static int max_transmit = L2CAP_DEFAULT_MAX_TX;
static int tx_window = L2CAP_DEFAULT_TX_WINDOW;

/* Feature mask advertised in INFO_RSP; fixed-channel map (bit 1 set
 * = L2CAP signalling channel). */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };

static const struct proto_ops l2cap_sock_ops;

/* Global list of all L2CAP sockets, guarded by its rwlock. */
static struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)

/* Forward declarations. */
static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
82 /* ---- L2CAP timers ---- */
/* Per-socket guard timer expired: map the socket state to an errno
 * and force-close the channel.  NOTE(review): the socket lock/unlock
 * and the default `reason` assignment are elided in this view. */
static void l2cap_sock_timeout(unsigned long arg)
	struct sock *sk = (struct sock *) arg;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	/* A stalled configured/securing connection is reported as
	 * "refused" rather than a plain timeout. */
	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;

	__l2cap_sock_close(sk, reason);
108 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
110 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
111 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
114 static void l2cap_sock_clear_timer(struct sock *sk)
116 BT_DBG("sock %p state %d", sk, sk->sk_state);
117 sk_stop_timer(sk, &sk->sk_timer);
120 /* ---- L2CAP channels ---- */
/* Linear search of the connection's channel list by destination CID.
 * Caller must hold the list lock.  (Return statements elided in this
 * view.) */
static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->dcid == cid)
/* Linear search of the connection's channel list by source CID.
 * Caller must hold the list lock.  (Return statements elided in this
 * view.) */
static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->scid == cid)
/* Find channel with given SCID.
 * Returns locked socket */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
	/* Lookup under the list read lock; the bh_lock_sock() on the
	 * result is elided in this view. */
	s = __l2cap_get_chan_by_scid(l, cid);
	read_unlock(&l->lock);
/* Find the channel whose pending signalling command used @ident.
 * Caller must hold the list lock.  (Return statements elided.) */
static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->ident == ident)
/* Locked-socket variant of __l2cap_get_chan_by_ident(): lookup under
 * the list read lock (bh_lock_sock on the result elided here). */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
	s = __l2cap_get_chan_by_ident(l, ident);
	read_unlock(&l->lock);
/* Allocate the first unused dynamic source CID on this connection.
 * (Return statements elided in this view.) */
static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
	u16 cid = L2CAP_CID_DYN_START;

	for (; cid < L2CAP_CID_DYN_END; cid++) {
		if (!__l2cap_get_chan_by_scid(l, cid))
/* Push @sk onto the head of the connection's channel list.
 * Caller holds the list write lock; the NULL-head guard and the
 * head reassignment are elided in this view. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
	l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
/* Unlink @sk from the connection's doubly linked channel list under
 * the write lock.  (The head/NULL-neighbour fix-ups are elided in
 * this view.) */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
		l2cap_pi(next)->prev_c = prev;
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);
/* Attach socket @sk to connection @conn: pick source/destination CIDs
 * based on the socket type, link it into the channel list and, for
 * incoming channels, queue it on the listening @parent.
 * Caller holds the channel-list write lock. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* 0x13 = "remote user terminated" default disconnect reason. */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
		/* (final else branch -- raw sockets; braces elided) */
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;

	__l2cap_chan_link(l, sk);

	/* (guarded by an "if (parent)" check, elided in this view) */
		bt_accept_enqueue(parent, sk);
 * Must be called on the locked socket. */
/* Detach the channel from its connection (if any), mark the socket
 * closed/zapped and wake whoever is waiting on it. */
static void l2cap_chan_del(struct sock *sk, int err)
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	/* Unlink from channel list */
	l2cap_chan_unlink(&conn->chan_list, sk);
	l2cap_pi(sk)->conn = NULL;
	/* Drop the hci_conn reference taken when the channel was added. */
	hci_conn_put(conn->hcon);

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	/* Still on a listener's accept queue: detach and wake the
	 * listener; otherwise notify the socket itself.  (The parent
	 * NULL-check branches are elided in this view.) */
	bt_accept_unlink(sk);
	parent->sk_data_ready(parent, 0);

	sk->sk_state_change(sk);
/* Service level security */
/* Translate the socket's security level into an HCI authentication
 * type (SDP's PSM 0x0001 never bonds) and ask the underlying ACL
 * link to enforce it.  NOTE(review): the else/break/default lines of
 * the branches below are elided in this view. */
static inline int l2cap_check_security(struct sock *sk)
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
			auth_type = HCI_AT_NO_BONDING;

		/* Track SDP traffic with its own pseudo security level. */
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;

	switch (l2cap_pi(sk)->sec_level) {
	case BT_SECURITY_HIGH:
		auth_type = HCI_AT_GENERAL_BONDING_MITM;
	case BT_SECURITY_MEDIUM:
		auth_type = HCI_AT_GENERAL_BONDING;
		auth_type = HCI_AT_NO_BONDING;

	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for @conn under the
 * connection spinlock.  (The wrap-around assignment and return are
 * elided in this view.) */
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.

	spin_lock_bh(&conn->lock);

	/* Skip 0 and the reserved range by wrapping back to 1. */
	if (++conn->tx_ident > 128)

	spin_unlock_bh(&conn->lock);
/* Build a signalling command PDU and push it on the connection's ACL
 * link.  NOTE(review): the NULL check on the built skb is elided in
 * this view. */
static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);

	BT_DBG("code 0x%2.2x", code);

	return hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM S-frame carrying @control.  Folds in the
 * pending F/P bits, appends a CRC16 FCS when enabled, and sends it on
 * the ACL link.  (The "hlen += 2" under the first FCS check and the
 * skb-allocation failure path are elided in this view.) */
static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	int count, hlen = L2CAP_HDR_SIZE + 2;

	if (pi->fcs == L2CAP_FCS_CRC16)

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* Consume a pending Final bit. */
	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;

	/* Consume a pending Poll bit. */
	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;

	skb = bt_skb_alloc(count, GFP_ATOMIC);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	/* FCS covers header + control, i.e. everything before itself. */
	if (pi->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));

	return hci_send_acl(pi->conn->hcon, skb, 0);
387 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
389 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
390 control |= L2CAP_SUPER_RCV_NOT_READY;
392 control |= L2CAP_SUPER_RCV_READY;
394 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
396 return l2cap_send_sframe(pi, control);
/* Begin channel establishment: if the remote feature mask is already
 * known (and security clears), send CONN_REQ; otherwise issue an
 * INFO_REQ for the feature mask first.  (Early return and else/brace
 * lines are elided in this view.) */
static void l2cap_do_start(struct sock *sk)
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Still waiting for the INFO_RSP -- nothing to do yet. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))

		if (l2cap_check_security(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		/* (else branch: feature mask not yet requested) */
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for INFO_RSP with the info timer. */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
432 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
434 struct l2cap_disconn_req req;
436 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
437 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
438 l2cap_send_cmd(conn, l2cap_get_ident(conn),
439 L2CAP_DISCONN_REQ, sizeof(req), &req);
442 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on @conn and push its state
 * machine forward: BT_CONNECT channels get a CONN_REQ, BT_CONNECT2
 * channels get a CONN_RSP whose result depends on security and
 * deferred-setup state.  (Socket lock/unlock, continue statements and
 * closing braces are elided in this view.) */
static void l2cap_conn_start(struct l2cap_conn *conn)
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		/* Only SEQPACKET/STREAM channels take part in the
		 * connect handshake. */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {

		if (sk->sk_state == BT_CONNECT) {
			if (l2cap_check_security(sk)) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			/* Response is phrased from the remote's view:
			 * its scid is our dcid and vice versa. */
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			if (l2cap_check_security(sk)) {
				if (bt_sk(sk)->defer_setup) {
					/* Userspace decides: report pending
					 * authorization and wake listener. */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					parent->sk_data_ready(parent, 0);
				/* (else: accept immediately) */
				sk->sk_state = BT_CONFIG;
				rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
				rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				/* (else: authentication still pending) */
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	read_unlock(&l->lock);
/* ACL link is up: mark connectionless/raw channels connected right
 * away and start the handshake for connection-oriented ones.
 * (Socket lock/unlock and the l2cap_do_start() call under the
 * BT_CONNECT branch are elided in this view.) */
static void l2cap_conn_ready(struct l2cap_conn *conn)
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)

	read_unlock(&l->lock);
/* Notify sockets that we cannot guaranty reliability anymore */
/* Walk all channels and raise @err on those marked force_reliable
 * (the sk_err assignment/report lines are elided in this view). */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->force_reliable)

	read_unlock(&l->lock);
548 static void l2cap_info_timeout(unsigned long arg)
550 struct l2cap_conn *conn = (void *) arg;
552 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
553 conn->info_ident = 0;
555 l2cap_conn_start(conn);
/* Get-or-create the L2CAP connection object attached to @hcon.
 * (The early return when l2cap_data already exists, the kzalloc NULL
 * check and the final return are elided in this view.) */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
	struct l2cap_conn *conn = hcon->l2cap_data;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);

	hcon->l2cap_data = conn;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* 0x13 = "remote user terminated" default disconnect reason. */
	conn->disc_reason = 0x13;
/* Tear down the L2CAP connection on @hcon: delete every remaining
 * channel with @err, stop the info timer and free the object.  (NULL
 * guard, socket lock/kill calls and the kfree are elided in this
 * view.) */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame. */
	kfree_skb(conn->rx_skb);

	/* Kill channels one by one off the list head. */
	while ((sk = conn->chan_list.head)) {
		l2cap_chan_del(sk, err);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
618 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
620 struct l2cap_chan_list *l = &conn->chan_list;
621 write_lock_bh(&l->lock);
622 __l2cap_chan_add(conn, sk, parent);
623 write_unlock_bh(&l->lock);
626 /* ---- Socket interface ---- */
/* Exact-match lookup in the global socket list by bound source PSM
 * and source address.  Caller holds l2cap_sk_list.lock.  (Return
 * statements elided in this view.) */
static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
	struct hlist_node *node;
	sk_for_each(sk, node, &l2cap_sk_list.head)
		if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
/* Find socket with psm and source bdaddr.
 * Returns closest match.
/* Prefers an exact source-address match; falls back to a socket
 * bound to BDADDR_ANY (tracked in sk1).  Caller holds the list lock.
 * (break/continue lines are elided in this view.) */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match */
			if (!bacmp(&bt_sk(sk)->src, src))

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))

	/* node != NULL means the loop broke on an exact match. */
	return node ? sk : sk1;
/* Find socket with given address (psm, src).
 * Returns locked socket */
static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
	/* Lookup under the list read lock; the bh_lock_sock() on the
	 * result is elided in this view. */
	read_lock(&l2cap_sk_list.lock);
	s = __l2cap_get_sock_by_psm(state, psm, src);
	read_unlock(&l2cap_sk_list.lock);
/* Socket destructor: drop any skbs still sitting on the receive and
 * write queues before the socket memory is released. */
static void l2cap_sock_destruct(struct sock *sk)
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the listener itself closed and zapped. */
static void l2cap_sock_cleanup_listen(struct sock *parent)
	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
static void l2cap_sock_kill(struct sock *sk)
	/* Bail out unless the socket is both zapped and orphaned
	 * (the early return is elided in this view). */
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close: listeners reap their accept queue; connected
 * channels send DISCONN_REQ and wait; half-open incoming channels are
 * refused with a CONN_RSP; everything else is deleted outright.
 * Caller holds the socket lock.  NOTE(review): the case labels,
 * else branches and break statements of this switch are elided in
 * this view. */
static void __l2cap_sock_close(struct sock *sk, int reason)
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
		/* (BT_LISTEN) */
		l2cap_sock_cleanup_listen(sk);

		/* (BT_CONNECTED / BT_CONFIG) */
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			/* Orderly shutdown: request disconnect and arm
			 * the guard timer for the response. */
			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk);
			l2cap_chan_del(sk, reason);

		/* (BT_CONNECT2) refuse the pending incoming channel */
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;

			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
				result = L2CAP_CR_BAD_PSM;

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

			l2cap_chan_del(sk, reason);

		/* (BT_CONNECT / BT_DISCONN) */
		l2cap_chan_del(sk, reason);

		/* (default) */
		sock_set_flag(sk, SOCK_ZAPPED);
/* Must be called on unlocked socket. */
/* Public close: stop the guard timer, close under the socket lock
 * with ECONNRESET, then reap the socket if it became a zapped orphan.
 * (The lock/unlock and l2cap_sock_kill() calls are elided here.) */
static void l2cap_sock_close(struct sock *sk)
	l2cap_sock_clear_timer(sk);
	__l2cap_sock_close(sk, ECONNRESET);
/* Initialise a fresh L2CAP socket: inherit channel parameters from a
 * listening @parent, or apply module defaults when there is none.
 * (The if/else braces around the two branches are elided in this
 * view.) */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	/* (parent != NULL: inherit everything from the listener) */
	sk->sk_type = parent->sk_type;
	bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

	pi->imtu = l2cap_pi(parent)->imtu;
	pi->omtu = l2cap_pi(parent)->omtu;
	pi->mode = l2cap_pi(parent)->mode;
	pi->fcs = l2cap_pi(parent)->fcs;
	pi->max_tx = l2cap_pi(parent)->max_tx;
	pi->tx_win = l2cap_pi(parent)->tx_win;
	pi->sec_level = l2cap_pi(parent)->sec_level;
	pi->role_switch = l2cap_pi(parent)->role_switch;
	pi->force_reliable = l2cap_pi(parent)->force_reliable;

	/* (else: module defaults) */
	pi->imtu = L2CAP_DEFAULT_MTU;

	/* Stream sockets default to ERTM when the module allows it. */
	if (enable_ertm && sk->sk_type == SOCK_STREAM)
		pi->mode = L2CAP_MODE_ERTM;
		pi->mode = L2CAP_MODE_BASIC;

	pi->max_tx = max_transmit;
	pi->fcs = L2CAP_FCS_CRC16;
	pi->tx_win = tx_window;
	pi->sec_level = BT_SECURITY_LOW;
	pi->force_reliable = 0;

	/* Default config options */
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
	INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor registered with the socket layer; obj_size
 * makes sk_alloc() reserve room for struct l2cap_pinfo.  (The .name
 * initializer line is elided in this view.) */
static struct proto l2cap_proto = {
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialise an L2CAP struct sock and link it
 * into the global socket list.  (The sk NULL-check and the final
 * return are elided in this view.) */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate the socket type, require CAP_NET_RAW
 * for raw sockets, then allocate and initialise the sock.  (The
 * -EPERM/-ENOMEM returns and the final "return 0" are elided in this
 * view.) */
static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	/* Raw signalling access is privileged. */
	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);

	l2cap_sock_init(sk, NULL);
/* bind(2): copy in the (possibly short) sockaddr_l2, reject rebinds
 * and unprivileged reserved PSMs, then record source address and PSM
 * under the socket-list lock.  (The lock_sock/release_sock calls,
 * -EADDRINUSE path and error returns are elided in this view.) */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;

	if (!addr || addr->sa_family != AF_BLUETOOTH)

	/* Accept short sockaddrs: copy only what the caller passed. */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (sk->sk_state != BT_OPEN) {

	/* PSMs below 0x1001 are reserved for well-known services. */
	if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
				!capable(CAP_NET_BIND_SERVICE)) {

	write_lock_bh(&l2cap_sk_list.lock);

	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {

	/* Save source address */
	bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;
	l2cap_pi(sk)->sport = la.l2_psm;
	sk->sk_state = BT_BOUND;

	/* SDP (0x0001) and RFCOMM (0x0003) are exempt from pairing. */
	if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
				__le16_to_cpu(la.l2_psm) == 0x0003)
		l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;

	write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve an HCI route to the destination, pick an authentication
 * type from socket type/PSM/security level, create (or reuse) the ACL
 * link and attach this channel to it.  NOTE(review): error paths,
 * else/break/default lines and the final return are elided in this
 * view. */
static int l2cap_do_connect(struct sock *sk)
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),

	hdev = hci_get_route(dst, src);
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	if (sk->sk_type == SOCK_RAW) {
		/* Raw sockets are used for dedicated bonding. */
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_DEDICATED_BONDING;
			auth_type = HCI_AT_NO_BONDING;
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		/* SDP never bonds; "low" is tracked as the SDP level. */
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;

		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			auth_type = HCI_AT_NO_BONDING;

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);

	conn = l2cap_conn_add(hcon, 0);

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		/* Link already up: raw/dgram channels are done now;
		 * the do_start path for others is elided here. */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;

	hci_dev_unlock_bh(hdev);
/* connect(2): validate address and PSM/mode combination, record the
 * destination, start the connect and (unless non-blocking) wait for
 * BT_CONNECTED.  (lock_sock/release_sock, -EINVAL returns and the
 * state-switch case labels are elided in this view.) */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;

	BT_DBG("sk %p", sk);

	if (!addr || alen < sizeof(addr->sa_family) ||
			addr->sa_family != AF_BLUETOOTH)

	/* Accept short sockaddrs. */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	/* Connection-oriented sockets need a PSM (check elided). */
	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:

	switch (sk->sk_state) {
		/* Already connecting */
		/* Already connected */

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);

	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET/STREAM sockets in a supported mode
 * may listen; sockets without a PSM get one auto-assigned from the
 * odd dynamic range 0x1001-0x10ff.  (lock/release, error returns and
 * loop break are elided in this view.) */
static int l2cap_sock_listen(struct socket *sock, int backlog)
	struct sock *sk = sock->sk;

	BT_DBG("sk %p backlog %d", sk, backlog);

	if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
			|| sk->sk_state != BT_BOUND) {

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;

		write_lock_bh(&l2cap_sk_list.lock);

		/* Valid dynamic PSMs are odd (LSB of high byte clear). */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);

		write_unlock_bh(&l2cap_sk_list.lock);

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;
/* accept(2): block (wake-one) on the accept queue until a child
 * socket arrives, the timeout expires, a signal hits, or the listener
 * leaves BT_LISTEN.  (The -EBADFD/-EAGAIN exits, release_sock around
 * schedule_timeout and the final release are elided in this view.) */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Socket lock is dropped around the sleep (elided). */
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != BT_LISTEN) {

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);
/* getsockname(2)/getpeername(2): fill a sockaddr_l2 with either the
 * peer's (psm, dst, dcid) or the local (sport, src, scid) triple.
 * (The if/else around the peer branch is elided in this view.) */
static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;

	BT_DBG("sock %p, sk %p", sock, sk);

	addr->sa_family = AF_BLUETOOTH;
	*len = sizeof(struct sockaddr_l2);

	/* (peer != 0) */
	la->l2_psm = l2cap_pi(sk)->psm;
	bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
	la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);

	/* (else: local end) */
	la->l2_psm = l2cap_pi(sk)->sport;
	bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
	la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* ERTM monitor timer: the peer keeps failing to answer our poll.
 * Give up and disconnect after remote_max_tx retries, otherwise poll
 * again.  (Socket lock/unlock and the early return are elided in this
 * view.) */
static void l2cap_monitor_timeout(unsigned long arg)
	struct sock *sk = (void *) arg;

	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	/* Re-poll the peer with a P-bit supervisory frame. */
	control = L2CAP_CTRL_POLL;
	l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* ERTM retransmission timer: no acknowledgement arrived in time.
 * Enter the WAIT_F state, start the monitor timer and poll the peer.
 * (Socket lock/unlock lines are elided in this view.) */
static void l2cap_retrans_timeout(unsigned long arg)
	struct sock *sk = (void *) arg;

	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	/* Block new I-frame transmission until the F-bit comes back. */
	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	control = L2CAP_CTRL_POLL;
	l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* Pop acknowledged I-frames off the head of the transmit queue until
 * the expected-ack sequence number is reached or no unacked frames
 * remain; stop the retransmission timer when everything is acked.
 * (The break and kfree_skb lines are elided in this view.) */
static void l2cap_drop_acked_frames(struct sock *sk)
	struct sk_buff *skb;

	while ((skb = skb_peek(TX_QUEUE(sk))) &&
			l2cap_pi(sk)->unacked_frames) {
		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)

		skb = skb_dequeue(TX_QUEUE(sk));

		l2cap_pi(sk)->unacked_frames--;

	if (!l2cap_pi(sk)->unacked_frames)
		del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand one frame to the ACL transport.  NOTE(review): the error
 * handling after hci_send_acl() and the return are elided in this
 * view. */
static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);

	err = hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode transmit: send every queued frame exactly once (no
 * retransmission window).  Each clone gets the next TxSeq patched
 * into its control field and, when enabled, a CRC16 FCS; originals
 * are dequeued and freed after sending.  (The kfree_skb, error
 * return and final return are elided in this view.) */
static int l2cap_streaming_send(struct sock *sk)
	struct sk_buff *skb, *tx_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	while ((skb = sk->sk_send_head)) {
		tx_skb = skb_clone(skb, GFP_ATOMIC);

		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

		if (pi->fcs == L2CAP_FCS_CRC16) {
			fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
			put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);

		err = l2cap_do_send(sk, tx_skb);
			l2cap_send_disconn_req(pi->conn, sk);

		/* TxSeq is modulo-64 per the ERTM control field. */
		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;

		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			sk->sk_send_head = NULL;
			sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);

		skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the queued I-frame whose TxSeq equals @tx_seq: walk the
 * transmit queue to find it, give up (disconnect) once remote_max_tx
 * retries are exhausted, otherwise clone it with refreshed ReqSeq and
 * FCS and resend.  (Loop braces, error returns and the final return
 * are elided in this view.) */
static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb, *tx_skb;

	skb = skb_peek(TX_QUEUE(sk));
	/* Advance until the frame with the requested TxSeq is found. */
	if (bt_cb(skb)->tx_seq != tx_seq) {
		if (skb_queue_is_last(TX_QUEUE(sk), skb))
		skb = skb_queue_next(TX_QUEUE(sk), skb);

	if (pi->remote_max_tx &&
			bt_cb(skb)->retries == pi->remote_max_tx) {
		l2cap_send_disconn_req(pi->conn, sk);

	tx_skb = skb_clone(skb, GFP_ATOMIC);
	bt_cb(skb)->retries++;
	/* Refresh ReqSeq/TxSeq in the clone's control field. */
	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
	control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

	if (pi->fcs == L2CAP_FCS_CRC16) {
		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);

	err = l2cap_do_send(sk, tx_skb);
		l2cap_send_disconn_req(pi->conn, sk);
1392 static int l2cap_ertm_send(struct sock *sk)
1394 struct sk_buff *skb, *tx_skb;
1395 struct l2cap_pinfo *pi = l2cap_pi(sk);
1399 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1402 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1403 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1405 if (pi->remote_max_tx &&
1406 bt_cb(skb)->retries == pi->remote_max_tx) {
1407 l2cap_send_disconn_req(pi->conn, sk);
1411 tx_skb = skb_clone(skb, GFP_ATOMIC);
1413 bt_cb(skb)->retries++;
1415 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1416 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1417 control |= L2CAP_CTRL_FINAL;
1418 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1420 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1421 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1422 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1425 if (pi->fcs == L2CAP_FCS_CRC16) {
1426 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1427 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1430 err = l2cap_do_send(sk, tx_skb);
1432 l2cap_send_disconn_req(pi->conn, sk);
1435 __mod_retrans_timer();
1437 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1438 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1440 pi->unacked_frames++;
1443 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1444 sk->sk_send_head = NULL;
1446 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Acknowledge received I-frames: send RNR while locally busy; try to
 * piggy-back the ack on pending I-frames first and fall back to an
 * explicit RR S-frame.  (The `u16 control = 0;` declaration and final
 * return are elided in this view.) */
static int l2cap_send_ack(struct l2cap_pinfo *pi)
	struct sock *sk = (struct sock *)pi;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		return l2cap_send_sframe(pi, control);
	} else if (l2cap_ertm_send(sk) == 0) {
		/* No I-frame carried the ack -- send an explicit RR. */
		control |= L2CAP_SUPER_RCV_READY;
		return l2cap_send_sframe(pi, control);
/* Send a final SREJ for the last entry on the SREJ list, telling the
 * peer which missing TxSeq we are still waiting for.  (The control
 * declaration and final return are elided in this view.) */
static int l2cap_send_srejtail(struct sock *sk)
	struct srej_list *tail;

	control = L2CAP_SUPER_SELECT_REJECT;
	control |= L2CAP_CTRL_FINAL;

	/* ReqSeq comes from the tail (most recent) SREJ entry. */
	tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy @len bytes of user iovec data into @skb: the first @count
 * bytes inline, the remainder as MTU-sized fragments chained on
 * frag_list.  (-EFAULT returns, the while(len) loop header, length
 * bookkeeping and the final return are elided in this view.) */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	count = min_t(unsigned int, conn->mtu, len);

	*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);

	if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))

	frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus a
 * 2-byte PSM field, then the user payload via the iovec helper.
 * (Allocation NULL-check and the final "return skb" are elided in
 * this view.) */
static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);
/* Build a basic-mode B-frame PDU: plain L2CAP header followed by the
 * user payload via the iovec helper.  (Allocation NULL-check, the
 * kfree_skb on error and the final "return skb" are elided in this
 * view.) */
static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);
/* Build an ERTM/streaming I-frame: L2CAP header, 16-bit control field,
 * optional 16-bit SDU length (only for SAR "start" frames, when sdulen is
 * nonzero), payload, and a 2-byte FCS placeholder when CRC16 is enabled
 * (the real FCS is computed at transmit time).  Returns the skb or an
 * ERR_PTR. */
1576 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1578 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1579 struct sk_buff *skb;
1580 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1581 struct l2cap_hdr *lh;
1583 BT_DBG("sk %p len %d", sk, (int)len);
1586 return ERR_PTR(-ENOTCONN);
/* FCS adds 2 trailer bytes to the header budget. */
1591 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1594 count = min_t(unsigned int, (conn->mtu - hlen), len);
1595 skb = bt_skb_send_alloc(sk, count + hlen,
1596 msg->msg_flags & MSG_DONTWAIT, &err);
1598 return ERR_PTR(-ENOMEM);
1600 /* Create L2CAP header */
1601 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1602 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1603 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1604 put_unaligned_le16(control, skb_put(skb, 2));
1606 put_unaligned_le16(sdulen, skb_put(skb, 2));
1608 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1609 if (unlikely(err < 0)) {
1611 return ERR_PTR(err);
/* Reserve space for the FCS; filled in just before transmission. */
1614 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1615 put_unaligned_le16(0, skb_put(skb, 2));
1617 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a chain of I-frames:
 * one SDU_START carrying the total length, then SDU_CONTINUE frames, and
 * a final SDU_END.  Frames are built on a local queue and spliced onto
 * the socket TX queue only if every segment allocates successfully. */
1621 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1623 struct l2cap_pinfo *pi = l2cap_pi(sk);
1624 struct sk_buff *skb;
1625 struct sk_buff_head sar_queue;
1629 __skb_queue_head_init(&sar_queue);
1630 control = L2CAP_SDU_START;
/* Start frame carries the full SDU length in its SAR header. */
1631 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1633 return PTR_ERR(skb);
1635 __skb_queue_tail(&sar_queue, skb);
1636 len -= pi->remote_mps;
1637 size += pi->remote_mps;
1642 if (len > pi->remote_mps) {
1643 control = L2CAP_SDU_CONTINUE;
1644 buflen = pi->remote_mps;
1646 control = L2CAP_SDU_END;
1650 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* On failure drop everything built so far; nothing was queued yet. */
1652 skb_queue_purge(&sar_queue);
1653 return PTR_ERR(skb);
1656 __skb_queue_tail(&sar_queue, skb);
1660 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1661 if (sk->sk_send_head == NULL)
1662 sk->sk_send_head = sar_queue.next;
/* sendmsg() entry point for L2CAP sockets.  Dispatches on socket type and
 * channel mode: SOCK_DGRAM -> connectionless PDU; basic mode -> single
 * B-frame (bounded by outgoing MTU); ERTM/streaming -> one I-frame or a
 * SAR-segmented chain, then kicks the respective transmit engine. */
1667 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1669 struct sock *sk = sock->sk;
1670 struct l2cap_pinfo *pi = l2cap_pi(sk);
1671 struct sk_buff *skb;
1675 BT_DBG("sock %p, sk %p", sock, sk);
1677 err = sock_error(sk);
/* L2CAP has no out-of-band data. */
1681 if (msg->msg_flags & MSG_OOB)
1686 if (sk->sk_state != BT_CONNECTED) {
1691 /* Connectionless channel */
1692 if (sk->sk_type == SOCK_DGRAM) {
1693 skb = l2cap_create_connless_pdu(sk, msg, len);
1697 err = l2cap_do_send(sk, skb);
1702 case L2CAP_MODE_BASIC:
1703 /* Check outgoing MTU */
1704 if (len > pi->omtu) {
1709 /* Create a basic PDU */
1710 skb = l2cap_create_basic_pdu(sk, msg, len);
1716 err = l2cap_do_send(sk, skb);
1721 case L2CAP_MODE_ERTM:
1722 case L2CAP_MODE_STREAMING:
1723 /* Entire SDU fits into one PDU */
1724 if (len <= pi->remote_mps) {
1725 control = L2CAP_SDU_UNSEGMENTED;
1726 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1731 __skb_queue_tail(TX_QUEUE(sk), skb);
1732 if (sk->sk_send_head == NULL)
1733 sk->sk_send_head = skb;
1735 /* Segment SDU into multiples PDUs */
1736 err = l2cap_sar_segment_sdu(sk, msg, len);
1741 if (pi->mode == L2CAP_MODE_STREAMING)
1742 err = l2cap_streaming_send(sk);
1744 err = l2cap_ertm_send(sk);
1751 BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg() entry point.  For a deferred-setup channel still in
 * BT_CONNECT2, the first read acts as the "accept" trigger: send the
 * pending success connect response and move to BT_CONFIG.  Everything
 * else is delegated to the common bt_sock_recvmsg(). */
1760 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1762 struct sock *sk = sock->sk;
1766 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1767 struct l2cap_conn_rsp rsp;
1769 sk->sk_state = BT_CONFIG;
/* In the response, our scid is the peer's dcid and vice versa. */
1771 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1772 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1773 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1774 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1775 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1776 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1784 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS, L2CAP_LM).
 * Current values pre-fill 'opts' so a short copy_from_user leaves the
 * untouched fields at their existing settings. */
1787 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1789 struct sock *sk = sock->sk;
1790 struct l2cap_options opts;
1794 BT_DBG("sk %p", sk);
1800 opts.imtu = l2cap_pi(sk)->imtu;
1801 opts.omtu = l2cap_pi(sk)->omtu;
1802 opts.flush_to = l2cap_pi(sk)->flush_to;
1803 opts.mode = l2cap_pi(sk)->mode;
1804 opts.fcs = l2cap_pi(sk)->fcs;
1805 opts.max_tx = l2cap_pi(sk)->max_tx;
1806 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1808 len = min_t(unsigned int, sizeof(opts), optlen);
1809 if (copy_from_user((char *) &opts, optval, len)) {
/* Only the known channel modes are accepted. */
1814 l2cap_pi(sk)->mode = opts.mode;
1815 switch (l2cap_pi(sk)->mode) {
1816 case L2CAP_MODE_BASIC:
1818 case L2CAP_MODE_ERTM:
1819 case L2CAP_MODE_STREAMING:
1828 l2cap_pi(sk)->imtu = opts.imtu;
1829 l2cap_pi(sk)->omtu = opts.omtu;
1830 l2cap_pi(sk)->fcs = opts.fcs;
1831 l2cap_pi(sk)->max_tx = opts.max_tx;
1832 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1836 if (get_user(opt, (u32 __user *) optval)) {
/* Link-mode flags map onto the newer sec_level scheme; the highest
 * requested level wins. */
1841 if (opt & L2CAP_LM_AUTH)
1842 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1843 if (opt & L2CAP_LM_ENCRYPT)
1844 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1845 if (opt & L2CAP_LM_SECURE)
1846 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1848 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1849 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* setsockopt() dispatcher: SOL_L2CAP goes to the legacy handler above;
 * SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP here. */
1861 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1863 struct sock *sk = sock->sk;
1864 struct bt_security sec;
1868 BT_DBG("sk %p", sk);
1870 if (level == SOL_L2CAP)
1871 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1873 if (level != SOL_BLUETOOTH)
1874 return -ENOPROTOOPT;
/* BT_SECURITY applies only to connection-oriented and raw sockets. */
1880 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
1881 && sk->sk_type != SOCK_RAW) {
1886 sec.level = BT_SECURITY_LOW;
1888 len = min_t(unsigned int, sizeof(sec), optlen);
1889 if (copy_from_user((char *) &sec, optval, len)) {
1894 if (sec.level < BT_SECURITY_LOW ||
1895 sec.level > BT_SECURITY_HIGH) {
1900 l2cap_pi(sk)->sec_level = sec.level;
1903 case BT_DEFER_SETUP:
/* Defer-setup can only be toggled before the channel is active. */
1904 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1909 if (get_user(opt, (u32 __user *) optval)) {
1914 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt handler (L2CAP_OPTIONS, L2CAP_LM,
 * L2CAP_CONNINFO).  Copies at most the user-supplied length. */
1926 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1928 struct sock *sk = sock->sk;
1929 struct l2cap_options opts;
1930 struct l2cap_conninfo cinfo;
1934 BT_DBG("sk %p", sk);
1936 if (get_user(len, optlen))
1943 opts.imtu = l2cap_pi(sk)->imtu;
1944 opts.omtu = l2cap_pi(sk)->omtu;
1945 opts.flush_to = l2cap_pi(sk)->flush_to;
1946 opts.mode = l2cap_pi(sk)->mode;
1947 opts.fcs = l2cap_pi(sk)->fcs;
1948 opts.max_tx = l2cap_pi(sk)->max_tx;
1949 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1951 len = min_t(unsigned int, len, sizeof(opts));
1952 if (copy_to_user(optval, (char *) &opts, len))
/* Reconstruct legacy link-mode bits from the stored sec_level. */
1958 switch (l2cap_pi(sk)->sec_level) {
1959 case BT_SECURITY_LOW:
1960 opt = L2CAP_LM_AUTH;
1962 case BT_SECURITY_MEDIUM:
1963 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1965 case BT_SECURITY_HIGH:
1966 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1974 if (l2cap_pi(sk)->role_switch)
1975 opt |= L2CAP_LM_MASTER;
1977 if (l2cap_pi(sk)->force_reliable)
1978 opt |= L2CAP_LM_RELIABLE;
1980 if (put_user(opt, (u32 __user *) optval))
1984 case L2CAP_CONNINFO:
/* Connection info is valid once connected, or in BT_CONNECT2 with
 * defer_setup (an accepted-but-unread incoming channel). */
1985 if (sk->sk_state != BT_CONNECTED &&
1986 !(sk->sk_state == BT_CONNECT2 &&
1987 bt_sk(sk)->defer_setup)) {
1992 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1993 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1995 len = min_t(unsigned int, len, sizeof(cinfo));
1996 if (copy_to_user(optval, (char *) &cinfo, len))
/* getsockopt() dispatcher: SOL_L2CAP goes to the legacy handler;
 * SOL_BLUETOOTH returns BT_SECURITY and BT_DEFER_SETUP. */
2010 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2012 struct sock *sk = sock->sk;
2013 struct bt_security sec;
2016 BT_DBG("sk %p", sk);
2018 if (level == SOL_L2CAP)
2019 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2021 if (level != SOL_BLUETOOTH)
2022 return -ENOPROTOOPT;
2024 if (get_user(len, optlen))
2031 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2032 && sk->sk_type != SOCK_RAW) {
2037 sec.level = l2cap_pi(sk)->sec_level;
2039 len = min_t(unsigned int, len, sizeof(sec));
2040 if (copy_to_user(optval, (char *) &sec, len))
2045 case BT_DEFER_SETUP:
2046 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2051 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown() handler: close the channel (idempotent — only acts if not
 * already shut down) and, with SO_LINGER set, wait up to sk_lingertime
 * for the socket to reach BT_CLOSED. */
2065 static int l2cap_sock_shutdown(struct socket *sock, int how)
2067 struct sock *sk = sock->sk;
2070 BT_DBG("sock %p, sk %p", sock, sk);
2076 if (!sk->sk_shutdown) {
2077 sk->sk_shutdown = SHUTDOWN_MASK;
2078 l2cap_sock_clear_timer(sk);
2079 __l2cap_sock_close(sk, 0);
2081 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2082 err = bt_sock_wait_state(sk, BT_CLOSED,
/* release() handler: full shutdown (both directions) followed by killing
 * the socket.  Returns the shutdown result. */
2089 static int l2cap_sock_release(struct socket *sock)
2091 struct sock *sk = sock->sk;
2094 BT_DBG("sock %p, sk %p", sock, sk);
2099 err = l2cap_sock_shutdown(sock, 2);
2102 l2cap_sock_kill(sk);
/* Mark a channel fully configured: clear config state and the setup
 * timer, then wake whichever side is waiting — the connecting socket
 * (no parent) or the listening parent's accept queue. */
2106 static void l2cap_chan_ready(struct sock *sk)
2108 struct sock *parent = bt_sk(sk)->parent;
2110 BT_DBG("sk %p, parent %p", sk, parent);
2112 l2cap_pi(sk)->conf_state = 0;
2113 l2cap_sock_clear_timer(sk);
2116 /* Outgoing channel.
2117 * Wake up socket sleeping on connect.
2119 sk->sk_state = BT_CONNECTED;
2120 sk->sk_state_change(sk);
2122 /* Incoming channel.
2123 * Wake up socket sleeping on accept.
2125 parent->sk_data_ready(parent, 0);
2129 /* Copy frame to all raw sockets on that connection */
/* Clones the skb (GFP_ATOMIC) for every SOCK_RAW socket on the channel
 * list; used for signalling-channel snooping.  Runs under the list's
 * read lock. */
2130 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2132 struct l2cap_chan_list *l = &conn->chan_list;
2133 struct sk_buff *nskb;
2136 BT_DBG("conn %p", conn);
2138 read_lock(&l->lock);
2139 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2140 if (sk->sk_type != SOCK_RAW)
2143 /* Don't send frame to the socket it came from */
2146 nskb = skb_clone(skb, GFP_ATOMIC);
/* If the receive queue is full the clone is dropped. */
2150 if (sock_queue_rcv_skb(sk, nskb))
2153 read_unlock(&l->lock);
2156 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling-channel command skb: L2CAP header with
 * CID_SIGNALING, command header (code/ident/len), then 'dlen' bytes of
 * payload.  Payload beyond the connection MTU is chained as fragments on
 * frag_list.  Returns the skb or NULL on allocation failure. */
2157 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2158 u8 code, u8 ident, u16 dlen, void *data)
2160 struct sk_buff *skb, **frag;
2161 struct l2cap_cmd_hdr *cmd;
2162 struct l2cap_hdr *lh;
2165 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2166 conn, code, ident, dlen);
2168 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2169 count = min_t(unsigned int, conn->mtu, len);
2171 skb = bt_skb_alloc(count, GFP_ATOMIC);
2175 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2176 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2177 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2179 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2182 cmd->len = cpu_to_le16(dlen);
/* First skb takes whatever payload fits after the two headers. */
2185 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2186 memcpy(skb_put(skb, count), data, count);
2192 /* Continuation fragments (no L2CAP header) */
2193 frag = &skb_shinfo(skb)->frag_list;
2195 count = min_t(unsigned int, conn->mtu, len);
2197 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2201 memcpy(skb_put(*frag, count), data, count);
2206 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: reports its type and payload
 * length, and returns the value converted to host order for 1/2/4-byte
 * options, or a pointer to the raw bytes for longer ones.  Returns the
 * total option size consumed (header + payload). */
2216 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2218 struct l2cap_conf_opt *opt = *ptr;
2221 len = L2CAP_CONF_OPT_SIZE + opt->len;
2229 *val = *((u8 *) opt->val);
2233 *val = __le16_to_cpu(*((__le16 *) opt->val));
2237 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Variable-length option: hand back a pointer, not a value. */
2241 *val = (unsigned long) opt->val;
2245 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr, converting 1/2/4-byte values
 * to little-endian on the wire and memcpy'ing longer payloads (val is
 * then a pointer).  Advances *ptr past the option. */
2249 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2251 struct l2cap_conf_opt *opt = *ptr;
2253 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2260 *((u8 *) opt->val) = val;
2264 *((__le16 *) opt->val) = cpu_to_le16(val);
2268 *((__le32 *) opt->val) = cpu_to_le32(val);
2272 memcpy(opt->val, (void *) val, len);
2276 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* ERTM ack timer callback: when the timer fires, send a pending
 * acknowledgement for frames received but not yet acked. */
2279 static void l2cap_ack_timeout(unsigned long arg)
2281 struct sock *sk = (void *) arg;
2284 l2cap_send_ack(l2cap_pi(sk));
/* Initialise ERTM per-channel state: zero the sequence/accounting
 * counters and set up the retransmission, monitor and ack timers plus
 * the SREJ queue.  Called when a channel enters ERTM mode. */
2288 static inline void l2cap_ertm_init(struct sock *sk)
2290 l2cap_pi(sk)->expected_ack_seq = 0;
2291 l2cap_pi(sk)->unacked_frames = 0;
2292 l2cap_pi(sk)->buffer_seq = 0;
2293 l2cap_pi(sk)->num_acked = 0;
2294 l2cap_pi(sk)->frames_sent = 0;
2296 setup_timer(&l2cap_pi(sk)->retrans_timer,
2297 l2cap_retrans_timeout, (unsigned long) sk);
2298 setup_timer(&l2cap_pi(sk)->monitor_timer,
2299 l2cap_monitor_timeout, (unsigned long) sk);
2300 setup_timer(&l2cap_pi(sk)->ack_timer,
2301 l2cap_ack_timeout, (unsigned long) sk);
2303 __skb_queue_head_init(SREJ_QUEUE(sk));
/* Return nonzero if 'mode' is supported by both the local feature mask
 * (optionally widened by enable_ertm) and the peer's feat_mask. */
2306 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2308 u32 local_feat_mask = l2cap_feat_mask;
2310 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2313 case L2CAP_MODE_ERTM:
2314 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2315 case L2CAP_MODE_STREAMING:
2316 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Pick a usable channel mode: keep the requested ERTM/streaming mode if
 * the remote supports it, otherwise fall back to basic mode. */
2322 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2325 case L2CAP_MODE_STREAMING:
2326 case L2CAP_MODE_ERTM:
2327 if (l2cap_mode_supported(mode, remote_feat_mask))
2331 return L2CAP_MODE_BASIC;
/* Build our outgoing configuration request into 'data': selects the
 * channel mode (first request only), then adds MTU and/or RFC options
 * and an FCS=none option when both sides allow dropping the CRC.
 * Returns the total request length (ptr - data). */
2335 static int l2cap_build_conf_req(struct sock *sk, void *data)
2337 struct l2cap_pinfo *pi = l2cap_pi(sk);
2338 struct l2cap_conf_req *req = data;
2339 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2340 void *ptr = req->data;
2342 BT_DBG("sk %p", sk);
/* Mode is only (re)negotiated on the very first config exchange. */
2344 if (pi->num_conf_req || pi->num_conf_rsp)
2348 case L2CAP_MODE_STREAMING:
2349 case L2CAP_MODE_ERTM:
2350 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2351 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2352 l2cap_send_disconn_req(pi->conn, sk);
2355 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2361 case L2CAP_MODE_BASIC:
/* Only send an MTU option when it differs from the default. */
2362 if (pi->imtu != L2CAP_DEFAULT_MTU)
2363 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2366 case L2CAP_MODE_ERTM:
2367 rfc.mode = L2CAP_MODE_ERTM;
2368 rfc.txwin_size = pi->tx_win;
2369 rfc.max_transmit = pi->max_tx;
/* Timeouts are left 0 in the request; the acceptor fills them. */
2370 rfc.retrans_timeout = 0;
2371 rfc.monitor_timeout = 0;
2372 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Cap MPS so one PDU (incl. control/FCS overhead) fits the HCI MTU. */
2373 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2374 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2376 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2377 sizeof(rfc), (unsigned long) &rfc);
2379 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2382 if (pi->fcs == L2CAP_FCS_NONE ||
2383 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2384 pi->fcs = L2CAP_FCS_NONE;
2385 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2389 case L2CAP_MODE_STREAMING:
2390 rfc.mode = L2CAP_MODE_STREAMING;
2392 rfc.max_transmit = 0;
2393 rfc.retrans_timeout = 0;
2394 rfc.monitor_timeout = 0;
2395 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2396 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2397 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2399 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2400 sizeof(rfc), (unsigned long) &rfc);
2402 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2405 if (pi->fcs == L2CAP_FCS_NONE ||
2406 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2407 pi->fcs = L2CAP_FCS_NONE;
2408 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2413 /* FIXME: Need actual value of the flush timeout */
2414 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2415 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2417 req->dcid = cpu_to_le16(pi->dcid);
2418 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated configuration request (pi->conf_req) and
 * build our response into 'data': walk the options, validate MTU and
 * mode, record the remote's ERTM/streaming parameters, and echo back
 * adjusted options.  Returns the response length, or -ECONNREFUSED when
 * the requested mode cannot be negotiated. */
2423 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2425 struct l2cap_pinfo *pi = l2cap_pi(sk);
2426 struct l2cap_conf_rsp *rsp = data;
2427 void *ptr = rsp->data;
2428 void *req = pi->conf_req;
2429 int len = pi->conf_len;
2430 int type, hint, olen;
2432 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2433 u16 mtu = L2CAP_DEFAULT_MTU;
2434 u16 result = L2CAP_CONF_SUCCESS;
2436 BT_DBG("sk %p", sk);
2438 while (len >= L2CAP_CONF_OPT_SIZE) {
2439 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; non-hint unknown options must be
 * rejected with CONF_UNKNOWN. */
2441 hint = type & L2CAP_CONF_HINT;
2442 type &= L2CAP_CONF_MASK;
2445 case L2CAP_CONF_MTU:
2449 case L2CAP_CONF_FLUSH_TO:
2453 case L2CAP_CONF_QOS:
2456 case L2CAP_CONF_RFC:
2457 if (olen == sizeof(rfc))
2458 memcpy(&rfc, (void *) val, olen);
2461 case L2CAP_CONF_FCS:
2462 if (val == L2CAP_FCS_NONE)
2463 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2471 result = L2CAP_CONF_UNKNOWN;
2472 *((u8 *) ptr++) = type;
/* Mode selection happens only on the first config exchange. */
2477 if (pi->num_conf_rsp || pi->num_conf_req)
2481 case L2CAP_MODE_STREAMING:
2482 case L2CAP_MODE_ERTM:
2483 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2484 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2485 return -ECONNREFUSED;
2488 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2493 if (pi->mode != rfc.mode) {
2494 result = L2CAP_CONF_UNACCEPT;
2495 rfc.mode = pi->mode;
/* Mode still disputed after one full round -> give up. */
2497 if (pi->num_conf_rsp == 1)
2498 return -ECONNREFUSED;
2500 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2501 sizeof(rfc), (unsigned long) &rfc);
2505 if (result == L2CAP_CONF_SUCCESS) {
2506 /* Configure output options and let the other side know
2507 * which ones we don't like. */
2509 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2510 result = L2CAP_CONF_UNACCEPT;
2513 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2515 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2518 case L2CAP_MODE_BASIC:
2519 pi->fcs = L2CAP_FCS_NONE;
2520 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2523 case L2CAP_MODE_ERTM:
2524 pi->remote_tx_win = rfc.txwin_size;
2525 pi->remote_max_tx = rfc.max_transmit;
/* NOTE(review): rfc.max_pdu_size is little-endian on the wire; comparing
 * it against a host-order value, and converting the host value with
 * le16_to_cpu() instead of cpu_to_le16(), looks wrong on big-endian
 * hosts.  Upstream later fixed this conversion direction — verify. */
2526 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2527 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2529 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* NOTE(review): these should be cpu_to_le16() — the values are being
 * written into the wire-format RFC option, not read from it. */
2531 rfc.retrans_timeout =
2532 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2533 rfc.monitor_timeout =
2534 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2536 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2538 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2539 sizeof(rfc), (unsigned long) &rfc);
2543 case L2CAP_MODE_STREAMING:
/* NOTE(review): same endianness concern as the ERTM branch above. */
2544 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2545 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2547 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2549 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2551 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2552 sizeof(rfc), (unsigned long) &rfc);
2557 result = L2CAP_CONF_UNACCEPT;
2559 memset(&rfc, 0, sizeof(rfc));
2560 rfc.mode = pi->mode;
2563 if (result == L2CAP_CONF_SUCCESS)
2564 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2566 rsp->scid = cpu_to_le16(pi->dcid);
2567 rsp->result = cpu_to_le16(result);
2568 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's configuration *response* and build a follow-up request
 * into 'data', adjusting any options the peer rejected (MTU, flush
 * timeout, RFC).  On success, latch the negotiated ERTM/streaming
 * parameters into the channel.  Returns the new request length or
 * -ECONNREFUSED on an irreconcilable mode. */
2573 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2575 struct l2cap_pinfo *pi = l2cap_pi(sk);
2576 struct l2cap_conf_req *req = data;
2577 void *ptr = req->data;
/* NOTE(review): rfc is not initialised; if the response carries no RFC
 * option, the reads at the bottom (txwin_size, timeouts, max_pdu_size)
 * use indeterminate stack data — verify against later upstream fixes. */
2580 struct l2cap_conf_rfc rfc;
2582 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2584 while (len >= L2CAP_CONF_OPT_SIZE) {
2585 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2588 case L2CAP_CONF_MTU:
2589 if (val < L2CAP_DEFAULT_MIN_MTU) {
2590 *result = L2CAP_CONF_UNACCEPT;
2591 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2594 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2597 case L2CAP_CONF_FLUSH_TO:
2599 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2603 case L2CAP_CONF_RFC:
2604 if (olen == sizeof(rfc))
2605 memcpy(&rfc, (void *)val, olen);
/* Once we committed to a mode (STATE2_DEVICE), the peer may not
 * change it in its response. */
2607 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2608 rfc.mode != pi->mode)
2609 return -ECONNREFUSED;
2611 pi->mode = rfc.mode;
2614 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2615 sizeof(rfc), (unsigned long) &rfc);
2620 if (*result == L2CAP_CONF_SUCCESS) {
2622 case L2CAP_MODE_ERTM:
2623 pi->remote_tx_win = rfc.txwin_size;
2624 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2625 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2626 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2628 case L2CAP_MODE_STREAMING:
2629 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2633 req->dcid = cpu_to_le16(pi->dcid);
2634 req->flags = cpu_to_le16(0x0000);
/* Build a minimal configuration response (scid/result/flags, no options)
 * into 'data'.  Returns its length. */
2639 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2641 struct l2cap_conf_rsp *rsp = data;
2642 void *ptr = rsp->data;
2644 BT_DBG("sk %p", sk);
2646 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2647 rsp->result = cpu_to_le16(result);
2648 rsp->flags = cpu_to_le16(flags);
/* Extract the RFC option from a successful configuration response and
 * latch the negotiated ERTM/streaming parameters into the channel.
 * No-op for basic-mode channels. */
2653 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2655 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* NOTE(review): as in l2cap_parse_conf_rsp(), rfc is uninitialised and is
 * read below even when the response contains no RFC option — verify. */
2658 struct l2cap_conf_rfc rfc;
2660 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2662 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2665 while (len >= L2CAP_CONF_OPT_SIZE) {
2666 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2669 case L2CAP_CONF_RFC:
2670 if (olen == sizeof(rfc))
2671 memcpy(&rfc, (void *)val, olen);
2678 case L2CAP_MODE_ERTM:
2679 pi->remote_tx_win = rfc.txwin_size;
2680 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2681 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2682 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2684 case L2CAP_MODE_STREAMING:
2685 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it answers our outstanding
 * information request (command-not-understood from an old stack), stop
 * the info timer, mark the feature exchange done and start pending
 * channels anyway. */
2689 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2691 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2693 if (rej->reason != 0x0000)
2696 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2697 cmd->ident == conn->info_ident) {
2698 del_timer(&conn->info_timer);
2700 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2701 conn->info_ident = 0;
2703 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listener for the PSM,
 * enforce link security (except for SDP), allocate and register a child
 * socket, then answer with success/pending/refused.  If the feature
 * exchange with this peer has not completed, reply "pending" and kick
 * off an information request first. */
2709 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2711 struct l2cap_chan_list *list = &conn->chan_list;
2712 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2713 struct l2cap_conn_rsp rsp;
2714 struct sock *sk, *parent;
2715 int result, status = L2CAP_CS_NO_INFO;
2717 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2718 __le16 psm = req->psm;
2720 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2722 /* Check if we have socket listening on psm */
2723 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2725 result = L2CAP_CR_BAD_PSM;
2729 /* Check if the ACL is secure enough (if not SDP) */
/* 0x0001 is the SDP PSM, which is exempt from security checks. */
2730 if (psm != cpu_to_le16(0x0001) &&
2731 !hci_conn_check_link_mode(conn->hcon)) {
2732 conn->disc_reason = 0x05;
2733 result = L2CAP_CR_SEC_BLOCK;
2737 result = L2CAP_CR_NO_MEM;
2739 /* Check for backlog size */
2740 if (sk_acceptq_is_full(parent)) {
2741 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2745 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2749 write_lock_bh(&list->lock);
2751 /* Check if we already have channel with that dcid */
2752 if (__l2cap_get_chan_by_dcid(list, scid)) {
2753 write_unlock_bh(&list->lock);
2754 sock_set_flag(sk, SOCK_ZAPPED);
2755 l2cap_sock_kill(sk);
2759 hci_conn_hold(conn->hcon);
2761 l2cap_sock_init(sk, parent);
2762 bacpy(&bt_sk(sk)->src, conn->src);
2763 bacpy(&bt_sk(sk)->dst, conn->dst);
2764 l2cap_pi(sk)->psm = psm;
/* Peer's source CID becomes our destination CID. */
2765 l2cap_pi(sk)->dcid = scid;
2767 __l2cap_chan_add(conn, sk, parent);
2768 dcid = l2cap_pi(sk)->scid;
2770 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2772 l2cap_pi(sk)->ident = cmd->ident;
2774 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2775 if (l2cap_check_security(sk)) {
2776 if (bt_sk(sk)->defer_setup) {
2777 sk->sk_state = BT_CONNECT2;
2778 result = L2CAP_CR_PEND;
2779 status = L2CAP_CS_AUTHOR_PEND;
2780 parent->sk_data_ready(parent, 0);
2782 sk->sk_state = BT_CONFIG;
2783 result = L2CAP_CR_SUCCESS;
2784 status = L2CAP_CS_NO_INFO;
2787 sk->sk_state = BT_CONNECT2;
2788 result = L2CAP_CR_PEND;
2789 status = L2CAP_CS_AUTHEN_PEND;
2792 sk->sk_state = BT_CONNECT2;
2793 result = L2CAP_CR_PEND;
2794 status = L2CAP_CS_NO_INFO;
2797 write_unlock_bh(&list->lock);
2800 bh_unlock_sock(parent);
2803 rsp.scid = cpu_to_le16(scid);
2804 rsp.dcid = cpu_to_le16(dcid);
2805 rsp.result = cpu_to_le16(result);
2806 rsp.status = cpu_to_le16(status);
2807 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending with "no info" means we still need the peer's feature mask:
 * issue an information request before configuration can proceed. */
2809 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2810 struct l2cap_info_req info;
2811 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2813 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2814 conn->info_ident = l2cap_get_ident(conn);
2816 mod_timer(&conn->info_timer, jiffies +
2817 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2819 l2cap_send_cmd(conn, conn->info_ident,
2820 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle a Connection Response to our connect request: on success move
 * to BT_CONFIG, record the peer's dcid and immediately send our first
 * configuration request; on "pending" just flag it; otherwise tear the
 * channel down with ECONNREFUSED. */
2826 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2828 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2829 u16 scid, dcid, result, status;
2833 scid = __le16_to_cpu(rsp->scid);
2834 dcid = __le16_to_cpu(rsp->dcid);
2835 result = __le16_to_cpu(rsp->result);
2836 status = __le16_to_cpu(rsp->status);
2838 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* scid 0 means the peer has no channel yet; look it up by the command
 * identifier instead. */
2841 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2845 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2851 case L2CAP_CR_SUCCESS:
2852 sk->sk_state = BT_CONFIG;
2853 l2cap_pi(sk)->ident = 0;
2854 l2cap_pi(sk)->dcid = dcid;
2855 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2857 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2859 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2860 l2cap_build_conf_req(sk, req), req);
2861 l2cap_pi(sk)->num_conf_req++;
2865 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2869 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle an incoming Configuration Request.  Requests may arrive in
 * pieces (continuation flag 0x0001): accumulate them in pi->conf_req
 * until complete, then parse, respond, and — once both directions are
 * configured — bring the channel up (initialising ERTM state if that
 * mode was negotiated). */
2877 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2879 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2885 dcid = __le16_to_cpu(req->dcid);
2886 flags = __le16_to_cpu(req->flags);
2888 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2890 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2894 if (sk->sk_state == BT_DISCONN)
2897 /* Reject if config buffer is too small. */
2898 len = cmd_len - sizeof(*req);
2899 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2900 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2901 l2cap_build_conf_rsp(sk, rsp,
2902 L2CAP_CONF_REJECT, flags), rsp);
2907 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2908 l2cap_pi(sk)->conf_len += len;
2910 if (flags & 0x0001) {
2911 /* Incomplete config. Send empty response. */
2912 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2913 l2cap_build_conf_rsp(sk, rsp,
2914 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2918 /* Complete config. */
2919 len = l2cap_parse_conf_req(sk, rsp);
2921 l2cap_send_disconn_req(conn, sk);
2925 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2926 l2cap_pi(sk)->num_conf_rsp++;
2928 /* Reset config buffer. */
2929 l2cap_pi(sk)->conf_len = 0;
2931 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: finalise FCS choice and go connected. */
2934 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2935 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2936 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2937 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2939 sk->sk_state = BT_CONNECTED;
2941 l2cap_pi(sk)->next_tx_seq = 0;
2942 l2cap_pi(sk)->expected_tx_seq = 0;
2943 __skb_queue_head_init(TX_QUEUE(sk));
2944 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2945 l2cap_ertm_init(sk);
2947 l2cap_chan_ready(sk);
/* If we haven't sent our own config request yet, do it now. */
2951 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2953 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2954 l2cap_build_conf_req(sk, buf), buf);
2955 l2cap_pi(sk)->num_conf_req++;
/* Handle a Configuration Response to our request: on success latch the
 * RFC parameters; on "unacceptable parameters" re-negotiate (up to
 * L2CAP_CONF_MAX_CONF_RSP rounds); otherwise disconnect.  When both
 * directions are done, bring the channel up. */
2963 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2965 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2966 u16 scid, flags, result;
/* NOTE(review): cmd->len is a __le16 wire field; using it here without
 * le16_to_cpu() gives a wrong option length on big-endian hosts —
 * confirm against how l2cap_sig_channel converts cmd.len. */
2968 int len = cmd->len - sizeof(*rsp);
2970 scid = __le16_to_cpu(rsp->scid);
2971 flags = __le16_to_cpu(rsp->flags);
2972 result = __le16_to_cpu(rsp->result);
2974 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2975 scid, flags, result);
2977 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2982 case L2CAP_CONF_SUCCESS:
2983 l2cap_conf_rfc_get(sk, rsp->data, len);
2986 case L2CAP_CONF_UNACCEPT:
2987 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* The adjusted options must fit our request buffer. */
2990 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2991 l2cap_send_disconn_req(conn, sk);
2995 /* throw out any old stored conf requests */
2996 result = L2CAP_CONF_SUCCESS;
2997 len = l2cap_parse_conf_rsp(sk, rsp->data,
3000 l2cap_send_disconn_req(conn, sk);
3004 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3005 L2CAP_CONF_REQ, len, req);
3006 l2cap_pi(sk)->num_conf_req++;
3007 if (result != L2CAP_CONF_SUCCESS)
3013 sk->sk_state = BT_DISCONN;
3014 sk->sk_err = ECONNRESET;
3015 l2cap_sock_set_timer(sk, HZ * 5);
3016 l2cap_send_disconn_req(conn, sk);
3023 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3025 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3026 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3027 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3028 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3030 sk->sk_state = BT_CONNECTED;
3031 l2cap_pi(sk)->next_tx_seq = 0;
3032 l2cap_pi(sk)->expected_tx_seq = 0;
3033 __skb_queue_head_init(TX_QUEUE(sk));
3034 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3035 l2cap_ertm_init(sk);
3037 l2cap_chan_ready(sk);
/* Handle an incoming Disconnection Request: acknowledge it, mark the
 * socket shut down, purge pending transmit (and ERTM timers/queues),
 * then remove and kill the channel. */
3045 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3047 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3048 struct l2cap_disconn_rsp rsp;
3052 scid = __le16_to_cpu(req->scid);
3053 dcid = __le16_to_cpu(req->dcid);
3055 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid — look the channel up by it. */
3057 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3061 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3062 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3063 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3065 sk->sk_shutdown = SHUTDOWN_MASK;
3067 skb_queue_purge(TX_QUEUE(sk));
3069 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3070 skb_queue_purge(SREJ_QUEUE(sk));
3071 del_timer(&l2cap_pi(sk)->retrans_timer);
3072 del_timer(&l2cap_pi(sk)->monitor_timer);
3073 del_timer(&l2cap_pi(sk)->ack_timer);
3076 l2cap_chan_del(sk, ECONNRESET);
3079 l2cap_sock_kill(sk);
/* Handle a Disconnection Response to our request: purge pending transmit
 * (and ERTM timers/queues), then remove and kill the channel with no
 * error (we initiated the disconnect). */
3083 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3085 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3089 scid = __le16_to_cpu(rsp->scid);
3090 dcid = __le16_to_cpu(rsp->dcid);
3092 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3094 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3098 skb_queue_purge(TX_QUEUE(sk));
3100 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3101 skb_queue_purge(SREJ_QUEUE(sk));
3102 del_timer(&l2cap_pi(sk)->retrans_timer);
3103 del_timer(&l2cap_pi(sk)->monitor_timer);
3104 del_timer(&l2cap_pi(sk)->ack_timer);
3107 l2cap_chan_del(sk, 0);
3110 l2cap_sock_kill(sk);
/* Handle an incoming Information Request: answer FEAT_MASK with our
 * feature mask (widened with ERTM/streaming when enabled), FIXED_CHAN
 * with the fixed-channel bitmap, anything else with NOTSUPP. */
3114 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3116 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3119 type = __le16_to_cpu(req->type);
3121 BT_DBG("type 0x%4.4x", type);
3123 if (type == L2CAP_IT_FEAT_MASK) {
3125 u32 feat_mask = l2cap_feat_mask;
3126 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3127 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3128 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3130 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3132 put_unaligned_le32(feat_mask, rsp->data);
3133 l2cap_send_cmd(conn, cmd->ident,
3134 L2CAP_INFO_RSP, sizeof(buf), buf);
3135 } else if (type == L2CAP_IT_FIXED_CHAN) {
3137 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3138 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3139 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel map follows the 4-byte response header. */
3140 memcpy(buf + 4, l2cap_fixed_chan, 8);
3141 l2cap_send_cmd(conn, cmd->ident,
3142 L2CAP_INFO_RSP, sizeof(buf), buf);
3144 struct l2cap_info_rsp rsp;
3145 rsp.type = cpu_to_le16(type);
3146 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3147 l2cap_send_cmd(conn, cmd->ident,
3148 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming L2CAP Information Response (answer to a query we sent).
 * Caches the peer's feature mask, chains a fixed-channel query if the peer
 * supports it, and finally kicks off pending channel establishment. */
3156 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3158 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3159 type = __le16_to_cpu(rsp->type);
3160 result = __le16_to_cpu(rsp->result);
3162 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* The response arrived, so the info-request guard timer is no longer needed. */
3164 del_timer(&conn->info_timer);
3166 if (type == L2CAP_IT_FEAT_MASK) {
3167 conn->feat_mask = get_unaligned_le32(rsp->data);
3169 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
/* Peer supports fixed channels: issue the follow-up query before
 * declaring the information exchange done. */
3170 struct l2cap_info_req req;
3171 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3173 conn->info_ident = l2cap_get_ident(conn);
3175 l2cap_send_cmd(conn, conn->info_ident,
3176 L2CAP_INFO_REQ, sizeof(req), &req);
/* else-branch (brace elided): no more queries needed. */
3178 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3179 conn->info_ident = 0;
/* Information exchange complete: start queued connection requests. */
3181 l2cap_conn_start(conn);
3183 } else if (type == L2CAP_IT_FIXED_CHAN) {
3184 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3185 conn->info_ident = 0;
3187 l2cap_conn_start(conn);
/* Demultiplex the L2CAP signalling channel (CID 0x0001): iterate over the
 * concatenated commands in one skb and dispatch each by opcode.  Any handler
 * error is reported back to the peer with a Command Reject. */
3193 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3195 u8 *data = skb->data;
3197 struct l2cap_cmd_hdr cmd;
/* Give raw (SOCK_RAW) listeners a copy of the signalling traffic first. */
3200 l2cap_raw_recv(conn, skb);
3202 while (len >= L2CAP_CMD_HDR_SIZE) {
3204 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3205 data += L2CAP_CMD_HDR_SIZE;
3206 len -= L2CAP_CMD_HDR_SIZE;
3208 cmd_len = le16_to_cpu(cmd.len);
3210 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Validate the claimed command length against what is actually left in
 * the skb; ident 0 is illegal per the L2CAP spec. */
3212 if (cmd_len > len || !cmd.ident) {
3213 BT_DBG("corrupted command");
3218 case L2CAP_COMMAND_REJ:
3219 l2cap_command_rej(conn, &cmd, data);
3222 case L2CAP_CONN_REQ:
3223 err = l2cap_connect_req(conn, &cmd, data);
3226 case L2CAP_CONN_RSP:
3227 err = l2cap_connect_rsp(conn, &cmd, data);
3230 case L2CAP_CONF_REQ:
3231 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3234 case L2CAP_CONF_RSP:
3235 err = l2cap_config_rsp(conn, &cmd, data);
3238 case L2CAP_DISCONN_REQ:
3239 err = l2cap_disconnect_req(conn, &cmd, data);
3242 case L2CAP_DISCONN_RSP:
3243 err = l2cap_disconnect_rsp(conn, &cmd, data);
3246 case L2CAP_ECHO_REQ:
/* Echo is answered inline: same ident, payload reflected back. */
3247 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3250 case L2CAP_ECHO_RSP:
3253 case L2CAP_INFO_REQ:
3254 err = l2cap_information_req(conn, &cmd, data);
3257 case L2CAP_INFO_RSP:
3258 err = l2cap_information_rsp(conn, &cmd, data);
3262 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* A handler failed: tell the peer via Command Reject. */
3268 struct l2cap_cmd_rej rej;
3269 BT_DBG("error %d", err);
3271 /* FIXME: Map err to a valid reason */
3272 rej.reason = cpu_to_le16(0);
3273 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the 2-byte CRC16 FCS trailer of a received ERTM/streaming frame.
 * The CRC covers the L2CAP header plus the control/payload bytes.  Returns
 * nonzero (in elided lines) on mismatch; no-op when FCS is disabled. */
3283 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3285 u16 our_fcs, rcv_fcs;
/* hdr_size = L2CAP basic header + 2-byte control field. */
3286 int hdr_size = L2CAP_HDR_SIZE + 2;
3288 if (pi->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off first; skb->len then points exactly at the trailer,
 * so the read below picks up the received FCS just past the new tail. */
3289 skb_trim(skb, skb->len - 2);
3290 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
/* skb->data was advanced past the header by the caller, hence -hdr_size. */
3291 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3293 if (our_fcs != rcv_fcs)
/* Respond to a poll (P=1) from the peer: send whatever best carries our
 * F-bit -- an RNR if we are locally busy, pending I-frames if we have any,
 * or a bare RR as a last resort. */
3299 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3301 struct l2cap_pinfo *pi = l2cap_pi(sk);
3304 pi->frames_sent = 0;
/* Mark that the F-bit must be set on the next frame we transmit. */
3305 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3307 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3309 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
/* Locally busy: answer the poll with RNR+F immediately. */
3310 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3311 l2cap_send_sframe(pi, control);
3312 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3315 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
3316 __mod_retrans_timer();
/* Try to carry the F-bit on outstanding I-frames. */
3318 l2cap_ertm_send(sk);
3320 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3321 pi->frames_sent == 0) {
/* Nothing was sent above, so the F-bit goes out on a plain RR. */
3322 control |= L2CAP_SUPER_RCV_READY;
3323 l2cap_send_sframe(pi, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the queue
 * ordered by tx_seq so reassembly can later drain it in sequence order. */
3327 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3329 struct sk_buff *next_skb;
/* Stash sequence number and SAR bits in the skb control block. */
3331 bt_cb(skb)->tx_seq = tx_seq;
3332 bt_cb(skb)->sar = sar;
3334 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue (check elided): just append. */
3336 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Walk the queue to find the first entry with a larger tx_seq and insert
 * before it.  NOTE(review): plain '>' comparison does not account for
 * modulo-64 wrap-around of sequence numbers -- verify against full source. */
3341 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3342 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3346 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3349 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
/* Fell off the end: tx_seq is the largest seen, append at tail. */
3351 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble a segmented SDU from ERTM/streaming I-frames according to the
 * SAR bits in the control field, and deliver complete SDUs to the socket
 * receive queue. */
3354 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3356 struct l2cap_pinfo *pi = l2cap_pi(sk);
3357 struct sk_buff *_skb;
3360 switch (control & L2CAP_CTRL_SAR) {
3361 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented SDU while a partial one is pending is a protocol
 * violation (error path elided). */
3362 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3367 err = sock_queue_rcv_skb(sk, skb);
3373 case L2CAP_SDU_START:
3374 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* First two payload bytes of a START segment carry the total SDU length. */
3379 pi->sdu_len = get_unaligned_le16(skb->data);
3382 if (pi->sdu_len > pi->imtu) {
/* Allocate the reassembly buffer for the whole SDU.  The NULL check
 * sits in elided lines 3388-3392 -- TODO confirm against full source. */
3387 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3393 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3395 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3396 pi->partial_sdu_len = skb->len;
3400 case L2CAP_SDU_CONTINUE:
/* CONTINUE/END without a preceding START is a protocol violation. */
3401 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3404 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3406 pi->partial_sdu_len += skb->len;
3407 if (pi->partial_sdu_len > pi->sdu_len)
/* L2CAP_SDU_END case (label elided). */
3415 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3418 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3420 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3421 pi->partial_sdu_len += skb->len;
3423 if (pi->partial_sdu_len > pi->imtu)
3426 if (pi->partial_sdu_len == pi->sdu_len) {
/* Complete SDU: hand a clone to the socket so pi->sdu can be freed
 * in the elided cleanup path. */
3427 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3428 err = sock_queue_rcv_skb(sk, _skb);
/* After a missing frame has been retransmitted, drain the SREJ queue of all
 * consecutive in-sequence frames starting at tx_seq, pushing each through
 * SAR reassembly. */
3443 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3445 struct sk_buff *skb;
3448 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* Queue is ordered; stop at the first remaining gap. */
3449 if (bt_cb(skb)->tx_seq != tx_seq)
3452 skb = skb_dequeue(SREJ_QUEUE(sk));
3453 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3454 l2cap_sar_reassembly_sdu(sk, skb, control);
/* Advance the SREJ-mode buffer sequence modulo the 64-value space. */
3455 l2cap_pi(sk)->buffer_seq_srej =
3456 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
/* Re-send SREJ S-frames for every sequence number still outstanding in the
 * SREJ list up to (and including handling of) tx_seq. */
3461 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3463 struct l2cap_pinfo *pi = l2cap_pi(sk);
3464 struct srej_list *l, *tmp;
/* _safe variant: entries may be removed (in elided lines) while walking. */
3467 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
/* Reached the entry for tx_seq itself; its handling is elided. */
3468 if (l->tx_seq == tx_seq) {
3473 control = L2CAP_SUPER_SELECT_REJECT;
3474 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3475 l2cap_send_sframe(pi, control);
/* Move the entry to the tail so the list stays in request order. */
3477 list_add_tail(&l->list, SREJ_LIST(sk));
3481 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3483 struct l2cap_pinfo *pi = l2cap_pi(sk);
3484 struct srej_list *new;
3487 while (tx_seq != pi->expected_tx_seq) {
3488 control = L2CAP_SUPER_SELECT_REJECT;
3489 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3490 l2cap_send_sframe(pi, control);
3492 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3493 new->tx_seq = pi->expected_tx_seq++;
3494 list_add_tail(&new->list, SREJ_LIST(sk));
3496 pi->expected_tx_seq++;
/* Core ERTM receive path for I-frames: acknowledge the peer's ReqSeq,
 * handle in-sequence delivery, detect gaps (entering SREJ recovery), and
 * periodically acknowledge received frames. */
3499 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3501 struct l2cap_pinfo *pi = l2cap_pi(sk);
3502 u8 tx_seq = __get_txseq(rx_control);
3503 u8 req_seq = __get_reqseq(rx_control);
3504 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
/* Ack threshold: acknowledge after roughly 1/6th of the tx window. */
3505 int num_to_ack = (pi->tx_win/6) + 1;
3508 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3510 if (L2CAP_CTRL_FINAL & rx_control) {
/* F-bit answers our poll: stop the monitor timer and leave WAIT_F. */
3511 del_timer(&pi->monitor_timer);
3512 if (pi->unacked_frames > 0)
3513 __mod_retrans_timer();
3514 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
/* Every I-frame piggy-backs an acknowledgement in ReqSeq. */
3517 pi->expected_ack_seq = req_seq;
3518 l2cap_drop_acked_frames(sk);
3520 if (tx_seq == pi->expected_tx_seq)
/* Out-of-sequence frame follows (goto target elided). */
3523 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3524 struct srej_list *first;
3526 first = list_first_entry(SREJ_LIST(sk),
3527 struct srej_list, list);
3528 if (tx_seq == first->tx_seq) {
/* This is the oldest frame we asked to be resent; queue it and
 * drain any consecutive frames that are now in sequence. */
3529 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3530 l2cap_check_srej_gap(sk, tx_seq);
3532 list_del(&first->list);
3535 if (list_empty(SREJ_LIST(sk))) {
/* All SREJ'd frames recovered: leave SREJ state. */
3536 pi->buffer_seq = pi->buffer_seq_srej;
3537 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
/* else-branch (brace elided): a different out-of-sequence frame. */
3541 struct srej_list *l;
3542 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3544 list_for_each_entry(l, SREJ_LIST(sk), list) {
3545 if (l->tx_seq == tx_seq) {
3546 l2cap_resend_srejframe(sk, tx_seq);
3550 l2cap_send_srejframe(sk, tx_seq);
/* First gap detected: enter SREJ recovery state. */
3553 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3555 INIT_LIST_HEAD(SREJ_LIST(sk));
3556 pi->buffer_seq_srej = pi->buffer_seq;
3558 __skb_queue_head_init(SREJ_QUEUE(sk));
3559 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3561 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3563 l2cap_send_srejframe(sk, tx_seq);
/* In-sequence path (label elided): accept the frame. */
3568 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3570 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
/* While in SREJ recovery, in-sequence frames are buffered too. */
3571 bt_cb(skb)->tx_seq = tx_seq;
3572 bt_cb(skb)->sar = sar;
3573 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3577 if (rx_control & L2CAP_CTRL_FINAL) {
3578 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3579 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
/* F-bit after REJ: retransmit everything from the acked point. */
3581 if (!skb_queue_empty(TX_QUEUE(sk)))
3582 sk->sk_send_head = TX_QUEUE(sk)->next;
3583 pi->next_tx_seq = pi->expected_ack_seq;
3584 l2cap_ertm_send(sk);
3588 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3590 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
/* Send an acknowledgement every num_to_ack frames. */
3596 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3597 if (pi->num_acked == num_to_ack - 1)
/* Handle a received RR (Receiver Ready) S-frame: process the piggy-backed
 * acknowledgement, then react to the P/F bits. */
3603 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3605 struct l2cap_pinfo *pi = l2cap_pi(sk);
3607 pi->expected_ack_seq = __get_reqseq(rx_control);
3608 l2cap_drop_acked_frames(sk);
3610 if (rx_control & L2CAP_CTRL_POLL) {
/* Peer polls us (P=1): we must answer with the F-bit set. */
3611 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3612 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3613 (pi->unacked_frames > 0))
3614 __mod_retrans_timer();
3616 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* In SREJ recovery the F-bit rides on the last SREJ frame. */
3617 l2cap_send_srejtail(sk);
3619 l2cap_send_i_or_rr_or_rnr(sk);
3620 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3623 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* F=1 answers our earlier poll. */
3624 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3626 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3627 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
/* Not already retransmitting: rewind to the acked point and resend. */
3629 if (!skb_queue_empty(TX_QUEUE(sk)))
3630 sk->sk_send_head = TX_QUEUE(sk)->next;
3631 pi->next_tx_seq = pi->expected_ack_seq;
3632 l2cap_ertm_send(sk);
/* Plain RR (no P/F): restart retransmission timer if frames remain
 * unacked, and keep transmitting. */
3636 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3637 (pi->unacked_frames > 0))
3638 __mod_retrans_timer();
3640 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3641 if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
3644 l2cap_ertm_send(sk);
/* Handle a received REJ S-frame: the peer rejects everything from ReqSeq on,
 * so drop acked frames and retransmit from that point. */
3648 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3650 struct l2cap_pinfo *pi = l2cap_pi(sk);
3651 u8 tx_seq = __get_reqseq(rx_control);
3653 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3655 pi->expected_ack_seq = tx_seq;
3656 l2cap_drop_acked_frames(sk);
3658 if (rx_control & L2CAP_CTRL_FINAL) {
/* REJ with F-bit: only retransmit if we have not already acted on a
 * previous REJ (REJ_ACT guards against duplicate retransmission). */
3659 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3660 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3662 if (!skb_queue_empty(TX_QUEUE(sk)))
3663 sk->sk_send_head = TX_QUEUE(sk)->next;
3664 pi->next_tx_seq = pi->expected_ack_seq;
3665 l2cap_ertm_send(sk);
/* else-branch (brace elided): REJ without F-bit. */
3668 if (!skb_queue_empty(TX_QUEUE(sk)))
3669 sk->sk_send_head = TX_QUEUE(sk)->next;
3670 pi->next_tx_seq = pi->expected_ack_seq;
3671 l2cap_ertm_send(sk);
/* If we are waiting for an F-bit, remember we already honoured this REJ. */
3673 if (pi->conn_state & L2CAP_CONN_WAIT_F)
3674 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a received SREJ S-frame: retransmit exactly the single frame the
 * peer asks for, with P/F-bit bookkeeping mirroring the REJ handler. */
3677 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3679 struct l2cap_pinfo *pi = l2cap_pi(sk);
3680 u8 tx_seq = __get_reqseq(rx_control);
3682 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3684 if (rx_control & L2CAP_CTRL_POLL) {
/* SREJ with P-bit also acknowledges up to tx_seq. */
3685 pi->expected_ack_seq = tx_seq;
3686 l2cap_drop_acked_frames(sk);
3687 l2cap_retransmit_frame(sk, tx_seq);
3688 l2cap_ertm_send(sk);
3689 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
/* Remember which ReqSeq we answered so a later SREJ+F matching it
 * is not retransmitted twice (SREJ_ACT). */
3690 pi->srej_save_reqseq = tx_seq;
3691 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3693 } else if (rx_control & L2CAP_CTRL_FINAL) {
3694 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3695 pi->srej_save_reqseq == tx_seq)
3696 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3698 l2cap_retransmit_frame(sk, tx_seq);
/* else-branch (brace elided): plain SREJ without P/F. */
3700 l2cap_retransmit_frame(sk, tx_seq);
3701 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3702 pi->srej_save_reqseq = tx_seq;
3703 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a received RNR (Receiver Not Ready) S-frame: the peer cannot
 * accept more I-frames, so mark it busy and stop retransmitting. */
3708 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
3710 struct l2cap_pinfo *pi = l2cap_pi(sk);
3711 u8 tx_seq = __get_reqseq(rx_control);
3713 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
/* RNR still carries an acknowledgement in ReqSeq. */
3714 pi->expected_ack_seq = tx_seq;
3715 l2cap_drop_acked_frames(sk);
3717 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
/* Peer is busy: no point running the retransmission timer. */
3718 del_timer(&pi->retrans_timer);
3719 if (rx_control & L2CAP_CTRL_POLL) {
/* Poll must still be answered, with F-bit set. */
3720 u16 control = L2CAP_CTRL_FINAL;
3721 l2cap_send_rr_or_rnr(pi, control);
/* SREJ recovery in progress (else-branch elided). */
3726 if (rx_control & L2CAP_CTRL_POLL)
3727 l2cap_send_srejtail(sk);
3729 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* Dispatch a received ERTM S-frame (supervisory frame) to the handler for
 * its supervise function: RR, REJ, SREJ or RNR. */
3732 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3734 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3736 if (L2CAP_CTRL_FINAL & rx_control) {
/* F-bit answers our poll: stop the monitor timer, leave WAIT_F, and
 * restart retransmission if frames are still outstanding. */
3737 del_timer(&l2cap_pi(sk)->monitor_timer);
3738 if (l2cap_pi(sk)->unacked_frames > 0)
3739 __mod_retrans_timer();
3740 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
3743 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3744 case L2CAP_SUPER_RCV_READY:
3745 l2cap_data_channel_rrframe(sk, rx_control);
3748 case L2CAP_SUPER_REJECT:
3749 l2cap_data_channel_rejframe(sk, rx_control);
3752 case L2CAP_SUPER_SELECT_REJECT:
3753 l2cap_data_channel_srejframe(sk, rx_control);
3756 case L2CAP_SUPER_RCV_NOT_READY:
3757 l2cap_data_channel_rnrframe(sk, rx_control);
/* Entry point for data received on a connection-oriented channel: find the
 * channel by CID and process the payload according to the channel mode
 * (basic, ERTM or streaming). */
3765 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3768 struct l2cap_pinfo *pi;
/* NOTE(review): these are u8, so the "< 0" checks on the *_offset
 * variables below can never fire, and "% 64" applied to a negative
 * promoted int stored back into a u8 wraps mod 256, not mod 64.  The
 * offsets should be computed with proper modular arithmetic -- BUG,
 * needs the elided context to fix safely. */
3770 u8 tx_seq, req_seq, next_tx_seq_offset, req_seq_offset;
3772 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3774 BT_DBG("unknown cid 0x%4.4x", cid);
3780 BT_DBG("sk %p, len %d", sk, skb->len);
3782 if (sk->sk_state != BT_CONNECTED)
3786 case L2CAP_MODE_BASIC:
3787 /* If socket recv buffers overflows we drop data here
3788 * which is *bad* because L2CAP has to be reliable.
3789 * But we don't have any other choice. L2CAP doesn't
3790 * provide flow control mechanism. */
3792 if (pi->imtu < skb->len)
3795 if (!sock_queue_rcv_skb(sk, skb))
3799 case L2CAP_MODE_ERTM:
/* Control field is the first two bytes after the L2CAP header. */
3800 control = get_unaligned_le16(skb->data)
3804 if (__is_sar_start(control))
3807 if (pi->fcs == L2CAP_FCS_CRC16)
3811 * We can just drop the corrupted I-frame here.
3812 * Receiver will miss it and start proper recovery
3813 * procedures and ask retransmission.
3818 if (l2cap_check_fcs(pi, skb))
3821 req_seq = __get_reqseq(control);
/* See NOTE(review) above: dead "< 0" check on a u8. */
3822 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
3823 if (req_seq_offset < 0)
3824 req_seq_offset += 64;
3826 next_tx_seq_offset =
3827 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
3828 if (next_tx_seq_offset < 0)
3829 next_tx_seq_offset += 64;
3831 /* check for invalid req-seq */
3832 if (req_seq_offset > next_tx_seq_offset) {
3833 l2cap_send_disconn_req(pi->conn, sk);
3837 if (__is_iframe(control)) {
3841 l2cap_data_channel_iframe(sk, control, skb);
3846 l2cap_data_channel_sframe(sk, control, skb);
3851 case L2CAP_MODE_STREAMING:
3852 control = get_unaligned_le16(skb->data);
3856 if (__is_sar_start(control))
3859 if (pi->fcs == L2CAP_FCS_CRC16)
3862 if (len > pi->mps || len < 4 || __is_sframe(control))
3865 if (l2cap_check_fcs(pi, skb))
3868 tx_seq = __get_txseq(control);
/* Streaming mode: no retransmission; lost frames are simply skipped. */
3870 if (pi->expected_tx_seq == tx_seq)
3871 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3873 pi->expected_tx_seq = (tx_seq + 1) % 64;
3875 l2cap_sar_reassembly_sdu(sk, skb, control);
3880 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Deliver a connectionless (CID 0x0002) datagram to a socket bound to the
 * destination PSM, enforcing the socket's incoming MTU. */
3894 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3898 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3902 BT_DBG("sk %p, len %d", sk, skb->len);
3904 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
/* Oversized datagram for this socket's MTU: dropped (goto elided). */
3907 if (l2cap_pi(sk)->imtu < skb->len)
3910 if (!sock_queue_rcv_skb(sk, skb))
/* Top-level receive demultiplexer for a complete L2CAP frame: strip the
 * basic header and route by destination CID to signalling, connectionless
 * or connection-oriented handling. */
3922 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3924 struct l2cap_hdr *lh = (void *) skb->data;
3928 skb_pull(skb, L2CAP_HDR_SIZE);
3929 cid = __le16_to_cpu(lh->cid);
3930 len = __le16_to_cpu(lh->len);
/* Header length must match what actually arrived; otherwise drop. */
3932 if (len != skb->len) {
3937 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3940 case L2CAP_CID_SIGNALING:
3941 l2cap_sig_channel(conn, skb);
3944 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM in the first two payload bytes. */
3945 psm = get_unaligned_le16(skb->data);
3947 l2cap_conless_channel(conn, psm, skb);
/* default case (label elided): dynamically allocated CIDs. */
3951 l2cap_data_channel(conn, cid, skb);
3956 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection is being set up.  Scan listening
 * L2CAP sockets and report whether (and with what link mode) to accept,
 * preferring sockets bound to this adapter's address over wildcard binds. */
3958 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3960 int exact = 0, lm1 = 0, lm2 = 0;
3961 register struct sock *sk;
3962 struct hlist_node *node;
3964 if (type != ACL_LINK)
3967 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3969 /* Find listening sockets and check their link_mode */
3970 read_lock(&l2cap_sk_list.lock);
3971 sk_for_each(sk, node, &l2cap_sk_list.head) {
3972 if (sk->sk_state != BT_LISTEN)
3975 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
/* Exact local-address match; "exact" is presumably set in an elided
 * line here -- TODO confirm against full source. */
3976 lm1 |= HCI_LM_ACCEPT;
3977 if (l2cap_pi(sk)->role_switch)
3978 lm1 |= HCI_LM_MASTER;
3980 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3981 lm2 |= HCI_LM_ACCEPT;
3982 if (l2cap_pi(sk)->role_switch)
3983 lm2 |= HCI_LM_MASTER;
3986 read_unlock(&l2cap_sk_list.lock);
/* Exact binds take precedence over wildcard binds. */
3988 return exact ? lm1 : lm2;
/* HCI callback: ACL connection establishment finished.  On success attach
 * an l2cap_conn to the hci_conn; on failure tear everything down. */
3991 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3993 struct l2cap_conn *conn;
3995 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3997 if (hcon->type != ACL_LINK)
4001 conn = l2cap_conn_add(hcon, status);
4003 l2cap_conn_ready(conn);
/* Failure path (else elided): propagate the HCI status as an errno. */
4005 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: report the L2CAP-level reason to use for an imminent
 * disconnection of this ACL link. */
4012 static int l2cap_disconn_ind(struct hci_conn *hcon)
4014 struct l2cap_conn *conn = hcon->l2cap_data;
4016 BT_DBG("hcon %p", hcon);
4018 if (hcon->type != ACL_LINK || !conn)
4019 return conn->disc_reason;
/* HCI callback: the ACL link went down; tear down the L2CAP connection and
 * propagate the HCI reason to all channels as an errno. */
4022 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4024 BT_DBG("hcon %p reason %d", hcon, reason);
4026 if (hcon->type != ACL_LINK)
4029 l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on a connected channel, according to the
 * channel's configured security level. */
4034 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
/* Only connection-oriented sockets carry a security level to enforce. */
4036 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4039 if (encrypt == 0x00) {
4040 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
/* Medium security: give re-encryption 5 seconds before giving up. */
4041 l2cap_sock_clear_timer(sk);
4042 l2cap_sock_set_timer(sk, HZ * 5);
4043 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
/* High security demands encryption: drop the channel immediately. */
4044 __l2cap_sock_close(sk, ECONNREFUSED);
/* else-branch (brace elided): encryption (re)enabled. */
4046 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4047 l2cap_sock_clear_timer(sk);
/* HCI callback: a security (authentication/encryption) procedure finished.
 * Walk every channel on the connection and advance the ones that were
 * waiting on security: proceed with a pending connect, or answer a held
 * incoming connect request. */
4053 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4054 struct l2cap_chan_list *l;
4055 struct l2cap_conn *conn = hcon->l2cap_data;
4060 l = &conn->chan_list;
4062 BT_DBG("conn %p", conn);
4064 read_lock(&l->lock);
4066 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Channel still waiting for connection completion: skip for now. */
4069 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4074 if (!status && (sk->sk_state == BT_CONNECTED ||
4075 sk->sk_state == BT_CONFIG)) {
/* Already established: just react to the encryption change. */
4076 l2cap_check_encryption(sk, encrypt);
4081 if (sk->sk_state == BT_CONNECT) {
/* Security succeeded for an outgoing connect: send the deferred
 * L2CAP Connect Request now (failure branch elided). */
4083 struct l2cap_conn_req req;
4084 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4085 req.psm = l2cap_pi(sk)->psm;
4087 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4089 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4090 L2CAP_CONN_REQ, sizeof(req), &req);
4092 l2cap_sock_clear_timer(sk);
4093 l2cap_sock_set_timer(sk, HZ / 10);
4095 } else if (sk->sk_state == BT_CONNECT2) {
/* Incoming connect was held pending security: answer it now with
 * success or a security block depending on the outcome. */
4096 struct l2cap_conn_rsp rsp;
4100 sk->sk_state = BT_CONFIG;
4101 result = L2CAP_CR_SUCCESS;
4103 sk->sk_state = BT_DISCONN;
4104 l2cap_sock_set_timer(sk, HZ / 10);
4105 result = L2CAP_CR_SEC_BLOCK;
4108 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4109 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4110 rsp.result = cpu_to_le16(result);
4111 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4112 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4113 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4119 read_unlock(&l->lock);
/* HCI callback: raw ACL data arrived.  Reassemble L2CAP frames that were
 * fragmented across multiple ACL packets (ACL_START begins a frame,
 * continuations are appended into conn->rx_skb) and hand complete frames
 * to l2cap_recv_frame(). */
4124 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4126 struct l2cap_conn *conn = hcon->l2cap_data;
4128 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4131 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4133 if (flags & ACL_START) {
4134 struct l2cap_hdr *hdr;
/* A new start while a reassembly is pending means we lost the tail of
 * the previous frame: drop it and mark the connection unreliable. */
4138 BT_ERR("Unexpected start frame (len %d)", skb->len);
4139 kfree_skb(conn->rx_skb);
4140 conn->rx_skb = NULL;
4142 l2cap_conn_unreliable(conn, ECOMM);
/* Start fragment must at least hold the L2CAP basic header. */
4146 BT_ERR("Frame is too short (len %d)", skb->len);
4147 l2cap_conn_unreliable(conn, ECOMM);
4151 hdr = (struct l2cap_hdr *) skb->data;
4152 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4154 if (len == skb->len) {
4155 /* Complete frame received */
4156 l2cap_recv_frame(conn, skb);
4160 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4162 if (skb->len > len) {
4163 BT_ERR("Frame is too long (len %d, expected len %d)",
4165 l2cap_conn_unreliable(conn, ECOMM);
4169 /* Allocate skb for the complete frame (with header) */
4170 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4174 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many bytes of the frame are still expected. */
4176 conn->rx_len = len - skb->len;
/* Continuation fragment path (else-branch brace elided). */
4178 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4180 if (!conn->rx_len) {
4181 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4182 l2cap_conn_unreliable(conn, ECOMM);
4186 if (skb->len > conn->rx_len) {
4187 BT_ERR("Fragment is too long (len %d, expected %d)",
4188 skb->len, conn->rx_len);
4189 kfree_skb(conn->rx_skb);
4190 conn->rx_skb = NULL;
4192 l2cap_conn_unreliable(conn, ECOMM);
4196 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4198 conn->rx_len -= skb->len;
4200 if (!conn->rx_len) {
4201 /* Complete frame received */
4202 l2cap_recv_frame(conn, conn->rx_skb);
4203 conn->rx_skb = NULL;
/* debugfs seq_file callback: dump one line per L2CAP socket with addresses,
 * state, PSM, CIDs, MTUs and security level. */
4212 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4215 struct hlist_node *node;
/* _bh variant: the socket list is also touched from softirq context. */
4217 read_lock_bh(&l2cap_sk_list.lock);
4219 sk_for_each(sk, node, &l2cap_sk_list.head) {
4220 struct l2cap_pinfo *pi = l2cap_pi(sk);
4222 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4223 batostr(&bt_sk(sk)->src),
4224 batostr(&bt_sk(sk)->dst),
4225 sk->sk_state, __le16_to_cpu(pi->psm),
4227 pi->imtu, pi->omtu, pi->sec_level);
4230 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open callback: wire the seq_file single-shot show routine. */
4235 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4237 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for /sys/kernel/debug/bluetooth/l2cap (seq_file based;
 * .read = seq_read presumably sits in an elided line). */
4240 static const struct file_operations l2cap_debugfs_fops = {
4241 .open = l2cap_debugfs_open,
4243 .llseek = seq_lseek,
4244 .release = single_release,
4247 static struct dentry *l2cap_debugfs;
/* Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets; generic
 * Bluetooth helpers (bt_sock_*) and sock_no_* stubs fill the gaps. */
4249 static const struct proto_ops l2cap_sock_ops = {
4250 .family = PF_BLUETOOTH,
4251 .owner = THIS_MODULE,
4252 .release = l2cap_sock_release,
4253 .bind = l2cap_sock_bind,
4254 .connect = l2cap_sock_connect,
4255 .listen = l2cap_sock_listen,
4256 .accept = l2cap_sock_accept,
4257 .getname = l2cap_sock_getname,
4258 .sendmsg = l2cap_sock_sendmsg,
4259 .recvmsg = l2cap_sock_recvmsg,
4260 .poll = bt_sock_poll,
4261 .ioctl = bt_sock_ioctl,
4262 .mmap = sock_no_mmap,
4263 .socketpair = sock_no_socketpair,
4264 .shutdown = l2cap_sock_shutdown,
4265 .setsockopt = l2cap_sock_setsockopt,
4266 .getsockopt = l2cap_sock_getsockopt
/* Registered with bt_sock_register(): creates L2CAP sockets for the
 * Bluetooth protocol family. */
4269 static const struct net_proto_family l2cap_sock_family_ops = {
4270 .family = PF_BLUETOOTH,
4271 .owner = THIS_MODULE,
4272 .create = l2cap_sock_create,
/* Hooks registered with the HCI core so L2CAP receives connection events
 * and ACL data from the lower layer. */
4275 static struct hci_proto l2cap_hci_proto = {
4277 .id = HCI_PROTO_L2CAP,
4278 .connect_ind = l2cap_connect_ind,
4279 .connect_cfm = l2cap_connect_cfm,
4280 .disconn_ind = l2cap_disconn_ind,
4281 .disconn_cfm = l2cap_disconn_cfm,
4282 .security_cfm = l2cap_security_cfm,
4283 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, the socket family and the HCI protocol
 * hooks (unwinding in reverse order on failure), then create the debugfs
 * entry -- which is allowed to fail without aborting the load. */
4286 static int __init l2cap_init(void)
4290 err = proto_register(&l2cap_proto, 0);
4294 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4296 BT_ERR("L2CAP socket registration failed");
4300 err = hci_register_proto(&l2cap_hci_proto);
4302 BT_ERR("L2CAP protocol registration failed");
/* Unwind the socket registration done above before bailing out. */
4303 bt_sock_unregister(BTPROTO_L2CAP);
4308 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4309 bt_debugfs, NULL, &l2cap_debugfs_fops);
4311 BT_ERR("Failed to create L2CAP debug file");
4314 BT_INFO("L2CAP ver %s", VERSION);
4315 BT_INFO("L2CAP socket layer initialized");
/* Error label (elided): undo proto_register() from the first step. */
4320 proto_unregister(&l2cap_proto);
/* Module exit: tear registrations down in reverse order of l2cap_init(). */
4324 static void __exit l2cap_exit(void)
4326 debugfs_remove(l2cap_debugfs);
4328 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4329 BT_ERR("L2CAP socket unregistration failed");
4331 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4332 BT_ERR("L2CAP protocol unregistration failed");
4334 proto_unregister(&l2cap_proto);
/* Exported no-op; see comment below: referencing this symbol is how other
 * modules force this one to be loaded. */
4337 void l2cap_load(void)
4339 /* Dummy function to trigger automatic L2CAP module loading by
4340 * other modules that use L2CAP sockets but don't use any other
4341 * symbols from it. */
4344 EXPORT_SYMBOL(l2cap_load);
/* Module entry points and runtime-tunable ERTM parameters (writable via
 * /sys/module/l2cap/parameters/). */
4346 module_init(l2cap_init);
4347 module_exit(l2cap_exit);
4349 module_param(enable_ertm, bool, 0644);
4350 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4352 module_param(max_transmit, uint, 0644);
4353 MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
4355 module_param(tx_window, uint, 0644);
4356 MODULE_PARM_DESC(tx_window, "Transmission window size value (default = 63)");
4358 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4359 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4360 MODULE_VERSION(VERSION);
4361 MODULE_LICENSE("GPL");
4362 MODULE_ALIAS("bt-proto-0");