2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
36 /* Handle HCI Event packets */
/* NOTE(review): this region is a corrupted, line-numbered listing — the
 * leading integers are extraction artifacts and brace/blank/guard lines
 * are missing. Comments only; no code tokens changed.
 */
/* HCI_OP_INQUIRY_CANCEL complete: clear HCI_INQUIRY, wake bit-waiters
 * (barrier required by wake_up_bit), then retry queued connections.
 * A status guard presumably precedes this — elided; confirm upstream.
 */
38 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
40 __u8 status = *((__u8 *) skb->data);
42 BT_DBG("%s status 0x%2.2x", hdev->name, status);
47 clear_bit(HCI_INQUIRY, &hdev->flags);
48 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
49 wake_up_bit(&hdev->flags, HCI_INQUIRY);
51 hci_conn_check_pending(hdev);
/* HCI_OP_PERIODIC_INQ complete: mark periodic inquiry active. */
54 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
56 __u8 status = *((__u8 *) skb->data);
58 BT_DBG("%s status 0x%2.2x", hdev->name, status);
63 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
/* HCI_OP_EXIT_PERIODIC_INQ complete: clear the flag, kick pending conns. */
66 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
68 __u8 status = *((__u8 *) skb->data);
70 BT_DBG("%s status 0x%2.2x", hdev->name, status);
75 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
77 hci_conn_check_pending(hdev);
/* HCI_OP_REMOTE_NAME_REQ_CANCEL complete: debug trace only (visible here). */
80 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
83 BT_DBG("%s", hdev->name);
/* HCI_OP_ROLE_DISCOVERY complete: update link_mode master bit on the
 * connection for rp->handle. The role test selecting &= vs |= is among
 * the elided lines; the matching hci_dev_lock() is also elided.
 */
86 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
88 struct hci_rp_role_discovery *rp = (void *) skb->data;
89 struct hci_conn *conn;
91 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
98 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
101 conn->link_mode &= ~HCI_LM_MASTER;
103 conn->link_mode |= HCI_LM_MASTER;
106 hci_dev_unlock(hdev);
/* HCI_OP_READ_LINK_POLICY complete: cache policy on the connection. */
109 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
111 struct hci_rp_read_link_policy *rp = (void *) skb->data;
112 struct hci_conn *conn;
114 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
121 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
123 conn->link_policy = __le16_to_cpu(rp->policy);
125 hci_dev_unlock(hdev);
/* HCI_OP_WRITE_LINK_POLICY complete: recover the policy we sent (it is
 * not echoed in the reply) from the sent-command buffer at offset 2,
 * i.e. just past the 16-bit handle. Declaration of 'sent' is elided.
 */
128 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
130 struct hci_rp_write_link_policy *rp = (void *) skb->data;
131 struct hci_conn *conn;
134 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
139 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
145 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
147 conn->link_policy = get_unaligned_le16(sent + 2);
149 hci_dev_unlock(hdev);
/* HCI_OP_READ_DEF_LINK_POLICY complete: cache default policy on hdev. */
152 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
155 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
157 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
162 hdev->link_policy = __le16_to_cpu(rp->policy);
/* HCI_OP_WRITE_DEF_LINK_POLICY complete: mirror the value we sent. */
165 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
168 __u8 status = *((__u8 *) skb->data);
171 BT_DBG("%s status 0x%2.2x", hdev->name, status);
173 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
178 hdev->link_policy = get_unaligned_le16(sent);
/* HCI_OP_RESET complete: drop HCI_RESET and reinitialise all
 * non-persistent controller state (discovery, TX power, adv/scan-rsp
 * data, LE scan type, SSP debug mode).
 */
181 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
183 __u8 status = *((__u8 *) skb->data);
185 BT_DBG("%s status 0x%2.2x", hdev->name, status);
187 clear_bit(HCI_RESET, &hdev->flags);
189 /* Reset all non-persistent flags */
190 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
192 hdev->discovery.state = DISCOVERY_STOPPED;
193 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
194 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
196 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
197 hdev->adv_data_len = 0;
199 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
200 hdev->scan_rsp_data_len = 0;
202 hdev->le_scan_type = LE_SCAN_PASSIVE;
204 hdev->ssp_debug_mode = 0;
/* NOTE(review): corrupted line-numbered listing — leading integers and
 * missing brace/guard lines are extraction damage. Comments only.
 */
/* HCI_OP_WRITE_LOCAL_NAME complete: notify mgmt, then (on success —
 * guard elided) cache the name we sent. hci_dev_lock() is elided.
 */
207 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
209 __u8 status = *((__u8 *) skb->data);
212 BT_DBG("%s status 0x%2.2x", hdev->name, status);
214 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
220 if (test_bit(HCI_MGMT, &hdev->dev_flags))
221 mgmt_set_local_name_complete(hdev, sent, status);
223 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
225 hci_dev_unlock(hdev);
/* HCI_OP_READ_LOCAL_NAME complete: cache the controller name, but only
 * during initial setup (HCI_SETUP).
 */
228 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
230 struct hci_rp_read_local_name *rp = (void *) skb->data;
232 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
237 if (test_bit(HCI_SETUP, &hdev->dev_flags))
238 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
/* HCI_OP_WRITE_AUTH_ENABLE complete: sync HCI_AUTH flag with the mode
 * we sent and report to mgmt. The else keyword before clear_bit is elided.
 */
241 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
243 __u8 status = *((__u8 *) skb->data);
246 BT_DBG("%s status 0x%2.2x", hdev->name, status);
248 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
253 __u8 param = *((__u8 *) sent);
255 if (param == AUTH_ENABLED)
256 set_bit(HCI_AUTH, &hdev->flags);
258 clear_bit(HCI_AUTH, &hdev->flags);
261 if (test_bit(HCI_MGMT, &hdev->dev_flags))
262 mgmt_auth_enable_complete(hdev, status);
/* HCI_OP_WRITE_ENCRYPT_MODE complete: sync HCI_ENCRYPT flag with the
 * mode we sent (the condition on 'param' is elided).
 */
265 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
267 __u8 status = *((__u8 *) skb->data);
270 BT_DBG("%s status 0x%2.2x", hdev->name, status);
272 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
277 __u8 param = *((__u8 *) sent);
280 set_bit(HCI_ENCRYPT, &hdev->flags);
282 clear_bit(HCI_ENCRYPT, &hdev->flags);
/* HCI_OP_WRITE_SCAN_ENABLE complete: on failure report to mgmt and reset
 * the discoverable timeout; on success sync HCI_ISCAN/HCI_PSCAN with the
 * requested scan mode and emit mgmt discoverable/connectable events only
 * on transitions. Re-sets HCI_BREDR_ENABLED in case a raw HCI socket
 * changed the scan mode behind our back.
 */
286 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
288 __u8 param, status = *((__u8 *) skb->data);
289 int old_pscan, old_iscan;
292 BT_DBG("%s status 0x%2.2x", hdev->name, status);
294 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
298 param = *((__u8 *) sent);
303 mgmt_write_scan_failed(hdev, param, status);
304 hdev->discov_timeout = 0;
308 /* We need to ensure that we set this back on if someone changed
309 * the scan mode through a raw HCI socket.
311 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
313 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
314 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
316 if (param & SCAN_INQUIRY) {
317 set_bit(HCI_ISCAN, &hdev->flags);
319 mgmt_discoverable(hdev, 1);
320 } else if (old_iscan)
321 mgmt_discoverable(hdev, 0);
323 if (param & SCAN_PAGE) {
324 set_bit(HCI_PSCAN, &hdev->flags);
326 mgmt_connectable(hdev, 1);
327 } else if (old_pscan)
328 mgmt_connectable(hdev, 0);
331 hci_dev_unlock(hdev);
/* HCI_OP_READ_CLASS_OF_DEV complete: cache the 3-byte class of device
 * (stored little-endian; printed MSB first).
 */
334 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
336 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
338 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
343 memcpy(hdev->dev_class, rp->dev_class, 3);
345 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
346 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
/* HCI_OP_WRITE_CLASS_OF_DEV complete: cache the class we sent (success
 * guard elided) and notify mgmt. hci_dev_lock() is elided.
 */
349 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
351 __u8 status = *((__u8 *) skb->data);
354 BT_DBG("%s status 0x%2.2x", hdev->name, status);
356 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
363 memcpy(hdev->dev_class, sent, 3);
365 if (test_bit(HCI_MGMT, &hdev->dev_flags))
366 mgmt_set_class_of_dev_complete(hdev, sent, status);
368 hci_dev_unlock(hdev);
/* NOTE(review): corrupted line-numbered listing — leading integers and
 * missing brace/guard lines are extraction damage. Comments only.
 */
/* HCI_OP_READ_VOICE_SETTING complete: cache the 16-bit voice setting and
 * notify the driver (via hdev->notify) only when it actually changed —
 * the early return on equality is among the elided lines.
 */
371 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
373 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
376 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
381 setting = __le16_to_cpu(rp->voice_setting);
383 if (hdev->voice_setting == setting)
386 hdev->voice_setting = setting;
388 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
391 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* HCI_OP_WRITE_VOICE_SETTING complete: same as above but the value comes
 * from the sent-command buffer, since the reply carries only status.
 */
394 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
397 __u8 status = *((__u8 *) skb->data);
401 BT_DBG("%s status 0x%2.2x", hdev->name, status);
406 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
410 setting = get_unaligned_le16(sent);
412 if (hdev->voice_setting == setting)
415 hdev->voice_setting = setting;
417 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
420 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* HCI_OP_READ_NUM_SUPPORTED_IAC complete: cache the IAC count. */
423 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
426 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
428 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
433 hdev->num_iac = rp->num_iac;
435 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
/* HCI_OP_WRITE_SSP_MODE complete: mirror the requested SSP host-feature
 * bit into features[1][0], report to mgmt, and sync HCI_SSP_ENABLED.
 * The status/mode conditionals selecting set vs clear are elided.
 */
438 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
440 __u8 status = *((__u8 *) skb->data);
441 struct hci_cp_write_ssp_mode *sent;
443 BT_DBG("%s status 0x%2.2x", hdev->name, status);
445 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
451 hdev->features[1][0] |= LMP_HOST_SSP;
453 hdev->features[1][0] &= ~LMP_HOST_SSP;
456 if (test_bit(HCI_MGMT, &hdev->dev_flags))
457 mgmt_ssp_enable_complete(hdev, sent->mode, status);
460 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
462 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
/* HCI_OP_WRITE_SC_SUPPORT complete: Secure Connections analogue of the
 * SSP handler above — mirror LMP_HOST_SC and sync HCI_SC_ENABLED.
 */
466 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
468 u8 status = *((u8 *) skb->data);
469 struct hci_cp_write_sc_support *sent;
471 BT_DBG("%s status 0x%2.2x", hdev->name, status);
473 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
479 hdev->features[1][0] |= LMP_HOST_SC;
481 hdev->features[1][0] &= ~LMP_HOST_SC;
484 if (test_bit(HCI_MGMT, &hdev->dev_flags))
485 mgmt_sc_enable_complete(hdev, sent->support, status);
488 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
490 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
/* HCI_OP_READ_LOCAL_VERSION complete: cache HCI/LMP version info, but
 * only during setup so runtime reads cannot clobber it.
 */
494 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
496 struct hci_rp_read_local_version *rp = (void *) skb->data;
498 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
503 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
504 hdev->hci_ver = rp->hci_ver;
505 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
506 hdev->lmp_ver = rp->lmp_ver;
507 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
508 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
/* HCI_OP_READ_LOCAL_COMMANDS complete: cache supported-commands bitmap
 * during setup only.
 */
512 static void hci_cc_read_local_commands(struct hci_dev *hdev,
515 struct hci_rp_read_local_commands *rp = (void *) skb->data;
517 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
522 if (test_bit(HCI_SETUP, &hdev->dev_flags))
523 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
/* HCI_OP_READ_LOCAL_FEATURES complete: cache page-0 LMP features and
 * derive the supported ACL packet types and (e)SCO air modes from them.
 */
526 static void hci_cc_read_local_features(struct hci_dev *hdev,
529 struct hci_rp_read_local_features *rp = (void *) skb->data;
531 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
536 memcpy(hdev->features, rp->features, 8);
538 /* Adjust default settings according to features
539 * supported by device. */
541 if (hdev->features[0][0] & LMP_3SLOT)
542 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
544 if (hdev->features[0][0] & LMP_5SLOT)
545 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
547 if (hdev->features[0][1] & LMP_HV2) {
548 hdev->pkt_type |= (HCI_HV2);
549 hdev->esco_type |= (ESCO_HV2);
552 if (hdev->features[0][1] & LMP_HV3) {
553 hdev->pkt_type |= (HCI_HV3);
554 hdev->esco_type |= (ESCO_HV3);
557 if (lmp_esco_capable(hdev))
558 hdev->esco_type |= (ESCO_EV3);
560 if (hdev->features[0][4] & LMP_EV4)
561 hdev->esco_type |= (ESCO_EV4);
563 if (hdev->features[0][4] & LMP_EV5)
564 hdev->esco_type |= (ESCO_EV5);
566 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
567 hdev->esco_type |= (ESCO_2EV3);
569 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
570 hdev->esco_type |= (ESCO_3EV3);
572 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
573 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
/* NOTE(review): corrupted line-numbered listing — leading integers and
 * missing brace/guard lines are extraction damage. Comments only.
 */
/* HCI_OP_READ_LOCAL_EXT_FEATURES complete: track the highest feature
 * page and cache this page's bits (bounds-checked against HCI_MAX_PAGES).
 */
576 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
579 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
581 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
586 if (hdev->max_page < rp->max_page)
587 hdev->max_page = rp->max_page;
589 if (rp->page < HCI_MAX_PAGES)
590 memcpy(hdev->features[rp->page], rp->features, 8);
/* HCI_OP_READ_FLOW_CONTROL_MODE complete: cache the flow control mode. */
593 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
596 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
598 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
601 hdev->flow_ctl_mode = rp->mode;
/* HCI_OP_READ_BUFFER_SIZE complete: cache ACL/SCO MTUs and packet
 * counts, and seed the free-packet counters. The quirk branch body
 * (fixing up broken controller values) is elided.
 */
604 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
606 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
608 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
613 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
614 hdev->sco_mtu = rp->sco_mtu;
615 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
616 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
618 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
623 hdev->acl_cnt = hdev->acl_pkts;
624 hdev->sco_cnt = hdev->sco_pkts;
626 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
627 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
/* HCI_OP_READ_BD_ADDR complete: cache the controller's public address. */
630 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
632 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
634 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
637 bacpy(&hdev->bdaddr, &rp->bdaddr);
/* HCI_OP_READ_PAGE_SCAN_ACTIVITY complete: cache interval/window, only
 * during init and on success.
 */
640 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
643 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
645 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
647 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
648 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
649 hdev->page_scan_window = __le16_to_cpu(rp->window);
/* HCI_OP_WRITE_PAGE_SCAN_ACTIVITY complete: cache what we sent (status
 * guard elided).
 */
653 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
656 u8 status = *((u8 *) skb->data);
657 struct hci_cp_write_page_scan_activity *sent;
659 BT_DBG("%s status 0x%2.2x", hdev->name, status);
664 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
668 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
669 hdev->page_scan_window = __le16_to_cpu(sent->window);
/* HCI_OP_READ_PAGE_SCAN_TYPE complete: cache type during init only. */
672 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
675 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
677 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
679 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
680 hdev->page_scan_type = rp->type;
/* HCI_OP_WRITE_PAGE_SCAN_TYPE complete: cache the sent type ('type'
 * declaration and NULL check elided).
 */
683 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
686 u8 status = *((u8 *) skb->data);
689 BT_DBG("%s status 0x%2.2x", hdev->name, status);
694 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
696 hdev->page_scan_type = *type;
/* HCI_OP_READ_DATA_BLOCK_SIZE complete: cache block-based flow control
 * parameters and seed the free-block counter.
 */
699 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
702 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
704 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
709 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
710 hdev->block_len = __le16_to_cpu(rp->block_len);
711 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
713 hdev->block_cnt = hdev->num_blocks;
715 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
716 hdev->block_cnt, hdev->block_len);
/* HCI_OP_READ_LOCAL_AMP_INFO complete: cache all AMP controller info
 * and answer the pending A2MP Get Info request.
 */
719 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
722 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
724 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
729 hdev->amp_status = rp->amp_status;
730 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
731 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
732 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
733 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
734 hdev->amp_type = rp->amp_type;
735 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
736 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
737 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
738 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
741 a2mp_send_getinfo_rsp(hdev);
/* HCI_OP_READ_LOCAL_AMP_ASSOC complete: accumulate AMP assoc fragments
 * into hdev->loc_assoc; when rem_len fits in this fragment it is the
 * last one, so the A2MP responses are sent. Intermediate fragments
 * trigger a read of the next fragment instead.
 */
744 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
747 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
748 struct amp_assoc *assoc = &hdev->loc_assoc;
749 size_t rem_len, frag_len;
751 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
756 frag_len = skb->len - sizeof(*rp);
757 rem_len = __le16_to_cpu(rp->rem_len);
759 if (rem_len > frag_len) {
760 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
762 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
763 assoc->offset += frag_len;
765 /* Read other fragments */
766 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
771 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
772 assoc->len = assoc->offset + rem_len;
776 /* Send A2MP Rsp when all fragments are received */
777 a2mp_send_getampassoc_rsp(hdev, rp->status);
778 a2mp_send_create_phy_link_req(hdev, rp->status);
/* HCI_OP_READ_INQ_RSP_TX_POWER complete: cache inquiry response TX power. */
781 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
784 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
786 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
789 hdev->inq_tx_power = rp->tx_power;
/* NOTE(review): corrupted line-numbered listing — leading integers and
 * missing brace/guard lines are extraction damage. Comments only.
 */
/* HCI_OP_PIN_CODE_REPLY complete: report to mgmt; on success record the
 * PIN length on the matching ACL connection so key-type decisions can
 * use it later. Status guard and hci_dev_lock() are elided.
 */
792 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
794 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
795 struct hci_cp_pin_code_reply *cp;
796 struct hci_conn *conn;
798 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
802 if (test_bit(HCI_MGMT, &hdev->dev_flags))
803 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
808 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
812 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
814 conn->pin_length = cp->pin_len;
817 hci_dev_unlock(hdev);
/* HCI_OP_PIN_CODE_NEG_REPLY complete: forward result to mgmt. */
820 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
822 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
824 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
828 if (test_bit(HCI_MGMT, &hdev->dev_flags))
829 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
832 hci_dev_unlock(hdev);
/* HCI_OP_LE_READ_BUFFER_SIZE complete: cache LE ACL MTU/packet count and
 * seed the LE free-packet counter.
 */
835 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
838 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
840 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
845 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
846 hdev->le_pkts = rp->le_max_pkt;
848 hdev->le_cnt = hdev->le_pkts;
850 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
/* HCI_OP_LE_READ_LOCAL_FEATURES complete: cache the LE feature bits. */
853 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
856 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
858 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
861 memcpy(hdev->le_features, rp->features, 8);
/* HCI_OP_LE_READ_ADV_TX_POWER complete: cache advertising TX power. */
864 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
867 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
869 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
872 hdev->adv_tx_power = rp->tx_power;
/* HCI_OP_USER_CONFIRM_REPLY complete: forward result to mgmt. */
875 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
877 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
879 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
883 if (test_bit(HCI_MGMT, &hdev->dev_flags))
884 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
887 hci_dev_unlock(hdev);
/* HCI_OP_USER_CONFIRM_NEG_REPLY complete: forward result to mgmt. */
890 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
893 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
895 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
899 if (test_bit(HCI_MGMT, &hdev->dev_flags))
900 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
901 ACL_LINK, 0, rp->status);
903 hci_dev_unlock(hdev);
/* HCI_OP_USER_PASSKEY_REPLY complete: forward result to mgmt. */
906 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
908 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
910 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
914 if (test_bit(HCI_MGMT, &hdev->dev_flags))
915 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
918 hci_dev_unlock(hdev);
/* HCI_OP_USER_PASSKEY_NEG_REPLY complete: forward result to mgmt. */
921 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
924 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
926 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
930 if (test_bit(HCI_MGMT, &hdev->dev_flags))
931 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
932 ACL_LINK, 0, rp->status);
934 hci_dev_unlock(hdev);
/* HCI_OP_READ_LOCAL_OOB_DATA complete: hand P-192 hash/randomizer to
 * mgmt (no P-256 values, hence the NULLs).
 */
937 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
940 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
942 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
945 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
946 NULL, NULL, rp->status);
947 hci_dev_unlock(hdev);
/* HCI_OP_READ_LOCAL_OOB_EXT_DATA complete: hand both P-192 and P-256
 * OOB values to mgmt.
 */
950 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
953 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
955 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
958 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
959 rp->hash256, rp->randomizer256,
961 hci_dev_unlock(hdev);
/* NOTE(review): corrupted line-numbered listing — leading integers and
 * missing brace/guard lines are extraction damage. Comments only.
 */
/* HCI_OP_LE_SET_RANDOM_ADDR complete: cache the random address we sent
 * (status guard and hci_dev_lock() elided).
 */
965 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
967 __u8 status = *((__u8 *) skb->data);
970 BT_DBG("%s status 0x%2.2x", hdev->name, status);
972 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
979 bacpy(&hdev->random_addr, sent);
981 hci_dev_unlock(hdev);
/* HCI_OP_LE_SET_ADV_ENABLE complete: when enabling advertising while a
 * peripheral-initiated connection is pending, arm the LE connection
 * timeout; then report the new advertising state to mgmt. The flag
 * tests surrounding this are among the elided lines.
 */
984 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
986 __u8 *sent, status = *((__u8 *) skb->data);
988 BT_DBG("%s status 0x%2.2x", hdev->name, status);
990 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
999 /* If we're doing connection initation as peripheral. Set a
1000 * timeout in case something goes wrong.
1003 struct hci_conn *conn;
1005 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1007 queue_delayed_work(hdev->workqueue,
1008 &conn->le_conn_timeout,
1009 HCI_LE_CONN_TIMEOUT);
1012 mgmt_advertising(hdev, *sent);
1014 hci_dev_unlock(hdev);
/* HCI_OP_LE_SET_SCAN_PARAM complete: remember the scan type we asked
 * for (active vs passive) so adv-report handling can behave accordingly.
 */
1017 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1019 struct hci_cp_le_set_scan_param *cp;
1020 __u8 status = *((__u8 *) skb->data);
1022 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1024 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1031 hdev->le_scan_type = cp->type;
1033 hci_dev_unlock(hdev);
/* True if a deferred advertising report is buffered in discovery state
 * (i.e. last_adv_addr differs from BDADDR_ANY).
 */
1036 static bool has_pending_adv_report(struct hci_dev *hdev)
1038 struct discovery_state *d = &hdev->discovery;
1040 return bacmp(&d->last_adv_addr, BDADDR_ANY);
/* Drop any buffered advertising report. */
1043 static void clear_pending_adv_report(struct hci_dev *hdev)
1045 struct discovery_state *d = &hdev->discovery;
1047 bacpy(&d->last_adv_addr, BDADDR_ANY);
1048 d->last_adv_data_len = 0;
/* Buffer one advertising report (addr, type, rssi, data) so it can be
 * merged with a following scan response before reporting to mgmt.
 */
1051 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1052 u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
1054 struct discovery_state *d = &hdev->discovery;
1056 bacpy(&d->last_adv_addr, bdaddr);
1057 d->last_adv_addr_type = bdaddr_type;
1058 d->last_adv_rssi = rssi;
1059 memcpy(d->last_adv_data, data, len);
1060 d->last_adv_data_len = len;
/* HCI_OP_LE_SET_SCAN_ENABLE complete: on enable, set HCI_LE_SCAN and
 * (for active scans) drop stale buffered reports; on disable, flush any
 * pending report to mgmt, cancel the scan-disable timer, clear the scan
 * flag, and finish discovery if scanning was interrupted by a connect.
 * Reserved enable values only get an error log.
 */
1063 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1064 struct sk_buff *skb)
1066 struct hci_cp_le_set_scan_enable *cp;
1067 __u8 status = *((__u8 *) skb->data);
1069 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1071 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1078 switch (cp->enable) {
1079 case LE_SCAN_ENABLE:
1080 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1081 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1082 clear_pending_adv_report(hdev);
1085 case LE_SCAN_DISABLE:
1086 /* We do this here instead of when setting DISCOVERY_STOPPED
1087 * since the latter would potentially require waiting for
1088 * inquiry to stop too.
1090 if (has_pending_adv_report(hdev)) {
1091 struct discovery_state *d = &hdev->discovery;
1093 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1094 d->last_adv_addr_type, NULL, 0, 0,
1095 1, d->last_adv_data,
1096 d->last_adv_data_len, NULL, 0);
1099 /* Cancel this timer so that we don't try to disable scanning
1100 * when it's already disabled.
1102 cancel_delayed_work(&hdev->le_scan_disable);
1104 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1105 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1106 * interrupted scanning due to a connect request. Mark
1107 * therefore discovery as stopped.
1109 if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
1111 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1115 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
/* NOTE(review): corrupted line-numbered listing — leading integers and
 * missing brace/guard lines are extraction damage. Comments only.
 */
/* HCI_OP_LE_READ_WHITE_LIST_SIZE complete: cache the list capacity. */
1120 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1121 struct sk_buff *skb)
1123 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1125 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1128 hdev->le_white_list_size = rp->size;
/* HCI_OP_LE_CLEAR_WHITE_LIST complete: mirror the clear in our list. */
1131 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1132 struct sk_buff *skb)
1134 __u8 status = *((__u8 *) skb->data);
1136 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1139 hci_white_list_clear(hdev);
/* HCI_OP_LE_ADD_TO_WHITE_LIST complete: mirror the addition locally,
 * using the address/type from the sent command.
 */
1142 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1143 struct sk_buff *skb)
1145 struct hci_cp_le_add_to_white_list *sent;
1146 __u8 status = *((__u8 *) skb->data);
1148 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1150 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1155 hci_white_list_add(hdev, &sent->bdaddr, sent->bdaddr_type);
/* HCI_OP_LE_DEL_FROM_WHITE_LIST complete: mirror the removal locally. */
1158 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1159 struct sk_buff *skb)
1161 struct hci_cp_le_del_from_white_list *sent;
1162 __u8 status = *((__u8 *) skb->data);
1164 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1166 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1171 hci_white_list_del(hdev, &sent->bdaddr, sent->bdaddr_type);
/* HCI_OP_LE_READ_SUPPORTED_STATES complete: cache LE state bitmap. */
1174 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1175 struct sk_buff *skb)
1177 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1179 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1182 memcpy(hdev->le_states, rp->le_states, 8);
/* HCI_OP_WRITE_LE_HOST_SUPPORTED complete: mirror the LE / simultaneous
 * LE+BR/EDR host-feature bits and keep HCI_LE_ENABLED (and, when
 * disabling, HCI_ADVERTISING) in sync. The sent->le / sent->simul
 * conditionals are among the elided lines.
 */
1185 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1186 struct sk_buff *skb)
1188 struct hci_cp_write_le_host_supported *sent;
1189 __u8 status = *((__u8 *) skb->data);
1191 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1193 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1199 hdev->features[1][0] |= LMP_HOST_LE;
1200 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1202 hdev->features[1][0] &= ~LMP_HOST_LE;
1203 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1204 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1208 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1210 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
/* HCI_OP_LE_SET_ADV_PARAM complete: remember which own-address type
 * advertising was configured with.
 */
1214 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1216 struct hci_cp_le_set_adv_param *cp;
1217 u8 status = *((u8 *) skb->data);
1219 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1224 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1229 hdev->adv_addr_type = cp->own_address_type;
1230 hci_dev_unlock(hdev);
/* HCI_OP_WRITE_REMOTE_AMP_ASSOC complete: continue writing the next
 * fragment for this phy handle.
 */
1233 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1234 struct sk_buff *skb)
1236 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1238 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1239 hdev->name, rp->status, rp->phy_handle);
1244 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
/* NOTE(review): corrupted line-numbered listing — leading integers and
 * missing brace/guard lines are extraction damage. Comments only.
 */
/* Command Status for HCI_OP_INQUIRY: on failure retry pending
 * connections; on success mark inquiry in progress. The branch
 * structure between these two actions is among the elided lines.
 */
1247 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1249 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1252 hci_conn_check_pending(hdev);
1256 set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status for HCI_OP_CREATE_CONN: on error, close the pending
 * connection (status 0x0c "command disallowed" is retried up to two
 * attempts by re-marking BT_CONNECT2); on success, add a placeholder
 * hci_conn as master if none exists yet.
 */
1259 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1261 struct hci_cp_create_conn *cp;
1262 struct hci_conn *conn;
1264 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1266 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1272 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1274 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1277 if (conn && conn->state == BT_CONNECT) {
1278 if (status != 0x0c || conn->attempt > 2) {
1279 conn->state = BT_CLOSED;
1280 hci_proto_connect_cfm(conn, status);
1283 conn->state = BT_CONNECT2;
1287 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1290 conn->link_mode |= HCI_LM_MASTER;
1292 BT_ERR("No memory for new connection");
1296 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_ADD_SCO: on error, find the SCO child of
 * the ACL link for cp->handle, close it and confirm failure upward.
 * The acl->link dereference obtaining 'sco' is among the elided lines.
 */
1299 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1301 struct hci_cp_add_sco *cp;
1302 struct hci_conn *acl, *sco;
1305 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1310 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1314 handle = __le16_to_cpu(cp->handle);
1316 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1320 acl = hci_conn_hash_lookup_handle(hdev, handle);
1324 sco->state = BT_CLOSED;
1326 hci_proto_connect_cfm(sco, status);
1331 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_AUTH_REQUESTED: on error, fail the
 * in-config connection and drop the reference taken for the request.
 */
1334 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1336 struct hci_cp_auth_requested *cp;
1337 struct hci_conn *conn;
1339 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1344 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1350 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1352 if (conn->state == BT_CONFIG) {
1353 hci_proto_connect_cfm(conn, status);
1354 hci_conn_drop(conn);
1358 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_SET_CONN_ENCRYPT: same error handling shape
 * as hci_cs_auth_requested above.
 */
1361 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1363 struct hci_cp_set_conn_encrypt *cp;
1364 struct hci_conn *conn;
1366 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1371 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1377 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1379 if (conn->state == BT_CONFIG) {
1380 hci_proto_connect_cfm(conn, status);
1381 hci_conn_drop(conn);
1385 hci_dev_unlock(hdev);
/* NOTE(review): corrupted line-numbered listing — leading integers and
 * missing brace/guard/return lines are extraction damage. Comments only.
 */
/* Decide whether an outgoing BT_CONFIG connection still needs
 * authentication: not for inbound/non-config links or SDP-level
 * security; for non-SSP links only when MEDIUM/HIGH security or MITM
 * was requested. Return-value lines (0/1) are elided.
 */
1388 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1389 struct hci_conn *conn)
1391 if (conn->state != BT_CONFIG || !conn->out)
1394 if (conn->pending_sec_level == BT_SECURITY_SDP)
1397 /* Only request authentication for SSP connections or non-SSP
1398 * devices with sec_level MEDIUM or HIGH or if MITM protection
1401 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1402 conn->pending_sec_level != BT_SECURITY_HIGH &&
1403 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue HCI_OP_REMOTE_NAME_REQ for one inquiry-cache entry, reusing the
 * page-scan parameters learned during inquiry. Returns hci_send_cmd()'s
 * result (0 on success).
 */
1409 static int hci_resolve_name(struct hci_dev *hdev,
1410 struct inquiry_entry *e)
1412 struct hci_cp_remote_name_req cp;
1414 memset(&cp, 0, sizeof(cp));
1416 bacpy(&cp.bdaddr, &e->data.bdaddr);
1417 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1418 cp.pscan_mode = e->data.pscan_mode;
1419 cp.clock_offset = e->data.clock_offset;
1421 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Kick off name resolution for the next NAME_NEEDED entry in the
 * discovery resolve list; marks it NAME_PENDING on success. The
 * true/false return lines are elided.
 */
1424 static bool hci_resolve_next_name(struct hci_dev *hdev)
1426 struct discovery_state *discov = &hdev->discovery;
1427 struct inquiry_entry *e;
1429 if (list_empty(&discov->resolve))
1432 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1436 if (hci_resolve_name(hdev, e) == 0) {
1437 e->name_state = NAME_PENDING;
/* Handle a resolved (or failed) remote name during discovery: report
 * device-connected/remote-name to mgmt, update the cache entry's
 * name_state, then either resolve the next name or declare discovery
 * stopped. Several guards and the discov_complete label are elided.
 */
1444 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1445 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1447 struct discovery_state *discov = &hdev->discovery;
1448 struct inquiry_entry *e;
1450 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1451 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1452 name_len, conn->dev_class);
1454 if (discov->state == DISCOVERY_STOPPED)
1457 if (discov->state == DISCOVERY_STOPPING)
1458 goto discov_complete;
1460 if (discov->state != DISCOVERY_RESOLVING)
1463 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING)
1464 /* If the device was not found in a list of found devices names of which
1465 * are pending. there is no need to continue resolving a next name as it
1466 * will be done upon receiving another Remote Name Request Complete
1473 e->name_state = NAME_KNOWN;
1474 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1475 e->data.rssi, name, name_len);
1477 e->name_state = NAME_NOT_KNOWN;
1480 if (hci_resolve_next_name(hdev))
1484 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status handler for HCI_OP_REMOTE_NAME_REQ.  On success the
 * Remote Name Request Complete event does the follow-up; on failure this
 * path resolves any pending mgmt name lookup and, for an established ACL
 * connection, kicks off authentication if still needed.
 * NOTE(review): the status check and hci_dev_lock() are elided here.
 */
1487 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1489 struct hci_cp_remote_name_req *cp;
1490 struct hci_conn *conn;
1492 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1494 /* If successful wait for the name req complete event before
1495 * checking for the need to do authentication */
/* Recover the bdaddr from the command we actually sent. */
1499 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1505 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1507 if (test_bit(HCI_MGMT, &hdev->dev_flags))
/* Failed request: report "no name" so discovery can move on. */
1508 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1513 if (!hci_outgoing_auth_needed(hdev, conn))
1516 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1517 struct hci_cp_auth_requested auth_cp;
1519 auth_cp.handle = __cpu_to_le16(conn->handle);
1520 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1521 sizeof(auth_cp), &auth_cp);
1525 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_READ_REMOTE_FEATURES.  Only the
 * failure path matters here: abort the connection setup (connect_cfm
 * with the error status) for a connection still in BT_CONFIG.
 * NOTE(review): status/cp NULL guards and hci_dev_lock() are elided.
 */
1528 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1530 struct hci_cp_read_remote_features *cp;
1531 struct hci_conn *conn;
1533 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1538 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1544 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1546 if (conn->state == BT_CONFIG) {
1547 hci_proto_connect_cfm(conn, status);
1548 hci_conn_drop(conn);
1552 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_READ_REMOTE_EXT_FEATURES.  Mirrors
 * hci_cs_read_remote_features(): on failure, complete connection setup
 * with the error so upper protocols stop waiting.
 * NOTE(review): status/cp NULL guards and hci_dev_lock() are elided.
 */
1555 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1557 struct hci_cp_read_remote_ext_features *cp;
1558 struct hci_conn *conn;
1560 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1565 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1571 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1573 if (conn->state == BT_CONFIG) {
1574 hci_proto_connect_cfm(conn, status);
1575 hci_conn_drop(conn);
1579 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_SETUP_SYNC_CONN.  On failure, close
 * the SCO link attached to the ACL connection identified by the handle
 * in the sent command and notify upper protocols.
 * NOTE(review): lookup of 'sco' from 'acl' (acl->link) is elided here,
 * as are the status/NULL guards — confirm against full source.
 */
1582 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1584 struct hci_cp_setup_sync_conn *cp;
1585 struct hci_conn *acl, *sco;
1588 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1593 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1597 handle = __le16_to_cpu(cp->handle);
1599 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1603 acl = hci_conn_hash_lookup_handle(hdev, handle);
1607 sco->state = BT_CLOSED;
1609 hci_proto_connect_cfm(sco, status);
1614 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_SNIFF_MODE.  On failure, clear the
 * pending mode-change flag and run any deferred SCO setup with the
 * error status.  NOTE(review): status/NULL guards are elided.
 */
1617 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1619 struct hci_cp_sniff_mode *cp;
1620 struct hci_conn *conn;
1622 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1627 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1633 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1635 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1637 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1638 hci_sco_setup(conn, status);
1641 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_EXIT_SNIFF_MODE.  Identical shape to
 * hci_cs_sniff_mode(): on failure clear the pending mode change and run
 * deferred SCO setup.  NOTE(review): status/NULL guards are elided.
 */
1644 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1646 struct hci_cp_exit_sniff_mode *cp;
1647 struct hci_conn *conn;
1649 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1654 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1660 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1662 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1664 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1665 hci_sco_setup(conn, status);
1668 hci_dev_unlock(hdev);
1671 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1673 struct hci_cp_disconnect *cp;
1674 struct hci_conn *conn;
1679 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1685 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1687 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1688 conn->dst_type, status);
1690 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_CREATE_PHY_LINK (AMP).  On failure
 * the AMP hcon is presumably cleaned up (elided here); otherwise proceed
 * to write the remote AMP assoc for the new phy handle.
 */
1693 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1695 struct hci_cp_create_phy_link *cp;
1697 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1699 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1706 struct hci_conn *hcon;
1708 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1712 amp_write_remote_assoc(hdev, cp->phy_handle);
1715 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_ACCEPT_PHY_LINK (AMP).  On success
 * continue the AMP setup by writing the remote assoc data.
 * NOTE(review): the status and cp NULL guards are elided in this extract.
 */
1718 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1720 struct hci_cp_accept_phy_link *cp;
1722 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1727 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1731 amp_write_remote_assoc(hdev, cp->phy_handle);
/* Command Status handler for HCI_OP_LE_CREATE_CONN.  Failure handling is
 * done elsewhere (hci_le_conn_failed via the request callbacks); here we
 * record the initiator/responder addresses needed by SMP and arm a
 * connection-attempt timeout, since LE has no page timeout of its own.
 * NOTE(review): the conn NULL guard after the lookup is elided.
 */
1734 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
1736 struct hci_cp_le_create_conn *cp;
1737 struct hci_conn *conn;
1739 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1741 /* All connection failure handling is taken care of by the
1742 * hci_le_conn_failed function which is triggered by the HCI
1743 * request completion callbacks used for connecting.
1748 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1754 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1758 /* Store the initiator and responder address information which
1759 * is needed for SMP. These values will not change during the
1760 * lifetime of the connection.
/* Our own (initiator) address: the RPA/static random address if the
 * command used a random own-address type, else the public bdaddr. */
1762 conn->init_addr_type = cp->own_address_type;
1763 if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
1764 bacpy(&conn->init_addr, &hdev->random_addr);
1766 bacpy(&conn->init_addr, &hdev->bdaddr);
1768 conn->resp_addr_type = cp->peer_addr_type;
1769 bacpy(&conn->resp_addr, &cp->peer_addr);
1771 /* We don't want the connection attempt to stick around
1772 * indefinitely since LE doesn't have a page timeout concept
1773 * like BR/EDR. Set a timer for any connection that doesn't use
1774 * the white list for connecting.
1776 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
1777 queue_delayed_work(conn->hdev->workqueue,
1778 &conn->le_conn_timeout,
1779 HCI_LE_CONN_TIMEOUT);
1782 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_LE_START_ENC.  On failure, a link
 * that is still BT_CONNECTED is torn down with an authentication-failure
 * reason, since encryption could not be started.
 * NOTE(review): status/NULL guards and hci_dev_lock() are elided.
 */
1785 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1787 struct hci_cp_le_start_enc *cp;
1788 struct hci_conn *conn;
1790 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1797 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
1801 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1805 if (conn->state != BT_CONNECTED)
1808 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
1809 hci_conn_drop(conn);
1812 hci_dev_unlock(hdev);
/* Inquiry Complete event: clear HCI_INQUIRY, wake anyone sleeping on
 * that bit, and — when mgmt drives discovery — either move on to name
 * resolution of found devices or mark discovery stopped.
 * NOTE(review): the return after a cleared HCI_INQUIRY test-and-clear
 * failure path and hci_dev_lock() are elided in this extract.
 */
1815 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1817 __u8 status = *((__u8 *) skb->data);
1818 struct discovery_state *discov = &hdev->discovery;
1819 struct inquiry_entry *e;
1821 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1823 hci_conn_check_pending(hdev);
1825 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
/* Pairs with the barrier expected by wake_up_bit() waiters. */
1828 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
1829 wake_up_bit(&hdev->flags, HCI_INQUIRY);
1831 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1836 if (discov->state != DISCOVERY_FINDING)
/* Nothing left to resolve: discovery is done. */
1839 if (list_empty(&discov->resolve)) {
1840 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1844 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1845 if (e && hci_resolve_name(hdev, e) == 0) {
1846 e->name_state = NAME_PENDING;
1847 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1849 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1853 hci_dev_unlock(hdev);
/* Inquiry Result event (no RSSI, no EIR): for each response, update the
 * inquiry cache and report the found device to mgmt.  Periodic inquiry
 * results are ignored for mgmt purposes.
 * NOTE(review): the num_rsp==0 guard and hci_dev_lock() are elided; the
 * mgmt_device_found() call's final argument(s) are also cut off here.
 */
1856 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1858 struct inquiry_data data;
/* Responses start after the leading num_rsp byte. */
1859 struct inquiry_info *info = (void *) (skb->data + 1);
1860 int num_rsp = *((__u8 *) skb->data);
1862 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1867 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1872 for (; num_rsp; num_rsp--, info++) {
1873 bool name_known, ssp;
1875 bacpy(&data.bdaddr, &info->bdaddr);
1876 data.pscan_rep_mode = info->pscan_rep_mode;
1877 data.pscan_period_mode = info->pscan_period_mode;
1878 data.pscan_mode = info->pscan_mode;
1879 memcpy(data.dev_class, info->dev_class, 3);
1880 data.clock_offset = info->clock_offset;
/* Plain inquiry result carries no SSP/EIR information. */
1882 data.ssp_mode = 0x00;
1884 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1885 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1886 info->dev_class, 0, !name_known, ssp, NULL,
1890 hci_dev_unlock(hdev);
/* Connection Complete event: finish setting up the hci_conn for ACL and
 * SCO links.  ACL links move to BT_CONFIG and trigger a remote-features
 * read; SCO links go straight to BT_CONNECTED.  On error, the connection
 * is closed and (for ACL) mgmt is told the connect failed.
 * NOTE(review): the ev->status branching and several guards/returns are
 * elided in this extract — statement grouping below is approximate.
 */
1893 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1895 struct hci_ev_conn_complete *ev = (void *) skb->data;
1896 struct hci_conn *conn;
1898 BT_DBG("%s", hdev->name);
1902 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
/* Controllers may report SCO_LINK for what we tracked as ESCO_LINK;
 * retry the lookup and downgrade the stored type to match. */
1904 if (ev->link_type != SCO_LINK)
1907 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1911 conn->type = SCO_LINK;
1915 conn->handle = __le16_to_cpu(ev->handle);
1917 if (conn->type == ACL_LINK) {
1918 conn->state = BT_CONFIG;
1919 hci_conn_hold(conn);
/* Incoming legacy (non-SSP) link with no stored key: allow extra
 * time for pairing before the idle disconnect timer fires. */
1921 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1922 !hci_find_link_key(hdev, &ev->bdaddr))
1923 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1925 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1927 conn->state = BT_CONNECTED;
1929 hci_conn_add_sysfs(conn);
1931 if (test_bit(HCI_AUTH, &hdev->flags))
1932 conn->link_mode |= HCI_LM_AUTH;
1934 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1935 conn->link_mode |= HCI_LM_ENCRYPT;
1937 /* Get remote features */
1938 if (conn->type == ACL_LINK) {
1939 struct hci_cp_read_remote_features cp;
1940 cp.handle = ev->handle;
1941 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1945 /* Set packet type for incoming connection */
1946 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1947 struct hci_cp_change_conn_ptype cp;
1948 cp.handle = ev->handle;
1949 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1950 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
/* Error path: close the connection and report the failure. */
1954 conn->state = BT_CLOSED;
1955 if (conn->type == ACL_LINK)
1956 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1957 conn->dst_type, ev->status);
1960 if (conn->type == ACL_LINK)
1961 hci_sco_setup(conn, ev->status);
1964 hci_proto_connect_cfm(conn, ev->status);
1966 } else if (ev->link_type != ACL_LINK)
1967 hci_proto_connect_cfm(conn, ev->status);
1970 hci_dev_unlock(hdev);
1972 hci_conn_check_pending(hdev);
/* Connection Request event: decide whether to accept an incoming
 * connection.  Accepted if some protocol sets HCI_LM_ACCEPT and the peer
 * is not blacklisted; otherwise rejected with HCI_ERROR_REJ_BAD_ADDR.
 * ACL requests are accepted immediately; sync (SCO/eSCO) requests are
 * either accepted with fixed parameters or deferred to user space.
 * NOTE(review): 'flags' declaration and several braces are elided.
 */
1975 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1977 struct hci_ev_conn_request *ev = (void *) skb->data;
1978 int mask = hdev->link_mode;
1981 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
/* Let L2CAP/SCO vote on whether to accept and whether to defer. */
1984 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
1987 if ((mask & HCI_LM_ACCEPT) &&
1988 !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
1989 /* Connection accepted */
1990 struct inquiry_entry *ie;
1991 struct hci_conn *conn;
1995 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1997 memcpy(ie->data.dev_class, ev->dev_class, 3);
1999 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2002 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
2004 BT_ERR("No memory for new connection");
2005 hci_dev_unlock(hdev);
2010 memcpy(conn->dev_class, ev->dev_class, 3);
2012 hci_dev_unlock(hdev);
2014 if (ev->link_type == ACL_LINK ||
2015 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2016 struct hci_cp_accept_conn_req cp;
2017 conn->state = BT_CONNECT;
2019 bacpy(&cp.bdaddr, &ev->bdaddr);
2021 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2022 cp.role = 0x00; /* Become master */
2024 cp.role = 0x01; /* Remain slave */
2026 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
2028 } else if (!(flags & HCI_PROTO_DEFER)) {
2029 struct hci_cp_accept_sync_conn_req cp;
2030 conn->state = BT_CONNECT;
2032 bacpy(&cp.bdaddr, &ev->bdaddr);
2033 cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* Fixed 8 kB/s bandwidth, no latency cap, default voice setting. */
2035 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2036 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2037 cp.max_latency = cpu_to_le16(0xffff);
2038 cp.content_format = cpu_to_le16(hdev->voice_setting);
2039 cp.retrans_effort = 0xff;
2041 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
/* Deferred: let user space decide before the accept is sent. */
2044 conn->state = BT_CONNECT2;
2045 hci_proto_connect_cfm(conn, 0);
2048 /* Connection rejected */
2049 struct hci_cp_reject_conn_req cp;
2051 bacpy(&cp.bdaddr, &ev->bdaddr);
2052 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2053 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Map an HCI disconnect reason code to the corresponding mgmt
 * MGMT_DEV_DISCONN_* reason reported to user space.  Unrecognized codes
 * fall through to MGMT_DEV_DISCONN_UNKNOWN.
 * NOTE(review): the switch(err) line itself is elided in this extract.
 */
2057 static u8 hci_to_mgmt_reason(u8 err)
2060 case HCI_ERROR_CONNECTION_TIMEOUT:
2061 return MGMT_DEV_DISCONN_TIMEOUT;
2062 case HCI_ERROR_REMOTE_USER_TERM:
2063 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2064 case HCI_ERROR_REMOTE_POWER_OFF:
2065 return MGMT_DEV_DISCONN_REMOTE;
2066 case HCI_ERROR_LOCAL_HOST_TERM:
2067 return MGMT_DEV_DISCONN_LOCAL_HOST;
2069 return MGMT_DEV_DISCONN_UNKNOWN;
/* Disconnection Complete event: close the hci_conn, notify mgmt with a
 * translated reason, drop the stored link key if flagged, re-queue
 * auto-connect parameters for LE peers, and re-enable advertising when
 * an LE link made the controller stop it.
 * NOTE(review): 'type' capture (type = conn->type before the conn is
 * deleted) and the error/NULL guards are elided in this extract.
 */
2073 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2075 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2076 u8 reason = hci_to_mgmt_reason(ev->reason);
2077 struct hci_conn_params *params;
2078 struct hci_conn *conn;
2079 bool mgmt_connected;
2082 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2086 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* A failed disconnect attempt is reported to mgmt as such. */
2091 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2092 conn->dst_type, ev->status);
2096 conn->state = BT_CLOSED;
2098 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2099 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2100 reason, mgmt_connected);
2102 if (conn->type == ACL_LINK && conn->flush_key)
2103 hci_remove_link_key(hdev, &conn->dst);
/* Re-arm LE auto-connection according to the stored policy. */
2105 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2107 switch (params->auto_connect) {
2108 case HCI_AUTO_CONN_LINK_LOSS:
2109 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2113 case HCI_AUTO_CONN_ALWAYS:
2114 hci_pend_le_conn_add(hdev, &conn->dst, conn->dst_type);
2124 hci_proto_disconn_cfm(conn, ev->reason);
2127 /* Re-enable advertising if necessary, since it might
2128 * have been disabled by the connection. From the
2129 * HCI_LE_Set_Advertise_Enable command description in
2130 * the core specification (v4.0):
2131 * "The Controller shall continue advertising until the Host
2132 * issues an LE_Set_Advertise_Enable command with
2133 * Advertising_Enable set to 0x00 (Advertising is disabled)
2134 * or until a connection is created or until the Advertising
2135 * is timed out due to Directed Advertising."
2137 if (type == LE_LINK)
2138 mgmt_reenable_advertising(hdev)
2141 hci_dev_unlock(hdev);
/* Authentication Complete event: update link mode and security level on
 * success, report auth failure to mgmt otherwise, then continue the
 * BT_CONFIG handshake (request encryption for SSP links) or deliver
 * auth/encrypt confirmations to waiting users.
 * NOTE(review): the ev->status branch structure and several braces are
 * elided in this extract — grouping below is approximate.
 */
2144 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2146 struct hci_ev_auth_complete *ev = (void *) skb->data;
2147 struct hci_conn *conn;
2149 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2153 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Legacy (pre-SSP) devices cannot re-authenticate an existing link. */
2158 if (!hci_conn_ssp_enabled(conn) &&
2159 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2160 BT_INFO("re-auth of legacy device is not possible.");
2162 conn->link_mode |= HCI_LM_AUTH;
2163 conn->sec_level = conn->pending_sec_level;
2166 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
2170 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2171 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2173 if (conn->state == BT_CONFIG) {
2174 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2175 struct hci_cp_set_conn_encrypt cp;
2176 cp.handle = ev->handle;
/* SSP requires encryption after authentication succeeds. */
2178 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2181 conn->state = BT_CONNECTED;
2182 hci_proto_connect_cfm(conn, ev->status);
2183 hci_conn_drop(conn);
2186 hci_auth_cfm(conn, ev->status);
2188 hci_conn_hold(conn);
2189 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2190 hci_conn_drop(conn);
/* An encryption change was queued behind this authentication. */
2193 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2195 struct hci_cp_set_conn_encrypt cp;
2196 cp.handle = ev->handle;
2198 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2201 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2202 hci_encrypt_cfm(conn, ev->status, 0x00);
2207 hci_dev_unlock(hdev);
/* Remote Name Request Complete event: feed the (possibly absent) name
 * into the pending-name machinery for mgmt, then, if an ACL connection
 * exists and still needs outgoing authentication, request it.
 * NOTE(review): hci_dev_lock() and a goto/label around the mgmt check
 * are elided in this extract.
 */
2210 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2212 struct hci_ev_remote_name *ev = (void *) skb->data;
2213 struct hci_conn *conn;
2215 BT_DBG("%s", hdev->name);
2217 hci_conn_check_pending(hdev);
2221 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2223 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2226 if (ev->status == 0)
/* Name may not be NUL-terminated; bound it explicitly. */
2227 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2228 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2230 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2236 if (!hci_outgoing_auth_needed(hdev, conn))
2239 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2240 struct hci_cp_auth_requested cp;
2241 cp.handle = __cpu_to_le16(conn->handle);
2242 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2246 hci_dev_unlock(hdev);
/* Encryption Change event: update HCI_LM_AUTH/ENCRYPT/FIPS link-mode
 * bits and the AES-CCM flag, enforce Secure Connections Only policy for
 * links finishing BT_CONFIG, and deliver encrypt confirmations.
 * NOTE(review): the ev->status / ev->encrypt branch structure is elided
 * in this extract — grouping below is approximate.
 */
2249 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2251 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2252 struct hci_conn *conn;
2254 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2258 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2264 /* Encryption implies authentication */
2265 conn->link_mode |= HCI_LM_AUTH;
2266 conn->link_mode |= HCI_LM_ENCRYPT;
2267 conn->sec_level = conn->pending_sec_level;
2269 /* P-256 authentication key implies FIPS */
2270 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2271 conn->link_mode |= HCI_LM_FIPS;
/* encrypt == 0x02 means AES-CCM on BR/EDR; LE always uses AES-CCM. */
2273 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2274 conn->type == LE_LINK)
2275 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2277 conn->link_mode &= ~HCI_LM_ENCRYPT;
2278 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2282 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* A failed encryption change on a live link forces a disconnect. */
2284 if (ev->status && conn->state == BT_CONNECTED) {
2285 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2286 hci_conn_drop(conn);
2290 if (conn->state == BT_CONFIG) {
2292 conn->state = BT_CONNECTED;
2294 /* In Secure Connections Only mode, do not allow any
2295 * connections that are not encrypted with AES-CCM
2296 * using a P-256 authenticated combination key.
2298 if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
2299 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2300 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2301 hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2302 hci_conn_drop(conn);
2306 hci_proto_connect_cfm(conn, ev->status);
2307 hci_conn_drop(conn);
2309 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2312 hci_dev_unlock(hdev);
/* Change Connection Link Key Complete event: on success mark the link
 * secure, clear the pending-auth flag and notify key-change waiters.
 * NOTE(review): status/NULL guards and hci_dev_lock() are elided.
 */
2315 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2316 struct sk_buff *skb)
2318 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2319 struct hci_conn *conn;
2321 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2325 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2328 conn->link_mode |= HCI_LM_SECURE;
2330 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2332 hci_key_change_cfm(conn, ev->status);
2335 hci_dev_unlock(hdev);
/* Read Remote Supported Features complete event: store page 0 of the
 * remote feature mask, then continue connection setup — read extended
 * features when both sides support SSP, otherwise resolve the remote
 * name (or tell mgmt we are connected) and finish BT_CONFIG if no
 * outgoing authentication is required.
 * NOTE(review): status guards and goto/label lines are elided here.
 */
2338 static void hci_remote_features_evt(struct hci_dev *hdev,
2339 struct sk_buff *skb)
2341 struct hci_ev_remote_features *ev = (void *) skb->data;
2342 struct hci_conn *conn;
2344 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2348 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2353 memcpy(conn->features[0], ev->features, 8);
2355 if (conn->state != BT_CONFIG)
/* Both sides SSP-capable: fetch extended features (page 1) next. */
2358 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2359 struct hci_cp_read_remote_ext_features cp;
2360 cp.handle = ev->handle;
2362 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2367 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2368 struct hci_cp_remote_name_req cp;
2369 memset(&cp, 0, sizeof(cp));
2370 bacpy(&cp.bdaddr, &conn->dst);
2371 cp.pscan_rep_mode = 0x02;
2372 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2373 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2374 mgmt_device_connected(hdev, &conn->dst, conn->type,
2375 conn->dst_type, 0, NULL, 0,
2378 if (!hci_outgoing_auth_needed(hdev, conn)) {
2379 conn->state = BT_CONNECTED;
2380 hci_proto_connect_cfm(conn, ev->status);
2381 hci_conn_drop(conn);
2385 hci_dev_unlock(hdev);
/* Command Complete event dispatcher: strip the event header, route the
 * remaining payload to the per-opcode hci_cc_* handler, complete any
 * pending HCI request for this opcode, and — when the controller says it
 * can take more commands (ev->ncmd) — restart the command queue.
 * NOTE(review): the 'u16 opcode' declaration, switch(opcode) line,
 * break statements and the default label are elided in this extract.
 */
2388 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2390 struct hci_ev_cmd_complete *ev = (void *) skb->data;
/* First byte of the return parameters is the command status. */
2391 u8 status = skb->data[sizeof(*ev)];
2394 skb_pull(skb, sizeof(*ev));
2396 opcode = __le16_to_cpu(ev->opcode);
2399 case HCI_OP_INQUIRY_CANCEL:
2400 hci_cc_inquiry_cancel(hdev, skb);
2403 case HCI_OP_PERIODIC_INQ:
2404 hci_cc_periodic_inq(hdev, skb);
2407 case HCI_OP_EXIT_PERIODIC_INQ:
2408 hci_cc_exit_periodic_inq(hdev, skb);
2411 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2412 hci_cc_remote_name_req_cancel(hdev, skb);
2415 case HCI_OP_ROLE_DISCOVERY:
2416 hci_cc_role_discovery(hdev, skb);
2419 case HCI_OP_READ_LINK_POLICY:
2420 hci_cc_read_link_policy(hdev, skb);
2423 case HCI_OP_WRITE_LINK_POLICY:
2424 hci_cc_write_link_policy(hdev, skb);
2427 case HCI_OP_READ_DEF_LINK_POLICY:
2428 hci_cc_read_def_link_policy(hdev, skb);
2431 case HCI_OP_WRITE_DEF_LINK_POLICY:
2432 hci_cc_write_def_link_policy(hdev, skb);
2436 hci_cc_reset(hdev, skb);
2439 case HCI_OP_WRITE_LOCAL_NAME:
2440 hci_cc_write_local_name(hdev, skb);
2443 case HCI_OP_READ_LOCAL_NAME:
2444 hci_cc_read_local_name(hdev, skb);
2447 case HCI_OP_WRITE_AUTH_ENABLE:
2448 hci_cc_write_auth_enable(hdev, skb);
2451 case HCI_OP_WRITE_ENCRYPT_MODE:
2452 hci_cc_write_encrypt_mode(hdev, skb);
2455 case HCI_OP_WRITE_SCAN_ENABLE:
2456 hci_cc_write_scan_enable(hdev, skb);
2459 case HCI_OP_READ_CLASS_OF_DEV:
2460 hci_cc_read_class_of_dev(hdev, skb);
2463 case HCI_OP_WRITE_CLASS_OF_DEV:
2464 hci_cc_write_class_of_dev(hdev, skb);
2467 case HCI_OP_READ_VOICE_SETTING:
2468 hci_cc_read_voice_setting(hdev, skb);
2471 case HCI_OP_WRITE_VOICE_SETTING:
2472 hci_cc_write_voice_setting(hdev, skb);
2475 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2476 hci_cc_read_num_supported_iac(hdev, skb);
2479 case HCI_OP_WRITE_SSP_MODE:
2480 hci_cc_write_ssp_mode(hdev, skb);
2483 case HCI_OP_WRITE_SC_SUPPORT:
2484 hci_cc_write_sc_support(hdev, skb);
2487 case HCI_OP_READ_LOCAL_VERSION:
2488 hci_cc_read_local_version(hdev, skb);
2491 case HCI_OP_READ_LOCAL_COMMANDS:
2492 hci_cc_read_local_commands(hdev, skb);
2495 case HCI_OP_READ_LOCAL_FEATURES:
2496 hci_cc_read_local_features(hdev, skb);
2499 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2500 hci_cc_read_local_ext_features(hdev, skb);
2503 case HCI_OP_READ_BUFFER_SIZE:
2504 hci_cc_read_buffer_size(hdev, skb);
2507 case HCI_OP_READ_BD_ADDR:
2508 hci_cc_read_bd_addr(hdev, skb);
2511 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2512 hci_cc_read_page_scan_activity(hdev, skb);
2515 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2516 hci_cc_write_page_scan_activity(hdev, skb);
2519 case HCI_OP_READ_PAGE_SCAN_TYPE:
2520 hci_cc_read_page_scan_type(hdev, skb);
2523 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2524 hci_cc_write_page_scan_type(hdev, skb);
2527 case HCI_OP_READ_DATA_BLOCK_SIZE:
2528 hci_cc_read_data_block_size(hdev, skb);
2531 case HCI_OP_READ_FLOW_CONTROL_MODE:
2532 hci_cc_read_flow_control_mode(hdev, skb);
2535 case HCI_OP_READ_LOCAL_AMP_INFO:
2536 hci_cc_read_local_amp_info(hdev, skb);
2539 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2540 hci_cc_read_local_amp_assoc(hdev, skb);
2543 case HCI_OP_READ_INQ_RSP_TX_POWER:
2544 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2547 case HCI_OP_PIN_CODE_REPLY:
2548 hci_cc_pin_code_reply(hdev, skb);
2551 case HCI_OP_PIN_CODE_NEG_REPLY:
2552 hci_cc_pin_code_neg_reply(hdev, skb);
2555 case HCI_OP_READ_LOCAL_OOB_DATA:
2556 hci_cc_read_local_oob_data(hdev, skb);
2559 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
2560 hci_cc_read_local_oob_ext_data(hdev, skb);
2563 case HCI_OP_LE_READ_BUFFER_SIZE:
2564 hci_cc_le_read_buffer_size(hdev, skb);
2567 case HCI_OP_LE_READ_LOCAL_FEATURES:
2568 hci_cc_le_read_local_features(hdev, skb);
2571 case HCI_OP_LE_READ_ADV_TX_POWER:
2572 hci_cc_le_read_adv_tx_power(hdev, skb);
2575 case HCI_OP_USER_CONFIRM_REPLY:
2576 hci_cc_user_confirm_reply(hdev, skb);
2579 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2580 hci_cc_user_confirm_neg_reply(hdev, skb);
2583 case HCI_OP_USER_PASSKEY_REPLY:
2584 hci_cc_user_passkey_reply(hdev, skb);
2587 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2588 hci_cc_user_passkey_neg_reply(hdev, skb);
2591 case HCI_OP_LE_SET_RANDOM_ADDR:
2592 hci_cc_le_set_random_addr(hdev, skb);
2595 case HCI_OP_LE_SET_ADV_ENABLE:
2596 hci_cc_le_set_adv_enable(hdev, skb);
2599 case HCI_OP_LE_SET_SCAN_PARAM:
2600 hci_cc_le_set_scan_param(hdev, skb);
2603 case HCI_OP_LE_SET_SCAN_ENABLE:
2604 hci_cc_le_set_scan_enable(hdev, skb);
2607 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2608 hci_cc_le_read_white_list_size(hdev, skb);
2611 case HCI_OP_LE_CLEAR_WHITE_LIST:
2612 hci_cc_le_clear_white_list(hdev, skb);
2615 case HCI_OP_LE_ADD_TO_WHITE_LIST:
2616 hci_cc_le_add_to_white_list(hdev, skb);
2619 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
2620 hci_cc_le_del_from_white_list(hdev, skb);
2623 case HCI_OP_LE_READ_SUPPORTED_STATES:
2624 hci_cc_le_read_supported_states(hdev, skb);
2627 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2628 hci_cc_write_le_host_supported(hdev, skb);
2631 case HCI_OP_LE_SET_ADV_PARAM:
2632 hci_cc_set_adv_param(hdev, skb);
2635 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2636 hci_cc_write_remote_amp_assoc(hdev, skb);
/* Unhandled opcode: just log it. */
2640 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
/* Any real command response means the command didn't time out. */
2644 if (opcode != HCI_OP_NOP)
2645 del_timer(&hdev->cmd_timer);
2647 hci_req_cmd_complete(hdev, opcode, status);
/* Controller has credit again: resume sending queued commands. */
2649 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2650 atomic_set(&hdev->cmd_cnt, 1);
2651 if (!skb_queue_empty(&hdev->cmd_q))
2652 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Command Status event dispatcher: route the status to the per-opcode
 * hci_cs_* handler, complete a pending request unless it is still
 * waiting for a specific follow-up event, and restart the command queue
 * when the controller grants more credits.
 * NOTE(review): the 'u16 opcode' declaration, switch(opcode) line, break
 * statements, default label and the first half of the ev->status
 * condition before line 2739 are elided in this extract.
 */
2656 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2658 struct hci_ev_cmd_status *ev = (void *) skb->data;
2661 skb_pull(skb, sizeof(*ev));
2663 opcode = __le16_to_cpu(ev->opcode);
2666 case HCI_OP_INQUIRY:
2667 hci_cs_inquiry(hdev, ev->status);
2670 case HCI_OP_CREATE_CONN:
2671 hci_cs_create_conn(hdev, ev->status);
2674 case HCI_OP_ADD_SCO:
2675 hci_cs_add_sco(hdev, ev->status);
2678 case HCI_OP_AUTH_REQUESTED:
2679 hci_cs_auth_requested(hdev, ev->status);
2682 case HCI_OP_SET_CONN_ENCRYPT:
2683 hci_cs_set_conn_encrypt(hdev, ev->status);
2686 case HCI_OP_REMOTE_NAME_REQ:
2687 hci_cs_remote_name_req(hdev, ev->status);
2690 case HCI_OP_READ_REMOTE_FEATURES:
2691 hci_cs_read_remote_features(hdev, ev->status);
2694 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2695 hci_cs_read_remote_ext_features(hdev, ev->status);
2698 case HCI_OP_SETUP_SYNC_CONN:
2699 hci_cs_setup_sync_conn(hdev, ev->status);
2702 case HCI_OP_SNIFF_MODE:
2703 hci_cs_sniff_mode(hdev, ev->status);
2706 case HCI_OP_EXIT_SNIFF_MODE:
2707 hci_cs_exit_sniff_mode(hdev, ev->status);
2710 case HCI_OP_DISCONNECT:
2711 hci_cs_disconnect(hdev, ev->status);
2714 case HCI_OP_CREATE_PHY_LINK:
2715 hci_cs_create_phylink(hdev, ev->status);
2718 case HCI_OP_ACCEPT_PHY_LINK:
2719 hci_cs_accept_phylink(hdev, ev->status);
2722 case HCI_OP_LE_CREATE_CONN:
2723 hci_cs_le_create_conn(hdev, ev->status);
2726 case HCI_OP_LE_START_ENC:
2727 hci_cs_le_start_enc(hdev, ev->status);
/* Unhandled opcode: just log it. */
2731 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2735 if (opcode != HCI_OP_NOP)
2736 del_timer(&hdev->cmd_timer);
/* Complete the request now only if it isn't waiting on an event. */
2739 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
2740 hci_req_cmd_complete(hdev, opcode, ev->status);
2742 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2743 atomic_set(&hdev->cmd_cnt, 1);
2744 if (!skb_queue_empty(&hdev->cmd_q))
2745 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Role Change event: update the HCI_LM_MASTER bit according to the new
 * role, clear the pending role-switch flag and notify waiters.
 * NOTE(review): the status/NULL guards and the ev->role test selecting
 * between the two link_mode updates are elided in this extract.
 */
2749 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2751 struct hci_ev_role_change *ev = (void *) skb->data;
2752 struct hci_conn *conn;
2754 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2758 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2762 conn->link_mode &= ~HCI_LM_MASTER;
2764 conn->link_mode |= HCI_LM_MASTER;
2767 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2769 hci_role_switch_cfm(conn, ev->status, ev->role);
2772 hci_dev_unlock(hdev);
/* Number Of Completed Packets event (packet-based flow control): credit
 * back per-connection 'sent' counters and the per-type controller packet
 * budgets (ACL/LE/SCO, each clamped to its maximum), then kick the TX
 * work to send more data.
 * NOTE(review): the 'int i' declaration, case labels (ACL_LINK etc.) and
 * the !conn continue are elided in this extract.
 */
2775 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2777 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2780 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2781 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate the claimed handle count against the actual skb length. */
2785 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2786 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2787 BT_DBG("%s bad parameters", hdev->name);
2791 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2793 for (i = 0; i < ev->num_hndl; i++) {
2794 struct hci_comp_pkts_info *info = &ev->handles[i];
2795 struct hci_conn *conn;
2796 __u16 handle, count;
2798 handle = __le16_to_cpu(info->handle);
2799 count = __le16_to_cpu(info->count);
2801 conn = hci_conn_hash_lookup_handle(hdev, handle);
2805 conn->sent -= count;
2807 switch (conn->type) {
2809 hdev->acl_cnt += count;
2810 if (hdev->acl_cnt > hdev->acl_pkts)
2811 hdev->acl_cnt = hdev->acl_pkts;
/* LE shares the ACL budget when no dedicated LE buffers exist. */
2815 if (hdev->le_pkts) {
2816 hdev->le_cnt += count;
2817 if (hdev->le_cnt > hdev->le_pkts)
2818 hdev->le_cnt = hdev->le_pkts;
2820 hdev->acl_cnt += count;
2821 if (hdev->acl_cnt > hdev->acl_pkts)
2822 hdev->acl_cnt = hdev->acl_pkts;
2827 hdev->sco_cnt += count;
2828 if (hdev->sco_cnt > hdev->sco_pkts)
2829 hdev->sco_cnt = hdev->sco_pkts;
2833 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2838 queue_work(hdev->workqueue, &hdev->tx_work);
/* Resolve a handle to a hci_conn for block-based flow control: BR/EDR
 * devices look up the connection directly, AMP devices look up the
 * hci_chan and presumably return chan->conn (the deref is elided here).
 * Unknown device types log an error; the return lines are elided.
 */
2841 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2844 struct hci_chan *chan;
2846 switch (hdev->dev_type) {
2848 return hci_conn_hash_lookup_handle(hdev, handle);
2850 chan = hci_chan_lookup_handle(hdev, handle);
2855 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
/* Number Of Completed Data Blocks event (block-based flow control, AMP):
 * credit per-connection 'sent' counters and the shared block budget
 * (clamped to num_blocks), then kick the TX work.
 * NOTE(review): the 'int i' declaration, case labels and the !conn
 * continue are elided in this extract.
 */
2862 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2864 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2867 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2868 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate the claimed handle count against the actual skb length. */
2872 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2873 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2874 BT_DBG("%s bad parameters", hdev->name);
2878 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2881 for (i = 0; i < ev->num_hndl; i++) {
2882 struct hci_comp_blocks_info *info = &ev->handles[i];
2883 struct hci_conn *conn = NULL;
2884 __u16 handle, block_count;
2886 handle = __le16_to_cpu(info->handle);
2887 block_count = __le16_to_cpu(info->blocks);
2889 conn = __hci_conn_lookup_handle(hdev, handle);
2893 conn->sent -= block_count;
2895 switch (conn->type) {
2898 hdev->block_cnt += block_count;
2899 if (hdev->block_cnt > hdev->num_blocks)
2900 hdev->block_cnt = hdev->num_blocks;
2904 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2909 queue_work(hdev->workqueue, &hdev->tx_work);
/* Mode Change event (active/sniff): record the new mode, maintain the
 * power-save flag when the change was remotely initiated (no pending
 * local request), and run any deferred SCO setup.
 * NOTE(review): status/NULL guards and some braces are elided here.
 */
2912 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2914 struct hci_ev_mode_change *ev = (void *) skb->data;
2915 struct hci_conn *conn;
2917 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2921 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2923 conn->mode = ev->mode;
/* Only track power-save state for changes we did not request. */
2925 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2927 if (conn->mode == HCI_CM_ACTIVE)
2928 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2930 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2933 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2934 hci_sco_setup(conn, ev->status);
2937 hci_dev_unlock(hdev);
/* PIN Code Request event: extend the disconnect timeout to allow the
 * pairing dialog, auto-reject when the device is not pairable, or
 * forward the request to mgmt (flagging whether a 16-digit secure PIN
 * is required for BT_SECURITY_HIGH).
 * NOTE(review): the 'secure' variable declaration/assignments around
 * line 2965 and the NULL guard are elided in this extract.
 */
2940 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2942 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2943 struct hci_conn *conn;
2945 BT_DBG("%s", hdev->name);
2949 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2953 if (conn->state == BT_CONNECTED) {
2954 hci_conn_hold(conn);
2955 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2956 hci_conn_drop(conn);
2959 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2960 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2961 sizeof(ev->bdaddr), &ev->bdaddr);
2962 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2965 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2970 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2974 hci_dev_unlock(hdev);
/* Link Key Request event: look up a stored key for the peer and reply
 * with it, unless policy forbids using it — debug keys when debug keys
 * are disabled, unauthenticated keys when MITM protection was requested,
 * or short-PIN combination keys for high-security connections.  All
 * rejection paths fall through to a Link Key Negative Reply.
 * NOTE(review): 'goto not_found' lines and the not_found label are
 * elided between the checks and line 3037.
 */
2977 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2979 struct hci_ev_link_key_req *ev = (void *) skb->data;
2980 struct hci_cp_link_key_reply cp;
2981 struct hci_conn *conn;
2982 struct link_key *key;
2984 BT_DBG("%s", hdev->name);
2986 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2991 key = hci_find_link_key(hdev, &ev->bdaddr);
2993 BT_DBG("%s link key not found for %pMR", hdev->name,
2998 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3001 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
3002 key->type == HCI_LK_DEBUG_COMBINATION) {
3003 BT_DBG("%s ignoring debug key", hdev->name);
3007 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* auth_type bit 0 set means MITM protection was requested; an
 * unauthenticated key cannot satisfy it. */
3009 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3010 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3011 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3012 BT_DBG("%s ignoring unauthenticated key", hdev->name);
3016 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3017 conn->pending_sec_level == BT_SECURITY_HIGH) {
3018 BT_DBG("%s ignoring key unauthenticated for high security",
3023 conn->key_type = key->type;
3024 conn->pin_length = key->pin_len;
3027 bacpy(&cp.bdaddr, &ev->bdaddr);
3028 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3030 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3032 hci_dev_unlock(hdev);
/* No usable key: tell the controller so pairing can start instead. */
3037 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3038 hci_dev_unlock(hdev);
/* Handle HCI Link Key Notification: a new link key was generated during
 * pairing; record its type on the connection and persist it via
 * hci_add_link_key() when mgmt is active. */
3041 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3043 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3044 struct hci_conn *conn;
3047 BT_DBG("%s", hdev->name);
3051 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* hold/drop resets the disconnect timer back to the normal value now
 * that pairing is over. */
3053 hci_conn_hold(conn);
3054 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3055 pin_len = conn->pin_length;
/* HCI_LK_CHANGED_COMBINATION does not change the key's class, so keep
 * the previously recorded type in that case. */
3057 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
3058 conn->key_type = ev->key_type;
3060 hci_conn_drop(conn);
3063 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3064 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
3065 ev->key_type, pin_len);
3067 hci_dev_unlock(hdev);
/* Handle HCI Read Clock Offset Complete: cache the remote clock offset
 * in the inquiry cache so later connection attempts can page faster. */
3070 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3072 struct hci_ev_clock_offset *ev = (void *) skb->data;
3073 struct hci_conn *conn;
3075 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3079 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3080 if (conn && !ev->status) {
3081 struct inquiry_entry *ie;
3083 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3085 ie->data.clock_offset = ev->clock_offset;
3086 ie->timestamp = jiffies;
3090 hci_dev_unlock(hdev);
/* Handle HCI Connection Packet Type Changed: update the connection's
 * negotiated packet-type mask on success. */
3093 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3095 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3096 struct hci_conn *conn;
3098 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3102 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3103 if (conn && !ev->status)
3104 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3106 hci_dev_unlock(hdev);
/* Handle HCI Page Scan Repetition Mode Change: refresh the cached page
 * scan repetition mode for this remote device in the inquiry cache. */
3109 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3111 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3112 struct inquiry_entry *ie;
3114 BT_DBG("%s", hdev->name);
3118 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3120 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3121 ie->timestamp = jiffies;
3124 hci_dev_unlock(hdev);
/* Handle Inquiry Result with RSSI: the event comes in two wire formats
 * (with or without a pscan_mode byte), distinguished by dividing the
 * payload size by the per-response struct size. Each response is fed
 * into the inquiry cache and reported to mgmt as a found device.
 * Periodic-inquiry results are ignored (guard visible at 3139). */
3127 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3128 struct sk_buff *skb)
3130 struct inquiry_data data;
3131 int num_rsp = *((__u8 *) skb->data);
3132 bool name_known, ssp;
3134 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3139 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
/* Size check selects the variant that carries an extra pscan_mode. */
3144 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3145 struct inquiry_info_with_rssi_and_pscan_mode *info;
3146 info = (void *) (skb->data + 1);
3148 for (; num_rsp; num_rsp--, info++) {
3149 bacpy(&data.bdaddr, &info->bdaddr);
3150 data.pscan_rep_mode = info->pscan_rep_mode;
3151 data.pscan_period_mode = info->pscan_period_mode;
3152 data.pscan_mode = info->pscan_mode;
3153 memcpy(data.dev_class, info->dev_class, 3);
3154 data.clock_offset = info->clock_offset;
3155 data.rssi = info->rssi;
3156 data.ssp_mode = 0x00;
3158 name_known = hci_inquiry_cache_update(hdev, &data,
3160 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3161 info->dev_class, info->rssi,
3162 !name_known, ssp, NULL, 0, NULL, 0);
/* Standard variant: no pscan_mode field on the wire. */
3165 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3167 for (; num_rsp; num_rsp--, info++) {
3168 bacpy(&data.bdaddr, &info->bdaddr);
3169 data.pscan_rep_mode = info->pscan_rep_mode;
3170 data.pscan_period_mode = info->pscan_period_mode;
3171 data.pscan_mode = 0x00;
3172 memcpy(data.dev_class, info->dev_class, 3);
3173 data.clock_offset = info->clock_offset;
3174 data.rssi = info->rssi;
3175 data.ssp_mode = 0x00;
3176 name_known = hci_inquiry_cache_update(hdev, &data,
3178 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3179 info->dev_class, info->rssi,
3180 !name_known, ssp, NULL, 0, NULL, 0);
3184 hci_dev_unlock(hdev);
/* Handle Read Remote Extended Features Complete: store the feature page,
 * derive SSP/SC enablement from host-feature page 1, and continue the
 * connection-setup state machine (remote name request or mgmt connected
 * notification, then connect-confirm if no authentication is needed). */
3187 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3188 struct sk_buff *skb)
3190 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3191 struct hci_conn *conn;
3193 BT_DBG("%s", hdev->name);
3197 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3201 if (ev->page < HCI_MAX_PAGES)
3202 memcpy(conn->features[ev->page], ev->features, 8);
/* Page 1 carries the remote *host* features (SSP/SC support). */
3204 if (!ev->status && ev->page == 0x01) {
3205 struct inquiry_entry *ie;
3207 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3209 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3211 if (ev->features[0] & LMP_HOST_SSP) {
3212 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3214 /* It is mandatory by the Bluetooth specification that
3215 * Extended Inquiry Results are only used when Secure
3216 * Simple Pairing is enabled, but some devices violate
3219 * To make these devices work, the internal SSP
3220 * enabled flag needs to be cleared if the remote host
3221 * features do not indicate SSP support */
3222 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3225 if (ev->features[0] & LMP_HOST_SC)
3226 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
/* Only drive setup forward while still in the CONFIG phase. */
3229 if (conn->state != BT_CONFIG)
3232 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3233 struct hci_cp_remote_name_req cp;
3234 memset(&cp, 0, sizeof(cp));
3235 bacpy(&cp.bdaddr, &conn->dst);
3236 cp.pscan_rep_mode = 0x02;
3237 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3238 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3239 mgmt_device_connected(hdev, &conn->dst, conn->type,
3240 conn->dst_type, 0, NULL, 0,
3243 if (!hci_outgoing_auth_needed(hdev, conn)) {
3244 conn->state = BT_CONNECTED;
3245 hci_proto_connect_cfm(conn, ev->status);
3246 hci_conn_drop(conn);
3250 hci_dev_unlock(hdev);
/* Handle Synchronous Connection Complete (SCO/eSCO). On success, record
 * the handle and register sysfs; on a known set of eSCO rejection codes,
 * retry setup with a reduced packet-type mask before giving up. */
3253 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3254 struct sk_buff *skb)
3256 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3257 struct hci_conn *conn;
3259 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3263 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
/* The controller may complete an eSCO request as plain SCO; fall back
 * to looking up (and retyping) the pending eSCO connection. */
3265 if (ev->link_type == ESCO_LINK)
3268 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3272 conn->type = SCO_LINK;
3275 switch (ev->status) {
3277 conn->handle = __le16_to_cpu(ev->handle);
3278 conn->state = BT_CONNECTED;
3280 hci_conn_add_sysfs(conn);
/* Remote rejected the eSCO parameters: retry with a downgraded
 * packet-type selection (visible at 3290-3294). */
3283 case 0x0d: /* Connection Rejected due to Limited Resources */
3284 case 0x11: /* Unsupported Feature or Parameter Value */
3285 case 0x1c: /* SCO interval rejected */
3286 case 0x1a: /* Unsupported Remote Feature */
3287 case 0x1f: /* Unspecified error */
3288 case 0x20: /* Unsupported LMP Parameter value */
3290 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3291 (hdev->esco_type & EDR_ESCO_MASK);
3292 if (hci_setup_sync(conn, conn->link->handle))
3298 conn->state = BT_CLOSED;
3302 hci_proto_connect_cfm(conn, ev->status);
3307 hci_dev_unlock(hdev);
/* Return the used length of an EIR (Extended Inquiry Response) buffer by
 * walking its length-prefixed fields; stops at the terminator (the
 * zero-length check between 3315 and 3320 is not visible in this
 * excerpt). Each field occupies field_len + 1 bytes. */
3310 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3314 while (parsed < eir_len) {
3315 u8 field_len = eir[0];
3320 parsed += field_len + 1;
3321 eir += field_len + 1;
/* Handle Extended Inquiry Result: like the RSSI variant but each
 * response carries EIR data, from which the device name may already be
 * known (checked via eir_has_data_type under mgmt). Results are cached
 * and forwarded to mgmt together with the EIR payload. */
3327 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3328 struct sk_buff *skb)
3330 struct inquiry_data data;
3331 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3332 int num_rsp = *((__u8 *) skb->data);
3335 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3340 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3345 for (; num_rsp; num_rsp--, info++) {
3346 bool name_known, ssp;
3348 bacpy(&data.bdaddr, &info->bdaddr);
3349 data.pscan_rep_mode = info->pscan_rep_mode;
3350 data.pscan_period_mode = info->pscan_period_mode;
3351 data.pscan_mode = 0x00;
3352 memcpy(data.dev_class, info->dev_class, 3);
3353 data.clock_offset = info->clock_offset;
3354 data.rssi = info->rssi;
/* EIR implies SSP is enabled on the remote (ssp_mode = 1). */
3355 data.ssp_mode = 0x01;
3357 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3358 name_known = eir_has_data_type(info->data,
3364 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3366 eir_len = eir_get_length(info->data, sizeof(info->data));
3367 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3368 info->dev_class, info->rssi, !name_known,
3369 ssp, info->data, eir_len, NULL, 0);
3372 hci_dev_unlock(hdev);
/* Handle Encryption Key Refresh Complete: promote the pending security
 * level, and either finish connection setup (BT_CONFIG) or deliver the
 * auth confirmation. A failure on an established link triggers a
 * disconnect with an authentication-failure reason. */
3375 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3376 struct sk_buff *skb)
3378 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3379 struct hci_conn *conn;
3381 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3382 __le16_to_cpu(ev->handle));
3386 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3391 conn->sec_level = conn->pending_sec_level;
3393 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* Refresh failed on a live link: tear it down as an auth failure. */
3395 if (ev->status && conn->state == BT_CONNECTED) {
3396 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3397 hci_conn_drop(conn);
3401 if (conn->state == BT_CONFIG) {
3403 conn->state = BT_CONNECTED;
3405 hci_proto_connect_cfm(conn, ev->status);
3406 hci_conn_drop(conn);
3408 hci_auth_cfm(conn, ev->status);
/* hold/drop re-arms the standard disconnect timeout. */
3410 hci_conn_hold(conn);
3411 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3412 hci_conn_drop(conn);
3416 hci_dev_unlock(hdev);
/* Compute the authentication requirements to send in an IO Capability
 * Reply, following the remote side's lead on bonding while deciding
 * MITM protection from both sides' IO capabilities.
 * Returns an HCI_AT_* value. */
3419 static u8 hci_get_auth_req(struct hci_conn *conn)
3421 /* If remote requests dedicated bonding follow that lead */
3422 if (conn->remote_auth == HCI_AT_DEDICATED_BONDING ||
3423 conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) {
3424 /* If both remote and local IO capabilities allow MITM
3425 * protection then require it, otherwise don't */
3426 if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT ||
3427 conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)
3428 return HCI_AT_DEDICATED_BONDING;
3430 return HCI_AT_DEDICATED_BONDING_MITM;
3433 /* If remote requests no-bonding follow that lead */
3434 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3435 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
/* Keep our own MITM requirement (bit 0 of auth_type). */
3436 return conn->remote_auth | (conn->auth_type & 0x01);
3438 return conn->auth_type;
/* Handle IO Capability Request: reply with our IO capability and derived
 * auth requirements when pairing is allowed (pairable, or the remote
 * only wants no-bonding); otherwise send a negative reply with
 * "pairing not allowed". */
3441 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3443 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3444 struct hci_conn *conn;
3446 BT_DBG("%s", hdev->name);
3450 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3454 hci_conn_hold(conn);
3456 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* Masking bit 0 strips the MITM flag, so this matches both
 * NO_BONDING and NO_BONDING_MITM. */
3459 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3460 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3461 struct hci_cp_io_capability_reply cp;
3463 bacpy(&cp.bdaddr, &ev->bdaddr);
3464 /* Change the IO capability from KeyboardDisplay
3465 * to DisplayYesNo as it is not supported by BT spec. */
3466 cp.capability = (conn->io_capability == 0x04) ?
3467 HCI_IO_DISPLAY_YESNO : conn->io_capability;
3468 conn->auth_type = hci_get_auth_req(conn);
3469 cp.authentication = conn->auth_type;
/* OOB data is advertised when we have it stored and either we are
 * the initiator or the remote signalled OOB. */
3471 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3472 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3477 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3480 struct hci_cp_io_capability_neg_reply cp;
3482 bacpy(&cp.bdaddr, &ev->bdaddr);
3483 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3485 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3490 hci_dev_unlock(hdev);
/* Handle IO Capability Response from the remote: record its IO
 * capability and auth requirements for later pairing decisions, and
 * note whether it advertised OOB data. */
3493 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3495 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3496 struct hci_conn *conn;
3498 BT_DBG("%s", hdev->name);
3502 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3506 conn->remote_cap = ev->capability;
3507 conn->remote_auth = ev->authentication;
3509 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3512 hci_dev_unlock(hdev);
/* Handle User Confirmation Request (numeric comparison). Rejects when we
 * need MITM but the remote cannot provide it; auto-accepts (optionally
 * after a configurable delay) when neither side requires MITM;
 * otherwise forwards the passkey to user space via mgmt. */
3515 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3516 struct sk_buff *skb)
3518 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3519 int loc_mitm, rem_mitm, confirm_hint = 0;
3520 struct hci_conn *conn;
3522 BT_DBG("%s", hdev->name);
3526 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3529 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement encodes the MITM demand. */
3533 loc_mitm = (conn->auth_type & 0x01);
3534 rem_mitm = (conn->remote_auth & 0x01);
3536 /* If we require MITM but the remote device can't provide that
3537 * (it has NoInputNoOutput) then reject the confirmation
3538 * request. The only exception is when we're dedicated bonding
3539 * initiators (connect_cfm_cb set) since then we always have the MITM
3541 if (!conn->connect_cfm_cb && loc_mitm &&
3542 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
3543 BT_DBG("Rejecting request: remote device can't provide MITM");
3544 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3545 sizeof(ev->bdaddr), &ev->bdaddr);
3549 /* If no side requires MITM protection; auto-accept */
3550 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
3551 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
3553 /* If we're not the initiators request authorization to
3554 * proceed from user space (mgmt_user_confirm with
3555 * confirm_hint set to 1). */
3556 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3557 BT_DBG("Confirming auto-accept as acceptor");
3562 BT_DBG("Auto-accept of user confirmation with %ums delay",
3563 hdev->auto_accept_delay);
/* A non-zero delay defers the accept to a delayed work item. */
3565 if (hdev->auto_accept_delay > 0) {
3566 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3567 queue_delayed_work(conn->hdev->workqueue,
3568 &conn->auto_accept_work, delay);
3572 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3573 sizeof(ev->bdaddr), &ev->bdaddr);
3578 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
3579 le32_to_cpu(ev->passkey), confirm_hint);
3582 hci_dev_unlock(hdev);
/* Handle User Passkey Request: forward it to user space via mgmt when
 * mgmt is in use (otherwise the event is ignored). */
3585 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3586 struct sk_buff *skb)
3588 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3590 BT_DBG("%s", hdev->name);
3592 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3593 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* Handle User Passkey Notification: store the passkey we must display,
 * reset the entered-digit counter, and notify user space via mgmt. */
3596 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3597 struct sk_buff *skb)
3599 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3600 struct hci_conn *conn;
3602 BT_DBG("%s", hdev->name);
3604 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3608 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3609 conn->passkey_entered = 0;
3611 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3612 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3613 conn->dst_type, conn->passkey_notify,
3614 conn->passkey_entered);
/* Handle Keypress Notification during passkey entry: track how many
 * digits the remote user has typed (started/entered/erased/cleared) and
 * relay progress to user space via mgmt. */
3617 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3619 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3620 struct hci_conn *conn;
3622 BT_DBG("%s", hdev->name);
3624 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Switch on ev->type (switch header not visible in this excerpt). */
3629 case HCI_KEYPRESS_STARTED:
3630 conn->passkey_entered = 0;
3633 case HCI_KEYPRESS_ENTERED:
3634 conn->passkey_entered++;
3637 case HCI_KEYPRESS_ERASED:
3638 conn->passkey_entered--;
3641 case HCI_KEYPRESS_CLEARED:
3642 conn->passkey_entered = 0;
3645 case HCI_KEYPRESS_COMPLETED:
3649 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3650 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3651 conn->dst_type, conn->passkey_notify,
3652 conn->passkey_entered);
/* Handle Simple Pairing Complete: report an auth failure to mgmt, but
 * only when we did not initiate authentication ourselves (to avoid a
 * duplicate with the Auth Complete event), then release the connection
 * reference taken at IO-capability time. */
3655 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3656 struct sk_buff *skb)
3658 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3659 struct hci_conn *conn;
3661 BT_DBG("%s", hdev->name);
3665 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3669 /* To avoid duplicate auth_failed events to user space we check
3670 * the HCI_CONN_AUTH_PEND flag which will be set if we
3671 * initiated the authentication. A traditional auth_complete
3672 * event gets always produced as initiator and is also mapped to
3673 * the mgmt_auth_failed event */
3674 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3675 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3678 hci_conn_drop(conn);
3681 hci_dev_unlock(hdev);
/* Handle Remote Host Supported Features Notification: cache the remote
 * host feature page (page 1) on any existing connection and record SSP
 * support in the inquiry cache. */
3684 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3685 struct sk_buff *skb)
3687 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3688 struct inquiry_entry *ie;
3689 struct hci_conn *conn;
3691 BT_DBG("%s", hdev->name);
3695 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3697 memcpy(conn->features[1], ev->features, 8);
3699 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3701 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3703 hci_dev_unlock(hdev);
/* Handle Remote OOB Data Request: reply with the stored out-of-band
 * pairing data for this address — the extended (P-192 + P-256) variant
 * when Secure Connections is enabled, the legacy variant otherwise —
 * or a negative reply when no OOB data is stored. */
3706 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3707 struct sk_buff *skb)
3709 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3710 struct oob_data *data;
3712 BT_DBG("%s", hdev->name);
3716 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3719 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
/* SC enabled: send both P-192 and P-256 hash/randomizer pairs. */
3721 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
3722 struct hci_cp_remote_oob_ext_data_reply cp;
3724 bacpy(&cp.bdaddr, &ev->bdaddr);
3725 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
3726 memcpy(cp.randomizer192, data->randomizer192,
3727 sizeof(cp.randomizer192));
3728 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
3729 memcpy(cp.randomizer256, data->randomizer256,
3730 sizeof(cp.randomizer256));
3732 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
3735 struct hci_cp_remote_oob_data_reply cp;
3737 bacpy(&cp.bdaddr, &ev->bdaddr);
3738 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
3739 memcpy(cp.randomizer, data->randomizer192,
3740 sizeof(cp.randomizer));
3742 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
/* No stored OOB data: negative reply. */
3746 struct hci_cp_remote_oob_data_neg_reply cp;
3748 bacpy(&cp.bdaddr, &ev->bdaddr);
3749 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
3754 hci_dev_unlock(hdev);
/* Handle AMP Physical Link Complete: mark the AMP hcon connected, copy
 * the peer address from the underlying BR/EDR connection, register
 * sysfs, and confirm the physical link to the AMP manager. */
3757 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3758 struct sk_buff *skb)
3760 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3761 struct hci_conn *hcon, *bredr_hcon;
3763 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3768 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3770 hci_dev_unlock(hdev);
3776 hci_dev_unlock(hdev);
/* The AMP link inherits its destination from the BR/EDR hcon that
 * the AMP manager's L2CAP connection rides on. */
3780 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3782 hcon->state = BT_CONNECTED;
3783 bacpy(&hcon->dst, &bredr_hcon->dst);
3785 hci_conn_hold(hcon);
3786 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3787 hci_conn_drop(hcon);
3789 hci_conn_add_sysfs(hcon);
3791 amp_physical_cfm(bredr_hcon, hcon);
3793 hci_dev_unlock(hdev);
/* Handle AMP Logical Link Complete: create an hci_chan for the new
 * logical link, and if the AMP manager has a pending BR/EDR L2CAP
 * channel, confirm the logical link to it (updating the connection MTU
 * to the AMP block MTU). */
3796 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3798 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
3799 struct hci_conn *hcon;
3800 struct hci_chan *hchan;
3801 struct amp_mgr *mgr;
3803 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
3804 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
3807 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3811 /* Create AMP hchan */
3812 hchan = hci_chan_create(hcon);
3816 hchan->handle = le16_to_cpu(ev->handle);
3818 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
3820 mgr = hcon->amp_mgr;
3821 if (mgr && mgr->bredr_chan) {
3822 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
3824 l2cap_chan_lock(bredr_chan);
3826 bredr_chan->conn->mtu = hdev->block_mtu;
3827 l2cap_logical_cfm(bredr_chan, hchan, 0);
3828 hci_conn_hold(hcon);
3830 l2cap_chan_unlock(bredr_chan);
/* Handle AMP Disconnect Logical Link Complete: look up the hci_chan for
 * the handle and tear the logical link down via the AMP layer. */
3834 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3835 struct sk_buff *skb)
3837 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3838 struct hci_chan *hchan;
3840 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3841 le16_to_cpu(ev->handle), ev->status);
3848 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3852 amp_destroy_logical_link(hchan, ev->reason);
3855 hci_dev_unlock(hdev);
/* Handle AMP Disconnect Physical Link Complete: mark the corresponding
 * AMP hcon closed. */
3858 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3859 struct sk_buff *skb)
3861 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3862 struct hci_conn *hcon;
3864 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3871 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3873 hcon->state = BT_CLOSED;
3877 hci_dev_unlock(hdev);
/* Handle LE Connection Complete: match (or create) the hci_conn, fill in
 * initiator/responder addresses depending on our role, resolve an RPA
 * back to the identity address via the IRK store, then finish setup
 * (mgmt notification, handle/state, sysfs, connect confirm) or fail the
 * connection on a non-zero status. */
3880 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3882 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3883 struct hci_conn *conn;
3884 struct smp_irk *irk;
3886 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
/* An outgoing attempt is tracked as the single LE_LINK in BT_CONNECT;
 * if none exists this is an incoming connection and we add a conn. */
3890 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3892 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3894 BT_ERR("No memory for new connection");
3898 conn->dst_type = ev->bdaddr_type;
3900 if (ev->role == LE_CONN_ROLE_MASTER) {
3902 conn->link_mode |= HCI_LM_MASTER;
3905 /* If we didn't have a hci_conn object previously
3906 * but we're in master role this must be something
3907 * initiated using a white list. Since white list based
3908 * connections are not "first class citizens" we don't
3909 * have full tracking of them. Therefore, we go ahead
3910 * with a "best effort" approach of determining the
3911 * initiator address based on the HCI_PRIVACY flag.
3914 conn->resp_addr_type = ev->bdaddr_type;
3915 bacpy(&conn->resp_addr, &ev->bdaddr);
3916 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3917 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
3918 bacpy(&conn->init_addr, &hdev->rpa);
3920 hci_copy_identity_address(hdev,
3922 &conn->init_addr_type);
3926 cancel_delayed_work(&conn->le_conn_timeout);
3930 /* Set the responder (our side) address type based on
3931 * the advertising address type.
3933 conn->resp_addr_type = hdev->adv_addr_type;
3934 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
3935 bacpy(&conn->resp_addr, &hdev->random_addr);
3937 bacpy(&conn->resp_addr, &hdev->bdaddr);
3939 conn->init_addr_type = ev->bdaddr_type;
3940 bacpy(&conn->init_addr, &ev->bdaddr);
3943 /* Lookup the identity address from the stored connection
3944 * address and address type.
3946 * When establishing connections to an identity address, the
3947 * connection procedure will store the resolvable random
3948 * address first. Now if it can be converted back into the
3949 * identity address, start using the identity address from
3952 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
3954 bacpy(&conn->dst, &irk->bdaddr);
3955 conn->dst_type = irk->addr_type;
/* Non-zero status: fail the connection attempt and bail out. */
3959 hci_le_conn_failed(conn, ev->status);
3963 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3964 mgmt_device_connected(hdev, &conn->dst, conn->type,
3965 conn->dst_type, 0, NULL, 0, NULL);
3967 conn->sec_level = BT_SECURITY_LOW;
3968 conn->handle = __le16_to_cpu(ev->handle);
3969 conn->state = BT_CONNECTED;
3971 if (test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
3972 set_bit(HCI_CONN_6LOWPAN, &conn->flags);
3974 hci_conn_add_sysfs(conn);
3976 hci_proto_connect_cfm(conn, ev->status);
/* Connection established: remove it from the pending-LE-conn list. */
3978 hci_pend_le_conn_del(hdev, &conn->dst, conn->dst_type);
3981 hci_dev_unlock(hdev);
3984 /* This function requires the caller holds hdev->lock */
/* Resolve the advertiser's address via the IRK store and, if it is on
 * the pending-LE-connection list, initiate an LE connection to it.
 * -EBUSY from hci_connect_le() is tolerated (one attempt at a time). */
3985 static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
3988 struct hci_conn *conn;
3989 struct smp_irk *irk;
3991 /* If this is a resolvable address, we should resolve it and then
3992 * update address and address type variables.
3994 irk = hci_get_irk(hdev, addr, addr_type);
3996 addr = &irk->bdaddr;
3997 addr_type = irk->addr_type;
4000 if (!hci_pend_le_conn_lookup(hdev, addr, addr_type))
4003 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4008 switch (PTR_ERR(conn)) {
4010 /* If hci_connect() returns -EBUSY it means there is already
4011 * an LE connection attempt going on. Since controllers don't
4012 * support more than one connection attempt at the time, we
4013 * don't consider this an error case.
4017 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Process one LE advertising report. Passive scans only trigger pending
 * connections; active scans batch an ADV_IND/ADV_SCAN_IND with its
 * following SCAN_RSP so mgmt can emit a single merged device-found
 * event, flushing the cache when reports cannot be merged. */
4021 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4022 u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
4024 struct discovery_state *d = &hdev->discovery;
4027 /* Passive scanning shouldn't trigger any device found events */
4028 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4029 if (type == LE_ADV_IND || type == LE_ADV_DIRECT_IND)
4030 check_pending_le_conn(hdev, bdaddr, bdaddr_type);
4034 /* If there's nothing pending either store the data from this
4035 * event or send an immediate device found event if the data
4036 * should not be stored for later.
4038 if (!has_pending_adv_report(hdev)) {
4039 /* If the report will trigger a SCAN_REQ store it for
4042 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4043 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4048 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4049 rssi, 0, 1, data, len, NULL, 0);
4053 /* Check if the pending report is for the same device as the new one */
4054 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4055 bdaddr_type == d->last_adv_addr_type);
4057 /* If the pending data doesn't match this report or this isn't a
4058 * scan response (e.g. we got a duplicate ADV_IND) then force
4059 * sending of the pending data.
4061 if (type != LE_ADV_SCAN_RSP || !match) {
4062 /* Send out whatever is in the cache, but skip duplicates */
4064 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4065 d->last_adv_addr_type, NULL,
4066 d->last_adv_rssi, 0, 1,
4068 d->last_adv_data_len, NULL, 0);
4070 /* If the new report will trigger a SCAN_REQ store it for
4073 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4074 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4079 /* The advertising reports cannot be merged, so clear
4080 * the pending report and send out a device found event.
4082 clear_pending_adv_report(hdev);
4083 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4084 d->last_adv_addr_type, NULL, rssi, 0, 1,
4085 data, len, NULL, 0);
4089 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4090 * the new event is a SCAN_RSP. We can therefore proceed with
4091 * sending a merged device found event.
4093 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4094 d->last_adv_addr_type, NULL, rssi, 0, 1, data, len,
4095 d->last_adv_data, d->last_adv_data_len);
4096 clear_pending_adv_report(hdev);
/* Handle LE Advertising Report meta event: iterate the packed reports in
 * the payload. The RSSI byte follows each report's variable-length
 * data, hence ev->data[ev->length] and the +1 in the stride. */
4099 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4101 u8 num_reports = skb->data[0];
4102 void *ptr = &skb->data[1];
4106 while (num_reports--) {
4107 struct hci_ev_le_advertising_info *ev = ptr;
4110 rssi = ev->data[ev->length];
4111 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4112 ev->bdaddr_type, rssi, ev->data, ev->length);
4114 ptr += sizeof(*ev) + ev->length + 1;
4117 hci_dev_unlock(hdev);
/* Handle LE Long Term Key Request: look up the LTK by ediv/rand and
 * reply with it, setting the pending security level from the key's
 * authentication. A slave STK is single-use and is deleted after the
 * reply; with no matching key a negative reply is sent. */
4120 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4122 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
4123 struct hci_cp_le_ltk_reply cp;
4124 struct hci_cp_le_ltk_neg_reply neg;
4125 struct hci_conn *conn;
4126 struct smp_ltk *ltk;
4128 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
4132 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4136 ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out);
4140 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
4141 cp.handle = cpu_to_le16(conn->handle);
/* Authenticated keys yield MITM-protected (HIGH) security. */
4143 if (ltk->authenticated)
4144 conn->pending_sec_level = BT_SECURITY_HIGH;
4146 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4148 conn->enc_key_size = ltk->enc_size;
4150 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
4152 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
4153 * temporary key used to encrypt a connection following
4154 * pairing. It is used during the Encrypted Session Setup to
4155 * distribute the keys. Later, security can be re-established
4156 * using a distributed LTK.
4158 if (ltk->type == HCI_SMP_STK_SLAVE) {
4159 list_del(<k->list);
4163 hci_dev_unlock(hdev);
/* Negative-reply path (reached when no key is found). */
4168 neg.handle = ev->handle;
4169 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
4170 hci_dev_unlock(hdev);
/* Dispatch an LE Meta event to its sub-event handler after stripping the
 * meta header from the skb. */
4173 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4175 struct hci_ev_le_meta *le_ev = (void *) skb->data;
4177 skb_pull(skb, sizeof(*le_ev));
4179 switch (le_ev->subevent) {
4180 case HCI_EV_LE_CONN_COMPLETE:
4181 hci_le_conn_complete_evt(hdev, skb);
4184 case HCI_EV_LE_ADVERTISING_REPORT:
4185 hci_le_adv_report_evt(hdev, skb);
4188 case HCI_EV_LE_LTK_REQ:
4189 hci_le_ltk_request_evt(hdev, skb);
/* Handle AMP Channel Selected: kick off reading the local AMP assoc
 * final data for the physical link's connection. */
4197 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4199 struct hci_ev_channel_selected *ev = (void *) skb->data;
4200 struct hci_conn *hcon;
4202 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4204 skb_pull(skb, sizeof(*ev));
4206 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4210 amp_read_loc_assoc_final_data(hdev, hcon);
4213 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
4215 struct hci_event_hdr *hdr = (void *) skb->data;
4216 __u8 event = hdr->evt;
4220 /* Received events are (currently) only needed when a request is
4221 * ongoing so avoid unnecessary memory allocation.
4223 if (hdev->req_status == HCI_REQ_PEND) {
4224 kfree_skb(hdev->recv_evt);
4225 hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
4228 hci_dev_unlock(hdev);
4230 skb_pull(skb, HCI_EVENT_HDR_SIZE);
4232 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
4233 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
4234 u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
4236 hci_req_cmd_complete(hdev, opcode, 0);
4240 case HCI_EV_INQUIRY_COMPLETE:
4241 hci_inquiry_complete_evt(hdev, skb);
4244 case HCI_EV_INQUIRY_RESULT:
4245 hci_inquiry_result_evt(hdev, skb);
4248 case HCI_EV_CONN_COMPLETE:
4249 hci_conn_complete_evt(hdev, skb);
4252 case HCI_EV_CONN_REQUEST:
4253 hci_conn_request_evt(hdev, skb);
4256 case HCI_EV_DISCONN_COMPLETE:
4257 hci_disconn_complete_evt(hdev, skb);
4260 case HCI_EV_AUTH_COMPLETE:
4261 hci_auth_complete_evt(hdev, skb);
4264 case HCI_EV_REMOTE_NAME:
4265 hci_remote_name_evt(hdev, skb);
4268 case HCI_EV_ENCRYPT_CHANGE:
4269 hci_encrypt_change_evt(hdev, skb);
4272 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
4273 hci_change_link_key_complete_evt(hdev, skb);
4276 case HCI_EV_REMOTE_FEATURES:
4277 hci_remote_features_evt(hdev, skb);
4280 case HCI_EV_CMD_COMPLETE:
4281 hci_cmd_complete_evt(hdev, skb);
4284 case HCI_EV_CMD_STATUS:
4285 hci_cmd_status_evt(hdev, skb);
4288 case HCI_EV_ROLE_CHANGE:
4289 hci_role_change_evt(hdev, skb);
4292 case HCI_EV_NUM_COMP_PKTS:
4293 hci_num_comp_pkts_evt(hdev, skb);
4296 case HCI_EV_MODE_CHANGE:
4297 hci_mode_change_evt(hdev, skb);
4300 case HCI_EV_PIN_CODE_REQ:
4301 hci_pin_code_request_evt(hdev, skb);
4304 case HCI_EV_LINK_KEY_REQ:
4305 hci_link_key_request_evt(hdev, skb);
4308 case HCI_EV_LINK_KEY_NOTIFY:
4309 hci_link_key_notify_evt(hdev, skb);
4312 case HCI_EV_CLOCK_OFFSET:
4313 hci_clock_offset_evt(hdev, skb);
4316 case HCI_EV_PKT_TYPE_CHANGE:
4317 hci_pkt_type_change_evt(hdev, skb);
4320 case HCI_EV_PSCAN_REP_MODE:
4321 hci_pscan_rep_mode_evt(hdev, skb);
4324 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
4325 hci_inquiry_result_with_rssi_evt(hdev, skb);
4328 case HCI_EV_REMOTE_EXT_FEATURES:
4329 hci_remote_ext_features_evt(hdev, skb);
4332 case HCI_EV_SYNC_CONN_COMPLETE:
4333 hci_sync_conn_complete_evt(hdev, skb);
4336 case HCI_EV_EXTENDED_INQUIRY_RESULT:
4337 hci_extended_inquiry_result_evt(hdev, skb);
4340 case HCI_EV_KEY_REFRESH_COMPLETE:
4341 hci_key_refresh_complete_evt(hdev, skb);
4344 case HCI_EV_IO_CAPA_REQUEST:
4345 hci_io_capa_request_evt(hdev, skb);
4348 case HCI_EV_IO_CAPA_REPLY:
4349 hci_io_capa_reply_evt(hdev, skb);
4352 case HCI_EV_USER_CONFIRM_REQUEST:
4353 hci_user_confirm_request_evt(hdev, skb);
4356 case HCI_EV_USER_PASSKEY_REQUEST:
4357 hci_user_passkey_request_evt(hdev, skb);
4360 case HCI_EV_USER_PASSKEY_NOTIFY:
4361 hci_user_passkey_notify_evt(hdev, skb);
4364 case HCI_EV_KEYPRESS_NOTIFY:
4365 hci_keypress_notify_evt(hdev, skb);
4368 case HCI_EV_SIMPLE_PAIR_COMPLETE:
4369 hci_simple_pair_complete_evt(hdev, skb);
4372 case HCI_EV_REMOTE_HOST_FEATURES:
4373 hci_remote_host_features_evt(hdev, skb);
4376 case HCI_EV_LE_META:
4377 hci_le_meta_evt(hdev, skb);
4380 case HCI_EV_CHANNEL_SELECTED:
4381 hci_chan_selected_evt(hdev, skb);
4384 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
4385 hci_remote_oob_data_request_evt(hdev, skb);
4388 case HCI_EV_PHY_LINK_COMPLETE:
4389 hci_phy_link_complete_evt(hdev, skb);
4392 case HCI_EV_LOGICAL_LINK_COMPLETE:
4393 hci_loglink_complete_evt(hdev, skb);
4396 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
4397 hci_disconn_loglink_complete_evt(hdev, skb);
4400 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
4401 hci_disconn_phylink_complete_evt(hdev, skb);
4404 case HCI_EV_NUM_COMP_BLOCKS:
4405 hci_num_comp_blocks_evt(hdev, skb);
4409 BT_DBG("%s event 0x%2.2x", hdev->name, event);
4414 hdev->stat.evt_rx++;