2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
36 /* Handle HCI Event packets */
38 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
40 __u8 status = *((__u8 *) skb->data);
42 BT_DBG("%s status 0x%2.2x", hdev->name, status);
47 clear_bit(HCI_INQUIRY, &hdev->flags);
48 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
49 wake_up_bit(&hdev->flags, HCI_INQUIRY);
51 hci_conn_check_pending(hdev);
54 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
56 __u8 status = *((__u8 *) skb->data);
58 BT_DBG("%s status 0x%2.2x", hdev->name, status);
63 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
66 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
68 __u8 status = *((__u8 *) skb->data);
70 BT_DBG("%s status 0x%2.2x", hdev->name, status);
75 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
77 hci_conn_check_pending(hdev);
80 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
83 BT_DBG("%s", hdev->name);
86 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
88 struct hci_rp_role_discovery *rp = (void *) skb->data;
89 struct hci_conn *conn;
91 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
98 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
101 conn->link_mode &= ~HCI_LM_MASTER;
103 conn->link_mode |= HCI_LM_MASTER;
106 hci_dev_unlock(hdev);
109 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
111 struct hci_rp_read_link_policy *rp = (void *) skb->data;
112 struct hci_conn *conn;
114 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
121 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
123 conn->link_policy = __le16_to_cpu(rp->policy);
125 hci_dev_unlock(hdev);
128 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
130 struct hci_rp_write_link_policy *rp = (void *) skb->data;
131 struct hci_conn *conn;
134 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
139 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
145 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
147 conn->link_policy = get_unaligned_le16(sent + 2);
149 hci_dev_unlock(hdev);
152 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
155 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
157 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
162 hdev->link_policy = __le16_to_cpu(rp->policy);
165 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
168 __u8 status = *((__u8 *) skb->data);
171 BT_DBG("%s status 0x%2.2x", hdev->name, status);
173 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
178 hdev->link_policy = get_unaligned_le16(sent);
181 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
183 __u8 status = *((__u8 *) skb->data);
185 BT_DBG("%s status 0x%2.2x", hdev->name, status);
187 clear_bit(HCI_RESET, &hdev->flags);
189 /* Reset all non-persistent flags */
190 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
192 hdev->discovery.state = DISCOVERY_STOPPED;
193 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
194 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
196 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
197 hdev->adv_data_len = 0;
199 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
200 hdev->scan_rsp_data_len = 0;
202 hdev->le_scan_type = LE_SCAN_PASSIVE;
204 hdev->ssp_debug_mode = 0;
207 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
209 __u8 status = *((__u8 *) skb->data);
212 BT_DBG("%s status 0x%2.2x", hdev->name, status);
214 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
220 if (test_bit(HCI_MGMT, &hdev->dev_flags))
221 mgmt_set_local_name_complete(hdev, sent, status);
223 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
225 hci_dev_unlock(hdev);
228 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
230 struct hci_rp_read_local_name *rp = (void *) skb->data;
232 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
237 if (test_bit(HCI_SETUP, &hdev->dev_flags))
238 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
241 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
243 __u8 status = *((__u8 *) skb->data);
246 BT_DBG("%s status 0x%2.2x", hdev->name, status);
248 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
253 __u8 param = *((__u8 *) sent);
255 if (param == AUTH_ENABLED)
256 set_bit(HCI_AUTH, &hdev->flags);
258 clear_bit(HCI_AUTH, &hdev->flags);
261 if (test_bit(HCI_MGMT, &hdev->dev_flags))
262 mgmt_auth_enable_complete(hdev, status);
265 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
267 __u8 status = *((__u8 *) skb->data);
270 BT_DBG("%s status 0x%2.2x", hdev->name, status);
272 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
277 __u8 param = *((__u8 *) sent);
280 set_bit(HCI_ENCRYPT, &hdev->flags);
282 clear_bit(HCI_ENCRYPT, &hdev->flags);
286 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
288 __u8 param, status = *((__u8 *) skb->data);
289 int old_pscan, old_iscan;
292 BT_DBG("%s status 0x%2.2x", hdev->name, status);
294 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
298 param = *((__u8 *) sent);
303 mgmt_write_scan_failed(hdev, param, status);
304 hdev->discov_timeout = 0;
308 /* We need to ensure that we set this back on if someone changed
309 * the scan mode through a raw HCI socket.
311 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
313 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
314 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
316 if (param & SCAN_INQUIRY) {
317 set_bit(HCI_ISCAN, &hdev->flags);
319 mgmt_discoverable(hdev, 1);
320 } else if (old_iscan)
321 mgmt_discoverable(hdev, 0);
323 if (param & SCAN_PAGE) {
324 set_bit(HCI_PSCAN, &hdev->flags);
326 mgmt_connectable(hdev, 1);
327 } else if (old_pscan)
328 mgmt_connectable(hdev, 0);
331 hci_dev_unlock(hdev);
334 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
336 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
338 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
343 memcpy(hdev->dev_class, rp->dev_class, 3);
345 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
346 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
349 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
351 __u8 status = *((__u8 *) skb->data);
354 BT_DBG("%s status 0x%2.2x", hdev->name, status);
356 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
363 memcpy(hdev->dev_class, sent, 3);
365 if (test_bit(HCI_MGMT, &hdev->dev_flags))
366 mgmt_set_class_of_dev_complete(hdev, sent, status);
368 hci_dev_unlock(hdev);
371 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
373 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
376 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
381 setting = __le16_to_cpu(rp->voice_setting);
383 if (hdev->voice_setting == setting)
386 hdev->voice_setting = setting;
388 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
391 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
394 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
397 __u8 status = *((__u8 *) skb->data);
401 BT_DBG("%s status 0x%2.2x", hdev->name, status);
406 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
410 setting = get_unaligned_le16(sent);
412 if (hdev->voice_setting == setting)
415 hdev->voice_setting = setting;
417 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
420 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
423 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
426 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
428 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
433 hdev->num_iac = rp->num_iac;
435 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
438 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
440 __u8 status = *((__u8 *) skb->data);
441 struct hci_cp_write_ssp_mode *sent;
443 BT_DBG("%s status 0x%2.2x", hdev->name, status);
445 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
451 hdev->features[1][0] |= LMP_HOST_SSP;
453 hdev->features[1][0] &= ~LMP_HOST_SSP;
456 if (test_bit(HCI_MGMT, &hdev->dev_flags))
457 mgmt_ssp_enable_complete(hdev, sent->mode, status);
460 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
462 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
466 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
468 u8 status = *((u8 *) skb->data);
469 struct hci_cp_write_sc_support *sent;
471 BT_DBG("%s status 0x%2.2x", hdev->name, status);
473 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
479 hdev->features[1][0] |= LMP_HOST_SC;
481 hdev->features[1][0] &= ~LMP_HOST_SC;
484 if (test_bit(HCI_MGMT, &hdev->dev_flags))
485 mgmt_sc_enable_complete(hdev, sent->support, status);
488 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
490 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
494 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
496 struct hci_rp_read_local_version *rp = (void *) skb->data;
498 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
503 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
504 hdev->hci_ver = rp->hci_ver;
505 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
506 hdev->lmp_ver = rp->lmp_ver;
507 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
508 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
512 static void hci_cc_read_local_commands(struct hci_dev *hdev,
515 struct hci_rp_read_local_commands *rp = (void *) skb->data;
517 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
522 if (test_bit(HCI_SETUP, &hdev->dev_flags))
523 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
526 static void hci_cc_read_local_features(struct hci_dev *hdev,
529 struct hci_rp_read_local_features *rp = (void *) skb->data;
531 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
536 memcpy(hdev->features, rp->features, 8);
538 /* Adjust default settings according to features
539 * supported by device. */
541 if (hdev->features[0][0] & LMP_3SLOT)
542 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
544 if (hdev->features[0][0] & LMP_5SLOT)
545 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
547 if (hdev->features[0][1] & LMP_HV2) {
548 hdev->pkt_type |= (HCI_HV2);
549 hdev->esco_type |= (ESCO_HV2);
552 if (hdev->features[0][1] & LMP_HV3) {
553 hdev->pkt_type |= (HCI_HV3);
554 hdev->esco_type |= (ESCO_HV3);
557 if (lmp_esco_capable(hdev))
558 hdev->esco_type |= (ESCO_EV3);
560 if (hdev->features[0][4] & LMP_EV4)
561 hdev->esco_type |= (ESCO_EV4);
563 if (hdev->features[0][4] & LMP_EV5)
564 hdev->esco_type |= (ESCO_EV5);
566 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
567 hdev->esco_type |= (ESCO_2EV3);
569 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
570 hdev->esco_type |= (ESCO_3EV3);
572 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
573 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
576 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
579 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
581 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
586 if (hdev->max_page < rp->max_page)
587 hdev->max_page = rp->max_page;
589 if (rp->page < HCI_MAX_PAGES)
590 memcpy(hdev->features[rp->page], rp->features, 8);
593 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
596 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
598 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
601 hdev->flow_ctl_mode = rp->mode;
604 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
606 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
608 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
613 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
614 hdev->sco_mtu = rp->sco_mtu;
615 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
616 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
618 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
623 hdev->acl_cnt = hdev->acl_pkts;
624 hdev->sco_cnt = hdev->sco_pkts;
626 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
627 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
630 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
632 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
634 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
637 bacpy(&hdev->bdaddr, &rp->bdaddr);
640 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
643 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
645 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
647 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
648 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
649 hdev->page_scan_window = __le16_to_cpu(rp->window);
653 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
656 u8 status = *((u8 *) skb->data);
657 struct hci_cp_write_page_scan_activity *sent;
659 BT_DBG("%s status 0x%2.2x", hdev->name, status);
664 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
668 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
669 hdev->page_scan_window = __le16_to_cpu(sent->window);
672 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
675 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
677 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
679 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
680 hdev->page_scan_type = rp->type;
683 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
686 u8 status = *((u8 *) skb->data);
689 BT_DBG("%s status 0x%2.2x", hdev->name, status);
694 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
696 hdev->page_scan_type = *type;
699 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
702 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
704 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
709 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
710 hdev->block_len = __le16_to_cpu(rp->block_len);
711 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
713 hdev->block_cnt = hdev->num_blocks;
715 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
716 hdev->block_cnt, hdev->block_len);
719 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
722 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
724 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
729 hdev->amp_status = rp->amp_status;
730 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
731 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
732 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
733 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
734 hdev->amp_type = rp->amp_type;
735 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
736 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
737 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
738 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
741 a2mp_send_getinfo_rsp(hdev);
744 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
747 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
748 struct amp_assoc *assoc = &hdev->loc_assoc;
749 size_t rem_len, frag_len;
751 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
756 frag_len = skb->len - sizeof(*rp);
757 rem_len = __le16_to_cpu(rp->rem_len);
759 if (rem_len > frag_len) {
760 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
762 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
763 assoc->offset += frag_len;
765 /* Read other fragments */
766 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
771 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
772 assoc->len = assoc->offset + rem_len;
776 /* Send A2MP Rsp when all fragments are received */
777 a2mp_send_getampassoc_rsp(hdev, rp->status);
778 a2mp_send_create_phy_link_req(hdev, rp->status);
781 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
784 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
786 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
789 hdev->inq_tx_power = rp->tx_power;
792 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
794 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
795 struct hci_cp_pin_code_reply *cp;
796 struct hci_conn *conn;
798 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
802 if (test_bit(HCI_MGMT, &hdev->dev_flags))
803 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
808 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
812 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
814 conn->pin_length = cp->pin_len;
817 hci_dev_unlock(hdev);
820 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
822 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
824 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
828 if (test_bit(HCI_MGMT, &hdev->dev_flags))
829 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
832 hci_dev_unlock(hdev);
835 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
838 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
840 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
845 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
846 hdev->le_pkts = rp->le_max_pkt;
848 hdev->le_cnt = hdev->le_pkts;
850 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
853 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
856 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
858 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
861 memcpy(hdev->le_features, rp->features, 8);
864 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
867 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
869 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
872 hdev->adv_tx_power = rp->tx_power;
875 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
877 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
879 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
883 if (test_bit(HCI_MGMT, &hdev->dev_flags))
884 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
887 hci_dev_unlock(hdev);
890 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
893 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
895 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
899 if (test_bit(HCI_MGMT, &hdev->dev_flags))
900 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
901 ACL_LINK, 0, rp->status);
903 hci_dev_unlock(hdev);
906 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
908 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
910 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
914 if (test_bit(HCI_MGMT, &hdev->dev_flags))
915 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
918 hci_dev_unlock(hdev);
921 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
924 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
926 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
930 if (test_bit(HCI_MGMT, &hdev->dev_flags))
931 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
932 ACL_LINK, 0, rp->status);
934 hci_dev_unlock(hdev);
937 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
940 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
942 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
945 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
946 NULL, NULL, rp->status);
947 hci_dev_unlock(hdev);
950 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
953 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
955 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
958 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
959 rp->hash256, rp->randomizer256,
961 hci_dev_unlock(hdev);
965 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
967 __u8 status = *((__u8 *) skb->data);
970 BT_DBG("%s status 0x%2.2x", hdev->name, status);
972 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
979 bacpy(&hdev->random_addr, sent);
981 hci_dev_unlock(hdev);
984 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
986 __u8 *sent, status = *((__u8 *) skb->data);
988 BT_DBG("%s status 0x%2.2x", hdev->name, status);
990 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
999 /* If we're doing connection initation as peripheral. Set a
1000 * timeout in case something goes wrong.
1003 struct hci_conn *conn;
1005 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1007 queue_delayed_work(hdev->workqueue,
1008 &conn->le_conn_timeout,
1009 HCI_LE_CONN_TIMEOUT);
1012 mgmt_advertising(hdev, *sent);
1014 hci_dev_unlock(hdev);
1017 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1019 struct hci_cp_le_set_scan_param *cp;
1020 __u8 status = *((__u8 *) skb->data);
1022 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1024 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1031 hdev->le_scan_type = cp->type;
1033 hci_dev_unlock(hdev);
1036 static bool has_pending_adv_report(struct hci_dev *hdev)
1038 struct discovery_state *d = &hdev->discovery;
1040 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1043 static void clear_pending_adv_report(struct hci_dev *hdev)
1045 struct discovery_state *d = &hdev->discovery;
1047 bacpy(&d->last_adv_addr, BDADDR_ANY);
1048 d->last_adv_data_len = 0;
1051 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1052 u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
1054 struct discovery_state *d = &hdev->discovery;
1056 bacpy(&d->last_adv_addr, bdaddr);
1057 d->last_adv_addr_type = bdaddr_type;
1058 d->last_adv_rssi = rssi;
1059 memcpy(d->last_adv_data, data, len);
1060 d->last_adv_data_len = len;
1063 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1064 struct sk_buff *skb)
1066 struct hci_cp_le_set_scan_enable *cp;
1067 __u8 status = *((__u8 *) skb->data);
1069 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1071 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1078 switch (cp->enable) {
1079 case LE_SCAN_ENABLE:
1080 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1081 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1082 clear_pending_adv_report(hdev);
1085 case LE_SCAN_DISABLE:
1086 /* We do this here instead of when setting DISCOVERY_STOPPED
1087 * since the latter would potentially require waiting for
1088 * inquiry to stop too.
1090 if (has_pending_adv_report(hdev)) {
1091 struct discovery_state *d = &hdev->discovery;
1093 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1094 d->last_adv_addr_type, NULL,
1095 d->last_adv_rssi, 0, 1,
1097 d->last_adv_data_len, NULL, 0);
1100 /* Cancel this timer so that we don't try to disable scanning
1101 * when it's already disabled.
1103 cancel_delayed_work(&hdev->le_scan_disable);
1105 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1106 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1107 * interrupted scanning due to a connect request. Mark
1108 * therefore discovery as stopped.
1110 if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
1112 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1116 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1121 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1122 struct sk_buff *skb)
1124 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1126 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1129 hdev->le_white_list_size = rp->size;
1132 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1133 struct sk_buff *skb)
1135 __u8 status = *((__u8 *) skb->data);
1137 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1140 hci_white_list_clear(hdev);
1143 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1144 struct sk_buff *skb)
1146 struct hci_cp_le_add_to_white_list *sent;
1147 __u8 status = *((__u8 *) skb->data);
1149 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1151 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1156 hci_white_list_add(hdev, &sent->bdaddr, sent->bdaddr_type);
1159 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1160 struct sk_buff *skb)
1162 struct hci_cp_le_del_from_white_list *sent;
1163 __u8 status = *((__u8 *) skb->data);
1165 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1167 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1172 hci_white_list_del(hdev, &sent->bdaddr, sent->bdaddr_type);
1175 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1176 struct sk_buff *skb)
1178 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1180 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1183 memcpy(hdev->le_states, rp->le_states, 8);
1186 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1187 struct sk_buff *skb)
1189 struct hci_cp_write_le_host_supported *sent;
1190 __u8 status = *((__u8 *) skb->data);
1192 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1194 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1200 hdev->features[1][0] |= LMP_HOST_LE;
1201 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1203 hdev->features[1][0] &= ~LMP_HOST_LE;
1204 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1205 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1209 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1211 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1215 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1217 struct hci_cp_le_set_adv_param *cp;
1218 u8 status = *((u8 *) skb->data);
1220 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1225 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1230 hdev->adv_addr_type = cp->own_address_type;
1231 hci_dev_unlock(hdev);
1234 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1235 struct sk_buff *skb)
1237 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1239 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1240 hdev->name, rp->status, rp->phy_handle);
1245 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1248 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1250 struct hci_rp_read_rssi *rp = (void *) skb->data;
1251 struct hci_conn *conn;
1253 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1260 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1262 conn->rssi = rp->rssi;
1264 hci_dev_unlock(hdev);
1267 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1269 struct hci_cp_read_tx_power *sent;
1270 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1271 struct hci_conn *conn;
1273 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1278 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1284 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1288 switch (sent->type) {
1290 conn->tx_power = rp->tx_power;
1293 conn->max_tx_power = rp->tx_power;
1298 hci_dev_unlock(hdev);
1301 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1303 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1306 hci_conn_check_pending(hdev);
1310 set_bit(HCI_INQUIRY, &hdev->flags);
1313 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1315 struct hci_cp_create_conn *cp;
1316 struct hci_conn *conn;
1318 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1320 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1326 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1328 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1331 if (conn && conn->state == BT_CONNECT) {
1332 if (status != 0x0c || conn->attempt > 2) {
1333 conn->state = BT_CLOSED;
1334 hci_proto_connect_cfm(conn, status);
1337 conn->state = BT_CONNECT2;
1341 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1344 conn->link_mode |= HCI_LM_MASTER;
1346 BT_ERR("No memory for new connection");
1350 hci_dev_unlock(hdev);
1353 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1355 struct hci_cp_add_sco *cp;
1356 struct hci_conn *acl, *sco;
1359 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1364 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1368 handle = __le16_to_cpu(cp->handle);
1370 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1374 acl = hci_conn_hash_lookup_handle(hdev, handle);
1378 sco->state = BT_CLOSED;
1380 hci_proto_connect_cfm(sco, status);
1385 hci_dev_unlock(hdev);
1388 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1390 struct hci_cp_auth_requested *cp;
1391 struct hci_conn *conn;
1393 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1398 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1404 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1406 if (conn->state == BT_CONFIG) {
1407 hci_proto_connect_cfm(conn, status);
1408 hci_conn_drop(conn);
1412 hci_dev_unlock(hdev);
1415 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1417 struct hci_cp_set_conn_encrypt *cp;
1418 struct hci_conn *conn;
1420 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1425 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1431 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1433 if (conn->state == BT_CONFIG) {
1434 hci_proto_connect_cfm(conn, status);
1435 hci_conn_drop(conn);
1439 hci_dev_unlock(hdev);
1442 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1443 struct hci_conn *conn)
1445 if (conn->state != BT_CONFIG || !conn->out)
1448 if (conn->pending_sec_level == BT_SECURITY_SDP)
1451 /* Only request authentication for SSP connections or non-SSP
1452 * devices with sec_level MEDIUM or HIGH or if MITM protection
1455 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1456 conn->pending_sec_level != BT_SECURITY_FIPS &&
1457 conn->pending_sec_level != BT_SECURITY_HIGH &&
1458 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1464 static int hci_resolve_name(struct hci_dev *hdev,
1465 struct inquiry_entry *e)
1467 struct hci_cp_remote_name_req cp;
1469 memset(&cp, 0, sizeof(cp));
1471 bacpy(&cp.bdaddr, &e->data.bdaddr);
1472 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1473 cp.pscan_mode = e->data.pscan_mode;
1474 cp.clock_offset = e->data.clock_offset;
1476 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1479 static bool hci_resolve_next_name(struct hci_dev *hdev)
1481 struct discovery_state *discov = &hdev->discovery;
1482 struct inquiry_entry *e;
1484 if (list_empty(&discov->resolve))
1487 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1491 if (hci_resolve_name(hdev, e) == 0) {
1492 e->name_state = NAME_PENDING;
/* Handle completion of a remote-name lookup during discovery: notify mgmt
 * of the (possibly connected) device, update the inquiry-cache entry's
 * name state, and either continue resolving the next pending name or
 * finish discovery (DISCOVERY_STOPPED).
 * @name may be NULL with @name_len 0 when the lookup failed.
 */
1499 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1500 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1502 struct discovery_state *discov = &hdev->discovery;
1503 struct inquiry_entry *e;
/* First time we see this connection via mgmt: report it as connected */
1505 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1506 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1507 name_len, conn->dev_class);
1509 if (discov->state == DISCOVERY_STOPPED)
1512 if (discov->state == DISCOVERY_STOPPING)
1513 goto discov_complete;
1515 if (discov->state != DISCOVERY_RESOLVING)
1518 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1519 /* If the device was not found in a list of found devices names of which
1520 * are pending. there is no need to continue resolving a next name as it
1521 * will be done upon receiving another Remote Name Request Complete
1528 e->name_state = NAME_KNOWN;
1529 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1530 e->data.rssi, name, name_len);
1532 e->name_state = NAME_NOT_KNOWN;
1535 if (hci_resolve_next_name(hdev))
1539 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command-status handler for HCI_OP_REMOTE_NAME_REQ. On failure (visible
 * path) it resolves the pending name bookkeeping via
 * hci_check_pending_name() and, for connections that still need it,
 * kicks off authentication.
 */
1542 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1544 struct hci_cp_remote_name_req *cp;
1545 struct hci_conn *conn;
1547 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1549 /* If successful wait for the name req complete event before
1550 * checking for the need to do authentication */
1554 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1560 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1562 if (test_bit(HCI_MGMT, &hdev->dev_flags))
/* NULL name / zero length: the request did not yield a name */
1563 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1568 if (!hci_outgoing_auth_needed(hdev, conn))
1571 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1572 struct hci_cp_auth_requested auth_cp;
1574 auth_cp.handle = __cpu_to_le16(conn->handle);
1575 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1576 sizeof(auth_cp), &auth_cp);
1580 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_READ_REMOTE_FEATURES: if the command
 * failed while the connection was configuring, complete the connect
 * attempt with the failure status and drop the reference.
 */
1583 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1585 struct hci_cp_read_remote_features *cp;
1586 struct hci_conn *conn;
1588 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1593 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1599 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1601 if (conn->state == BT_CONFIG) {
1602 hci_proto_connect_cfm(conn, status);
1603 hci_conn_drop(conn);
1607 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_READ_REMOTE_EXT_FEATURES — same
 * failure handling as hci_cs_read_remote_features() but for the extended
 * features command.
 */
1610 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1612 struct hci_cp_read_remote_ext_features *cp;
1613 struct hci_conn *conn;
1615 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1620 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1626 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1628 if (conn->state == BT_CONFIG) {
1629 hci_proto_connect_cfm(conn, status);
1630 hci_conn_drop(conn);
1634 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_SETUP_SYNC_CONN. The echoed handle
 * identifies the ACL link the SCO/eSCO was being set up on; on failure the
 * SCO connection is closed and the connect attempt completed with the
 * failure status.
 * NOTE(review): the lookup that derives 'sco' from 'acl' is among the
 * lines dropped by this extraction.
 */
1637 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1639 struct hci_cp_setup_sync_conn *cp;
1640 struct hci_conn *acl, *sco;
1643 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1648 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1652 handle = __le16_to_cpu(cp->handle);
1654 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1658 acl = hci_conn_hash_lookup_handle(hdev, handle);
1662 sco->state = BT_CLOSED;
1664 hci_proto_connect_cfm(sco, status);
1669 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_SNIFF_MODE: clear the pending
 * mode-change flag on the connection and run any deferred SCO setup.
 */
1672 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1674 struct hci_cp_sniff_mode *cp;
1675 struct hci_conn *conn;
1677 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1682 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1688 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1690 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
/* SCO setup was deferred until the mode change settled */
1692 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1693 hci_sco_setup(conn, status);
1696 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_EXIT_SNIFF_MODE — mirror image of
 * hci_cs_sniff_mode(): clear the pending mode change and run deferred
 * SCO setup.
 */
1699 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1701 struct hci_cp_exit_sniff_mode *cp;
1702 struct hci_conn *conn;
1704 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1709 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1715 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1717 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1719 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1720 hci_sco_setup(conn, status);
1723 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_DISCONNECT: on failure, tell the
 * management interface that the disconnect attempt failed for this peer.
 */
1726 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1728 struct hci_cp_disconnect *cp;
1729 struct hci_conn *conn;
1734 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1740 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1742 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1743 conn->dst_type, status);
1745 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_CREATE_PHY_LINK (AMP physical link).
 * Looks up the hcon by the 8-bit phy handle and, on the visible path,
 * continues AMP setup by writing the remote assoc data.
 */
1748 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1750 struct hci_cp_create_phy_link *cp;
1752 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1754 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1761 struct hci_conn *hcon;
1763 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1767 amp_write_remote_assoc(hdev, cp->phy_handle);
1770 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_ACCEPT_PHY_LINK: continue AMP setup
 * for the accepted physical link by writing the remote assoc data.
 */
1773 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1775 struct hci_cp_accept_phy_link *cp;
1777 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1782 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1786 amp_write_remote_assoc(hdev, cp->phy_handle);
/* Command-status handler for HCI_OP_LE_CREATE_CONN. On the visible path
 * it records the initiator/responder addresses needed later by SMP and
 * arms a connection timeout for attempts that page a specific peer
 * (failures themselves are handled elsewhere, see comment below).
 */
1789 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
1791 struct hci_cp_le_create_conn *cp;
1792 struct hci_conn *conn;
1794 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1796 /* All connection failure handling is taken care of by the
1797 * hci_le_conn_failed function which is triggered by the HCI
1798 * request completion callbacks used for connecting.
1803 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1809 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1813 /* Store the initiator and responder address information which
1814 * is needed for SMP. These values will not change during the
1815 * lifetime of the connection.
1817 conn->init_addr_type = cp->own_address_type;
1818 if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
1819 bacpy(&conn->init_addr, &hdev->random_addr);
1821 bacpy(&conn->init_addr, &hdev->bdaddr);
1823 conn->resp_addr_type = cp->peer_addr_type;
1824 bacpy(&conn->resp_addr, &cp->peer_addr);
1826 /* We don't want the connection attempt to stick around
1827 * indefinitely since LE doesn't have a page timeout concept
1828 * like BR/EDR. Set a timer for any connection that doesn't use
1829 * the white list for connecting.
1831 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
1832 queue_delayed_work(conn->hdev->workqueue,
1833 &conn->le_conn_timeout,
1834 HCI_LE_CONN_TIMEOUT);
1837 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_LE_START_ENC: if starting encryption
 * failed on a live connection, tear the link down with an authentication
 * failure and drop the reference.
 */
1840 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1842 struct hci_cp_le_start_enc *cp;
1843 struct hci_conn *conn;
1845 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1852 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
1856 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1860 if (conn->state != BT_CONNECTED)
1863 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
1864 hci_conn_drop(conn);
1867 hci_dev_unlock(hdev);
/* Inquiry Complete event handler: clear HCI_INQUIRY (waking any waiters),
 * then — when mgmt is driving discovery — either move on to name
 * resolution for discovered devices or mark discovery stopped.
 */
1870 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1872 __u8 status = *((__u8 *) skb->data);
1873 struct discovery_state *discov = &hdev->discovery;
1874 struct inquiry_entry *e;
1876 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1878 hci_conn_check_pending(hdev);
1880 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1883 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
1884 wake_up_bit(&hdev->flags, HCI_INQUIRY);
1886 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1891 if (discov->state != DISCOVERY_FINDING)
/* Nothing left to resolve: discovery is done */
1894 if (list_empty(&discov->resolve)) {
1895 hci_discovery_set_state(hdev, DISCOVERY_STOPPED)
1899 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1900 if (e && hci_resolve_name(hdev, e) == 0) {
1901 e->name_state = NAME_PENDING;
1902 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1904 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1908 hci_dev_unlock(hdev);
/* Inquiry Result event handler: for each response, copy the peer's
 * inquiry data into the cache and report the device to mgmt. Results are
 * ignored while periodic inquiry is active. SSP mode is forced to 0x00
 * because the plain inquiry-result format carries no EIR/SSP data.
 */
1911 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1913 struct inquiry_data data;
/* First byte is num_rsp; the response array follows it */
1914 struct inquiry_info *info = (void *) (skb->data + 1);
1915 int num_rsp = *((__u8 *) skb->data);
1917 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1922 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1927 for (; num_rsp; num_rsp--, info++) {
1928 bool name_known, ssp;
1930 bacpy(&data.bdaddr, &info->bdaddr);
1931 data.pscan_rep_mode = info->pscan_rep_mode;
1932 data.pscan_period_mode = info->pscan_period_mode;
1933 data.pscan_mode = info->pscan_mode;
1934 memcpy(data.dev_class, info->dev_class, 3);
1935 data.clock_offset = info->clock_offset;
1937 data.ssp_mode = 0x00;
1939 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1940 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1941 info->dev_class, 0, !name_known, ssp, NULL,
1945 hci_dev_unlock(hdev);
/* Connection Complete event handler. Matches the event to a pending
 * hci_conn (falling back from SCO to an eSCO lookup when needed), then on
 * success records the handle, moves ACL links to BT_CONFIG and kicks off
 * remote-feature reads, and on failure reports the connect failure to
 * mgmt. Finally re-checks pending connection attempts.
 * NOTE(review): several guard/branch lines are missing from this
 * extraction (embedded numbering skips), so success/failure branch
 * boundaries are partly invisible here.
 */
1948 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1950 struct hci_ev_conn_complete *ev = (void *) skb->data;
1951 struct hci_conn *conn;
1953 BT_DBG("%s", hdev->name);
1957 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1959 if (ev->link_type != SCO_LINK)
/* Controllers may report SCO_LINK for a connection we requested as eSCO */
1962 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1966 conn->type = SCO_LINK;
1970 conn->handle = __le16_to_cpu(ev->handle);
1972 if (conn->type == ACL_LINK) {
1973 conn->state = BT_CONFIG;
1974 hci_conn_hold(conn);
/* Incoming legacy (non-SSP) connection without a stored key: give
 * the user time to enter a PIN before the disconnect timer fires */
1976 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1977 !hci_find_link_key(hdev, &ev->bdaddr))
1978 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1980 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1982 conn->state = BT_CONNECTED;
1984 hci_conn_add_sysfs(conn);
1986 if (test_bit(HCI_AUTH, &hdev->flags))
1987 conn->link_mode |= HCI_LM_AUTH;
1989 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1990 conn->link_mode |= HCI_LM_ENCRYPT;
1992 /* Get remote features */
1993 if (conn->type == ACL_LINK) {
1994 struct hci_cp_read_remote_features cp;
1995 cp.handle = ev->handle;
1996 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2000 /* Set packet type for incoming connection */
2001 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2002 struct hci_cp_change_conn_ptype cp;
2003 cp.handle = ev->handle;
2004 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2005 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2009 conn->state = BT_CLOSED;
2010 if (conn->type == ACL_LINK)
2011 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2012 conn->dst_type, ev->status);
2015 if (conn->type == ACL_LINK)
2016 hci_sco_setup(conn, ev->status);
2019 hci_proto_connect_cfm(conn, ev->status);
2021 } else if (ev->link_type != ACL_LINK)
2022 hci_proto_connect_cfm(conn, ev->status);
2025 hci_dev_unlock(hdev);
2027 hci_conn_check_pending(hdev);
/* Connection Request event handler. Asks the protocol layers whether to
 * accept (hci_proto_connect_ind) and checks the blacklist. If accepted:
 * refresh the inquiry-cache class-of-device, find or create the hci_conn,
 * then either send Accept Connection (ACL, or SCO when eSCO is not
 * available/deferred), send Accept Synchronous Connection (eSCO with the
 * default 8 kB/s bandwidth parameters below), or defer to the protocol
 * (BT_CONNECT2). If not accepted, reject the request.
 */
2030 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2032 struct hci_ev_conn_request *ev = (void *) skb->data;
2033 int mask = hdev->link_mode;
2036 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2039 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2042 if ((mask & HCI_LM_ACCEPT) &&
2043 !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
2044 /* Connection accepted */
2045 struct inquiry_entry *ie;
2046 struct hci_conn *conn;
2050 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2052 memcpy(ie->data.dev_class, ev->dev_class, 3);
2054 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2057 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
2059 BT_ERR("No memory for new connection");
2060 hci_dev_unlock(hdev);
2065 memcpy(conn->dev_class, ev->dev_class, 3);
2067 hci_dev_unlock(hdev);
2069 if (ev->link_type == ACL_LINK ||
2070 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2071 struct hci_cp_accept_conn_req cp;
2072 conn->state = BT_CONNECT;
2074 bacpy(&cp.bdaddr, &ev->bdaddr);
2076 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2077 cp.role = 0x00; /* Become master */
2079 cp.role = 0x01; /* Remain slave */
2081 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
2083 } else if (!(flags & HCI_PROTO_DEFER)) {
2084 struct hci_cp_accept_sync_conn_req cp;
2085 conn->state = BT_CONNECT;
2087 bacpy(&cp.bdaddr, &ev->bdaddr);
2088 cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* 0x1f40 = 8000 bytes/s TX/RX bandwidth */
2090 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2091 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2092 cp.max_latency = cpu_to_le16(0xffff);
2093 cp.content_format = cpu_to_le16(hdev->voice_setting);
2094 cp.retrans_effort = 0xff;
2096 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
2099 conn->state = BT_CONNECT2;
2100 hci_proto_connect_cfm(conn, 0);
2103 /* Connection rejected */
2104 struct hci_cp_reject_conn_req cp;
2106 bacpy(&cp.bdaddr, &ev->bdaddr);
2107 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2108 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Map an HCI disconnect error code onto the coarser mgmt disconnect
 * reason reported to userspace; anything unrecognised becomes
 * MGMT_DEV_DISCONN_UNKNOWN.
 */
2112 static u8 hci_to_mgmt_reason(u8 err)
2115 case HCI_ERROR_CONNECTION_TIMEOUT:
2116 return MGMT_DEV_DISCONN_TIMEOUT;
2117 case HCI_ERROR_REMOTE_USER_TERM:
2118 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2119 case HCI_ERROR_REMOTE_POWER_OFF:
2120 return MGMT_DEV_DISCONN_REMOTE;
2121 case HCI_ERROR_LOCAL_HOST_TERM:
2122 return MGMT_DEV_DISCONN_LOCAL_HOST;
2124 return MGMT_DEV_DISCONN_UNKNOWN;
/* Disconnection Complete event handler: close the hci_conn, notify mgmt
 * (or report a failed disconnect on error status), drop the stored link
 * key when requested, apply the auto-connect policy for the peer, notify
 * the protocol layers, and re-enable LE advertising if the disconnect
 * freed the LE link (see quoted spec text below).
 */
2128 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2130 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2131 u8 reason = hci_to_mgmt_reason(ev->reason);
2132 struct hci_conn_params *params;
2133 struct hci_conn *conn;
2134 bool mgmt_connected;
2137 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2141 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2146 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2147 conn->dst_type, ev->status);
2151 conn->state = BT_CLOSED;
/* Only report the disconnect if mgmt had been told about the connect */
2153 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2154 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2155 reason, mgmt_connected);
2157 if (conn->type == ACL_LINK && conn->flush_key)
2158 hci_remove_link_key(hdev, &conn->dst);
2160 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2162 switch (params->auto_connect) {
2163 case HCI_AUTO_CONN_LINK_LOSS:
/* Reconnect only when the link was actually lost, not when
 * the disconnect was deliberate */
2164 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2168 case HCI_AUTO_CONN_ALWAYS:
2169 hci_pend_le_conn_add(hdev, &conn->dst, conn->dst_type);
2179 hci_proto_disconn_cfm(conn, ev->reason);
2182 /* Re-enable advertising if necessary, since it might
2183 * have been disabled by the connection. From the
2184 * HCI_LE_Set_Advertise_Enable command description in
2185 * the core specification (v4.0):
2186 * "The Controller shall continue advertising until the Host
2187 * issues an LE_Set_Advertise_Enable command with
2188 * Advertising_Enable set to 0x00 (Advertising is disabled)
2189 * or until a connection is created or until the Advertising
2190 * is timed out due to Directed Advertising."
2192 if (type == LE_LINK)
2193 mgmt_reenable_advertising(hdev);
2196 hci_dev_unlock(hdev);
/* Authentication Complete event handler. Refuses re-auth of legacy
 * (non-SSP) links, records the achieved security level or reports the
 * auth failure to mgmt, then — depending on connection state — starts
 * link encryption, completes the connect, or just confirms the auth
 * result to the callers. Also kicks any encryption request that was
 * pending behind the authentication.
 * NOTE(review): several branch lines are dropped by this extraction.
 */
2199 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2201 struct hci_ev_auth_complete *ev = (void *) skb->data;
2202 struct hci_conn *conn;
2204 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2208 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2213 if (!hci_conn_ssp_enabled(conn) &&
2214 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2215 BT_INFO("re-auth of legacy device is not possible.");
2217 conn->link_mode |= HCI_LM_AUTH;
2218 conn->sec_level = conn->pending_sec_level;
2221 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
2225 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2226 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2228 if (conn->state == BT_CONFIG) {
/* SSP link still configuring: proceed to turn on encryption */
2229 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2230 struct hci_cp_set_conn_encrypt cp;
2231 cp.handle = ev->handle;
2233 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2236 conn->state = BT_CONNECTED;
2237 hci_proto_connect_cfm(conn, ev->status);
2238 hci_conn_drop(conn);
2241 hci_auth_cfm(conn, ev->status);
2243 hci_conn_hold(conn);
2244 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2245 hci_conn_drop(conn);
2248 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2250 struct hci_cp_set_conn_encrypt cp;
2251 cp.handle = ev->handle;
2253 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2256 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2257 hci_encrypt_cfm(conn, ev->status, 0x00);
2262 hci_dev_unlock(hdev);
/* Remote Name Request Complete event handler: feed the resolved (or
 * failed) name into the discovery machinery via hci_check_pending_name(),
 * then start authentication on the connection if it is still needed.
 */
2265 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2267 struct hci_ev_remote_name *ev = (void *) skb->data;
2268 struct hci_conn *conn;
2270 BT_DBG("%s", hdev->name);
2272 hci_conn_check_pending(hdev);
2276 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2278 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2281 if (ev->status == 0)
/* The name field may not be NUL-terminated; bound the length */
2282 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2283 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2285 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2291 if (!hci_outgoing_auth_needed(hdev, conn))
2294 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2295 struct hci_cp_auth_requested cp;
2296 cp.handle = __cpu_to_le16(conn->handle);
2297 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2301 hci_dev_unlock(hdev);
/* Encryption Change event handler: update the connection's link-mode and
 * security level (encryption implies authentication; AES-CCM and FIPS
 * flags are derived from the encryption type and key type), disconnect on
 * encryption failure of a live link, enforce Secure Connections Only
 * policy for links still in BT_CONFIG, and confirm the result to callers.
 */
2304 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2306 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2307 struct hci_conn *conn;
2309 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2313 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2319 /* Encryption implies authentication */
2320 conn->link_mode |= HCI_LM_AUTH;
2321 conn->link_mode |= HCI_LM_ENCRYPT;
2322 conn->sec_level = conn->pending_sec_level;
2324 /* P-256 authentication key implies FIPS */
2325 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2326 conn->link_mode |= HCI_LM_FIPS;
/* encrypt == 0x02 means BR/EDR AES-CCM; LE links always use AES-CCM */
2328 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2329 conn->type == LE_LINK)
2330 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2332 conn->link_mode &= ~HCI_LM_ENCRYPT;
2333 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2337 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2339 if (ev->status && conn->state == BT_CONNECTED) {
2340 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2341 hci_conn_drop(conn);
2345 if (conn->state == BT_CONFIG) {
2347 conn->state = BT_CONNECTED;
2349 /* In Secure Connections Only mode, do not allow any
2350 * connections that are not encrypted with AES-CCM
2351 * using a P-256 authenticated combination key.
2353 if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
2354 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2355 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2356 hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2357 hci_conn_drop(conn);
2361 hci_proto_connect_cfm(conn, ev->status);
2362 hci_conn_drop(conn);
2364 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2367 hci_dev_unlock(hdev);
/* Change Connection Link Key Complete event handler: mark the link
 * secure, clear the pending-auth flag and confirm the key change to the
 * upper layers.
 */
2370 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2371 struct sk_buff *skb)
2373 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2374 struct hci_conn *conn;
2376 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2380 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2383 conn->link_mode |= HCI_LM_SECURE;
2385 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2387 hci_key_change_cfm(conn, ev->status);
2390 hci_dev_unlock(hdev);
/* Read Remote Features Complete event handler: store the remote feature
 * page 0, then continue connection setup — read extended features when
 * both sides support SSP, otherwise request the remote name (or notify
 * mgmt directly), and complete the connect when no authentication is
 * pending.
 */
2393 static void hci_remote_features_evt(struct hci_dev *hdev,
2394 struct sk_buff *skb)
2396 struct hci_ev_remote_features *ev = (void *) skb->data;
2397 struct hci_conn *conn;
2399 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2403 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2408 memcpy(conn->features[0], ev->features, 8);
2410 if (conn->state != BT_CONFIG)
2413 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2414 struct hci_cp_read_remote_ext_features cp;
2415 cp.handle = ev->handle;
2417 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2422 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2423 struct hci_cp_remote_name_req cp;
2424 memset(&cp, 0, sizeof(cp));
2425 bacpy(&cp.bdaddr, &conn->dst);
2426 cp.pscan_rep_mode = 0x02;
2427 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2428 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2429 mgmt_device_connected(hdev, &conn->dst, conn->type,
2430 conn->dst_type, 0, NULL, 0,
2433 if (!hci_outgoing_auth_needed(hdev, conn)) {
2434 conn->state = BT_CONNECTED;
2435 hci_proto_connect_cfm(conn, ev->status);
2436 hci_conn_drop(conn);
2440 hci_dev_unlock(hdev);
/* Command Complete event dispatcher: strip the event header, dispatch to
 * the per-opcode hci_cc_* handler, reset the command timer (except for
 * HCI_OP_NOP), complete any matching queued HCI request, and restart the
 * command queue when the controller says it can accept more commands.
 */
2443 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2445 struct hci_ev_cmd_complete *ev = (void *) skb->data;
/* First byte of the return parameters is the command status */
2446 u8 status = skb->data[sizeof(*ev)];
2449 skb_pull(skb, sizeof(*ev));
2451 opcode = __le16_to_cpu(ev->opcode);
2454 case HCI_OP_INQUIRY_CANCEL:
2455 hci_cc_inquiry_cancel(hdev, skb);
2458 case HCI_OP_PERIODIC_INQ:
2459 hci_cc_periodic_inq(hdev, skb);
2462 case HCI_OP_EXIT_PERIODIC_INQ:
2463 hci_cc_exit_periodic_inq(hdev, skb);
2466 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2467 hci_cc_remote_name_req_cancel(hdev, skb);
2470 case HCI_OP_ROLE_DISCOVERY:
2471 hci_cc_role_discovery(hdev, skb);
2474 case HCI_OP_READ_LINK_POLICY:
2475 hci_cc_read_link_policy(hdev, skb);
2478 case HCI_OP_WRITE_LINK_POLICY:
2479 hci_cc_write_link_policy(hdev, skb);
2482 case HCI_OP_READ_DEF_LINK_POLICY:
2483 hci_cc_read_def_link_policy(hdev, skb);
2486 case HCI_OP_WRITE_DEF_LINK_POLICY:
2487 hci_cc_write_def_link_policy(hdev, skb);
2491 hci_cc_reset(hdev, skb);
2494 case HCI_OP_WRITE_LOCAL_NAME:
2495 hci_cc_write_local_name(hdev, skb);
2498 case HCI_OP_READ_LOCAL_NAME:
2499 hci_cc_read_local_name(hdev, skb);
2502 case HCI_OP_WRITE_AUTH_ENABLE:
2503 hci_cc_write_auth_enable(hdev, skb);
2506 case HCI_OP_WRITE_ENCRYPT_MODE:
2507 hci_cc_write_encrypt_mode(hdev, skb);
2510 case HCI_OP_WRITE_SCAN_ENABLE:
2511 hci_cc_write_scan_enable(hdev, skb);
2514 case HCI_OP_READ_CLASS_OF_DEV:
2515 hci_cc_read_class_of_dev(hdev, skb);
2518 case HCI_OP_WRITE_CLASS_OF_DEV:
2519 hci_cc_write_class_of_dev(hdev, skb);
2522 case HCI_OP_READ_VOICE_SETTING:
2523 hci_cc_read_voice_setting(hdev, skb);
2526 case HCI_OP_WRITE_VOICE_SETTING:
2527 hci_cc_write_voice_setting(hdev, skb);
2530 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2531 hci_cc_read_num_supported_iac(hdev, skb);
2534 case HCI_OP_WRITE_SSP_MODE:
2535 hci_cc_write_ssp_mode(hdev, skb);
2538 case HCI_OP_WRITE_SC_SUPPORT:
2539 hci_cc_write_sc_support(hdev, skb);
2542 case HCI_OP_READ_LOCAL_VERSION:
2543 hci_cc_read_local_version(hdev, skb);
2546 case HCI_OP_READ_LOCAL_COMMANDS:
2547 hci_cc_read_local_commands(hdev, skb);
2550 case HCI_OP_READ_LOCAL_FEATURES:
2551 hci_cc_read_local_features(hdev, skb);
2554 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2555 hci_cc_read_local_ext_features(hdev, skb);
2558 case HCI_OP_READ_BUFFER_SIZE:
2559 hci_cc_read_buffer_size(hdev, skb);
2562 case HCI_OP_READ_BD_ADDR:
2563 hci_cc_read_bd_addr(hdev, skb);
2566 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2567 hci_cc_read_page_scan_activity(hdev, skb);
2570 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2571 hci_cc_write_page_scan_activity(hdev, skb);
2574 case HCI_OP_READ_PAGE_SCAN_TYPE:
2575 hci_cc_read_page_scan_type(hdev, skb);
2578 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2579 hci_cc_write_page_scan_type(hdev, skb);
2582 case HCI_OP_READ_DATA_BLOCK_SIZE:
2583 hci_cc_read_data_block_size(hdev, skb);
2586 case HCI_OP_READ_FLOW_CONTROL_MODE:
2587 hci_cc_read_flow_control_mode(hdev, skb);
2590 case HCI_OP_READ_LOCAL_AMP_INFO:
2591 hci_cc_read_local_amp_info(hdev, skb);
2594 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2595 hci_cc_read_local_amp_assoc(hdev, skb);
2598 case HCI_OP_READ_INQ_RSP_TX_POWER:
2599 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2602 case HCI_OP_PIN_CODE_REPLY:
2603 hci_cc_pin_code_reply(hdev, skb);
2606 case HCI_OP_PIN_CODE_NEG_REPLY:
2607 hci_cc_pin_code_neg_reply(hdev, skb);
2610 case HCI_OP_READ_LOCAL_OOB_DATA:
2611 hci_cc_read_local_oob_data(hdev, skb);
2614 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
2615 hci_cc_read_local_oob_ext_data(hdev, skb);
2618 case HCI_OP_LE_READ_BUFFER_SIZE:
2619 hci_cc_le_read_buffer_size(hdev, skb);
2622 case HCI_OP_LE_READ_LOCAL_FEATURES:
2623 hci_cc_le_read_local_features(hdev, skb);
2626 case HCI_OP_LE_READ_ADV_TX_POWER:
2627 hci_cc_le_read_adv_tx_power(hdev, skb);
2630 case HCI_OP_USER_CONFIRM_REPLY:
2631 hci_cc_user_confirm_reply(hdev, skb);
2634 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2635 hci_cc_user_confirm_neg_reply(hdev, skb);
2638 case HCI_OP_USER_PASSKEY_REPLY:
2639 hci_cc_user_passkey_reply(hdev, skb);
2642 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2643 hci_cc_user_passkey_neg_reply(hdev, skb);
2646 case HCI_OP_LE_SET_RANDOM_ADDR:
2647 hci_cc_le_set_random_addr(hdev, skb);
2650 case HCI_OP_LE_SET_ADV_ENABLE:
2651 hci_cc_le_set_adv_enable(hdev, skb);
2654 case HCI_OP_LE_SET_SCAN_PARAM:
2655 hci_cc_le_set_scan_param(hdev, skb);
2658 case HCI_OP_LE_SET_SCAN_ENABLE:
2659 hci_cc_le_set_scan_enable(hdev, skb);
2662 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2663 hci_cc_le_read_white_list_size(hdev, skb);
2666 case HCI_OP_LE_CLEAR_WHITE_LIST:
2667 hci_cc_le_clear_white_list(hdev, skb);
2670 case HCI_OP_LE_ADD_TO_WHITE_LIST:
2671 hci_cc_le_add_to_white_list(hdev, skb);
2674 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
2675 hci_cc_le_del_from_white_list(hdev, skb);
2678 case HCI_OP_LE_READ_SUPPORTED_STATES:
2679 hci_cc_le_read_supported_states(hdev, skb);
2682 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2683 hci_cc_write_le_host_supported(hdev, skb);
2686 case HCI_OP_LE_SET_ADV_PARAM:
2687 hci_cc_set_adv_param(hdev, skb);
2690 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2691 hci_cc_write_remote_amp_assoc(hdev, skb);
2694 case HCI_OP_READ_RSSI:
2695 hci_cc_read_rssi(hdev, skb);
2698 case HCI_OP_READ_TX_POWER:
2699 hci_cc_read_tx_power(hdev, skb);
2703 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
/* Real commands reset the watchdog; NOP completes carry no command */
2707 if (opcode != HCI_OP_NOP)
2708 del_timer(&hdev->cmd_timer);
2710 hci_req_cmd_complete(hdev, opcode, status);
/* Controller can take more commands (unless a reset is in flight) */
2712 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2713 atomic_set(&hdev->cmd_cnt, 1);
2714 if (!skb_queue_empty(&hdev->cmd_q))
2715 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Command Status event dispatcher: dispatch to the per-opcode hci_cs_*
 * handler, reset the command timer (except HCI_OP_NOP), complete queued
 * requests that are not waiting on a further event, and restart the
 * command queue when the controller can accept more commands.
 */
2719 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2721 struct hci_ev_cmd_status *ev = (void *) skb->data;
2724 skb_pull(skb, sizeof(*ev));
2726 opcode = __le16_to_cpu(ev->opcode);
2729 case HCI_OP_INQUIRY:
2730 hci_cs_inquiry(hdev, ev->status);
2733 case HCI_OP_CREATE_CONN:
2734 hci_cs_create_conn(hdev, ev->status);
2737 case HCI_OP_ADD_SCO:
2738 hci_cs_add_sco(hdev, ev->status);
2741 case HCI_OP_AUTH_REQUESTED:
2742 hci_cs_auth_requested(hdev, ev->status);
2745 case HCI_OP_SET_CONN_ENCRYPT:
2746 hci_cs_set_conn_encrypt(hdev, ev->status);
2749 case HCI_OP_REMOTE_NAME_REQ:
2750 hci_cs_remote_name_req(hdev, ev->status);
2753 case HCI_OP_READ_REMOTE_FEATURES:
2754 hci_cs_read_remote_features(hdev, ev->status);
2757 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2758 hci_cs_read_remote_ext_features(hdev, ev->status);
2761 case HCI_OP_SETUP_SYNC_CONN:
2762 hci_cs_setup_sync_conn(hdev, ev->status);
2765 case HCI_OP_SNIFF_MODE:
2766 hci_cs_sniff_mode(hdev, ev->status);
2769 case HCI_OP_EXIT_SNIFF_MODE:
2770 hci_cs_exit_sniff_mode(hdev, ev->status);
2773 case HCI_OP_DISCONNECT:
2774 hci_cs_disconnect(hdev, ev->status);
2777 case HCI_OP_CREATE_PHY_LINK:
2778 hci_cs_create_phylink(hdev, ev->status);
2781 case HCI_OP_ACCEPT_PHY_LINK:
2782 hci_cs_accept_phylink(hdev, ev->status);
2785 case HCI_OP_LE_CREATE_CONN:
2786 hci_cs_le_create_conn(hdev, ev->status);
2789 case HCI_OP_LE_START_ENC:
2790 hci_cs_le_start_enc(hdev, ev->status);
2794 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2798 if (opcode != HCI_OP_NOP)
2799 del_timer(&hdev->cmd_timer);
/* Only complete the request here if it isn't waiting for another event */
2802 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
2803 hci_req_cmd_complete(hdev, opcode, ev->status);
2805 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2806 atomic_set(&hdev->cmd_cnt, 1);
2807 if (!skb_queue_empty(&hdev->cmd_q))
2808 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Role Change event handler: update the connection's master/slave
 * link-mode bit, clear the pending role-switch flag and confirm the
 * switch to the upper layers.
 */
2812 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2814 struct hci_ev_role_change *ev = (void *) skb->data;
2815 struct hci_conn *conn;
2817 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2821 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2825 conn->link_mode &= ~HCI_LM_MASTER;
2827 conn->link_mode |= HCI_LM_MASTER;
2830 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2832 hci_role_switch_cfm(conn, ev->status, ev->role);
2835 hci_dev_unlock(hdev);
/* Number of Completed Packets event handler (packet-based flow control
 * only). Validates the event length against num_hndl, then for each
 * handle returns the completed-packet credits to the per-type quota
 * (ACL, LE — falling back to the ACL pool when the controller has no
 * separate LE buffers — or SCO), clamped to the controller limits, and
 * finally reschedules TX.
 */
2838 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2840 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2843 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2844 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Reject truncated events before touching the handle array */
2848 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2849 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2850 BT_DBG("%s bad parameters", hdev->name);
2854 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2856 for (i = 0; i < ev->num_hndl; i++) {
2857 struct hci_comp_pkts_info *info = &ev->handles[i];
2858 struct hci_conn *conn;
2859 __u16 handle, count;
2861 handle = __le16_to_cpu(info->handle);
2862 count = __le16_to_cpu(info->count);
2864 conn = hci_conn_hash_lookup_handle(hdev, handle);
2868 conn->sent -= count;
2870 switch (conn->type) {
2872 hdev->acl_cnt += count;
2873 if (hdev->acl_cnt > hdev->acl_pkts)
2874 hdev->acl_cnt = hdev->acl_pkts;
/* le_pkts == 0 means LE shares the ACL buffer pool */
2878 if (hdev->le_pkts) {
2879 hdev->le_cnt += count;
2880 if (hdev->le_cnt > hdev->le_pkts)
2881 hdev->le_cnt = hdev->le_pkts;
2883 hdev->acl_cnt += count;
2884 if (hdev->acl_cnt > hdev->acl_pkts)
2885 hdev->acl_cnt = hdev->acl_pkts;
2890 hdev->sco_cnt += count;
2891 if (hdev->sco_cnt > hdev->sco_pkts)
2892 hdev->sco_cnt = hdev->sco_pkts;
2896 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2901 queue_work(hdev->workqueue, &hdev->tx_work);
/* Resolve a handle to a hci_conn according to device type: BR/EDR
 * controllers look the handle up directly, AMP controllers look up the
 * hci_chan first. Unknown device types are logged as an error.
 */
2904 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2907 struct hci_chan *chan;
2909 switch (hdev->dev_type) {
2911 return hci_conn_hash_lookup_handle(hdev, handle);
2913 chan = hci_chan_lookup_handle(hdev, handle);
2918 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
/* Number of Completed Data Blocks event handler (block-based flow
 * control, used by AMP). Validates the event length, then returns the
 * completed block credits to the shared block pool (clamped to
 * num_blocks) and reschedules TX.
 */
2925 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2927 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2930 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2931 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2935 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2936 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2937 BT_DBG("%s bad parameters", hdev->name);
2941 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2944 for (i = 0; i < ev->num_hndl; i++) {
2945 struct hci_comp_blocks_info *info = &ev->handles[i];
2946 struct hci_conn *conn = NULL;
2947 __u16 handle, block_count;
2949 handle = __le16_to_cpu(info->handle);
2950 block_count = __le16_to_cpu(info->blocks);
2952 conn = __hci_conn_lookup_handle(hdev, handle);
2956 conn->sent -= block_count;
2958 switch (conn->type) {
2961 hdev->block_cnt += block_count;
2962 if (hdev->block_cnt > hdev->num_blocks)
2963 hdev->block_cnt = hdev->num_blocks;
2967 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2972 queue_work(hdev->workqueue, &hdev->tx_work);
/* Mode Change event handler: record the new mode, update the power-save
 * flag for mode changes we did not initiate ourselves, and run any
 * deferred SCO setup.
 */
2975 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2977 struct hci_ev_mode_change *ev = (void *) skb->data;
2978 struct hci_conn *conn;
2980 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2984 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2986 conn->mode = ev->mode;
/* Remote-initiated mode change: track power-save state ourselves */
2988 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2990 if (conn->mode == HCI_CM_ACTIVE)
2991 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2993 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2996 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2997 hci_sco_setup(conn, ev->status);
3000 hci_dev_unlock(hdev);
/* Handle the PIN Code Request event. If the device is not pairable the
 * request is rejected with a negative reply; otherwise, when mgmt is in
 * use, the request is forwarded to user space (with a "secure PIN"
 * hint for high-security pairings).
 * NOTE(review): elided listing — lock call, NULL check and braces are
 * missing from this excerpt. */
3003 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3005 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3006 struct hci_conn *conn;
3008 BT_DBG("%s", hdev->name);
3012 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Re-pairing on an existing link: extend the disconnect timeout so the
 * link survives the user PIN entry (hold+drop re-arms the timer). */
3016 if (conn->state == BT_CONNECTED) {
3017 hci_conn_hold(conn);
3018 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3019 hci_conn_drop(conn);
3022 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
3023 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3024 sizeof(ev->bdaddr), &ev->bdaddr);
3025 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
/* presumably sets a local 'secure' flag here — elided; TODO confirm */
3028 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3033 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3037 hci_dev_unlock(hdev);
/* Handle the Link Key Request event. Looks up a stored link key for the
 * peer and replies with it, unless policy forbids using it: debug keys
 * are ignored (without HCI_DEBUG_KEYS), unauthenticated keys are
 * rejected when MITM protection is required, and short-PIN combination
 * keys are rejected for high-security requests. On any rejection path a
 * negative reply is sent (line 3100).
 * NOTE(review): elided listing — goto labels, NULL checks and braces
 * are missing from this excerpt. */
3040 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3042 struct hci_ev_link_key_req *ev = (void *) skb->data;
3043 struct hci_cp_link_key_reply cp;
3044 struct hci_conn *conn;
3045 struct link_key *key;
3047 BT_DBG("%s", hdev->name);
/* Without mgmt there is no key store to consult. */
3049 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3054 key = hci_find_link_key(hdev, &ev->bdaddr);
3056 BT_DBG("%s link key not found for %pMR", hdev->name,
3061 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3064 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
3065 key->type == HCI_LK_DEBUG_COMBINATION) {
3066 BT_DBG("%s ignoring debug key", hdev->name);
3070 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* An unauthenticated key must not satisfy an auth_type that requests
 * MITM protection (low bit of auth_type). */
3072 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3073 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3074 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3075 BT_DBG("%s ignoring unauthenticated key", hdev->name);
3079 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3080 conn->pending_sec_level == BT_SECURITY_HIGH) {
3081 BT_DBG("%s ignoring key unauthenticated for high security",
3086 conn->key_type = key->type;
3087 conn->pin_length = key->pin_len;
3090 bacpy(&cp.bdaddr, &ev->bdaddr);
3091 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3093 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3095 hci_dev_unlock(hdev);
/* Rejection path (reached via elided gotos): tell the controller we
 * have no usable key. */
3100 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3101 hci_dev_unlock(hdev);
/* Handle the Link Key Notification event: records the newly created
 * link key on the connection and, when mgmt is enabled, persists it in
 * the key store.
 * NOTE(review): elided listing — lock call, NULL check, pin_len
 * declaration and braces are missing from this excerpt. */
3104 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3106 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3107 struct hci_conn *conn;
3110 BT_DBG("%s", hdev->name);
3114 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3116 hci_conn_hold(conn);
3117 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3118 pin_len = conn->pin_length;
/* A "changed combination" notification keeps the previous key type;
 * any other type replaces it. */
3120 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
3121 conn->key_type = ev->key_type;
3123 hci_conn_drop(conn);
3126 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3127 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
3128 ev->key_type, pin_len);
3130 hci_dev_unlock(hdev);
/* Handle the Read Clock Offset Complete event: on success, refresh the
 * cached clock offset (and timestamp) in the inquiry cache entry for
 * the peer, which speeds up future page attempts.
 * NOTE(review): elided listing — lock call and braces are missing. */
3133 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3135 struct hci_ev_clock_offset *ev = (void *) skb->data;
3136 struct hci_conn *conn;
3138 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3142 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3143 if (conn && !ev->status) {
3144 struct inquiry_entry *ie;
3146 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3148 ie->data.clock_offset = ev->clock_offset;
3149 ie->timestamp = jiffies;
3153 hci_dev_unlock(hdev);
/* Handle the Connection Packet Type Changed event: on success, store
 * the negotiated packet type mask on the connection.
 * NOTE(review): elided listing — lock call and braces are missing. */
3156 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3158 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3159 struct hci_conn *conn;
3161 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3165 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3166 if (conn && !ev->status)
3167 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3169 hci_dev_unlock(hdev);
/* Handle the Page Scan Repetition Mode Change event: refresh the
 * cached page-scan repetition mode (and timestamp) in the inquiry
 * cache entry for the peer.
 * NOTE(review): elided listing — lock call and braces are missing. */
3172 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3174 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3175 struct inquiry_entry *ie;
3177 BT_DBG("%s", hdev->name);
3181 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3183 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3184 ie->timestamp = jiffies;
3187 hci_dev_unlock(hdev);
/* Handle the Inquiry Result With RSSI event. The event wire format has
 * two variants (with and without the pscan_mode byte); the payload size
 * per response disambiguates them. Each response updates the inquiry
 * cache and is forwarded to mgmt as a device-found event. Results are
 * ignored during periodic inquiry.
 * NOTE(review): elided listing — num_rsp sanity checks, lock call and
 * the hci_inquiry_cache_update() argument tails are missing. */
3190 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3191 struct sk_buff *skb)
3193 struct inquiry_data data;
3194 int num_rsp = *((__u8 *) skb->data);
3195 bool name_known, ssp;
3197 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3202 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
/* Per-response size reveals which struct layout the controller used. */
3207 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3208 struct inquiry_info_with_rssi_and_pscan_mode *info;
3209 info = (void *) (skb->data + 1);
3211 for (; num_rsp; num_rsp--, info++) {
3212 bacpy(&data.bdaddr, &info->bdaddr);
3213 data.pscan_rep_mode = info->pscan_rep_mode;
3214 data.pscan_period_mode = info->pscan_period_mode;
3215 data.pscan_mode = info->pscan_mode;
3216 memcpy(data.dev_class, info->dev_class, 3);
3217 data.clock_offset = info->clock_offset;
3218 data.rssi = info->rssi;
3219 data.ssp_mode = 0x00;
3221 name_known = hci_inquiry_cache_update(hdev, &data,
3223 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3224 info->dev_class, info->rssi,
3225 !name_known, ssp, NULL, 0, NULL, 0);
/* Variant without the pscan_mode byte — field defaults to 0. */
3228 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3230 for (; num_rsp; num_rsp--, info++) {
3231 bacpy(&data.bdaddr, &info->bdaddr);
3232 data.pscan_rep_mode = info->pscan_rep_mode;
3233 data.pscan_period_mode = info->pscan_period_mode;
3234 data.pscan_mode = 0x00;
3235 memcpy(data.dev_class, info->dev_class, 3);
3236 data.clock_offset = info->clock_offset;
3237 data.rssi = info->rssi;
3238 data.ssp_mode = 0x00;
3239 name_known = hci_inquiry_cache_update(hdev, &data,
3241 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3242 info->dev_class, info->rssi,
3243 !name_known, ssp, NULL, 0, NULL, 0);
3247 hci_dev_unlock(hdev);
/* Handle the Read Remote Extended Features Complete event. Stores the
 * feature page, and for page 1 derives the remote host's SSP / Secure
 * Connections support flags. While the connection is still in BT_CONFIG
 * it continues connection setup: request the remote name (or notify
 * mgmt of the connection) and, if no outgoing authentication is needed,
 * move the link to BT_CONNECTED.
 * NOTE(review): elided listing — lock call, NULL/goto paths, an else
 * branch around line 3282 and braces are missing from this excerpt. */
3250 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3251 struct sk_buff *skb)
3253 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3254 struct hci_conn *conn;
3256 BT_DBG("%s", hdev->name);
3260 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3264 if (ev->page < HCI_MAX_PAGES)
3265 memcpy(conn->features[ev->page], ev->features, 8);
/* Page 1 carries the remote HOST's feature bits (SSP/SC support). */
3267 if (!ev->status && ev->page == 0x01) {
3268 struct inquiry_entry *ie;
3270 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3272 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3274 if (ev->features[0] & LMP_HOST_SSP) {
3275 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3277 /* It is mandatory by the Bluetooth specification that
3278 * Extended Inquiry Results are only used when Secure
3279 * Simple Pairing is enabled, but some devices violate
3282 * To make these devices work, the internal SSP
3283 * enabled flag needs to be cleared if the remote host
3284 * features do not indicate SSP support */
3285 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3288 if (ev->features[0] & LMP_HOST_SC)
3289 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
3292 if (conn->state != BT_CONFIG)
3295 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3296 struct hci_cp_remote_name_req cp;
3297 memset(&cp, 0, sizeof(cp));
3298 bacpy(&cp.bdaddr, &conn->dst);
3299 cp.pscan_rep_mode = 0x02;
3300 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3301 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3302 mgmt_device_connected(hdev, &conn->dst, conn->type,
3303 conn->dst_type, 0, NULL, 0,
3306 if (!hci_outgoing_auth_needed(hdev, conn)) {
3307 conn->state = BT_CONNECTED;
3308 hci_proto_connect_cfm(conn, ev->status);
3309 hci_conn_drop(conn);
3313 hci_dev_unlock(hdev);
/* Handle the Synchronous Connection Complete event (SCO/eSCO). On
 * success the handle is recorded and the link enters BT_CONNECTED; for
 * a known set of eSCO-rejection error codes a retry with a fallback
 * packet-type mask is attempted via hci_setup_sync(); any other failure
 * closes the connection.
 * NOTE(review): elided listing — lock call, lookup fallback braces,
 * retry-loop control (attempt counters/gotos) and the default case of
 * the status switch are missing from this excerpt. */
3316 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3317 struct sk_buff *skb)
3319 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3320 struct hci_conn *conn;
3322 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3326 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
/* Controller may report SCO for a connection we tracked as eSCO —
 * retry the lookup with ESCO_LINK and downgrade the type. */
3328 if (ev->link_type == ESCO_LINK)
3331 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3335 conn->type = SCO_LINK;
3338 switch (ev->status) {
3340 conn->handle = __le16_to_cpu(ev->handle);
3341 conn->state = BT_CONNECTED;
3343 hci_conn_add_sysfs(conn);
3346 case 0x0d: /* Connection Rejected due to Limited Resources */
3347 case 0x11: /* Unsupported Feature or Parameter Value */
3348 case 0x1c: /* SCO interval rejected */
3349 case 0x1a: /* Unsupported Remote Feature */
3350 case 0x1f: /* Unspecified error */
3351 case 0x20: /* Unsupported LMP Parameter value */
/* Retry with a restricted packet-type mask before giving up. */
3353 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3354 (hdev->esco_type & EDR_ESCO_MASK);
3355 if (hci_setup_sync(conn, conn->link->handle))
3361 conn->state = BT_CLOSED;
3365 hci_proto_connect_cfm(conn, ev->status);
3370 hci_dev_unlock(hdev);
/* Walk EIR (Extended Inquiry Response) TLV-style fields — each field is
 * a length byte followed by that many bytes — to find the length of the
 * meaningful data within an eir_len-sized buffer.
 * NOTE(review): elided listing — the parsed-counter declaration, the
 * zero-length-field terminator check and the returns are missing. */
3373 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3377 while (parsed < eir_len) {
3378 u8 field_len = eir[0];
3383 parsed += field_len + 1;
3384 eir += field_len + 1;
/* Handle the Extended Inquiry Result event. Each response carries EIR
 * data; ssp_mode is forced to 1 since EIR implies SSP. The device name,
 * if present in the EIR, marks the name as known. Results update the
 * inquiry cache and are forwarded to mgmt with the trimmed EIR payload.
 * Ignored during periodic inquiry.
 * NOTE(review): elided listing — num_rsp/size sanity checks, lock call,
 * eir_len declaration and argument tails are missing. */
3390 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3391 struct sk_buff *skb)
3393 struct inquiry_data data;
3394 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3395 int num_rsp = *((__u8 *) skb->data);
3398 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3403 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3408 for (; num_rsp; num_rsp--, info++) {
3409 bool name_known, ssp;
3411 bacpy(&data.bdaddr, &info->bdaddr);
3412 data.pscan_rep_mode = info->pscan_rep_mode;
3413 data.pscan_period_mode = info->pscan_period_mode;
3414 data.pscan_mode = 0x00;
3415 memcpy(data.dev_class, info->dev_class, 3);
3416 data.clock_offset = info->clock_offset;
3417 data.rssi = info->rssi;
/* EIR responses are only sent when SSP is enabled on the remote. */
3418 data.ssp_mode = 0x01;
3420 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3421 name_known = eir_has_data_type(info->data,
3427 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3429 eir_len = eir_get_length(info->data, sizeof(info->data));
3430 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3431 info->dev_class, info->rssi, !name_known,
3432 ssp, info->data, eir_len, NULL, 0);
3435 hci_dev_unlock(hdev);
/* Handle the Encryption Key Refresh Complete event. Promotes the
 * pending security level, clears the encrypt-pending flag, and either
 * tears the link down on failure, finishes BT_CONFIG setup, or (for an
 * already-connected link) re-arms the disconnect timeout.
 * NOTE(review): elided listing — NULL check, goto paths and braces are
 * missing from this excerpt. */
3438 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3439 struct sk_buff *skb)
3441 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3442 struct hci_conn *conn;
3444 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3445 __le16_to_cpu(ev->handle));
3449 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3454 conn->sec_level = conn->pending_sec_level;
3456 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* A failed refresh on a live link is treated as an auth failure. */
3458 if (ev->status && conn->state == BT_CONNECTED) {
3459 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3460 hci_conn_drop(conn);
3464 if (conn->state == BT_CONFIG) {
3466 conn->state = BT_CONNECTED;
3468 hci_proto_connect_cfm(conn, ev->status);
3469 hci_conn_drop(conn);
/* else-branch (elided): report the result to auth waiters and re-arm
 * the disconnect timer. */
3471 hci_auth_cfm(conn, ev->status);
3473 hci_conn_hold(conn);
3474 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3475 hci_conn_drop(conn);
3479 hci_dev_unlock(hdev);
/* Compute the authentication requirements to report in an IO Capability
 * Reply, reconciling the remote's stated requirements with our own
 * auth_type. Bit 0 of the value is the MITM-protection flag.
 * NOTE(review): elided listing — braces and part of a comment around
 * line 3489 are missing from this excerpt. */
3482 static u8 hci_get_auth_req(struct hci_conn *conn)
3484 /* If remote requests no-bonding follow that lead */
3485 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3486 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3487 return conn->remote_auth | (conn->auth_type & 0x01);
3489 /* If both remote and local have enough IO capabilities, require
3492 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3493 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3494 return conn->remote_auth | 0x01;
3496 /* No MITM protection possible so ignore remote requirement */
3497 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Handle the IO Capability Request event (SSP pairing). If pairing is
 * allowed, reply with our IO capability, OOB-data presence and derived
 * authentication requirements; otherwise send a negative reply with
 * HCI_ERROR_PAIRING_NOT_ALLOWED.
 * NOTE(review): elided listing — lock call, NULL/goto paths, the OOB
 * cp.oob_data assignments and braces are missing from this excerpt. */
3500 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3502 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3503 struct hci_conn *conn;
3505 BT_DBG("%s", hdev->name);
3509 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3513 hci_conn_hold(conn);
3515 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* Pairable, or the remote only wants a non-bonded (dedicated) pairing:
 * we can answer with a positive reply. */
3518 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3519 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3520 struct hci_cp_io_capability_reply cp;
3522 bacpy(&cp.bdaddr, &ev->bdaddr);
3523 /* Change the IO capability from KeyboardDisplay
3524 * to DisplayYesNo as it is not supported by BT spec. */
3525 cp.capability = (conn->io_capability == 0x04) ?
3526 HCI_IO_DISPLAY_YESNO : conn->io_capability;
3528 /* If we are initiators, there is no remote information yet */
3529 if (conn->remote_auth == 0xff) {
3530 cp.authentication = conn->auth_type;
3532 /* Request MITM protection if our IO caps allow it
3533 * except for the no-bonding case
3535 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
3536 cp.authentication != HCI_AT_NO_BONDING)
3537 cp.authentication |= 0x01;
/* Responder path (elided else): derive auth req from both sides. */
3539 conn->auth_type = hci_get_auth_req(conn);
3540 cp.authentication = conn->auth_type;
3543 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3544 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3549 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3552 struct hci_cp_io_capability_neg_reply cp;
3554 bacpy(&cp.bdaddr, &ev->bdaddr);
3555 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3557 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3562 hci_dev_unlock(hdev);
/* Handle the IO Capability Response event: record the remote's IO
 * capability and authentication requirements on the connection, and
 * flag the presence of remote OOB data (condition elided).
 * NOTE(review): elided listing — lock call, NULL check and the
 * oob_data condition before line 3581 are missing. */
3565 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3567 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3568 struct hci_conn *conn;
3570 BT_DBG("%s", hdev->name);
3574 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3578 conn->remote_cap = ev->capability;
3579 conn->remote_auth = ev->authentication;
3581 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3584 hci_dev_unlock(hdev);
/* Handle the User Confirmation Request event (SSP numeric comparison).
 * Rejects the request if we need MITM but the remote can't provide it;
 * auto-accepts (possibly after a configurable delay) when neither side
 * requires MITM; otherwise forwards the request to user space via mgmt
 * with the passkey and a confirm hint.
 * NOTE(review): elided listing — lock call, NULL/goto paths and braces
 * are missing from this excerpt. */
3587 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3588 struct sk_buff *skb)
3590 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3591 int loc_mitm, rem_mitm, confirm_hint = 0;
3592 struct hci_conn *conn;
3594 BT_DBG("%s", hdev->name);
3598 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3601 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement is the MITM-protection flag. */
3605 loc_mitm = (conn->auth_type & 0x01);
3606 rem_mitm = (conn->remote_auth & 0x01);
3608 /* If we require MITM but the remote device can't provide that
3609 * (it has NoInputNoOutput) then reject the confirmation request
3611 if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
3612 BT_DBG("Rejecting request: remote device can't provide MITM");
3613 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3614 sizeof(ev->bdaddr), &ev->bdaddr);
3618 /* If no side requires MITM protection; auto-accept */
3619 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
3620 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
3622 /* If we're not the initiators request authorization to
3623 * proceed from user space (mgmt_user_confirm with
3624 * confirm_hint set to 1). */
3625 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3626 BT_DBG("Confirming auto-accept as acceptor");
/* presumably sets confirm_hint = 1 and jumps to the mgmt path here —
 * elided; TODO confirm */
3631 BT_DBG("Auto-accept of user confirmation with %ums delay",
3632 hdev->auto_accept_delay);
3634 if (hdev->auto_accept_delay > 0) {
3635 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3636 queue_delayed_work(conn->hdev->workqueue,
3637 &conn->auto_accept_work, delay);
3641 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3642 sizeof(ev->bdaddr), &ev->bdaddr);
3647 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
3648 le32_to_cpu(ev->passkey), confirm_hint);
3651 hci_dev_unlock(hdev);
/* Handle the User Passkey Request event: simply forward it to user
 * space via mgmt when the mgmt interface is active. */
3654 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3655 struct sk_buff *skb)
3657 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3659 BT_DBG("%s", hdev->name);
3661 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3662 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* Handle the User Passkey Notification event: store the passkey to be
 * displayed, reset the entered-digit counter, and notify user space.
 * NOTE(review): elided listing — the conn NULL check is missing. */
3665 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3666 struct sk_buff *skb)
3668 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3669 struct hci_conn *conn;
3671 BT_DBG("%s", hdev->name);
3673 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3677 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3678 conn->passkey_entered = 0;
3680 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3681 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3682 conn->dst_type, conn->passkey_notify,
3683 conn->passkey_entered);
/* Handle the Keypress Notification event: track how many passkey digits
 * the remote has entered/erased so user space can mirror the progress,
 * then push the updated count via mgmt.
 * NOTE(review): elided listing — the switch header on ev->type, break
 * statements and braces are missing from this excerpt. */
3686 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3688 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3689 struct hci_conn *conn;
3691 BT_DBG("%s", hdev->name);
3693 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3698 case HCI_KEYPRESS_STARTED:
3699 conn->passkey_entered = 0;
3702 case HCI_KEYPRESS_ENTERED:
3703 conn->passkey_entered++;
3706 case HCI_KEYPRESS_ERASED:
3707 conn->passkey_entered--;
3710 case HCI_KEYPRESS_CLEARED:
3711 conn->passkey_entered = 0;
3714 case HCI_KEYPRESS_COMPLETED:
3718 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3719 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3720 conn->dst_type, conn->passkey_notify,
3721 conn->passkey_entered);
/* Handle the Simple Pairing Complete event. On failure, report an auth
 * failure to mgmt — but only when we were NOT the auth initiator, since
 * the initiator will also get an Authentication Complete event and
 * reporting both would duplicate the failure (see comment below).
 * NOTE(review): elided listing — lock call and NULL check missing. */
3724 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3725 struct sk_buff *skb)
3727 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3728 struct hci_conn *conn;
3730 BT_DBG("%s", hdev->name);
3734 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3738 /* To avoid duplicate auth_failed events to user space we check
3739 * the HCI_CONN_AUTH_PEND flag which will be set if we
3740 * initiated the authentication. A traditional auth_complete
3741 * event gets always produced as initiator and is also mapped to
3742 * the mgmt_auth_failed event */
3743 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3744 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3747 hci_conn_drop(conn);
3750 hci_dev_unlock(hdev);
/* Handle the Remote Host Supported Features Notification event: cache
 * the remote host feature page (page 1) on any existing connection and
 * refresh the SSP-mode bit in the inquiry cache entry.
 * NOTE(review): elided listing — lock call and NULL checks missing. */
3753 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3754 struct sk_buff *skb)
3756 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3757 struct inquiry_entry *ie;
3758 struct hci_conn *conn;
3760 BT_DBG("%s", hdev->name);
3764 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3766 memcpy(conn->features[1], ev->features, 8);
3768 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3770 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3772 hci_dev_unlock(hdev);
/* Handle the Remote OOB Data Request event. If we have stored OOB data
 * for the peer, reply with it — using the extended (P-192 + P-256)
 * reply when Secure Connections is enabled, otherwise the legacy P-192
 * reply; with no stored data, send a negative reply.
 * NOTE(review): elided listing — lock call, the `if (data)` branch
 * header, command-length arguments and braces are missing. */
3775 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3776 struct sk_buff *skb)
3778 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3779 struct oob_data *data;
3781 BT_DBG("%s", hdev->name);
3785 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3788 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
/* Secure Connections: send both P-192 and P-256 hash/randomizer. */
3790 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
3791 struct hci_cp_remote_oob_ext_data_reply cp;
3793 bacpy(&cp.bdaddr, &ev->bdaddr);
3794 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
3795 memcpy(cp.randomizer192, data->randomizer192,
3796 sizeof(cp.randomizer192));
3797 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
3798 memcpy(cp.randomizer256, data->randomizer256,
3799 sizeof(cp.randomizer256));
3801 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
/* Legacy SSP: P-192 values only. */
3804 struct hci_cp_remote_oob_data_reply cp;
3806 bacpy(&cp.bdaddr, &ev->bdaddr);
3807 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
3808 memcpy(cp.randomizer, data->randomizer192,
3809 sizeof(cp.randomizer));
3811 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
/* No stored OOB data for this peer. */
3815 struct hci_cp_remote_oob_data_neg_reply cp;
3817 bacpy(&cp.bdaddr, &ev->bdaddr);
3818 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
3823 hci_dev_unlock(hdev);
/* Handle the (AMP) Physical Link Complete event: mark the physical link
 * connected, inherit the peer address from the associated BR/EDR
 * connection, and confirm to the AMP manager.
 * NOTE(review): elided listing — NULL/status guards around lines
 * 3837-3845 and amp_mgr checks are missing from this excerpt. */
3826 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3827 struct sk_buff *skb)
3829 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3830 struct hci_conn *hcon, *bredr_hcon;
3832 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3837 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3839 hci_dev_unlock(hdev);
3845 hci_dev_unlock(hdev);
/* The AMP link piggybacks on an existing BR/EDR L2CAP connection. */
3849 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3851 hcon->state = BT_CONNECTED;
3852 bacpy(&hcon->dst, &bredr_hcon->dst);
3854 hci_conn_hold(hcon);
3855 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3856 hci_conn_drop(hcon);
3858 hci_conn_add_sysfs(hcon);
3860 amp_physical_cfm(bredr_hcon, hcon);
3862 hci_dev_unlock(hdev);
/* Handle the (AMP) Logical Link Complete event: create an hci_chan for
 * the new logical link and, if an L2CAP channel is waiting on the AMP
 * manager, confirm the logical link to L2CAP with the block MTU.
 * NOTE(review): elided listing — NULL checks after lookup/create and
 * closing braces are missing from this excerpt. */
3865 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3867 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
3868 struct hci_conn *hcon;
3869 struct hci_chan *hchan;
3870 struct amp_mgr *mgr;
3872 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
3873 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
3876 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3880 /* Create AMP hchan */
3881 hchan = hci_chan_create(hcon);
3885 hchan->handle = le16_to_cpu(ev->handle);
3887 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
3889 mgr = hcon->amp_mgr;
3890 if (mgr && mgr->bredr_chan) {
3891 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
3893 l2cap_chan_lock(bredr_chan);
/* AMP data path uses block-based MTU rather than the ACL MTU. */
3895 bredr_chan->conn->mtu = hdev->block_mtu;
3896 l2cap_logical_cfm(bredr_chan, hchan, 0);
3897 hci_conn_hold(hcon);
3899 l2cap_chan_unlock(bredr_chan);
/* Handle the (AMP) Disconnection Logical Link Complete event: look up
 * the hci_chan by logical-link handle and destroy it, passing the
 * disconnect reason to the AMP layer.
 * NOTE(review): elided listing — status guard, lock call and NULL
 * check are missing from this excerpt. */
3903 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3904 struct sk_buff *skb)
3906 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3907 struct hci_chan *hchan;
3909 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3910 le16_to_cpu(ev->handle), ev->status);
3917 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3921 amp_destroy_logical_link(hchan, ev->reason);
3924 hci_dev_unlock(hdev);
/* Handle the (AMP) Disconnection Physical Link Complete event: mark the
 * physical-link connection closed.
 * NOTE(review): elided listing — status guard, lock call, NULL check
 * and the connection teardown call after line 3942 are missing. */
3927 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3928 struct sk_buff *skb)
3930 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3931 struct hci_conn *hcon;
3933 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3940 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3942 hcon->state = BT_CLOSED;
3946 hci_dev_unlock(hdev);
/* Handle the LE Connection Complete event. Finds (or creates) the
 * hci_conn, records initiator/responder addresses based on role and the
 * HCI_PRIVACY flag, resolves the peer's identity address via its IRK,
 * and on success marks the link connected, notifies mgmt, and removes
 * the peer from the pending-LE-connections list. On failure the
 * connection is failed via hci_le_conn_failed().
 * NOTE(review): elided listing — lock call, `if (!conn)` guards around
 * the add, role else-branch braces and goto paths are missing. */
3949 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3951 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3952 struct hci_conn *conn;
3953 struct smp_irk *irk;
3955 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
/* Prefer the hci_conn already waiting in BT_CONNECT; create one only
 * for connections we did not initiate through that path. */
3959 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3961 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3963 BT_ERR("No memory for new connection");
3967 conn->dst_type = ev->bdaddr_type;
3969 if (ev->role == LE_CONN_ROLE_MASTER) {
3971 conn->link_mode |= HCI_LM_MASTER;
3974 /* If we didn't have a hci_conn object previously
3975 * but we're in master role this must be something
3976 * initiated using a white list. Since white list based
3977 * connections are not "first class citizens" we don't
3978 * have full tracking of them. Therefore, we go ahead
3979 * with a "best effort" approach of determining the
3980 * initiator address based on the HCI_PRIVACY flag.
3983 conn->resp_addr_type = ev->bdaddr_type;
3984 bacpy(&conn->resp_addr, &ev->bdaddr);
3985 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3986 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
3987 bacpy(&conn->init_addr, &hdev->rpa);
3989 hci_copy_identity_address(hdev,
3991 &conn->init_addr_type);
3995 cancel_delayed_work(&conn->le_conn_timeout);
3999 /* Set the responder (our side) address type based on
4000 * the advertising address type.
4002 conn->resp_addr_type = hdev->adv_addr_type;
4003 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
4004 bacpy(&conn->resp_addr, &hdev->random_addr);
4006 bacpy(&conn->resp_addr, &hdev->bdaddr);
4008 conn->init_addr_type = ev->bdaddr_type;
4009 bacpy(&conn->init_addr, &ev->bdaddr);
4012 /* Lookup the identity address from the stored connection
4013 * address and address type.
4015 * When establishing connections to an identity address, the
4016 * connection procedure will store the resolvable random
4017 * address first. Now if it can be converted back into the
4018 * identity address, start using the identity address from
4021 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4023 bacpy(&conn->dst, &irk->bdaddr);
4024 conn->dst_type = irk->addr_type;
/* Failure path (elided guard): clean up and bail out. */
4028 hci_le_conn_failed(conn, ev->status);
4032 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4033 mgmt_device_connected(hdev, &conn->dst, conn->type,
4034 conn->dst_type, 0, NULL, 0, NULL);
4036 conn->sec_level = BT_SECURITY_LOW;
4037 conn->handle = __le16_to_cpu(ev->handle);
4038 conn->state = BT_CONNECTED;
4040 if (test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
4041 set_bit(HCI_CONN_6LOWPAN, &conn->flags);
4043 hci_conn_add_sysfs(conn);
4045 hci_proto_connect_cfm(conn, ev->status);
/* The peer is connected now; drop it from the pending-connect list. */
4047 hci_pend_le_conn_del(hdev, &conn->dst, conn->dst_type);
4050 hci_dev_unlock(hdev);
4053 /* This function requires the caller holds hdev->lock */
/* Resolve the advertiser's address via IRK if possible and, when the
 * device is on the pending-LE-connections list, initiate an LE
 * connection to it. -EBUSY from hci_connect_le() is tolerated since
 * only one LE connection attempt can be in flight at a time.
 * NOTE(review): elided listing — IS_ERR/return guards, case labels in
 * the error switch and braces are missing from this excerpt. */
4054 static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
4057 struct hci_conn *conn;
4058 struct smp_irk *irk;
4060 /* If this is a resolvable address, we should resolve it and then
4061 * update address and address type variables.
4063 irk = hci_get_irk(hdev, addr, addr_type);
4065 addr = &irk->bdaddr;
4066 addr_type = irk->addr_type;
4069 if (!hci_pend_le_conn_lookup(hdev, addr, addr_type))
4072 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4077 switch (PTR_ERR(conn)) {
4079 /* If hci_connect() returns -EBUSY it means there is already
4080 * an LE connection attempt going on. Since controllers don't
4081 * support more than one connection attempt at the time, we
4082 * don't consider this an error case.
4086 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Process a single LE advertising report. During passive scanning only
 * connection-triggering checks run (no device-found events). During
 * active scanning, ADV_IND/ADV_SCAN_IND reports are held pending so a
 * following SCAN_RSP can be merged into one device-found event; any
 * non-mergeable report flushes the pending one first.
 * NOTE(review): elided listing — return statements after each branch
 * and several closing braces are missing from this excerpt. */
4090 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4091 u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
4093 struct discovery_state *d = &hdev->discovery;
4096 /* Passive scanning shouldn't trigger any device found events */
4097 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4098 if (type == LE_ADV_IND || type == LE_ADV_DIRECT_IND)
4099 check_pending_le_conn(hdev, bdaddr, bdaddr_type);
4103 /* If there's nothing pending either store the data from this
4104 * event or send an immediate device found event if the data
4105 * should not be stored for later.
4107 if (!has_pending_adv_report(hdev)) {
4108 /* If the report will trigger a SCAN_REQ store it for
4111 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4112 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4117 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4118 rssi, 0, 1, data, len, NULL, 0);
4122 /* Check if the pending report is for the same device as the new one */
4123 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4124 bdaddr_type == d->last_adv_addr_type);
4126 /* If the pending data doesn't match this report or this isn't a
4127 * scan response (e.g. we got a duplicate ADV_IND) then force
4128 * sending of the pending data.
4130 if (type != LE_ADV_SCAN_RSP || !match) {
4131 /* Send out whatever is in the cache, but skip duplicates */
4133 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4134 d->last_adv_addr_type, NULL,
4135 d->last_adv_rssi, 0, 1,
4137 d->last_adv_data_len, NULL, 0);
4139 /* If the new report will trigger a SCAN_REQ store it for
4142 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4143 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4148 /* The advertising reports cannot be merged, so clear
4149 * the pending report and send out a device found event.
4151 clear_pending_adv_report(hdev);
4152 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4153 rssi, 0, 1, data, len, NULL, 0);
4157 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4158 * the new event is a SCAN_RSP. We can therefore proceed with
4159 * sending a merged device found event.
4161 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4162 d->last_adv_addr_type, NULL, rssi, 0, 1, data, len,
4163 d->last_adv_data, d->last_adv_data_len);
4164 clear_pending_adv_report(hdev);
/* Handle the LE Advertising Report event: iterate the variable-length
 * report entries (each is the fixed header, `length` bytes of data,
 * then a trailing RSSI byte) and hand each to process_adv_report().
 * NOTE(review): elided listing — lock call and loop braces missing. */
4167 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4169 u8 num_reports = skb->data[0];
4170 void *ptr = &skb->data[1];
4174 while (num_reports--) {
4175 struct hci_ev_le_advertising_info *ev = ptr;
/* RSSI is the single byte immediately after the advertising data. */
4178 rssi = ev->data[ev->length];
4179 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4180 ev->bdaddr_type, rssi, ev->data, ev->length);
4182 ptr += sizeof(*ev) + ev->length + 1;
4185 hci_dev_unlock(hdev);
/* Handle the LE Long Term Key Request event: look up the LTK by
 * EDiv/Rand, reply with it (recording security level and key size on
 * the connection), and discard one-shot slave STKs after use. With no
 * matching key a negative reply is sent (lines 4236-4238).
 * NOTE(review): elided listing — NULL/goto guards, the kfree of the
 * unlinked STK and closing braces are missing from this excerpt. */
4188 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4190 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
4191 struct hci_cp_le_ltk_reply cp;
4192 struct hci_cp_le_ltk_neg_reply neg;
4193 struct hci_conn *conn;
4194 struct smp_ltk *ltk;
4196 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
4200 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4204 ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out);
4208 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
4209 cp.handle = cpu_to_le16(conn->handle);
/* Security level reflects whether the key was authenticated (MITM). */
4211 if (ltk->authenticated)
4212 conn->pending_sec_level = BT_SECURITY_HIGH;
4214 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4216 conn->enc_key_size = ltk->enc_size;
4218 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
4220 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
4221 * temporary key used to encrypt a connection following
4222 * pairing. It is used during the Encrypted Session Setup to
4223 * distribute the keys. Later, security can be re-established
4224 * using a distributed LTK.
4226 if (ltk->type == HCI_SMP_STK_SLAVE) {
4227 list_del(&ltk->list);
4231 hci_dev_unlock(hdev);
/* Not-found path (reached via elided label): negative LTK reply. */
4236 neg.handle = ev->handle;
4237 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
4238 hci_dev_unlock(hdev);
/* Dispatch an LE Meta event to the handler for its subevent code after
 * stripping the meta-event header from the skb.
 * NOTE(review): elided listing — break statements and the switch's
 * closing brace are missing from this excerpt. */
4241 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4243 struct hci_ev_le_meta *le_ev = (void *) skb->data;
4245 skb_pull(skb, sizeof(*le_ev));
4247 switch (le_ev->subevent) {
4248 case HCI_EV_LE_CONN_COMPLETE:
4249 hci_le_conn_complete_evt(hdev, skb);
4252 case HCI_EV_LE_ADVERTISING_REPORT:
4253 hci_le_adv_report_evt(hdev, skb);
4256 case HCI_EV_LE_LTK_REQ:
4257 hci_le_ltk_request_evt(hdev, skb);
/* Handle the (AMP) Channel Selected event: look up the physical-link
 * connection and kick off reading the final local AMP assoc data.
 * NOTE(review): elided listing — the hcon NULL check is missing. */
4265 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4267 struct hci_ev_channel_selected *ev = (void *) skb->data;
4268 struct hci_conn *hcon;
4270 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4272 skb_pull(skb, sizeof(*ev));
4274 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4278 amp_read_loc_assoc_final_data(hdev, hcon);
4281 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
4283 struct hci_event_hdr *hdr = (void *) skb->data;
4284 __u8 event = hdr->evt;
4288 /* Received events are (currently) only needed when a request is
4289 * ongoing so avoid unnecessary memory allocation.
4291 if (hdev->req_status == HCI_REQ_PEND) {
4292 kfree_skb(hdev->recv_evt);
4293 hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
4296 hci_dev_unlock(hdev);
4298 skb_pull(skb, HCI_EVENT_HDR_SIZE);
4300 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
4301 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
4302 u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
4304 hci_req_cmd_complete(hdev, opcode, 0);
4308 case HCI_EV_INQUIRY_COMPLETE:
4309 hci_inquiry_complete_evt(hdev, skb);
4312 case HCI_EV_INQUIRY_RESULT:
4313 hci_inquiry_result_evt(hdev, skb);
4316 case HCI_EV_CONN_COMPLETE:
4317 hci_conn_complete_evt(hdev, skb);
4320 case HCI_EV_CONN_REQUEST:
4321 hci_conn_request_evt(hdev, skb);
4324 case HCI_EV_DISCONN_COMPLETE:
4325 hci_disconn_complete_evt(hdev, skb);
4328 case HCI_EV_AUTH_COMPLETE:
4329 hci_auth_complete_evt(hdev, skb);
4332 case HCI_EV_REMOTE_NAME:
4333 hci_remote_name_evt(hdev, skb);
4336 case HCI_EV_ENCRYPT_CHANGE:
4337 hci_encrypt_change_evt(hdev, skb);
4340 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
4341 hci_change_link_key_complete_evt(hdev, skb);
4344 case HCI_EV_REMOTE_FEATURES:
4345 hci_remote_features_evt(hdev, skb);
4348 case HCI_EV_CMD_COMPLETE:
4349 hci_cmd_complete_evt(hdev, skb);
4352 case HCI_EV_CMD_STATUS:
4353 hci_cmd_status_evt(hdev, skb);
4356 case HCI_EV_ROLE_CHANGE:
4357 hci_role_change_evt(hdev, skb);
4360 case HCI_EV_NUM_COMP_PKTS:
4361 hci_num_comp_pkts_evt(hdev, skb);
4364 case HCI_EV_MODE_CHANGE:
4365 hci_mode_change_evt(hdev, skb);
4368 case HCI_EV_PIN_CODE_REQ:
4369 hci_pin_code_request_evt(hdev, skb);
4372 case HCI_EV_LINK_KEY_REQ:
4373 hci_link_key_request_evt(hdev, skb);
4376 case HCI_EV_LINK_KEY_NOTIFY:
4377 hci_link_key_notify_evt(hdev, skb);
4380 case HCI_EV_CLOCK_OFFSET:
4381 hci_clock_offset_evt(hdev, skb);
4384 case HCI_EV_PKT_TYPE_CHANGE:
4385 hci_pkt_type_change_evt(hdev, skb);
4388 case HCI_EV_PSCAN_REP_MODE:
4389 hci_pscan_rep_mode_evt(hdev, skb);
4392 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
4393 hci_inquiry_result_with_rssi_evt(hdev, skb);
4396 case HCI_EV_REMOTE_EXT_FEATURES:
4397 hci_remote_ext_features_evt(hdev, skb);
4400 case HCI_EV_SYNC_CONN_COMPLETE:
4401 hci_sync_conn_complete_evt(hdev, skb);
4404 case HCI_EV_EXTENDED_INQUIRY_RESULT:
4405 hci_extended_inquiry_result_evt(hdev, skb);
4408 case HCI_EV_KEY_REFRESH_COMPLETE:
4409 hci_key_refresh_complete_evt(hdev, skb);
4412 case HCI_EV_IO_CAPA_REQUEST:
4413 hci_io_capa_request_evt(hdev, skb);
4416 case HCI_EV_IO_CAPA_REPLY:
4417 hci_io_capa_reply_evt(hdev, skb);
4420 case HCI_EV_USER_CONFIRM_REQUEST:
4421 hci_user_confirm_request_evt(hdev, skb);
4424 case HCI_EV_USER_PASSKEY_REQUEST:
4425 hci_user_passkey_request_evt(hdev, skb);
4428 case HCI_EV_USER_PASSKEY_NOTIFY:
4429 hci_user_passkey_notify_evt(hdev, skb);
4432 case HCI_EV_KEYPRESS_NOTIFY:
4433 hci_keypress_notify_evt(hdev, skb);
4436 case HCI_EV_SIMPLE_PAIR_COMPLETE:
4437 hci_simple_pair_complete_evt(hdev, skb);
4440 case HCI_EV_REMOTE_HOST_FEATURES:
4441 hci_remote_host_features_evt(hdev, skb);
4444 case HCI_EV_LE_META:
4445 hci_le_meta_evt(hdev, skb);
4448 case HCI_EV_CHANNEL_SELECTED:
4449 hci_chan_selected_evt(hdev, skb);
4452 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
4453 hci_remote_oob_data_request_evt(hdev, skb);
4456 case HCI_EV_PHY_LINK_COMPLETE:
4457 hci_phy_link_complete_evt(hdev, skb);
4460 case HCI_EV_LOGICAL_LINK_COMPLETE:
4461 hci_loglink_complete_evt(hdev, skb);
4464 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
4465 hci_disconn_loglink_complete_evt(hdev, skb);
4468 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
4469 hci_disconn_phylink_complete_evt(hdev, skb);
4472 case HCI_EV_NUM_COMP_BLOCKS:
4473 hci_num_comp_blocks_evt(hdev, skb);
4477 BT_DBG("%s event 0x%2.2x", hdev->name, event);
4482 hdev->stat.evt_rx++;